query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (sequence, lengths 19–20) | metadata (dict)
---|---|---|---|
Assumes numSteps an int >= 0, numTrials an int > 0, dClass a subclass of Drunk. Simulates numTrials walks of numSteps steps each. Returns a list of the final distances for each trial. | def simWalks(numSteps, numTrials, dClass, name):
Homer = dClass(name)
origin = Location(0, 0)
distances = []
for t in range(numTrials):
f = Field()
f.addDrunk(Homer, origin)
# print(walk(f, Homer, 0))
# print(walk(f, Homer, 1))
# assert False
distances.append(round(walk(f, Homer, numSteps), 1))
return distances | [
"def simWalks(numSteps, numTrials, dClass):\n Homer = dClass(\"\")\n origin = Location(0, 0)\n distances = []\n for t in range(numTrials):\n f = Field()\n f.addDrunk(Homer, origin)\n distances.append(round(walk(f, Homer, numSteps), 1))\n return distances",
"def sim_walks(num_steps, num_trials, dClass):\r\n Homer = dClass()\r\n origin = Location(0, 0)\r\n distances = []\r\n for t in range(num_trials):\r\n f = Field()\r\n f.add_drunk(Homer, origin)\r\n distances.append(round(walk(f, Homer, num_steps), 1)) \r\n # textbook put an intentional bug in line above with num_trials\r\n # instead of num_steps! Always be skepitcal of simulations!\r\n return distances",
"def drunkTest(walkLengths, numTrials, dClass):\n \n for numSteps in walkLengths:\n distances = simWalks(numSteps, numTrials, dClass)\n \n print(dClass.__name__, 'random walk of ', numSteps, 'steps')\n print (' Mean = ', round(sum(distances)/len(distances), 4))\n print (' Max = ', max(distances), ' Min = ', min(distances))",
"def drunk_test(walk_lengths, num_trials, dClass):\r\n for num_steps in walk_lengths:\r\n distances = sim_walks(num_steps, num_trials, dClass)\r\n print(dClass.__name__, 'random walk of', num_steps, 'steps')\r\n print(' Mean =', round(sum(distances)/len(distances), 4))\r\n print(' Max =', max(distances), ' Min =', min(distances))",
"def drunkTest(walkLengths, numTrials, dClass, name):\n\tfor numStep in walkLengths:\n\t\tdistances = simWalks(numStep, numTrials, dClass, name)\n\t\tprint(dClass.__name__, ' random walk of ', numStep, ' steps')\n\t\tprint('Mean =', round(sum(distances)/len(distances), 4))\n\t\tprint('Max =', max(distances), ' Min =', min(distances))",
"def get_step_dist(self, rnn_outputs, condition_dict):",
"def _calc_distance_features_to_class(self, d):\r\n \r\n d = np.asarray(d).reshape(-1,self._num_classes-1)\r\n edge_distances = np.zeros((d.shape[0],self._num_classes))\r\n for j in range(self._num_classes):\r\n edge_distances[:,j] = np.linalg.norm(self._class_normals[j,:]-d, axis=1)\r\n best_classes = np.array(np.argmin(edge_distances,axis=1),dtype=np.int64)\r\n return np.array(self.classes_)[best_classes]",
"def get_distances(name,\n dist_func=movement_towards_hyperplane_normalized,\n num_tasks=5,\n layer=1,\n num_seeds=5,\n runs=None,\n size=None,\n sparsity=None,\n lr_rec=None,\n num_classes_per_task=None,\n lr=None,\n activation_type_hyp=None,\n activation_type='-feedforward',\n mode='across_time',\n pure_ff_sparsity=False):\n distances = []\n\n # iterate over reference tasks\n for start_task in range(num_tasks):\n\n # the last task is usually skipped as a reference task for distance measures\n # this is because we can't compare the distance of the last task to a subsequent task\n if mode in ['across_task_before_learned', 'across_task_first_learned', 'across_time'] and start_task == 4:\n break\n \n # retrieve relevant activations, as well as arrays indicating which tasks and labels (tgt) activations belong to\n _, _, combined_activations, _, _, _, task, tgt, _ = (\n load_data(start_task, mode=mode, layer=layer, runs=runs, size=size, num_seeds=num_seeds, sparsity=sparsity, lr_rec=lr_rec, num_classes_per_task=num_classes_per_task, num_tasks=num_tasks, lr=lr, record_str=activation_type, pure_ff_sparsity=pure_ff_sparsity)\n )\n\n # retrieve activations required to comptue hyperplane for dist_func that require a hyperplane\n if activation_type_hyp:\n _, _, combined_activations_hyp, _, _, _, task, tgt, _ = (\n load_data(start_task, mode=mode, layer=layer, runs=runs, size=size, num_seeds=num_seeds, sparsity=sparsity, lr_rec=lr_rec, num_classes_per_task=num_classes_per_task, num_tasks=num_tasks, lr=lr, record_str=activation_type_hyp, pure_ff_sparsity=pure_ff_sparsity)\n )\n else:\n combined_activations_hyp = combined_activations\n \n # compute distance measures for every random seed\n seed_vals = []\n for seed in range(num_seeds):\n arr = get_inter_task_results_arr(name, dist_func=dist_func, layer=layer, seed=seed,\n combined_activations_hyp=combined_activations_hyp, combined_activations=combined_activations,\n num_tasks=num_tasks,\n start_task=start_task, task=task, tgt=tgt)\n readout_idx = start_task + 1 if mode in ['across_task_before_learned', 'across_task_first_learned'] else num_tasks - 1\n seed_vals.append(arr[start_task,readout_idx])\n \n\n distances.append(seed_vals)\n\n return np.array(distances)",
"def rook_distance(directions,steps):\r\n\r\n if len(directions) != len(steps):\r\n print('Number of directions and number of steps must match.')\r\n return None\r\n longitude = 0\r\n latitude = 0\r\n steps_taken = 0\r\n direction_dict = ['up', 'down', 'left', 'right']\r\n\r\n #iterate through the list of directions\r\n for direction, step in zip(directions,steps):\r\n if direction not in direction_dict:\r\n print ('Exiting...\\nPlease insert the correct directions')\r\n return None\r\n steps_taken += step\r\n if direction == 'up':\r\n longitude += step\r\n elif direction == 'down':\r\n longitude -= step\r\n elif direction == 'left':\r\n latitude -= step\r\n else:\r\n latitude += step\r\n from_starting_point = abs(latitude) + abs(longitude)\r\n return (steps_taken, from_starting_point)",
"def DTW(self):\n\n self.N, d1 = self.referenceTS.shape\n self.M, d2 = self.queryTS.shape\n\n if d1!= d2:\n print(\"Number of features not coherent between reference ({0}) and query ({1})\".format(d1,d2))\n return\n\n self.d = d1 # d = dimensionality/number of features\n\n self.distanceMatrix = pairwise_distances(X = self.referenceTS, Y = self.queryTS, metric = self.dist_measure, n_jobs= self.n_jobs)\n\n self.AccumulatedDistanceComputation(step_pattern = \"symmetric2\")",
"def get_train_distance_to_goal(self, train: Train):\n total_distance = (\n self.sections_mapper.get_distance_between_sections(\n train.current_head_section,\n train.options.finish_section,\n train.is_reversed\n ) - (\n train.relative_position * train.current_head_section.length\n if not train.is_reversed else\n (1 - train.relative_position) * train.current_head_section.length\n ) - train.options.finish_section.length + train.options.length\n )\n return total_distance",
"def get_distances(self):\n return np.sqrt(np.diff(self.x)**2+np.diff(self.y)**2)",
"def set_distances(self):\n\n for metric in tqfunc(self.distance_metrics,desc='Distances'):\n metric_name = metric['metric']\n for group in tqfunc(self.experiment_groups,desc=metric_name):\n group.distance(metric_name)",
"def calc_travelled_distance(path: List[State]) -> float:\n dist = 0\n for i in range(len(path) - 1):\n dist += SearchBaseClass.distance(path[i].position, path[i + 1].position)\n return dist",
"def get_food_distances(self):\n return self.get_distances(self.enemy_food)",
"def _distances(self, negatives):\n # compute distances between positives and negatives\n start_time = time.time()\n logger.info('Step 1: Computing %d distances between %d positive and %d negative points' % (len(self._positives) * len(negatives), len(self._positives), len(negatives)))\n distances = gpu_pairwise_distances(self._positives, negatives, self.distance_function, 0)\n logger.info(\"--- %s seconds ---\" % (time.time() - start_time))\n return distances",
"def getDistance(self):\n self.listOfDistance.append(self.distanceCal(self.listOfatom2cal[0], self.listOfNi[0]))\n i=1\n while i < len(self.listOfNi):\n distance = self.distanceCal(self.listOfatom2cal[i*self.atomRepeat], self.listOfNi[i])\n self.listOfDistance.append(distance)\n i += 1",
"def _positive_distances(self):\n start_time = time.time()\n logger.info(\"Step 5: Computes the distances between positives\")\n distances = gpu_pairwise_distances(self._positives, self._positives, self.distance_function, 0)\n logger.info(\"--- %s seconds ---\" % (time.time() - start_time))\n return distances",
"def nsteps(self):\n return self._nsteps"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
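The simWalks document in the row above depends on Location, Field, Drunk, and walk helpers that are not included in the row. Below is a minimal, self-contained sketch of those helpers, written so that the simWalks shown above runs as-is; the class and method names (Location.move, Field.addDrunk, UsualDrunk.takeStep, walk) follow the conventions visible in the query and negatives but are assumptions, not the original course code.

```python
# Minimal sketch (assumed names) of the helpers that simWalks relies on.
import random

class Location:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def move(self, deltaX, deltaY):
        # return a new Location shifted by (deltaX, deltaY)
        return Location(self.x + deltaX, self.y + deltaY)

    def distFrom(self, other):
        # Euclidean distance between two locations
        return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5

class Drunk:
    def __init__(self, name=None):
        self.name = name

class UsualDrunk(Drunk):
    def takeStep(self):
        # one unit step in a uniformly random compass direction
        return random.choice([(0, 1), (0, -1), (1, 0), (-1, 0)])

class Field:
    def __init__(self):
        self.drunks = {}

    def addDrunk(self, drunk, loc):
        self.drunks[drunk] = loc

    def getLoc(self, drunk):
        return self.drunks[drunk]

    def moveDrunk(self, drunk):
        xDist, yDist = drunk.takeStep()
        self.drunks[drunk] = self.drunks[drunk].move(xDist, yDist)

def walk(f, d, numSteps):
    # distance between the drunk's start and end points after numSteps steps
    start = f.getLoc(d)
    for _ in range(numSteps):
        f.moveDrunk(d)
    return start.distFrom(f.getLoc(d))
```

With these helpers in scope, the document function above can be exercised directly, e.g. simWalks(100, 10, UsualDrunk, 'Homer') returns ten rounded final distances.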
Assumes walkLengths a sequence of ints >= 0, numTrials an int > 0, dClass a subclass of Drunk. For each number of steps in walkLengths, runs simWalks with numTrials walks and prints results. | def drunkTest(walkLengths, numTrials, dClass, name):
for numStep in walkLengths:
distances = simWalks(numStep, numTrials, dClass, name)
print(dClass.__name__, ' random walk of ', numStep, ' steps')
print('Mean =', round(sum(distances)/len(distances), 4))
print('Max =', max(distances), ' Min =', min(distances)) | [
"def drunkTest(walkLengths, numTrials, dClass):\n \n for numSteps in walkLengths:\n distances = simWalks(numSteps, numTrials, dClass)\n \n print(dClass.__name__, 'random walk of ', numSteps, 'steps')\n print (' Mean = ', round(sum(distances)/len(distances), 4))\n print (' Max = ', max(distances), ' Min = ', min(distances))",
"def drunk_test(walk_lengths, num_trials, dClass):\r\n for num_steps in walk_lengths:\r\n distances = sim_walks(num_steps, num_trials, dClass)\r\n print(dClass.__name__, 'random walk of', num_steps, 'steps')\r\n print(' Mean =', round(sum(distances)/len(distances), 4))\r\n print(' Max =', max(distances), ' Min =', min(distances))",
"def simWalks(numSteps, numTrials, dClass):\n Homer = dClass(\"\")\n origin = Location(0, 0)\n distances = []\n for t in range(numTrials):\n f = Field()\n f.addDrunk(Homer, origin)\n distances.append(round(walk(f, Homer, numSteps), 1))\n return distances",
"def simWalks(numSteps, numTrials, dClass, name):\n\tHomer = dClass(name)\n\torigin = Location(0, 0)\n\tdistances = []\n\tfor t in range(numTrials):\n\t\tf = Field()\n\t\tf.addDrunk(Homer, origin)\n\t\t# print(walk(f, Homer, 0))\n\t\t# print(walk(f, Homer, 1))\n\t\t# assert False\n\t\tdistances.append(round(walk(f, Homer, numSteps), 1))\n\treturn distances",
"def sim_walks(num_steps, num_trials, dClass):\r\n Homer = dClass()\r\n origin = Location(0, 0)\r\n distances = []\r\n for t in range(num_trials):\r\n f = Field()\r\n f.add_drunk(Homer, origin)\r\n distances.append(round(walk(f, Homer, num_steps), 1)) \r\n # textbook put an intentional bug in line above with num_trials\r\n # instead of num_steps! Always be skepitcal of simulations!\r\n return distances",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,robot_type):\n time_steps = []\n for i in range(num_trials): #trial count\n #anim = ps6_visualize.RobotVisualization(num_robots,width,height)\n anim = ''\n room = RectangularRoom(width,height)\n robots = createRobots(room,speed,robot_type,num_robots)\n time_steps.append(runRobots(robots,room,0,min_coverage,anim))\n return (num_robots, sum(time_steps) / len(time_steps))",
"def _simulate_walks(self):\n G = self.G\n nodes = list(G.nodes())\n\n self._walks = []\n\n print('Walk iteration:')\n\n for walk_iter in range(self.num_walks):\n\n print(str(walk_iter + 1), '/', str(self.num_walks))\n random.shuffle(nodes)\n\n c = 1\n\n for node in nodes:\n\n if c % 10001 == 0:\n print('Processed %d nodes' % c)\n\n c += 1\n\n self._walks.append(self.node2vec_walk(start_node=node))",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n trial_ticks = []\n # now let's run trials\n for i in range(0,num_trials):\n\n # let's instantiate the room\n room = RectangularRoom(width, height)\n\n # let's instantiate our robots\n robots = []\n for i in range(0, num_robots):\n robots.append(robot_type(room, speed))\n\n anim = ps6_visualize.RobotVisualization(num_robots, width, height)\n\n cleaned_coverage = 0.0\n ticks = 0 # track the number of ticks\n while cleaned_coverage < min_coverage:\n # update the position of each of the robots\n for r in robots:\n r.updatePositionAndClean()\n\n # calculate new cleaned pct\n cleaned_coverage = (room.getNumCleanedTiles() / float(room.getNumTiles()))\n # print cleaned_coverage\n\n #increment ticks and animation\n ticks = ticks + 1\n anim.update(room, robots)\n\n\n # finish off\n trial_ticks.append(ticks)\n anim.done()\n\n # print \"Number of Robots: \" + str(num_robots)\n # print \"Robot Type: \" + str(robot_type)\n # print \"Robot Speed: \" + str(speed)\n # print \"Room Size: \" + str(width) + 'x' + str(height)\n # print \"Number of Trials: \" + str(num_trials)\n # print \"Mean clock ticks: \" + str(float(sum(trial_ticks)) / num_trials)\n\n return sum(trial_ticks) / float(len(trial_ticks))",
"def steps_by_random_walk(self, length):\n\n self.debug(\"Initializing random walk of length '{0}'\"\n .format(length))\n\n for node_name in self.chain.random_walk(length=length):\n self.debug(\"Performing step: '{0}'\".format(node_name))\n method = getattr(self, 'step_' + node_name)\n method()",
"def random_walk_simulator(self, num_of_simulations=100, walk_length=10):\n average_fare_by_state = []\n average_duration_by_state = []\n for state_id in range(self.k):\n list_of_random_walks = []\n for _ in range(num_of_simulations):\n random_walk_simulation = self.random_walk(state_id, walk_length)\n list_of_random_walks.append(random_walk_simulation)\n return average_fare_by_state, average_duration_by_state",
"def run_simulation(num_robots, speed, capacity, width, height, dirt_amount, min_coverage, num_trials,\n robot_type):\n # Keep track of time-steps it takes to clean room--will return average number of time steps to clean room\n num_delta_t = []\n # Simulation runs for num_trials trials\n for i in range(num_trials):\n # Initialize variables per trial: a width*height Room, num_robots Robots of robot_type, time steps\n room = EmptyRoom(width, height, dirt_amount)\n # Shouldn't this line be good enough? See note below.\n #robots = [robot_type(room, speed, capacity) for j in range(num_robots)]\n # --> note: Is it necessary to set the robots' positions to a new random position like this?\n # After all, the Robot class constructor already initializes a Robot's position to a random position using\n # room.get_random_position()...\n robots = []\n for j in range(num_robots):\n robot = robot_type(room, speed, capacity)\n robot.set_robot_position(room.get_random_position())\n robots.append(robot)\n delta_t = 0\n # Before starting the trial, start animation\n #anim = ps3_visualize.RobotVisualization(num_robots, width, height, False, delay=0.01)\n # Clean room with robot(s) until the min_coverage is met, counting the number of time steps in the process\n while float(room.get_num_cleaned_tiles()/room.get_num_tiles()) < min_coverage:\n for robot in robots:\n robot.update_position_and_clean()\n delta_t += 1\n # Update the animation\n #anim.update(room, robots)\n # Once min_coverage met, store time_steps it took to compute average later\n num_delta_t.append(delta_t)\n # After the trial is over, finish the animation\n #anim.done()\n # Once simulation is finished, return the average\n return sum(num_delta_t)/num_trials",
"def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,\n robot_type):\n \n \n ticks = 0 \n #robot1 = robot_type(room, speed)\n #current_coverage = room.getNumCleanedTiles()/room.getNumTiles()\n\n for n in range(num_trials):\n robots_all = []\n room = RectangularRoom(width,height)\n\n for n in range(num_robots):\n robot = robot_type(room, speed) \n robots_all.append(robot)\n \n current_coverage = room.getNumCleanedTiles()/room.getNumTiles()\n while (current_coverage<min_coverage):\n for n in range(num_robots):\n robots_all[n].updatePositionAndClean()\n ticks += 1\n #print(\"\\n\")\n current_coverage = room.getNumCleanedTiles()/room.getNumTiles()\n #print(ticks)\n return ticks/num_trials\n #raise NotImplementedError",
"def random_walk(self, start_id, walk_length):\n total_duration, total_fare, states_visited = 0, 0, []\n next_id = start_id\n for i in range(walk_length):\n states_visited.append(next_id)\n s = self.get_state(next_id)\n next_id, fare, duration = s.next_state()\n total_fare += fare\n total_duration += duration\n return states_visited, total_fare, total_duration",
"def run_step(self, measurements, sensor_data, directions, target):",
"def random_walk(num_steps, num_walkers):\n\n\n #store distance squared for each walker\n r_squared = np.zeros(num_steps)\n\n #array to store analytical solutions for the diffusion constant\n diffusion_constant = np.zeros(num_steps)\n\n # specify all step numbers-- for consistent plotting purposes\n step_num = np.arange(1,num_steps+1)\n\n\n plt.figure()\n ax = plt.gca()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.axis('equal')\n ax.set_title('2D Random Walk:\\n\\\n %s Steps For Each Walker' %(num_steps),\\\n family='monospace',size=12, weight='bold')\n ax.legend(\"\",title='# of Walks= %s' %(num_walkers),loc=1,fontsize=9)\n\n\n for j in range(num_walkers):\n\n # generate random steps +/- 1\n x = ( 2*(rng(num_steps) > 0.5) - 1).cumsum()\n # generate random steps +/- 1\n y = ( 2*(rng(num_steps) > 0.5) - 1).cumsum() # generate random steps +/- 1\n\n #to get distance_sqaured for each walker\n r_squared_temp = np.add(x**2, y**2)\n\n # add the distance squared of current walker to last walker\n #this is done for the collection of 2d walks.\n r_squared = r_squared + r_squared_temp\n\n\n #plot the coodinates of the random walker after each\n #is generated\n plt.plot(x,y)\n\n #normalizes the r_squared data\n normal_r_sqaured = r_squared/num_walkers\n\n\n #analytically solve for the diffuion\n #all solutions are output in the interactive kernel.\n for i in range(num_steps):\n diffusion_constant[i] = normal_r_sqaured[i]/(2*(i+1))\n print(\"At Step Number\",i+1,\":\")\n print(\"Diffusion Constant =\", diffusion_constant[i],\"\\n\" )\n\n plt.show()\n\n\n\n plt.figure()\n ax = plt.gca()\n plt.plot(step_num,normal_r_sqaured,'.')\n plt.xlabel(\"step number = (time)\")\n plt.ylabel(\"<r$^2$>\")\n ax.set_title('2D Random Walk:\\n\\\n The Average of The Sqaure of the Displacement\\n\\\n and %s Steps For Each Walker' %(num_steps),\\\n family='monospace',size=12, weight='bold')\n ax.legend(\"\",title='# of Walks Averaged= %s' %(num_walkers),loc=2,fontsize=9)\n plt.show()\n\n\n return diffusion_constant, normal_r_sqaured",
"def run_for_n(self,steps):\n for step in range(steps):\n self.step_all()\n self.steps += 1",
"def step_simulation(self):\n for n in self.G:\n self.G.node[n]['unit'].step()",
"def random_walk(self, step_size, turn_angle, num_steps):\n step = 0\n while step < num_steps:\n self.left(random.uniform(*turn_angle))\n self.forward(random.uniform(*step_size))\n step = step + 1",
"def run_simulation(num_scanLanes,num_min,prob_perSec,prob_preCheck,time_ID,time_scan,time_preCheckScan):\n \n # if not precheck, run simulation without pre check\n if prob_preCheck == None:\n no_preCheck_results = no_preCheck(num_scanLanes,num_min,prob_perSec,time_ID,time_scan)\n \n # **Output**\n print()\n print(\"Number of scanners:\",num_scanLanes)\n print(\"Simulation Length:\",num_min,\"minutes\")\n print(\"Passenger arrival probability:\",prob_perSec)\n print(\"Simulate PreCheck: NO\")\n print()\n print(\"Number of passengers cleared:\",no_preCheck_results[0])\n print(\"Average wait time:\",no_preCheck_results[1],\"minutes\")\n \n scan_linesData = no_preCheck_results[2]\n for key,values in scan_linesData.items():\n print(\"Avg Lane\",key[-1],\"Wait Time:\", values[0],\"minutes\", \"(\"+str(values[1])+\" people)\") \n \n print()\n print(\"Total number of passengers in line at end of simulation:\",no_preCheck_results[3]) \n print()\n \n # run simulation with precheck\n else:\n preCheck_results = preCheck(num_scanLanes,num_min,prob_perSec,prob_preCheck,time_ID,time_scan,time_preCheckScan)\n \n # **Output**\n print()\n print(\"Number of scanners:\",num_scanLanes)\n print(\"Simulation Length:\",num_min,\"minutes\")\n print(\"Passenger arrival probability:\",prob_perSec)\n print(\"Simulate PreCheck: YES\")\n print()\n print(\"Number of passengers cleared:\",preCheck_results[0])\n print(\"Average wait time:\",preCheck_results[1],\"minutes\")\n \n scan_linesData = preCheck_results[2]\n for key,values in scan_linesData.items():\n if key == 'preCheck_scan':\n print(\"Avg PreCheck Scan Wait Time:\",values[0],\"minutes\",\"(\"+str(values[1])+\" people)\")\n else:\n print(\"Avg Lane\",key[-1],\"Wait Time:\", values[0],\"minutes\", \"(\"+str(values[1])+\" people)\") \n \n print()\n print(\"Total number of passengers in line at end of simulation:\",preCheck_results[3]) \n print()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
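One of the negatives in this row notes that the textbook version of sim_walks hides an intentional bug and warns the reader to stay skeptical of simulations. A quick way to act on that advice is to cross-check the statistics drunkTest prints against an independent implementation. The sketch below is purely illustrative (it does not use the Field/Drunk classes from these rows, and the function name is made up): it simulates unbiased 2-D unit-step walks with NumPy and reports the same Mean / Max / Min of the final distances.

```python
# Hypothetical cross-check of drunkTest's output using plain NumPy.
import numpy as np

def walk_stats(num_steps, num_trials, seed=0):
    rng = np.random.default_rng(seed)
    # each step is one of (0, 1), (0, -1), (1, 0), (-1, 0), chosen uniformly
    steps = np.array([(0, 1), (0, -1), (1, 0), (-1, 0)])
    choices = rng.integers(0, 4, size=(num_trials, num_steps))
    displacement = steps[choices].sum(axis=1)          # shape (num_trials, 2)
    distances = np.linalg.norm(displacement, axis=1)   # final distance per trial
    return distances.mean(), distances.max(), distances.min()

for n in (10, 100, 1000):
    mean, dmax, dmin = walk_stats(n, num_trials=100)
    print(n, 'steps: Mean =', round(mean, 4), ' Max =', dmax, ' Min =', dmin)
```

The mean final distance should grow roughly like the square root of numSteps, which gives a quick sanity check on whatever the simWalks/drunkTest pair above reports.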
Given a trace of an MCTS forward run, use the final value estimates and the actual rewards to back up the tree node values | def backup_mcts_trace(self, sars: BackupTrace, final_value_estimate: float = 0.0) -> float:
value_discounted = final_value_estimate
prev_tree_values = [None] * len(self.tree_backups)
for state, action, reward, next_state, aut_state in reversed(sars):
self.visit_count[state][action] += 1
self.state_action_aut_total[state][action] += self.aut_stats.v(aut_state)
self.state_action_aut_average[state][action] = \
self.state_action_aut_total[state][action] / self.visit_count[state][action]
# Backup values according to the tree operations defined in the constructor
for idx, ((storage, treeop), prev_value) in enumerate(zip(self.tree_backups, prev_tree_values)):
if prev_value is not None:
storage[state][action] = treeop.edge_combinator(storage[state][action], prev_value)
prev_tree_values[idx] = treeop.node_value(storage[state])
value_discounted *= self.discount
value_discounted += reward
self.value[state][action] += value_discounted
self.value_avg[state][action] = self.value[state][action] / self.visit_count[state][action]
return value_discounted | [
"def search(self, n_mcts):\n max_depth, mean_depth = 0, 0\n\n for _ in range(n_mcts):\n mcts_state: MCTSAgent.MCTSState = self.root # reset to root for new trace\n # input(str(self.root.n_value) + \" \" + str(self.root.q_value)) # To Debug the tree\n depth = 0\n\n while True:\n depth += 1\n action_index: int = mcts_state.select()\n\n if mcts_state.child_states[action_index] is not None:\n # MCTS Algorithm: SELECT STAGE\n mcts_state = mcts_state.child_states[action_index]\n continue\n elif mcts_state.state.is_done():\n break\n else:\n # MCTS Algorithm: EXPAND STAGE\n if action_index == len(mcts_state.solution): # This is a commit action\n next_state, _reward, _done, _debug = step(mcts_state.solution, mcts_state.state)\n mcts_state.child_states[action_index] = MCTSAgent.MCTSState(\n next_state, self.model,\n r_previous=0, parent_state=mcts_state, parent_action=action_index)\n else: # This is a swap action\n next_solution = np.copy(mcts_state.solution)\n next_solution[action_index] = True\n reward = evaluate(next_solution, mcts_state.state) - \\\n evaluate(mcts_state.solution, mcts_state.state)\n mcts_state.child_states[action_index] = MCTSAgent.MCTSState(\n mcts_state.state, self.model, next_solution, reward, mcts_state, action_index)\n mcts_state = mcts_state.child_states[action_index]\n break\n\n # MCTS Algorithm: BACKUP STAGE\n total_reward = mcts_state.rollout_reward\n while mcts_state.parent_action is not None:\n total_reward = mcts_state.r_previous + self.HYPERPARAM_DISCOUNT_FACTOR * total_reward\n mcts_state.parent_state.update_q(total_reward, mcts_state.parent_action)\n mcts_state = mcts_state.parent_state\n\n max_depth = max(max_depth, depth)\n mean_depth += depth / n_mcts\n\n return max_depth, mean_depth",
"def mcts_backup(self, reward):\n while(self.node_selected.father != None):\n self.node_selected.value += self.node_selected.player*self.env.win_player*abs(reward) # update the value \n self.node_selected.visit += 1 # update the visit number\n \n \"\"\" just leave the expanded node, all others should be threw \"\"\"\n if self.node_selected.simulation_node == True:\n self.node_selected.simulation_node = False # reset the flag\n self.node_selected.children = [] # clear the children\n self.node_selected = self.node_selected.father # backup from this node\n\n \"\"\" update the root node and reset the env \"\"\"\n self.node_selected.visit += 1 # add the root\n self.deep_copy(store=False) # restore the env to the start of the simulation",
"def __MCTS(self, next_proba: float,\n action_history: NDArray,\n proba_history: NDArray,\n final_value: float,\n end: bool,\n N: Dict[tuple, int],\n Q: Dict[tuple, int],\n P: Dict[tuple, int],\n cur_depth: int):\n\n # The search reached the end\n if end:\n if final_value > self.best_path_reward:\n self.best_path_reward = final_value\n self.best_path = action_history\n return final_value\n\n # Getting the state based on the acton history\n state = tuple(action_history)\n\n # If we reach a new node we should get the value and policy from the network\n if state not in P:\n # Use the model to predict the value and the policy\n proba_history[cur_depth] = next_proba\n self.model.eval()\n pred = self.model(torch.tensor(action_history).float().to(self.device),\n torch.tensor(proba_history).float().to(self.device))\n value, policy = pred[0][0].item(), pred[0][1].item()\n\n # Initialize N, Q and P\n P[state] = policy\n\n for action in self.environment.action_space:\n state_action = (state, action)\n Q[state_action] = 0\n N[state_action] = 0\n\n return value\n\n # If the node is visited we use the criterion from the MCTS to\n # choose the next action\n max_U, best_A = -float(\"inf\"), -1\n tot_N = 0\n for action in self.environment.action_space:\n tot_N += N[(state, action)]\n\n for action in self.environment.action_space:\n state_action = (state, action)\n\n # P[state] represents the probability of action -1 from a given state\n if action == -1:\n U = Q[state_action] + self.c_puct * P[state] * np.sqrt(tot_N) / (\n 1 + N[state_action])\n elif action == 1:\n U = Q[state_action] + self.c_puct * (1 - P[state]) * np.sqrt(tot_N) / (\n 1 + N[state_action])\n\n # Choose action that maximizes U\n if U > max_U:\n best_A = action\n max_U = U\n\n # Get the next state from the environment\n next_proba, action_history, proba_history, final_value, end = \\\n self.environment.step(best_A)\n self.n_visited_nodes += 1\n\n # Propagate down the search to get the value from the state below\n value = self.__MCTS(next_proba, action_history, proba_history, final_value, end,\n N, Q, P, cur_depth + 1)\n\n # Updating Q and N\n state_action = (state, best_A)\n Q[state_action] = (N[state_action] * Q[state_action] + value) / (\n N[state_action] + 1)\n N[state_action] += 1\n\n return value",
"def forwardCalculation(node):\n pass",
"def retraceSteps(myTurtle, myStack):\n while len(myStack) != 0:\n crumb = myStack.pop()\n LSysFns.resetTurtle(myTurtle, crumb)",
"def next_steps(self, orig_query, lineage, reqd_triples) :\n #self.debug.p('orig_query', orig_query)\n guaranteed_steps = []\n \n # the translation_queue is a list of translations that will be searched. \n # TODO if there is a lineage, translation_queue should be a \n # combination of the tq from the end of both merged paths\n if lineage and 'new_lineage' not in lineage[-1] :\n translation_queue = self.translation_matrix[lineage[-1]['translation'].get('id')]\n else :\n translation_queue = list(self.translations)\n # NOTE setting translation_queue to all translations all the time causes\n # errors. This is because we can go in all kinds of weird directions ...\n \n # NOTE: this is a definite hack. The problem is that the \n # translation_matrix can't account for situations where a variable gets\n # turned into a litvar. That variable can be in many triples and all of\n # those triples must be used as initial pruning instead of just the output\n # triples.\n # WARNING: Here, I am only using the middle value of the triple as a test.\n # It will fail when the above situation happens and variables are used in\n # the 2nd position (property) of a triple.\n for triple in reqd_triples :\n for trans in self.translations :\n if triple[1] in [t[1] for t in trans['input']] :\n if trans not in translation_queue :\n translation_queue.append(trans)\n \n # HEURISTIC: stop DFS search at self.depth\n if lineage :\n lineage_depth = sum(s['translation']['step_size'] for s in lineage)\n self.debug.p('lineage_depth', lineage_depth)\n if lineage_depth >= self.depth :\n translation_queue = []\n \n # OPTIMIZATION: skip this translation if it is the inverse of the last\n # translation\n # WARNING: not 100% sure this is always going to work, but it does for\n # now ...\n def test_for_inverse(translation) :\n inverse_function = lineage[-1]['translation'].get('inverse_function') \n if inverse_function :\n if inverse_function == translation['name'] :\n return False\n return True\n translation_queue = filter(test_for_inverse, translation_queue)\n \n # show the list of translations that show up in the queue\n #self.debug.p('tq', [t['name'] for t in translation_queue])\n \n def merge_partial(translation, matched_triples) :\n # this function gets called if this translation was partially matched\n \n found_merge = False\n \n # all of the search paths that have partially matched this translation\n past_partials = self.partials[translation['id']]\n # NOTE: use heuristics to pick which past_partial to try first:\n # *** keep stats about p of tip of branch1 combining with tip of \n # branch2 to fulfil this translation\n # * comparing how recently the two paths diverged might correlate\n # * prefer combined_bindings_set with more litvars\n # TODO: n-way merges instead of just 2-way merges ... 
ouch!\n # just found a case that needs this \n #self.debug.p('past_partials', len(past_partials))\n for past_lineage, past_query, past_matched_triples in past_partials :\n # make sure that the triples that these two partials atleast cover\n # all input triples\n if len(past_matched_triples.union(matched_triples)) == len(translation['input']) :\n # OPTIMIZATION make sure that past_query isn't a subset of orig_query\n # NOTE: might be faster to compare lineages instead of queries\n if all(triple in orig_query for triple in past_query) :\n continue\n \n # merge past_query and orig_query:\n # as we merge past_query and orig_query, if a litvar and a var bind,\n # keep the litvar, it has a known computation, where the var does not\n # NOTE: there may be problems binding litvars to other litvars. The\n # two could have different values, but we won't know until runtime.\n merged_bindings_set = self.bind_vars(\n orig_query, past_query, False, {}, prefer_litvars = True\n )\n \n # all the ways orig_query and past_query can be merged\n for merged_bindings in merged_bindings_set :\n # see if any variables are mapped to twice ... this may be a big hack\n if len(merged_bindings.values()) != len(set(merged_bindings.values())) :\n continue\n \n new_query, new_triples = sub_var_bindings_track_changes(\n orig_query + past_query, merged_bindings\n )\n \n new_query = remove_duplicate_triples(new_query)\n \n # test to see if this new merged query has enough information to\n # trigger this translation\n ret, more = self.testtranslation(translation, new_query, new_triples)\n if ret == True :\n found_merge = True\n self.debug.open_block('merge for ' + translation['name'])\n self.debug.p('orig_query', orig_query)\n self.debug.p('past_query', past_query)\n self.debug.p('merged_bindings_set', merged_bindings_set)\n yield new_query, translation, more, past_lineage\n self.debug.close_block()\n \n # add this instance to past partials\n #p('storing partial', translation['name'], matched_triples)\n self.partials[translation['id']].append((lineage, orig_query, matched_triples))\n \n def test_and_merge() :\n \"\"\" test each translation against the current query. If there is a \n partial match, also yield all possible merges \"\"\"\n for translation in translation_queue :\n ret, more = self.testtranslation(translation, orig_query, reqd_triples)\n if ret == \"partial\" :\n # in this case more is a list of the triples that matched\n for x in merge_partial(translation, more) :\n yield x\n elif ret == False :\n continue \n else :\n # in this case more is a bindings_set\n yield orig_query, translation, more, False\n \n # main loop\n for query, translation, bindings_set, new_lineage in test_and_merge() :\n # the 2nd value from testtranslation is bindings_set if we've gotten here\n \n # we've found a match, now we just need to find the bindings. 
This is\n # the step where we unify the new information (generated by output \n # triples) with existing information.\n for bindings in bindings_set :\n # input_bindings map from translation space to query space\n input_bindings = bindings\n # output_bindings map from translation space to query space\n output_bindings = {}\n \n input_bindings_vars = [var for (var, binding) in input_bindings.iteritems() if not is_var(binding)]\n missing_vars = translation['in_lit_vars'] - set(input_bindings_vars)\n if len(missing_vars) :\n continue\n \n # initial_bindings are the bindings that we already know from the \n # input unification that must also hold true for output unification\n # some of the initial_binding vars don't appear in the output triples\n # so we can get rid of them\n output_triples = translation['output']\n initial_bindings = dict(\n (unicode(name), bindings[name]) for name in translation['constant_vars']\n if name in bindings and \n name in translation['output_vars']\n )\n \n # used in a couple places later on\n output_lit_vars = find_vars(translation['output'], is_lit_var).union(\n set(translation.get('add_output_vars', [])))\n \n # if the translation has an input_function, run it here to see if these\n # input_bindings pass the test\n if 'input_function' in translation :\n if not translation['input_function'](input_bindings) :\n continue\n \n # unify output_triples with query\n if not translation['output'] :\n # if there is not output, simply replace input vars with litvars\n # since after the translation is applied they will have values\n output_bindings_set = [\n {unicode(name) : LitVar(input_bindings[name].name)}\n for name in translation['add_output_vars']]\n else :\n output_bindings_set = self.bind_vars(output_triples, query, False, initial_bindings = initial_bindings)\n # if no unification is found, just use the initial_bindings\n if output_bindings_set == False :\n output_bindings_set = [initial_bindings]\n \n for output_bindings in output_bindings_set :\n # if var is a lit var in the output_triples, then its output bindings\n # must bind it to a new variable since it will be computed and set by\n # the translation function and may not have the same value any more\n # WARNING: I think this means that bind_vars might not do the \n # right thing if it thinks that it can bind whatever it wants to \n # lit_vars. 
lit_vars for example shouldn't bind to literal values\n # this might also have to do with a schema, some things can be bound\n # again (a.is), whereas some can not (u.inches)\n \n # if get_bindings found variable to variable matches, we will need\n # to alter the triples in the existing query (not just add triples)\n # unified_bindings maps old query variables to new query variables\n unified_bindings = {}\n for var in output_lit_vars :\n new_lit_var = LitVar(var+'_out_'+str(self.next_num()))\n if var in output_bindings :\n if is_any_var(output_bindings[var]) :\n if not is_out_lit_var(output_bindings[var]) :\n unified_bindings[output_bindings[var].name] = new_lit_var\n # only replace output_bindings with a lit var if\n # output_bindings isn't already bound to a literal value,\n # like a string or an int\n if var not in output_bindings or is_any_var(output_bindings[var]) :\n output_bindings[var] = new_lit_var\n \n # make sure all vanila vars have unique names\n for var in find_vars(translation['output'], is_var) :\n if var not in output_bindings :\n output_bindings[var] = Var(var+'_'+str(self.next_num()))\n\n # generate the new query by adding the output triples with \n # output bindings substituted in\n new_triples = sub_var_bindings(translation['output'], output_bindings)\n\n new_query, new_query_new_triples = sub_var_bindings_track_changes(query, unified_bindings)\n\n new_query.extend(new_triples)\n new_triples.extend(new_query_new_triples)\n \n # remove output_bindings which are not constant_vars or lit_vars (in\n # the translation's output triples. An example of an instance when\n # a variable would be in output_bindings that we would remove here is\n # when an output_triple has normal variables which are not used in \n # the input, but also aren't bound to anything by the translation fn.\n # we want to know if that variable binds to anything for creating \n # new_triples above, but as far as the evaluator is concerned, it has\n # no value and thus no output binding\n output_bindings = dict(\n (var, binding) for var, binding in output_bindings.iteritems()\n if var in output_lit_vars or\n var in translation['constant_vars']\n )\n \n new_query = remove_duplicate_triples(new_query)\n \n #self.debug.p('new_triples', new_triples)\n #self.debug.p('new_query', new_query)\n #self.debug.p('input_bindings', input_bindings)\n #self.debug.p('output_bindings', output_bindings)\n \n step = {\n 'input_bindings' : input_bindings,\n 'output_bindings' : output_bindings,\n 'translation' : translation,\n 'new_triples' : new_triples,\n 'new_query' : new_query,\n }\n if new_lineage :\n step['new_lineage'] = new_lineage\n yield step",
"def test_tmaze_demo(self):\n\n reward_probabilities = [0.98, 0.02] # probabilities used in the original SPM T-maze demo\n env = TMazeEnv(reward_probs = reward_probabilities)\n\n '''test plotting of the observation likelihood (just plot one slice)'''\n A_gp = env.get_likelihood_dist()\n plot_likelihood(A_gp[1][:,:,0],'Reward Right')\n\n '''test plotting of the transition likelihood (just plot one slice)'''\n B_gp = env.get_transition_dist()\n plot_likelihood(B_gp[1][:,:,0],'Reward Condition Transitions')\n\n A_gm = copy.deepcopy(A_gp) # make a copy of the true observation likelihood to initialize the observation model\n B_gm = copy.deepcopy(B_gp)# make a copy of the true transition likelihood to initialize the transition model\n \n control_fac_idx = [0]\n agent = Agent(A=A_gm, B=B_gm, control_fac_idx=control_fac_idx)\n plot_beliefs(agent.D[0],\"Beliefs about initial location\")\n\n agent.C[1][1] = 3.0 # they like reward\n agent.C[1][2] = -3.0 # they don't like loss\n\n T = 5 # number of timesteps\n\n obs = env.reset() # reset the environment and get an initial observation\n\n # these are useful for displaying read-outs during the loop over time\n reward_conditions = [\"Right\", \"Left\"]\n location_observations = ['CENTER','RIGHT ARM','LEFT ARM','CUE LOCATION']\n reward_observations = ['No reward','Reward!','Loss!']\n cue_observations = ['Cue Right','Cue Left']\n \n for t in range(T):\n qx = agent.infer_states(obs)\n\n q_pi, G = agent.infer_policies()\n\n action = agent.sample_action()\n\n obs = env.step(action)\n\n if int(action[0]) == 3:\n \n # if the reward condition is Reward on RIGHT\n if env.reward_condition == 0:\n self.assertEqual(obs[2], 0) # this tests that the cue observation is 'Cue Right' in case of 'Reward on Right' condition\n\n # if the reward condition is Reward on RIGHT\n if env.reward_condition == 1:\n self.assertEqual(obs[2], 1) # this tests that the cue observation is 'Cue Left' in case of 'Reward on Left' condition\n\n \n plot_beliefs(qx[1],\"Final posterior beliefs about reward condition\")",
"def propagate(self, child_network_return, child_network_reward):\r\n # compute node value - Q, like article (3) page 12 in muzero paper\r\n G_k = discount_factor * child_network_return + child_network_reward\r\n self.network_value_sum = G_k #is this true? page 12 article (3)\r\n self.times_visited += 1\r\n self.action_value = ((self.times_visited - 1)*self.action_value + G_k)/self.times_visited\r\n\r\n if Node.min_tree_value > self.action_value:\r\n Node.min_tree_value = self.action_value\r\n if Node.max_tree_value < self.action_value:\r\n Node.max_tree_value = self.action_value\r\n\r\n # propagate upwards\r\n if not self.is_root():\r\n self.parent.propagate(G_k, self.immediate_reward) # TODO: Figure out if child_value or network value sum\r",
"def backstep(self):\n head, moves = self.history.pop()\n for i in range(self.N):\n if moves[i]:\n Tape._pop(self.stacks[moves[i] < 0][i])\n Tape._append(self.stacks[moves[i] > 0][i], self.head[i])\n self.head[i] = head[i]\n for i in range(self.N):\n self.pos[i] -= moves[i]",
"def think(state):\r\n \"\"\"\r\n root_node = MCTSNode(parent=None, parent_action=None, action_list=state.legal_moves)\r\n me = state.player_turn\r\n move = None\r\n possible_move = {}\r\n\r\n # Copy the game for sampling a playthrough\r\n sampled_game = state.copy()\r\n\r\n # Start at root\r\n node = root_node\r\n #print(MCTSNode.tree_to_string(node))\r\n\r\n for step in range(10):\r\n # Do MCTS - This is all you!\r\n n = traverse_nodes(node, sampled_game, me)\r\n #print (str(n.parent_action))\r\n #return node.parent_action\r\n #while not state.is_terminal():\r\n new_node = expand_leaf(n, sampled_game)\r\n roll = rollout(sampled_game)\r\n for r in roll:\r\n possible_move[r] = roll[r]\r\n if roll[r] > 0:\r\n won = True\r\n else:\r\n won = False\r\n backpropagate(new_node, won)\r\n #print (MCTSNode.tree_to_string(root_node))\r\n #print(\"f\"))\r\n # Return an action, typically the most frequently used action (from the root) or the action with the best\r\n # estimated win rate.\r\n test = float(-inf)\r\n for q in possible_move:\r\n if possible_move[q] > test:\r\n test = possible_move[q]\r\n move = q\r\n return move\r\n \"\"\"\r\n root_node = MCTSNode(parent=None, parent_action=None, action_list=state.legal_moves)\r\n me = state.player_turn\r\n move = None\r\n i = 0;\r\n for step in range(40):\r\n # Copy the game for sampling a playthrough\r\n sampled_game = state.copy()\r\n # Start at root\r\n node = root_node\r\n holder = root_node\r\n print (\"foo\", i)\r\n i += 1\r\n\r\n while not sampled_game.is_terminal():\r\n # Do MCTS - This is all you!\r\n if sampled_game.player_turn!=me:\r\n rollout_move= rollout_bot.think(sampled_game.copy()) #chaged bot(won 9/10, with rollout still 8/10)\r\n sampled_game.apply_move(rollout_move) #applies multiple moves taking away from legal moves to traverse\r\n #print(\"rollout bot\")\r\n continue\r\n node=traverse_nodes(node,sampled_game, me)\r\n sampled_game.apply_move(node.parent_action)\r\n #print(\"traversing result\")\r\n #print(str(node.parent_action))\r\n #while not state.is_terminal():\r\n if not sampled_game.is_terminal():\r\n node=expand_leaf(node,sampled_game)\r\n #sampled_game.apply_move(new.parent_action)\r\n #print(\"expanding leaf result\")\r\n #print(str(node.parent_action))\r\n if not sampled_game.is_terminal():\r\n #print(\"rolling\")\r\n new_roll = rollout(sampled_game)\r\n action = None\r\n for r in new_roll:\r\n action = r\r\n if new_roll[r] > 0:\r\n won = True\r\n else:\r\n won = False\r\n sample = MCTSNode(parent=node, parent_action=action)\r\n node.child_nodes[action] = sample\r\n backpropagate(sample,won)\r\n if(node.parent.parent == root_node):\r\n root_node.visits +=1\r\n\r\n #holder = node\r\n #print(\"q\", root_node.visits)\r\n #print (node.visits)\r\n if me == sampled_game.winner:\r\n won = True\r\n else:\r\n won = False\r\n backpropagate(node, won)\r\n move = traverse_nodes(root_node,sampled_game,me)\r\n\r\n #root_node.visits+=1\r\n #for q in root_node.child_nodes:\r\n #print(root_node.child_nodes[q])\r\n #print(move.parent_action)\r\n print(\"red\") #to tell me which bot it was\r\n return move.parent_action",
"def backstep(self):\n\n self.input.setDelta(self.output.getNetDelta())\n self.output.value = self.history.pop()",
"def take_next_step(self) -> None:\r\n next_path_dic = {} # temporary var used to keep track of the result of the step\r\n paths_to_end = set() # temporary var used to keep track of which paths have met the termination criteria\r\n \r\n for current_path_val in self.path_dic: # loop through each point, or current state of a path\r\n for transition in self.transitions:# loop through each transformation (or card draw)\r\n next_path_val = current_path_val + transition # this is value after a card has been drawn\r\n \r\n if next_path_val >= self.target: # if the path has reached an endpoint, add to a set\r\n # which will be used later to move paths to the endpoint dictionary\r\n paths_to_end.add(next_path_val)\r\n\r\n # doing the transformation\r\n if next_path_val in next_path_dic: #this point has already been found, just need to update its probability\r\n next_path_dic[next_path_val] += self.path_dic[current_path_val] \\\r\n / len(self.transitions)\r\n else: # this point hasn't been found yet, need to create it\r\n next_path_dic[next_path_val] = self.path_dic[current_path_val] / len(self.transitions)\r\n \r\n self.path_dic = next_path_dic # all transformations have been done. The next state is set as the current state\r\n \r\n # now that we've calucated the next steps for all paths, \r\n # loop through paths that met the end condition and move them from\r\n # the path dictionary to the endpoint dictionary\r\n for point in paths_to_end:\r\n if point in self.end_point_dic: # if this endpoint has been reached before, add the\r\n # probability of current path to probablility of endpoint\r\n self.end_point_dic[point] += self.path_dic.pop(point) #pop from the pathDic becuase this path is ended\r\n \r\n else: #havent reached this endpoint before, add it to the dictionary\r\n self.end_point_dic.update({point: self.path_dic.pop(point)})",
"def test_predict_future_reward(self):\n good_sequence = [\n ([0,0,0,0],1,[0,0,0,1]),\n ([0,0,0,1],0,[1,0,1,0]),\n ([1,0,1,0],1,[1,1,1,1]),\n ]\n bad_sequence = [\n ([0,0,0,0],0,[1,0,0,1]),\n ([1,0,0,1],1,[0,0,1,0]),\n ([0,0,1,0],1,[0,1,1,1]),\n ]\n def expand(r, final_reward):\n results = []\n for i,(state,action,new_state) in enumerate(r):\n record = {\n 'state': np.array(state,'f'),\n 'new_state': np.array(new_state,'f'),\n 'action': action,\n 'done': i >= len(r),\n 'reward': final_reward\n }\n results.append(record)\n assert results[-1]['reward'] == final_reward\n return results \n records = expand(good_sequence,1.0) + expand(bad_sequence,-1.0)\n print(records)\n records = records * 256\n model = main.build_model(env)\n main.train_model( model, records, env, batch_size=8)\n for (state,action,new_state) in good_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) == action, (state,action,prediction)\n \n for (state,action,new_state) in bad_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) != action, (state,action,prediction)",
"def _reset(self, *args, **kwargs):\n trajectory = self._env.reset(*args, **kwargs)\n self._goal = self.get_goal_from_trajectory(trajectory)\n return self.get_trajectory_with_goal(trajectory, self._goal)",
"def resetTrajectoryAbs(self,t):\n res=self.call((\"rtabs\",t))\n if res==\"Error\":\n raise ValueError(\"resetTrajectoryAbs failed\")\n else:\n return None",
"def postOrderTrav(self):\n for child in self.children:\n if child.children:\n child.postOrderTrav()\n else:\n pass\n #This might be a place where different work paths can be broken down\n #print(\"---Path---\")\n print(child.instruction)",
"def __buildtrees(mat,backgrnd,smat,gmat,bmat,bakmat,t_lim,height,min_size):\n # Reserve maximum size buffer to utilize enhancement by number in later processes\n L = smat.shape[0] # size of the matrix\n score = np.zeros((L,L,t_lim))\n local_parts_array = np.zeros((L,height,t_lim,t_lim,3),dtype=int)\n if height > L: height = L\n traceback = np.zeros((L,L,t_lim),dtype=int) # # LxLxt_lim tensor\n options = np.zeros((height + 1, height + 1))\n intervals = np.zeros(height * 3, dtype=np.int64)\n max_entries = height ** 2 * t_lim\n max_buffer = 4 + height * 3\n partition_buffer = np.zeros((max_entries, max_buffer), dtype=np.int64) \n local_score = np.zeros((height + 1, t_lim + 1))\n local_traceback_k = np.zeros((height + 1, t_lim + 1), dtype=np.int64)\n local_traceback_t = np.zeros((height + 1, t_lim + 1), dtype=np.int64)\n\n # Build trees and backtrack matrices\n index_part = __update_local_partitions(mat, smat, gmat, bmat, bakmat, backgrnd, score, local_score, min_size, t_lim, height, L, options, intervals, local_traceback_k, local_traceback_t, partition_buffer)\n\n # Set traced paths in the original format\n for ind in range(index_part):\n i, j, t, l = partition_buffer[ind,0:4]\n buf = np.copy(partition_buffer[ind,4:4+l].reshape(l // 3, 3))\n local_parts_array[i, j - i, t, :l // 3, :] = buf\n return local_parts_array, score",
"def think(board, state):\n identity_of_bot = board.current_player(state)\n root_node = MCTSNode(parent=None, parent_action=None, action_list=board.legal_actions(state))\n\n for step in range(num_nodes):\n #print(step)\n # Copy the game for sampling a playthrough\n sampled_game = state\n\n # Start at root\n node = root_node\n\n # Do MCTS - This is all you!\n #print(\"traverse\")\n leaf_node = traverse_nodes(node, board, sampled_game, identity_of_bot)\n #print(\"traverse done\")\n #print(\"add\")\n if leaf_node.visits != 0:\n while leaf_node.untried_actions != []:\n new_node = expand_leaf(leaf_node, board, sampled_game)\n leaf_node.untried_actions.pop(0)\n leaf_node = new_node\n #print(\"add done\")\n #print(\"rollout\")\n won = rollout(board, sampled_game)\n #print(\"roll done\")\n \"\"\"\n result = board.win_values(sampled_game)\n if result is not None:\n # Try to normalize it up? Not so sure about this code anyhow.\n player1 = result[1]\n player2 = result[2]\n else:\n player1 = player2 = 0\n if player1 == player2:\n player1 = player2 = 0\n if identity_of_bot == board.current_player(sampled_game):\n if identity_of_bot == 1:\n won = player1\n else:\n won = player2\n else:\n if identity_of_bot == 1:\n won = player2\n else:\n won = player1\n #print(\"backp\")\n \"\"\"\n backpropagate(leaf_node, won[identity_of_bot])\n #print(\"back done\")\n\n #print(node.tree_to_string(5, 1))\n\n moves = board.legal_actions(state)\n #print(moves)\n highest_win = 0\n most_repeated = 0\n best_move = moves[0]\n most_used_move = moves[0]\n #print(node.child_nodes.keys())\n\n for move in moves:\n #print(node.parent_action)\n #print(move)\n x = node.child_nodes.get(move)\n if x is None:\n continue\n #print(x.wins)\n if x.visits != 0:\n win_rate = x.wins / x.visits\n else:\n return move\n if win_rate > highest_win:\n highest_win = win_rate\n best_move = move\n if x.visits > most_repeated:\n most_repeated = x.visits\n most_used_move = move\n #print(\"turn taken\")\n if highest_win < 0:\n return most_used_move\n\n # Return an action, typically the most frequently used action (from the root) or the action with the best\n # estimated win rate.\n return best_move",
"def _trajectory_centric_planning(self, trajectories):\n # Calculate non-parametric values over the trajectories.\n # Iterate backward through trajectories\n for t in range(len(trajectories) - 1, 0, -1):\n elem = trajectories[t][1]\n s_tp1 = tuple(elem.next_info_state)\n s_t = tuple(elem.info_state)\n a_t = elem.action\n r_t = elem.reward\n legal_actions = elem.legal_actions_mask\n if t < len(trajectories) - 1:\n for action in range(len(legal_actions)):\n if not legal_actions[action]:\n continue\n if action == elem.action:\n self._q_np[s_t][a_t] = (r_t + self._discount * self._v_np[s_tp1])\n else:\n self._agent.info_state = torch.Tensor(\n np.expand_dims(elem.info_state, axis=0))\n q_values_parametric = self._agent._q_network(\n self._agent.info_state).detach().numpy()\n self._q_np[s_t][a_t] = q_values_parametric[0][action]\n\n # Set V(s_t)\n if t == len(trajectories) - 1:\n # Sample from the parametric model.\n self._agent.info_state = torch.Tensor(\n np.expand_dims(elem.info_state, axis=0))\n q_values_parametric = self._agent._q_network(\n self._agent.info_state).detach().numpy()\n self._v_np[s_t] = np.max(q_values_parametric)\n else:\n self._v_np[s_t] = max(self._q_np[s_t])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
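The core of backup_mcts_trace above is a discounted return accumulated backwards over the trace; the automaton statistics (aut_stats, state_action_aut_*) and the configurable tree_backups machinery sit on top of that. Below is a stripped-down sketch of just the discounted backup, with plain dictionaries standing in for the per-(state, action) tables; all names here are illustrative, not the class's real attributes.

```python
# Simplified backup over a reversed (state, action, reward) trace.
# Mirrors the discount/accumulate order of backup_mcts_trace but omits the
# automaton statistics and the per-tree-operation backups.
from collections import defaultdict

def backup_trace(sars, discount, final_value_estimate=0.0):
    visit_count = defaultdict(lambda: defaultdict(int))
    value_sum = defaultdict(lambda: defaultdict(float))
    value_avg = defaultdict(lambda: defaultdict(float))

    value_discounted = final_value_estimate
    for state, action, reward in reversed(sars):
        visit_count[state][action] += 1
        # fold this step's reward into the return observed from this point on
        value_discounted = reward + discount * value_discounted
        value_sum[state][action] += value_discounted
        value_avg[state][action] = value_sum[state][action] / visit_count[state][action]
    return value_discounted, value_avg

# e.g. a two-step trace with a terminal reward of 1.0 and discount 0.9:
# backup_trace([('s0', 'a0', 0.0), ('s1', 'a1', 1.0)], 0.9)[0] == 0.9
```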
Run a batch of MCTS | def mcts_batch(self, env: AutShapingWrapper, state: Any, count: int, batch_size: int, lp_gen=False) -> None:
for _ in range(count):
self.mcts_mini_batch(env, state, batch_size, lp_gen=lp_gen) | [
"def test_mmtl_multitask(self):\n N = 600\n T = 2\n\n tasks = create_tasks(T)\n model = MetalModel(tasks, verbose=False)\n payloads = create_payloads(N, T, batch_size=2)\n metrics_dict = self.trainer.train_model(model, payloads, verbose=False)\n # For 3 payloads, each of 2 tasks each has 2 label sets\n self.assertEqual(len(metrics_dict), len(SPLITS) * T ** 2)\n for metric, score in metrics_dict.items():\n self.assertGreater(score, 0.9)",
"def run_train():\n parser = argparse.ArgumentParser(description=\"GPT training\")\n parser.add_argument('--device_id', type=int, default=0, help=\"Device id, default is 0.\")\n parser.add_argument(\"--device_num\", type=int, default=1, help=\"Use device nums, default is 1.\")\n parser.add_argument(\"--distribute\", type=str, default=\"false\", choices=[\"true\", \"false\"],\n help=\"Run distribute, default is false.\")\n parser.add_argument(\"--optimizer\", type=str, default=\"adam\", choices=[\"adam\", \"lamb\"],\n help=\"select which optimizer to be used, default adam\")\n parser.add_argument(\"--epoch_size\", type=int, default=10, help=\"Epoch size, default is 10.\")\n parser.add_argument(\"--warmup_step\", type=int, default=10000, help=\"Warmup step, default is 10000.\")\n parser.add_argument(\"--data_path\", type=str, default=\"\", help=\"Data path of your MindRecord files.\")\n parser.add_argument(\"--start_lr\", type=float, default=\"5e-5\", help=\"Start learning rate, default is 5e-5.\")\n parser.add_argument(\"--end_lr\", type=float, default=\"1e-10\", help=\"End learning rate, default is 1e-10.\")\n parser.add_argument(\"--sink_size\", type=int, default=100, help=\"Sink size for every iteration, default is 100\")\n parser.add_argument(\"--model_parallel_num\", type=int, default=8, help=\"Num of model parallel, default is 8\")\n\n\n args_opt = parser.parse_args()\n device_id = int(os.getenv(\"DEVICE_ID\", '0'))\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=device_id)\n if args_opt.distribute == \"true\":\n D.init()\n device_num = args_opt.device_num\n rank = device_id % device_num\n print(\"device_id is {}, rank_id is {}\".format(device_id, rank))\n\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,\n device_num=device_num)\n\n else:\n rank = 0\n device_num = 1\n\n config = GPTConfig(batch_size=4,\n seq_length=1024,\n vocab_size=50257,\n embedding_size=1024,\n num_layers=24,\n num_heads=16,\n expand_ratio=4,\n post_layernorm_residual=False,\n dropout_rate=0.1,\n compute_dtype=mstype.float16,\n use_past=False)\n gpt = GPT(config)\n model_parallel_num = args_opt.model_parallel_num\n data_parallel_num = int(device_num / model_parallel_num)\n parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,\n model_parallel=model_parallel_num)\n loss = CrossEntropyLoss(parallel_config.dp_mp_config)\n gpt_with_loss = GPTWithLoss(gpt, loss)\n\n ds = create_dataset(config.batch_size, data_path=args_opt.data_path, device_num=device_num, rank=rank)\n\n\n epoch_num = args_opt.epoch_size\n step_per_epoch = ds.get_dataset_size()\n\n lr = LearningRate(learning_rate=args_opt.start_lr,\n end_learning_rate=args_opt.end_lr,\n warmup_steps=args_opt.warmup_step,\n decay_steps=epoch_num*step_per_epoch)\n\n decay_filter = lambda x: 'layernorm' not in x.name.lower() and \"bias\" not in x.name.lower()\n params = gpt.trainable_params()\n decay_params = list(filter(decay_filter, params))\n other_params = list(filter(lambda x: not decay_filter(x), params))\n group_params = [{'params': decay_params, 'weight_decay': 1e-2},\n {'params': other_params, 'weight_decay': 0.0},\n {'order_params': params}]\n\n if args_opt.optimizer == \"lamb\":\n optimizer = nn.Lamb(group_params, learning_rate=lr)\n else:\n optimizer = nn.AdamWeightDecay(group_params, learning_rate=lr)\n\n callback_size = args_opt.sink_size\n actual_epoch_num = int(epoch_num * step_per_epoch/callback_size)\n 
callback = [TimeMonitor(callback_size), LossMonitor(callback_size)]\n\n config_ck = CheckpointConfig(save_checkpoint_steps=step_per_epoch, keep_checkpoint_max=1)\n ckpoint_cb = ModelCheckpoint(prefix=\"GPT2\", config=config_ck)\n callback.append(ckpoint_cb)\n\n\n update_cell = DynamicLossScaleUpdateCell(loss_scale_value=1024,\n scale_factor=2,\n scale_window=1000)\n\n gpt_with_grads = GPTTrainOneStepWithLossScaleCell(gpt_with_loss, optimizer=optimizer,\n scale_update_cell=update_cell)\n\n\n model = Model(gpt_with_grads)\n model.train(actual_epoch_num, ds, callbacks=callback, dataset_sink_mode=True, sink_size=callback_size)",
"def execute(lines, targets='all'):",
"def run_multistop(simsetup, instance, force_sets):\n nest_id_to_cell_id = get_nest_id_to_cell_id(instance)\n\n grouped_force_sets = force_sets.groupby('time')\n stop_points = np.array(list(grouped_force_sets.groups.keys()))\n\n stop_points = np.sort(np.unique([0] + list(stop_points) + [simsetup.sim_length.tend]))\n\n with pd.option_context('display.max_rows', 10, 'display.max_columns', 500, 'display.width', 500):\n logging.info('Running sim in %d chunks to force values:\\n%s', len(stop_points) + 1, force_sets)\n\n with log_time('run_multistop'):\n nest.ResetNetwork()\n\n try:\n logging.debug('Run\\n%s', sim.pretty_print_params(instance.params))\n\n for i, (t0, t1) in enumerate(zip(pbar(stop_points[:-1], desc='multistop'), stop_points[1:])):\n\n if t0 in grouped_force_sets.groups:\n for (varname, value), to_apply in grouped_force_sets.get_group(t0).groupby(['varname', 'value']):\n\n nest_ids = [int(nid) for nid in instance.cells.loc[to_apply['gid'].values, 'nest_id'].values]\n\n logging.debug('force setting %s to %s for %d cells', varname, value, len(nest_ids))\n nest.SetStatus(nest_ids, {varname: value})\n\n section_length = t1 - t0\n logging.debug('\\nrun section %d/%d [%.1f - %.1f] (%.1f ms)',\n i + 1, len(stop_points) - 1, t0, t1, section_length)\n\n nest.Simulate(section_length)\n\n spikes = simsetup.sp_detector.retrieve()\n voltages = simsetup.voltage_meter.retrieve()\n\n voltages.columns = nest_id_to_cell_id.reindex(voltages.columns).values\n spikes.gid = nest_id_to_cell_id.reindex(spikes.gid).values\n\n results = sim.SimResults(spikes, voltages)\n\n except nest.pynestkernel.NESTError as e:\n logging.exception('NESTError: %s', e)\n\n results = sim.SimResults.bad()\n\n return results",
"def _run_train_step(self, train_set):\n x_corrupted = self._corrupt_input(train_set)\n\n shuff = list(zip(train_set, x_corrupted))\n np.random.shuffle(shuff)\n\n batches = [_ for _ in utilities.gen_batches(shuff, self.batch_size)]\n\n for batch in batches:\n x_batch, x_corr_batch = zip(*batch)\n tr_feed = {self.input_data_orig: x_batch,\n self.input_data: x_corr_batch}\n self.tf_session.run(self.train_step, feed_dict=tr_feed)",
"def run():\n # Camera and scene configuration.\n config_dict = {\"camera\": {\"cls\": \"PerspectiveCamera\", \"fov\": 75}}\n\n # Read sample PLY file.\n vertices, colors, faces = demo_utils.read_ascii_ply(DEMO_PLY_MESH_PATH)\n\n # Add batch dimension.\n vertices = np.expand_dims(vertices, 0)\n faces = np.expand_dims(faces, 0)\n colors = np.expand_dims(colors, 0)\n\n # Create summary writer.\n writer = tf.summary.create_file_writer(FLAGS.logdir)\n\n with writer.as_default():\n for step in range(_MAX_STEPS):\n train_step(vertices, faces, colors, config_dict, step)",
"def run():\n # first we must load the Toxicity 21 datasets from molnet (MoleculeNet) unto our local machine\n tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21()\n\n\n # tox21_tasks represent 12 assays or bilogicial targets taht we want to see if our molecule binds to\n print(tox21_tasks)\n\n\n # train_dataset is 6264 molecules with a feature vector of length 1024\n\n\n # it has a feature vector Y, for each of the 12 assays\n train_dataset, valid_dataset, test_dataset = tox21_datasets\n\n # the w represents the weights and a weight of zero means that no experiment was run\n # to see if the molecule binds to that assay\n np.count_nonzero(train_dataset.w == 0)\n\n # this is a BalancingTransformer because most of the molecules do not bind to most targets\n # so most of the labels are zero and a model always predicting zero could actually work (but it would be useless!)\n # BalancingTransformer adjusts dataset's wieghts of individual points so all classes have same total weight\n # Loss function won't have systematic preference for one class\n print(transformers)\n\n train_model(train_dataset, test_dataset, transformers)",
"def parallel_model(trees,argsints,argsflts,windowsize):\n print(\"inside model run function\")\n import numpy as np\n store_counts_parallel = np.empty([windowsize,16,16])\n for idx in xrange(windowsize):\n treenum, sourcebr, destbr, Ne, nsnps, seed = argsints[idx,:]\n mtimerecent, mtimedistant, mrate, mut = argsflts[idx,:]\n mod = Model(tree = trees[treenum],\n admixture_edges = [(sourcebr,destbr,mtimerecent,mtimedistant,mrate)],\n Ne = Ne,\n nsnps = nsnps,\n mut = mut,\n seed = seed,\n ntests = 1)\n mod.run()\n store_counts_parallel[idx,:,:]=mod.counts\n return store_counts_parallel",
"def startBatch(self, reader=None):",
"def _run_batch(self, batch_list):\n # tqdm for progress bar.\n with open(self.path, \"a\") as f:\n desc = f\"{self.curr_batch}/{self.tot_batches}\"\n for i in tqdm(batch_list, desc = desc):\n # For list of str or dataframe of 1 column\n if (len(self.data.columns) == 1 \n or len(self.included_cols) == 1 \n and self.included_cols[0] == 'address'):\n lat, lon, address = self._process_address_string(i)\n\n # For DataFrame or list of dicts\n else:\n lat, lon, address = self._process_address_components(i)\n\n # Add processed addresses to self.locations\n self.locations.loc[i, ['latitude', \n 'longitude', \n 'address']] = lat, lon, address\n\n # Save processed address to path.\n f.write(f'{i}\\t{lat or \"\"}\\t{lon or \"\"}\\t\"{address}\"\\n')",
"def _generateBatches(self):\n for e in range(0, self.confTrain['numEpochs']):\n self.generateBatchesForOneEpoch()",
"def test_batch(self):\n self._actor.do_a(async=True)\n self._actor.do_a(async=True)\n self._actor.do_b(async=True)\n self._actor.do_a(async=True)\n # Nothing should happen since it should be queued.\n self.assertEqual(self._actor.actions, [])\n self.run_actor_loop()\n # Then we should get a start, batch of only a and a finish.\n self.assertEqual(self._actor.actions, [\"sb\", \"a\", \"a\", \"b\", \"a\", \"fb\"])",
"def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list):\n sim_matrix = []\n for idx1, b1 in enumerate(batch_list_t):\n input_mask, segment_ids, *_tmp = b1\n sequence_output = batch_sequence_output_list[idx1]\n each_row = []\n for idx2, b2 in enumerate(batch_list_v):\n video_mask, *_tmp = b2\n visual_output = batch_visual_output_list[idx2]\n # calculate the similarity\n b1b2_logits, *_tmp = model.get_inference_logits(sequence_output, visual_output, input_mask, video_mask)\n b1b2_logits = b1b2_logits.cpu().detach().numpy()\n each_row.append(b1b2_logits)\n each_row = np.concatenate(tuple(each_row), axis=-1)\n sim_matrix.append(each_row)\n return sim_matrix",
"def run(batchids):\n\n keywords = queueinit.readConfig(queue_dir=queue_dir)\n\n for batchid in batchids:\n cmd = form_command(keywords, batchid)\n logger.debug(\"Executing cmd: %s\"% cmd)\n qstat = sp.Popen(cmd, shell=True, stdout = sp.PIPE)\n outp = qstat.stdout.read()\n pbs_status = process_output(outp)\n exit_code = qstat.wait()\n print batchid, pbs_status\n\n return exit_code",
"def launch(self):\n out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)\n gmx = 'gmx' if self.gmx_path is None else self.gmx_path\n\tif self.mpirun is not None:\n\t gmx = 'gmx'\n cmd = [gmx, 'mdrun', '-s', self.input_tpr_path, '-c', self.output_gro_path]\n\n if self.output_trr_path is not None:\n cmd.append('-o')\n cmd.append(self.output_trr_path)\n if self.output_xtc_path is not None:\n cmd.append('-x')\n cmd.append(self.output_xtc_path)\n if self.output_edr_path is not None:\n cmd.append('-e')\n cmd.append(self.output_edr_path)\n if self.output_cpt_path is not None:\n cmd.append('-cpo')\n cmd.append(self.output_cpt_path)\n if self.output_log_path is not None:\n cmd.append('-g')\n cmd.append(self.output_log_path)\n\n\tif self.mpirun_ppn is not None:\n cmd.insert(0, str(self.mpirun_ppn))\n cmd.insert(0, '-ppn')\n\n if self.mpirun_np is not None:\n cmd.insert(0, str(self.mpirun_np))\n cmd.insert(0, '-np')\n if self.mpirun:\n cmd.insert(0, 'mpirun')\n #Number of threads to run (0 is guess)\n if not self.num_threads is None:\n cmd.append('-nt')\n cmd.append(str(self.num_threads))\n if not self.ntmpi is None:\n cmd.append('-ntmpi')\n cmd.append(str(self.ntmpi))\n if not self.ntomp is None:\n cmd.append('-ntomp')\n cmd.append(str(self.ntomp))\n if not self.gpu_id is None:\n cmd.append('-gpu_id')\n cmd.append(str(self.gpu_id))\n\n command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)\n return command.launch()",
"def spm_run_batch(batch, variables, display=True, n_jobs=-1):\n\n m_script = matlab_define(variables)\n if n_jobs > -1:\n m_script += ' LASTN = maxNumCompThreads({:d});'.format(n_jobs)\n m_script += \"spm fmri; run('{}');\".format(batch)\n m_script += \"spm_jobman('run',matlabbatch); clear variables;\"\n\n matlab_run_script(m_script, display=display, exit_after=True)",
"def train(self, n_procs=2):\n sent_lists = np.array_split(self.sents, n_procs-1)\n if len(sent_lists) != n_procs:\n sent_lists = np.array_split(self.sents, n_procs)\n\n tmp_dir = tempfile.mkdtemp()\n tmp_files = [os.path.join(tmp_dir, 'tmp_' + str(i))\n for i in range(len(sent_lists))]\n\n sent_lists = list(zip(sent_lists, tmp_files))\n del self.sents\n\n\n try:\n print('Forking')\n # For debugging\n # tmp_files = map(mpfn, sent_lists)\n \n p = mp.Pool(n_procs)\n tmp_files = p.map(mpfn, sent_lists, 1)\n p.close()\n\n print('Reducing')\n self.matrix = np.zeros(tuple(_shape), dtype=self.dtype)\n\n for filename in tmp_files:\n\n with open(filename, 'rb') as f:\n result = cpickle.load(f)\n\n for k,v in result.items():\n self.matrix[k, :] += v\n\n finally:\n print('Removing {}'.format(tmp_dir))\n shutil.rmtree(tmp_dir)",
"def learn(self):\n\n for i in range(1, self.args.numIters+1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i>1:\n if i == 2 and not self.skipFirstSelfPlay:\n iteration_train_examples = []\n else:\n iteration_train_examples = deque([], maxlen=self.args.maxlenOfQueue)\n\n num_eps = self.args.numEps\n if i == 1 and not self.args.load_model:\n num_eps = 1000\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=num_eps)\n end = time.time()\n\n for eps in range(num_eps):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iteration_train_examples += self.execute_episode(i == 1)\n \n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps+1, maxeps=num_eps, et=eps_time.avg,\n total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iteration_train_examples)\n \n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory), \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.save_train_examples(i)\n \n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.h5')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.h5')\n\n self.nnet.train(trainExamples)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n\n arena = Arena(self.pnet, self.nnet, self.game, self.args)\n\n scores = arena.playGames(self.args.arenaCompare)\n if scores[1] == 0 or float(scores[1]) / sum(scores) < self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.h5')\n else:\n print('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.get_checkpoint_file(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.h5')",
"def testCLAMultistepModel(self):\n\n self._printTestHeader()\n inst = OneNodeTests(self._testMethodName)\n return inst.testCLAMultistepModel(onCluster=True, maxModels=4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Only search while the current item is less than or equal to the item we are searching for | def search(self, item):
found = False
stop = False
current = self.head
while current is not None and not found and not stop:
if current.get_data() == item:
found = True
elif current.get_data() > item:
stop = True
else:
current = current.get_next()
return found | [
"def search(self, item):\n current = self.head\n while current:\n if current.value == item:\n return True\n elif current.value < item:\n current = current.next\n else:\n break\n return False",
"def seqsearch(data, item):\n\tindex = 0\n\tfound = False\n\twhile index < len(data) and data[index] <= item and not found:\n\t\tif data[index] == item:\n\t\t\tfound = True\n\t\telse:\n\t\t\tindex += 1\n\tif found:\n\t\treturn index\n\telse:\n\t\treturn \"item not in list\"",
"def items_before(iterable, limit):\n return itertools.takewhile(lambda x: x < limit, iterable)",
"def is_item_among_top_10_percent(self, item, items):\n #start at 0 as we will add 1 for the item itself\n item_position = 0\n for item2 in items:\n if self.is_preferred_item(item2,item):\n item_position += 1\n return item_position <= len(items)//10",
"def search(self, item):\n current = self.head\n found = False\n while current is not None and not found:\n if current.get_data() is item:\n found = True\n else:\n current = current.get_next()\n return found",
"def contains(self, item : int) -> bool:\n return self.start <= item and item < self.end",
"def search(self, item):\n temp = self.head\n\n while(temp):\n if temp.data == item:\n return True\n temp = temp.next\n\n return False",
"def binary_search_iteratively(ls: list, item: object) -> bool:\n assert pythonic_is_sorted(ls)\n if len(ls) == 0:\n return -1\n start = 0\n end = len(ls) - 1\n while start <= end:\n mid = (start + end) // 2\n if ls[mid] == item:\n return mid\n if ls[mid] < item: # search on the right\n start = mid + 1\n else: # search on the left\n end = mid - 1\n return -1",
"def recherche(x, L):\n i = 0\n while x >= L[i]:\n if x == L[i]:\n return i\n i = i + 1\n return False",
"def SearchAbove(db: List[CurrencyRow], start_idx: int, back_count: int, threshold: float) -> int:\n\n # 1. safe check\n ret = _validate(db, start_idx, back_count)\n if -1 == ret:\n return -1\n\n # 2 avoid back count exceed the begin of the list\n begin_idx = start_idx - back_count\n if begin_idx < 0:\n begin_idx = 0\n\n # 2. find if exist value less then threshold\n idx = -1\n for i in range(begin_idx, start_idx):\n if db[i].close >= threshold:\n idx = i\n break\n\n if -1 != idx:\n print(\"[above]Find %d(%s) is above threshold %d\" % (idx, db[idx].time, threshold))\n return idx",
"def bsearch_iterative(list, item, sorted=True):\n if not sorted:\n list.sort()\n\n first = 0\n last = len(list) - 1\n found = False\n while first <= last and not found:\n middle = (first + last)//2\n if list[middle] == item:\n found = True\n else:\n if list[middle] > item:\n first = middle + 1\n else:\n last = middle - 1\n\n return found",
"def where_above(lst, limit):\n return [x for x in lst if x>limit]",
"def breach(cur_units, limit):\n return cur_units >= limit",
"def prevItem(self):\n if self.item_count == 0: return\n \n prev_focus_index = self.focus_index \n while True:\n if self.focus_index < 0: self.focus_index = 0\n else: self.focus_index -= 1 \n\n if self.focus_index == prev_focus_index: return \n elif self.focus_index == -1: \n self.focus_index = self.item_count - 1\n \n item_key = self.item_indexes[self.focus_index]\n if self.items[item_key].is_active == True:\n if self.items[item_key].focusable == True: break\n \n self.changeFocusItem(item_key)",
"def nextItem(self):\n if self.item_count == 0: return\n \n prev_focus_index = self.focus_index \n while True:\n if self.focus_index < 0: self.focus_index = 0\n else: self.focus_index += 1\n\n if self.focus_index == prev_focus_index: return\n elif self.focus_index == self.item_count: \n self.focus_index = 0\n \n item_key = self.item_indexes[self.focus_index]\n if self.items[item_key].is_active == True:\n if self.items[item_key].focusable == True: break\n \n self.changeFocusItem(item_key)",
"def sequential_search(a_list, item):\n strt_time = time.time()\n pos = 0\n found = False\n\n while pos < len(a_list) and not found:\n if a_list[pos] == item:\n found = True\n else:\n pos = pos + 1\n\n end_time = time.time()\n\n run_time = end_time - strt_time\n\n return (run_time, found)",
"def linear(self,search_item=None):\n for index,element in enumerate(self.data):\n if element == search_item:\n return index\n break\n return -1",
"def find_next_customer_items(self, name):\n for i in range(len(self._line_list)):\n sub_list = self._line_list[i].customer_list\n for j in range(len(sub_list)):\n if name == sub_list[j].name:\n return self._line_list[i].customer_list[j+1].items",
"def findNextLarger(self, val):\n\t\tcurridx = self.find(val)\n\t\tlargeridx = self._nextLarger(curridx)\n\t\treturn largeridx"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
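For reference, a minimal, self-contained sketch of how the ordered-list search in the record above could be exercised. The Node and OrderedList class names and the add() helper are illustrative assumptions; the record itself shows only the search method.

# Sketch only: Node/OrderedList/add() are assumed scaffolding around the search
# method from the record above, so the early-stop behaviour can be demonstrated.
class Node:
    def __init__(self, data):
        self._data = data
        self._next = None

    def get_data(self):
        return self._data

    def get_next(self):
        return self._next

    def set_next(self, node):
        self._next = node


class OrderedList:
    def __init__(self):
        self.head = None

    def add(self, item):
        # Insert item so the list stays sorted in ascending order.
        previous, current = None, self.head
        while current is not None and current.get_data() < item:
            previous, current = current, current.get_next()
        node = Node(item)
        node.set_next(current)
        if previous is None:
            self.head = node
        else:
            previous.set_next(node)

    def search(self, item):
        # Stop early once the current item exceeds the one we are looking for.
        found = False
        stop = False
        current = self.head
        while current is not None and not found and not stop:
            if current.get_data() == item:
                found = True
            elif current.get_data() > item:
                stop = True
            else:
                current = current.get_next()
        return found


if __name__ == "__main__":
    ordered = OrderedList()
    for value in (17, 5, 93, 31, 54):
        ordered.add(value)
    print(ordered.search(31))  # True
    print(ordered.search(60))  # False (stops early at 93 without scanning the rest)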
Apply a wavelet transform using a prespecified set of filters. Calculates the center frequencies and bandwidths for the wavelets and applies them along with | def wavelet_transform(X, rate, filters='rat', hg_only=True, X_fft_h=None, npad='fast', to_removes=None,
precision='single'):
if X_fft_h is None:
X_dtype = dtype(X, precision)
X = X.astype(X_dtype, copy=False)
npads, to_removes, _ = _npads(X, npad)
X = _smart_pad(X, npads)
n_time = X.shape[0]
else:
n_time = X_fft_h.shape[0]
X_fft_h = X_fft_h.astype(dtype(X_fft_h, precision), copy=False)
freq = fftfreq(n_time, 1. / rate)
filters, cfs, sds = get_filterbank(filters, n_time, rate, hg_only, precision=precision)
Xh = np.zeros(X.shape + (len(filters),), dtype=dtype(complex(1.), precision=precision))
if X_fft_h is None:
        # Heaviside filter with 0 DC
h = np.zeros(len(freq))
h[freq > 0] = 2.
h = h[:, np.newaxis]
X_fft_h = fft(X, axis=0, workers=-1) * h
for ii, f in enumerate(filters):
if f is None:
Xh[..., ii] = ifft(X_fft_h, axis=0, workers=-1)
else:
f = f / np.linalg.norm(f)
Xh[..., ii] = ifft(X_fft_h * f[:, np.newaxis], axis=0, workers=-1)
Xh = _trim(Xh, to_removes)
return Xh, X_fft_h, cfs, sds | [
"def cwave_filters(filters):\n\n\tf = h5py.File(dir_file+'filters_w.hdf5', 'r')\n\tnbands = len(filters)\n\n\tif nbands>1:\n\t\tcwaves = np.zeros(nbands)\n\t\tfor bb in range(0,nbands):\n\t\t\tstr_temp = 'cw_%s' % filters[bb]\n\t\t\tcwaves[bb] = f[filters[bb]].attrs[str_temp]\n\telse:\n\t\tstr_temp = 'cw_%s' % filters\n\t\tcwaves = f[filters].attrs[str_temp]\n\tf.close()\n\n\treturn cwaves",
"def cwt(\n x ,\n# frequencies = np.exp(np.arange( -5.5 , 0.0 , 0.01 )) ,\n frequencies = np.exp(np.arange( -2.5 , 0.0 , 0.01 )) ,\n wavelet = cauchy,\n Q = 10.\n):\n\n\n N_x = len(x)\n N_pad = closest_anti_prime( N_x + 120 ) - N_x\n N = N_x + N_pad # data length including padding\n\n X = np.fft.fft( np.concatenate(( x , np.zeros(N_pad) )) )\t# fft of padded input data\n w = np.arange( 0 , N/2 ) * 2./N \n # TODO check if frequency scaling is correct ( either Nyquist or zero included or both ? )\n\n WT = [] \t# the resulting transform\n\n\n for f in frequencies:\n a = 1.0 / f\n WT.append( np.fft.ifft( np.concatenate((X[:N/2] * wavelet(a*w,Q) , np.zeros(N/2))) )[:N_x] ) # <-- this makes real w'lets progressive, FIXME\n\n return [ np.array(WT) , frequencies ]\n # TODO make this a class behaving like the actual transform with freq and wlet as memebers",
"def UBVRIraw(lambdaScale, flux):\r\n \r\n filters = filterSet()\r\n\r\n numBands = len(filters)\r\n #var numLambdaFilt\r\n\r\n bandFlux = [0.0 for i in range(numBands)]\r\n\r\n\r\n #var deltaLam, newY, product;\r\n\r\n for ib in range(numBands):\r\n\r\n bandFlux[ib] = 0.0 #//initialization\r\n numLambdaFilt = len(filters[ib][0])\r\n #//console.log(\"ib \" + ib + \" numLambdaFilt \" + numLambdaFilt);\r\n #//wavelength loop is over photometric filter data wavelengths\r\n\r\n for il in range(1, numLambdaFilt):\r\n\r\n #//In this case - interpolate model SED onto wavelength grid of given photometric filter data\r\n\r\n deltaLam = filters[ib][0][il] - filters[ib][0][il - 1] #//nm\r\n #//deltaLam = 1.0e-7 * deltaLam; //cm\r\n #//console.log(\"ib: \" + ib + \" il: \" + il + \" filters[ib][0][il] \" + filters[ib][0][il] + \" deltaLam: \" + deltaLam + \" filters[ib][1][il] \" + filters[ib][1][il]);\r\n\r\n #//hand log flux (row 1) to interpolation routine: \r\n newY = ToolBox.interpol(lambdaScale, flux[1], filters[ib][0][il])\r\n #// linearize interpolated flux: - fluxes add *linearly*\r\n newY = math.exp(newY)\r\n\r\n product = filters[ib][1][il] * newY\r\n #if (ib == 2):\r\n # //console.log(\"Photometry: il: \" + il + \" newY: \" + newY + \" filterLamb: \" + filters[ib][0][il] + \" filterTrans: \" + filters[ib][1][il] + \" product \" + product);\r\n \r\n #//System.out.println(\"Photometry: filtertrans: \" + filters[ib][1][il] + \" product: \" + product + \" deltaLam: \" + deltaLam);\r\n #//Rectangular picket integration\r\n bandFlux[ib] = bandFlux[ib] + (product * deltaLam)\r\n #//console.log(\"Photometry: ib: \" + ib + \" deltaLam \" + deltaLam + \" bandFlux: \" + bandFlux[ib]);\r\n\r\n #} //il loop - lambdas\r\n #//console.log(\"Photometry: ib: \" + ib + \" bandFlux: \" + bandFlux[ib], \" product \" + product + \" deltaLam \" + deltaLam);\r\n\r\n #} //ib loop - bands\r\n\r\n #var raw;\r\n\r\n return bandFlux",
"def MBfilter_CF(st, frequencies,\n CN_HP, CN_LP,\n filter_norm, filter_npoles=2,\n var_w=True,\n CF_type='envelope', CF_decay_win=1.0,\n hos_order=4,\n rosenberger_decay_win=1.0,\n rosenberger_filter_power=1.0,\n rosenberger_filter_threshold=None,\n rosenberger_normalize_each=False,\n wave_type='P',\n hos_sigma=None,\n rec_memory=None,\n full_output=False):\n delta = st[0].stats.delta\n Tn = 1. / frequencies\n Nb = len(frequencies)\n CF_decay_nsmps = CF_decay_win / delta\n rosenberger_decay_nsmps = rosenberger_decay_win / delta\n\n if hos_sigma is None:\n hos_sigma = -1.\n\n # Single component analysis\n if len(st) < 2:\n # Use just the first trace in stream\n tr = st[0]\n y = tr.data\n\n YN1 = np.zeros((Nb, len(y)), float)\n CF1 = np.zeros((Nb, len(y)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem = rec_memory[(tr.id, wave_type)][n]\n else:\n rmem = None\n\n YN1[n] = recursive_filter(y, CN_HP[n], CN_LP[n],\n filter_npoles, rmem)\n YN1[n] /= filter_norm[n]\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem)\n\n # 2 (horizontal) components analysis\n elif len(st) == 2:\n # Assumes that 2 horizontal components are used\n tr1 = st.select(channel='*[E,W,1]')[0]\n tr2 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n\n # Initializing arrays\n YN_E = np.zeros((Nb, len(y1)), float)\n YN_N = np.zeros((Nb, len(y1)), float)\n YN1 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n\n YN_E[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN_E[n] /= filter_norm[n]\n YN_N[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN_N[n] /= filter_norm[n]\n # Combining horizontal components\n YN1[n] = np.sqrt(np.power(YN_E[n], 2) + np.power(YN_N[n], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n] / delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem1)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n\n # 3 components analysis, includes polarization P and S decomposition\n else:\n # Vertical\n tr1 = st.select(channel='*[Z,U,D]')[0]\n # Horizontals\n tr2 = st.select(channel='*[E,W,1]')[0]\n tr3 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n y3 = tr3.data\n\n # Initializing arrays\n YN1 = np.zeros((Nb, len(y1)), float)\n YN2 = np.zeros((Nb, len(y1)), float)\n YN3 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n filteredDataP = np.zeros((Nb, len(y1)), float)\n filteredDataS = np.zeros((Nb, len(y1)), float)\n if full_output:\n CF2 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, 
wave_type)][n]\n rmem3 = rec_memory[(tr3.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n rmem3 = None\n\n YN1[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN1[n] /= filter_norm[n]\n YN2[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN2[n] /= filter_norm[n]\n YN3[n] = recursive_filter(y3, CN_HP[n], CN_LP[n],\n filter_npoles, rmem3)\n YN3[n] /= filter_norm[n]\n\n # Define the decay constant\n rosenberger_decay_constant = 1 / rosenberger_decay_nsmps\n\n # print('Rosenberger in process {}/{}\\r'.format(n+1, Nb),\n # sys.stdout.flush())\n\n # third value returned by rosenberger() is the polarizaion filter,\n # which we do not use here\n filt_dataP, filt_dataS, _ =\\\n rosenberger(YN2[n], YN3[n], YN1[n],\n rosenberger_decay_constant,\n pol_filter_power=rosenberger_filter_power,\n pol_filter_threshold=rosenberger_filter_threshold,\n normalize_each=rosenberger_normalize_each)\n\n # Use vertical component for P data\n filteredDataP[n] = filt_dataP[0, :]\n # Use vector composition of the two horizontal component for S data\n filteredDataS[n] = np.sqrt(np.power(filt_dataS[1, :], 2) +\n np.power(filt_dataS[2, :], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n if CF_type == 'envelope':\n if wave_type == 'P':\n CF1[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem2)\n else:\n CF1[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem2)\n\n if CF_type == 'kurtosis':\n if wave_type == 'P':\n CF1[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n else:\n CF1[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n\n if full_output:\n return YN1, CF1, CF2, Tn, Nb, filteredDataP, filteredDataS\n else:\n return YN1, CF1, Tn, Nb",
"def apply(self, sed):\n\t\tWaveLength = np.array(sed['wavelength'])\n\t\tFluxLam = np.array(sed['flux'])\n\t\tif ('zeropoint' in sed):\n\t\t\tZeroPoint = np.array(sed['zeropoint'])\n\t\telse:\n\t\t\tZeroPoint = np.full(len(WaveLength),3.63e-5)\n\t\t\n\t\tApplyFilter = np.interp(WaveLength, self.wavelength, self.throughput, left=0.0, right=0.0)\n\t\tindex, = np.where(ApplyFilter > 0.0) # Range of wavelengths over which the filter is non-zero\n\t\t\n\t\tif len(index) == 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\tintslice = slice(index.min(),index.max())\n\t\t\t\n\t\t\tif (self.format == 'energy'):\n\t\t\t\tFilterFlux = integrate.trapz(ApplyFilter[intslice]*FluxLam[intslice],WaveLength[intslice])\n\t\t\t\tFilterNorm = integrate.trapz(ApplyFilter[intslice]*ZeroPoint[intslice],WaveLength[intslice])\n\t\t\telse:\n\t\t\t\tFilterFlux = integrate.trapz(ApplyFilter[intslice]*WaveLength[intslice]*FluxLam[intslice],WaveLength[intslice])\n\t\t\t\tFilterNorm = integrate.trapz(ApplyFilter[intslice]*WaveLength[intslice]*ZeroPoint[intslice],WaveLength[intslice])\n\t\t\n\t\t\treturn FilterFlux/FilterNorm",
"def apply_filterbank(self, fbankTF):\n self.nBands = fbankTF.shape[0]\n self.fvec = np.fft.fftfreq(len(self.wave), 1/self.samplingRate)\n self.spectrum = np.fft.fft(self.wave)\n self.bandsFourier = fbankTF * self.spectrum\n self.bands = np.real(np.fft.ifft(self.bandsFourier, axis=1))\n return self.get_bands()",
"def fn_buildFilters(params, fs):\n bandPassRange = params.bpRanges\n params.filtType = 'bandpass'\n params.filterSignal = True\n \n # Handle different filter cases:\n # 1) low pass\n if params.bpRanges[0] == 0:\n # they only specified a top freqency cutoff, so we need a low pass\n # filter\n bandPassRange = params.bpRanges[1]\n params.filtType = 'low'\n if bandpassRange == fs/2:\n # they didn't specify any cutoffs, so we need no filter\n params.filterSignal = False\n \n # 2) High passs\n if params.bpRanges[1] == fs/2 and params.filterSignal:\n # they only specified a lower freqency cutoff, so we need a high pass\n # filter\n bandPassRange = params.bpRanges[0]\n params.filtType = 'high'\n \n if params.filterSignal:\n params.fB, params.fA = signal.butter(params.filterOrder, bandPassRange/(fs/2),btype=params.filtType)\n \n # filtTaps = length(fB)\n previousFs = fs\n \n params.fftSize = int(math.ceil(fs * params.frameLengthUs / 10**6))\n if params.fftSize % 2 == 1:\n params.fftSize = params.fftSize - 1 # Avoid odd length of fft\n\n params.fftWindow = signal.windows.hann(params.fftSize)\n\n lowSpecIdx = int(params.bpRanges[0]/fs*params.fftSize)\n highSpecIdx = int(params.bpRanges[1]/fs*params.fftSize)\n\n params.specRange = np.arange(lowSpecIdx, highSpecIdx+1)\n params.binWidth_Hz = fs / params.fftSize\n params.binWidth_kHz = params.binWidth_Hz / 1000\n params.freq_kHz = params.specRange*params.binWidth_kHz # calculate frequency axis\n return previousFs, params",
"def multifrequency_wavelet_maps(self,input_maps_dict,output_maps_prefix,\n\t\t\tscale_int,j_min,precomputed=False,nest=False,n_quads=1000):\n\t\t# First we want to order the frequencies by fwhm. Keys will be strings.\n\t\tfreq_list = np.array(list(input_maps_dict.keys()))\n\t\tfwhm_list = np.array(list(map(lambda x: input_maps_dict[x]['fwhm'],\n\t\t\tinput_maps_dict)))\n\t\tband_lim_list = np.array(list(map(\n\t\t\tlambda x:input_maps_dict[x]['band_lim'],input_maps_dict)))\n\t\tnside_list = np.array(list(map(lambda x: input_maps_dict[x]['nside'],\n\t\t\tinput_maps_dict)))\n\t\tnside_list = nside_list[np.argsort(fwhm_list)[::-1]]\n\t\tfreq_list = freq_list[np.argsort(fwhm_list)[::-1]]\n\t\tn_freqs = len(freq_list)\n\t\tband_lim_list = band_lim_list[np.argsort(fwhm_list)[::-1]]\n\n\t\t# Get the maximum wavelet scale for each map\n\t\tj_max_list = np.array(list(map(lambda x: wavelets_base.calc_j_max(\n\t\t\tinput_maps_dict[x]['band_lim'],scale_int),input_maps_dict)))\n\t\tj_max_list = j_max_list[np.argsort(fwhm_list)[::-1]]\n\t\tfwhm_list = fwhm_list[np.argsort(fwhm_list)[::-1]]\n\n\t\t# We will always target the smallest fwhm.\n\t\ttarget_fwhm = np.ones(2+np.max(j_max_list)-j_min)*np.min(fwhm_list)\n\n\t\t# The wavelet analysis maps we will populated. Save the information\n\t\t# in the input_maps_dict for later reconstruction.\n\t\twav_analysis_maps = {'input_maps_dict':input_maps_dict,\n\t\t\t'analysis_type':'hgmca','scale_int':scale_int,'j_min':j_min,\n\t\t\t'j_max':np.max(j_max_list),'band_lim':np.max(band_lim_list),\n\t\t\t'target_fwhm':target_fwhm,'output_nside':np.max(nside_list),\n\t\t\t'n_freqs':len(freq_list)}\n\n\t\t# Get the largest n_side that will be considered and therefore the\n\t\t# actual largest level that will be used (this will be less than\n\t\t# or equal to the maximum level specified).\n\t\tmax_nside = wavelets_base.get_max_nside(scale_int,np.max(j_max_list)+1,\n\t\t\tnp.max(nside_list))\n\t\tm_level = nside_to_level(max_nside,self.m_level)\n\t\twav_analysis_maps['m_level'] = m_level\n\n\t\tself.allocate_analysis_arrays(wav_analysis_maps,scale_int,j_min,\n\t\t\tnp.max(j_max_list),m_level,max_nside,n_freqs)\n\n\t\t# Get the analysis level for each coefficient\n\t\twav_level = self.get_analysis_level(scale_int,j_min,\n\t\t\tnp.max(j_max_list),m_level,max_nside)\n\n\t\t# Create a matching array with the j index for each scale (including\n\t\t# 0 for the scaling coefficients).\n\t\twav_j_ind = np.zeros(2+np.max(j_max_list)-j_min)\n\t\twav_j_ind[1:] = np.arange(j_min,np.max(j_max_list)+1)\n\n\t\t# Now we go through each input frequency map and populate the\n\t\t# wavelet map arrays.\n\t\tfor freq_i, freq in enumerate(freq_list):\n\t\t\tn_scales = 2+j_max_list[freq_i]-j_min\n\t\t\tinput_map = hp.read_map(input_maps_dict[str(freq)]['path'],\n\t\t\t\tverbose=False,dtype=np.float64)\n\t\t\tfreq_wav_dict = self.s2dw_wavelet_tranform(input_map,\n\t\t\t\toutput_maps_prefix+str(freq),\n\t\t\t\tinput_maps_dict[str(freq)]['band_lim'],scale_int,j_min,\n\t\t\t\tfwhm_list[freq_i],target_fwhm=target_fwhm[:n_scales],\n\t\t\t\tprecomputed=precomputed,nest=nest,n_quads=n_quads)\n\n\t\t\t# Iterate through the levels\n\t\t\tfor level in range(m_level+1):\n\t\t\t\t# If no wavelet scales should be analyzed at this level\n\t\t\t\t# continue\n\t\t\t\tif np.sum(wav_level==level) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\t# Which scales belong at this level\n\t\t\t\tlevel_j_ind = wav_j_ind[wav_level==level]\n\t\t\t\t# Get the number of patches for a given level.\n\t\t\t\tn_patches = 
level_to_npatches(level)\n\n\t\t\t\t# Keep track of how many pixels into the level we've\n\t\t\t\t# gone so far.\n\t\t\t\toffset = 0\n\t\t\t\tfor j in level_j_ind:\n\t\t\t\t\t# Check that this scale exists for this frequency\n\t\t\t\t\tif j > j_max_list[freq_i]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# Now deal with scaling or wavelet coefficient\n\t\t\t\t\tif j == 0:\n\t\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j_min,\n\t\t\t\t\t\t\tmax_nside)\n\t\t\t\t\t\twav_map_freq = hp.ud_grade(hp.read_map(\n\t\t\t\t\t\t\tfreq_wav_dict['scale_map']['path'],nest=True,\n\t\t\t\t\t\t\tverbose=False,dtype=np.float64),nside,\n\t\t\t\t\t\t\torder_in='NESTED',order_out='NESTED')\n\t\t\t\t\telse:\n\t\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j+1,\n\t\t\t\t\t\t\tmax_nside)\n\t\t\t\t\t\t# Read in the map for this frequency and scale\n\t\t\t\t\t\twav_map_freq = hp.ud_grade(hp.read_map(\n\t\t\t\t\t\t\tfreq_wav_dict['wav_%d_map'%(j)]['path'],nest=True,\n\t\t\t\t\t\t\tverbose=False,dtype=np.float64),nside,\n\t\t\t\t\t\t\torder_in='NESTED',order_out='NESTED')\n\t\t\t\t\tn_pix = hp.nside2npix(nside)\n\t\t\t\t\tn_pix_patch = n_pix//n_patches\n\n\t\t\t\t\t# Now populate each patch\n\t\t\t\t\tfor patch in range(n_patches):\n\t\t\t\t\t\twav_analysis_maps[str(level)][patch,freq_i,\n\t\t\t\t\t\t\toffset:offset+n_pix_patch] = wav_map_freq[\n\t\t\t\t\t\t\tpatch*n_pix_patch:(patch+1)*n_pix_patch]\n\n\t\t\t\t\t# Update the number of pixels that have already been\n\t\t\t\t\t# filled.\n\t\t\t\t\toffset += n_pix_patch\n\n\t\treturn wav_analysis_maps",
"def apply_transform(filter, interp_vars, filters_size, old_bilinear_interpolation=True):\n\n dim = 2 if len(filter.size()) == 4 else 3\n\n if dim == 2:\n if old_bilinear_interpolation:\n [x0_0, x1_0], [x0_1, x1_1], [w0, w1] = interp_vars\n rotated_filter = (filter[:, :, x0_0, x1_0] * (1 - w0) * (1 - w1) +\n filter[:, :, x0_1, x1_0] * w0 * (1 - w1) +\n filter[:, :, x0_0, x1_1] * (1 - w0) * w1 +\n filter[:, :, x0_1, x1_1] * w0 * w1)\n else:\n # Expand dimmentions to fit filter\n interp_vars = [[inner_el.expand_as(filter) for inner_el in outer_el] for outer_el in interp_vars]\n\n [x0_0, x1_0], [x0_1, x1_1], [w0, w1] = interp_vars\n\n a = torch.gather(torch.gather(filter, 2, x0_0), 3, x1_0) * (1 - w0) * (1 - w1)\n b = torch.gather(torch.gather(filter, 2, x0_1), 3, x1_0) * w0 * (1 - w1)\n c = torch.gather(torch.gather(filter, 2, x0_0), 3, x1_1) * (1 - w0) * w1\n d = torch.gather(torch.gather(filter, 2, x0_1), 3, x1_1) * w0 * w1\n rotated_filter = a + b + c + d\n\n rotated_filter = rotated_filter.view(filter.size()[0], filter.size()[1], filters_size[0], filters_size[1])\n\n elif dim == 3:\n [x0_0, x1_0, x2_0], [x0_1, x1_1, x2_1], [w0, w1, w2] = interp_vars\n\n rotated_filter = (filter[x0_0, x1_0, x2_0] * (1 - w0) * (1 - w1) * (1 - w2) +\n filter[x0_1, x1_0, x2_0] * w0 * (1 - w1) * (1 - w2) +\n filter[x0_0, x1_1, x2_0] * (1 - w0) * w1 * (1 - w2) +\n filter[x0_1, x1_1, x2_0] * w0 * w1 * (1 - w2) +\n filter[x0_0, x1_0, x2_1] * (1 - w0) * (1 - w1) * w2 +\n filter[x0_1, x1_0, x2_1] * w0 * (1 - w1) * w2 +\n filter[x0_0, x1_1, x2_1] * (1 - w0) * w1 * w2 +\n filter[x0_1, x1_1, x2_1] * w0 * w1 * w2)\n\n rotated_filter = rotated_filter.view(filter.size()[0], filter.size()[1], filters_size[0], filters_size[1],\n filters_size[2])\n\n return rotated_filter",
"def spectrafilter(spectre,filtertype,fq,numtaps,columns):\n\n # we already say what is the output array\n out = np.zeros(spectre.shape)\n\n # Butterworth band stop filter caracteristics\n a = spectre[1,0] - spectre[0,0]\n samplerate = 1/a #Hertz\n nyq_rate = samplerate/2 # frequence Nyquist\n cutf = fq # cutoff frequency\n #bandwidth = 0.005 # largeur filtre, for band pass/stop filters\n numtaps = 1 # ordre du filtre...\n\n for i in range(len(columns)):\n y = spectre[:,columns[i]]\n if (filtertype == 'low') or (filtertype == 'high'):\n b, a = signal.butter(numtaps, [(cutf/nyq_rate)], btype = filtertype)\n out[:,columns[i]] = signal.filtfilt(b, a, y) # filter with phase shift correction\n else:\n b, a = signal.butter(numtaps, [(cutf[0]/nyq_rate),(cutf[1]/nyq_rate)], btype = filtertype)\n out[:,columns[i]] = signal.filtfilt(b, a, y) # filter with phase shift correction\n\n # Note forgetting to register the x axis\n out[:,0] = spectre[:,0]\n\n return out",
"def filter_cascade(filters):\n def newFilter(image):\n for f in filters:\n image = f(image)\n return image\n return newFilter",
"def layer_filters_to_viz_bw(layer=None):\n filters = layer.get_weights()[0]\n fig = plt.figure(figsize=(15, 160))\n num_filters = len(filters[0, 0, 0, :])\n for i in range(num_filters):\n f = filters[:, :, :, i]\n h, w, d = f.shape\n\n # gather each depth-wise slice of kernel\n chan_max_1 = np.amax(f[:, :, 0])\n\n # scale each depth-wise slice of the given kernel by that slice's maximum value\n viz = np.zeros((h, w, 3), dtype=np.uint8)\n viz[:, :, 0] = 255 * f[:, :, 0] / chan_max_1\n\n # plot the three depth-wise slices of the kernel across row in final figure\n for j in range(0, 3):\n ax = fig.add_subplot(num_filters, 3, i * 3 + j + 1)\n ax.matshow(viz[:, :, j], cmap=matplotlib.cm.binary)\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))\n # plt.tight_layout()\n\n filename = \"{}/filters\".format(globals.filter_visuals_dir)\n pylab.savefig(filename, bbox_inches='tight')\n return plt",
"def getFlux(self, filters):\n self._readfile()\n w = self.wavelength.to('AA').magnitude\n f = self.flux.magnitude\n r = numpy.array([k.getFlux(w, f) for k in filters])\n return r",
"def convolve_with_wavelet(map, L, M, R, lmax):\n npix = map.size\n nside = healpy.npix2nside(npix)\n \n # convert the map to Fourier space\n alm = healpy.map2alm(map, lmax=lmax)\n \n # get the Fourier components of the chosen wavelet\n blm = compute_wavelet_harmonics_zonal(L, M, R, nside, lmax).copy()\n\n # Get the ell values\n ell, m = healpy.Alm.getlm(lmax)\n \n # Normalize for the convolution\n blm *= np.sqrt((4 * np.pi) / (2 * ell + 1))\n \n # Multiply and then inverse Fourier transform\n result = healpy.alm2map(alm * blm, nside, verbose=False)\n\n return result",
"def fft_reconstruction(fft_img, bandpass_filters):\n\n warnings.filterwarnings(\"ignore\")\n if len(bandpass_filters) > 0:\n for f in bandpass_filters:\n try:\n fft_img *= f\n except:\n raise ValueError(\"Illegal input filter found, shape doesn't match?\")\n output = fftpack.ifft2(fftpack.ifftshift(fft_img)).real\n output -= output.min()\n return output",
"def _plot_wavelet(datas):\n \n # Declare a starlet object (and performs the transform)\n Sw = scarlet.Starlet(datas, lvl=5, direct=True)\n # This is the starlet transform as an array\n w = Sw.coefficients\n # The inverse starlet transform of w (new object otherwise, the tranform is not used)\n iw = Sw.image\n\n # TODO: Clean this code up using plt.subplots()\n # The wavelet transform of the first slice of images in pictures\n lvl = w.shape[1]\n plt.figure(figsize=(lvl*5+5,5))\n plt.suptitle('Wavelet coefficients')\n for i in range(lvl):\n plt.subplot(1, lvl, i+1)\n plt.title('scale' + str(i+1))\n plt.imshow(w[0,i], cmap='inferno')\n plt.colorbar()\n plt.show()\n\n # Making sure we recover the original image\n plt.figure(figsize=(30,10))\n plt.subplot(131)\n plt.title('Original image', fontsize=20)\n plt.imshow(datas[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(132)\n plt.title('Starlet-reconstructed image', fontsize=20)\n plt.imshow(iw[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(133)\n plt.title('Absolute difference', fontsize=20)\n plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno')\n plt.colorbar()\n plt.show()\n \n return",
"def wavelet_maps_to_real(self,wav_analysis_maps,output_maps_prefix,\n\t\tn_quads=1000):\n\t\t# Make the wavelet dict we'll feed into the reconstruction script.\n\t\ttarget_fwhm = wav_analysis_maps['target_fwhm']\n\t\tscale_int = wav_analysis_maps['scale_int']\n\t\tj_min = wav_analysis_maps['j_min']\n\t\tj_max = wav_analysis_maps['j_max']\n\t\toutput_nside = wav_analysis_maps['output_nside']\n\t\twavelet_dict = {'scale_int':scale_int,\n\t\t\t'band_lim':wav_analysis_maps['band_lim'],'j_max':j_max,\n\t\t\t'j_min':j_min,'original_nside':output_nside,\n\t\t\t'target_fwhm':target_fwhm}\n\t\tanalysis_type = wav_analysis_maps['analysis_type']\n\t\tm_level = wav_analysis_maps['m_level']\n\n\t\t# Check that the right type of map dict was passed in.\n\t\tif analysis_type != 'hgmca':\n\t\t\traise ValueError('A non-hgmca wav_analysis_maps was passed in.')\n\n\t\t# Get the analysis level for each coefficient\n\t\twav_level = self.get_analysis_level(scale_int,j_min,j_max,m_level,\n\t\t\toutput_nside)\n\t\twav_j_ind = np.zeros(2+j_max-j_min)\n\t\twav_j_ind[1:] = np.arange(j_min,j_max+1)\n\n\t\t# Iterate through the levels\n\t\tfor level in range(m_level+1):\n\t\t\t# If no wavelet scales should be analyzed at this level\n\t\t\t# continue\n\t\t\tif np.sum(wav_level==level) == 0:\n\t\t\t\tcontinue\n\t\t\t# Which scales belong at this level\n\t\t\tlevel_j_ind = wav_j_ind[wav_level==level]\n\t\t\t# Get the number of patches for a given level.\n\t\t\tn_patches = level_to_npatches(level)\n\n\t\t\t# Keep track of how many pixels into the level we've\n\t\t\t# gone so far.\n\t\t\toffset = 0\n\t\t\tfor j in level_j_ind:\n\t\t\t\t# Now deal with scaling or wavelet coefficient\n\t\t\t\tif j == 0:\n\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j_min,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_scaling.fits'\n\t\t\t\t\twavelet_dict.update({'scale_map':{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\telse:\n\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j+1,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_wav_%d.fits'%(j)\n\t\t\t\t\twavelet_dict.update({'wav_%d_map'%(j):{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\tn_pix = hp.nside2npix(nside)\n\t\t\t\tn_pix_patch = n_pix//n_patches\n\n\t\t\t\t# Allocate the array we'll use to write the wavelets\n\t\t\t\twav_coeff = np.zeros(n_pix)\n\n\t\t\t\t# Now grab the data from each patch\n\t\t\t\tfor patch in range(n_patches):\n\t\t\t\t\twav_coeff[patch*n_pix_patch:(patch+1)*n_pix_patch] = (\n\t\t\t\t\t\twav_analysis_maps[str(level)][patch,\n\t\t\t\t\t\toffset:offset+n_pix_patch])\n\t\t\t\toffset += n_pix_patch\n\n\t\t\t\t# Write the map and point the dictionary to the path\n\t\t\t\thp.write_map(path,wav_coeff,dtype=np.float64,\n\t\t\t\t\toverwrite=True,nest=True)\n\n\t\treturn self.s2dw_wavelet_inverse_transform(wavelet_dict,np.min(\n\t\t\ttarget_fwhm),n_quads=n_quads)",
"def _computeTF(self, wl_min_um=1.2, wl_max_um=10.0, N=200):\n #-- wavelength table:\n self.TF_wl = 1/np.linspace(1/wl_max_um, 1/wl_min_um, N)*1e-6\n for i in range(len(self.scans)):\n #-- find center of the scan\n try:\n snr = slidop.slidingMean(self.scans[i]['TIME'],self.scans[i]['GDSNR'],\n 20e6/self.header['ISS PRI FSU1 FREQ'])\n except:\n snr,rms,xsnr= sliding_avg_rms(self.scans[i]['TIME'],self.scans[i]['GDSNR'],\n 20e6/self.header['ISS PRI FSU1 FREQ'])\n snr = np.interp(self.scans[i]['TIME'], xsnr,snr)\n\n opd0 = self.scans[i]['RTOFFSET'][snr.argmax()]\n #-- create cos and sin waves\n _cos = np.cos(2*np.pi*(self.scans[i]['RTOFFSET']-opd0)[None,:]/self.TF_wl[:,None])\n _sin = np.sin(2*np.pi*(self.scans[i]['RTOFFSET']-opd0)[None,:]/self.TF_wl[:,None])\n #-- apodization window (width in microns):\n apod_width = np.array([50,50,50,50,50,50])*1e-6/(np.sqrt(2)/2) \n apod = np.exp(-((self.scans[i]['RTOFFSET']-opd0)[:,None]/\n apod_width[None,:])**2) \n #-- computation for each channel (A,B,C,D)\n for k in ['A', 'B', 'C', 'D']:\n self.scans[i]['TF_'+k]=(apod[None,:,:]*self.scans[i][k][None,:,:]*\n (_cos+1j*_sin)[:,:,None]).mean(axis=1)\n if self.scans[i].has_key('DARK'+k):\n self.scans[i]['TF_DARK'+k]=(self.scans[i]['DARK'+k][None,:]*\n (_cos+1j*_sin)).mean(axis=1)\n return",
"def wavelet_tf(wavelet, N=2048, scale=100, notext=False, width=1.1, height=1):\n wavelet = Wavelet._init_if_not_isinstance(wavelet)\n\n #### Compute psi & psihf #################################################\n psi = ifft(wavelet(scale * _xifn(1, N)) * (-1)**np.arange(N))\n apsi = np.abs(psi)\n t = np.arange(-N/2, N/2, step=1)\n\n w = aifftshift(_xifn(1, N))[N//2-1:]\n psih = wavelet(scale * w)\n\n #### Compute stdevs & respective indices #################################\n wc = center_frequency(wavelet, scale, N)\n std_w = freq_resolution(wavelet, scale, N, nondim=0)\n std_t = time_resolution(wavelet, scale, N, nondim=0, min_decay=1)\n _wc = np.pi - wc\n\n wlix = np.argmin(np.abs(w - (_wc - std_w)))\n wrix = np.argmin(np.abs(w - (_wc + std_w)))\n wl, wr = w[wlix], w[wrix]\n\n tlix = np.argmin(np.abs(t - (0 - std_t)))\n trix = np.argmin(np.abs(t - (0 + std_t)))\n tl, tr = t[tlix], t[trix]\n\n ## Rescale psi so that its y-coords span 1/5 of psih's x-coords, & vice-versa\n frac = 5\n psig = psi * (w.max() / apsi.max()) / frac\n apsig = apsi * (w.max() / apsi.max()) / frac\n psihg = psih * (t.max() / psih.max()) / frac\n # additionally shift psih to psi's left\n psihg += t.min()\n\n ## Find intersections\n w_xminu, w_xmax = psihg[::-1][wlix], tr\n w_xmind = psihg[::-1][wrix] # psih not necessarily symmetric\n w_ymin, w_ymax = wl, wr\n t_xmin, t_xmax = tl, tr\n t_yminl, t_ymax = apsig[tlix], wr\n t_yminr = apsig[trix] # same for psi\n\n #### Plot ################################################################\n plot(t, psig, complex=1, h=1.5)\n plot(t, apsig, linestyle='--', color='k')\n plot(psihg[::-1], w, color='purple')\n\n # bounds lines\n lkw = dict(color='k', linewidth=1)\n plot([t_xmin, t_xmin], [t_yminl, t_ymax], **lkw)\n plot([t_xmax, t_xmax], [t_yminr, t_ymax], **lkw)\n plot([w_xminu, w_xmax], [w_ymin, w_ymin], **lkw)\n plot([w_xmind, w_xmax], [w_ymax, w_ymax], **lkw)\n plt.xlim(t.min()*1.02, t.max()*1.02)\n\n # radians 0 to pi from top to bottom(=psi's mean)\n ylabels = np.round(np.linspace(np.pi, 0, 7), 1)\n plt.yticks(np.linspace(0, np.pi, len(ylabels)), ylabels)\n\n if notext:\n plt.gcf().set_size_inches(12*width, 12*height)\n plt.show()\n return\n #### Title, annotations, labels, styling #################################\n ## Annotation: info summary\n txt = (\" wc = {:<6.5f} rad-c/s\\n\"\n \" std_t = {:<6.4f} s/c-rad\\n\"\n \" std_w = {:<6.5f} rad-c/s\\n\"\n \"area/4 = {:.12f}\\n\"\n \" = std_t * std_w\\n\\n\"\n \"(rad-c/s=\\n radians*cycles/samples)\"\n ).format(wc, std_t, std_w, std_t * std_w)\n _kw = dict(xycoords='axes fraction', xy=(.7, .76), weight='bold',\n fontsize=16)\n try:\n # 'Consolas' for vertical align\n plt.annotate(txt, family='Consolas', **_kw)\n except:\n plt.annotate(txt, **_kw) # in case platform lacks 'Consolas'\n\n ## Title: wavelet name & parameters\n title = wavelet._desc(N=N, scale=scale)\n plt.title(title, loc='left', weight='bold', fontsize=16)\n\n ## Styling\n plt.xlabel(\"samples\", weight='bold', fontsize=15)\n plt.ylabel(\"radians\", weight='bold', fontsize=15)\n\n plt.gcf().set_size_inches(12*width, 12*height)\n plt.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
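A simplified, standalone sketch of the core step in the wavelet_transform record above: FFT the signal, zero out negative frequencies (the Heaviside step that yields an analytic signal), multiply by Gaussian band filters, and inverse-FFT each band. The record's helpers (get_filterbank, _npads, _smart_pad, _trim, dtype) are not reproduced here, and the Gaussian filter construction is an assumption for illustration.

import numpy as np
from scipy.fft import fft, ifft, fftfreq

def simple_wavelet_transform(x, rate, center_freqs, bandwidths):
    """Return one complex band-limited analytic signal per (center, bandwidth) pair."""
    n_time = x.shape[0]
    freq = fftfreq(n_time, 1.0 / rate)

    # Heaviside step with zero DC: keep positive frequencies only, doubled.
    h = np.zeros(n_time)
    h[freq > 0] = 2.0
    x_fft_h = fft(x, axis=0) * h[:, np.newaxis]

    out = np.zeros(x.shape + (len(center_freqs),), dtype=complex)
    for ii, (cf, sd) in enumerate(zip(center_freqs, bandwidths)):
        # Gaussian band filter in the frequency domain, normalized to unit L2 norm
        # (illustrative stand-in for the precomputed filterbank in the record).
        f = np.exp(-0.5 * ((freq - cf) / sd) ** 2)
        f = f / np.linalg.norm(f)
        out[..., ii] = ifft(x_fft_h * f[:, np.newaxis], axis=0)
    return out

# Example: two channels of a 1 s signal at 400 Hz, filtered in two high-gamma bands.
x = np.random.randn(400, 2)
bands = simple_wavelet_transform(x, rate=400, center_freqs=[80.0, 120.0], bandwidths=[10.0, 15.0])
print(bands.shape)  # (400, 2, 2): time x channels x bands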
Reads the relevant .npy files in a folder and returns them in a list | def load_npy(filepath, filenames_list):
if not os.path.exists(filepath):
raise InvalidPathError("{} does not exist!".format(filepath))
data = []
    for filename in filenames_list:
        data.append(np.load(os.path.join(filepath, filename)))
return data | [
"def _load_array(self, path_list):\n array_list = []\n for path in path_list:\n array = np.load(path)\n for f in array.files:\n array_list.append(array[f])\n array = np.concatenate(array_list, axis=0)\n\n return array",
"def parse_data_folder(images_dir_path: Path, labels_dir_path: Path) -> List[tuple]:\n\n parsed_files: List[tuple] = list()\n for json_file in labels_dir_path.iterdir():\n image_file = images_dir_path.joinpath(f'{json_file.stem}.jpg')\n\n parsed_files.append((str(image_file), str(json_file)))\n print(len(parsed_files))\n return parsed_files",
"def load_np_pickles_in_directory(path, regex=r'.*.(npy|npc)'):\n result = {}\n for filename in os.listdir(path):\n if re.match(regex, filename):\n # Get rid of file extensions and (keypoints|descriptors) annotations.\n key = filename.split('.')[0].split('_')[0]\n result[key] = np.load(path + filename, allow_pickle=True)\n\n return result",
"def get_data():\n frames = []\n filenames = []\n for imname in sorted(os.listdir(folder), key=numericalSort):\n if not imname.startswith('.'):\n im = imageio.imread(folder+'/'+imname)\n #im = im[:,180:1100,:]\n im = im[:,275:1000,:]\n im = skimage.transform.resize(im, (imageSize, imageSize, 3))\n img_arr = np.asarray(im)\n img_arr = preprocess_image(img_arr)\n frames.append(img_arr)\n filenames.append(imname)\n frames = np.asarray(frames)\n print('Finished converting frames to nparray')\n return frames, filenames",
"def load_and_concat(fpath: str, file_identifier: str) -> np.ndarray:\n arrays = []\n files = [f for f in os.listdir(fpath) if f[:len(file_identifier)] == file_identifier]\n file_numbers = np.array([int(f.replace(file_identifier, '').replace('-', '').replace('.npy', '')) for f in files])\n files = [files[i] for i in file_numbers.argsort()]\n\n for file in files:\n arrays.append(np.load(fpath + file, allow_pickle=True))\n\n return np.concatenate(arrays)",
"def _load_npz_list_files(self, npz_files):\n data = []\n labels = []\n fs = None\n for npz_f in npz_files:\n print(\"Loading {} ...\".format(npz_f))\n tmp_data, tmp_labels, sampling_rate = self._load_npz_file(npz_f)\n if fs is None:\n fs = sampling_rate\n elif fs != sampling_rate:\n raise Exception(\"Found mismatch in sampling rate.\")\n data.append(tmp_data)\n labels.append(tmp_labels)\n data = np.vstack(data)\n labels = np.hstack(labels)\n return data, labels",
"def load_npz_list_files(npz_files):\n data = []\n labels = []\n fs = None\n for npz_f in npz_files:\n print(\"Loading {} ...\".format(npz_f))\n tmp_data, tmp_labels, sampling_rate = load_npz_file(npz_f)\n if fs is None:\n fs = sampling_rate\n elif fs != sampling_rate:\n raise Exception(\"Found mismatch in sampling rate.\")\n\n # Reshape the data to match the input of the model - conv2d\n tmp_data = np.squeeze(tmp_data)\n tmp_data = tmp_data[:, :, np.newaxis, np.newaxis]\n \n # # Reshape the data to match the input of the model - conv1d\n # tmp_data = tmp_data[:, :, np.newaxis]\n\n # Casting\n tmp_data = tmp_data.astype(np.float32)\n tmp_labels = tmp_labels.astype(np.int32)\n\n data.append(tmp_data)\n labels.append(tmp_labels)\n\n return data, labels",
"def load_folder(folder, size):\n\n # create a 4D array with first dimension the number of files\n num_files = len(os.listdir(folder))\n print(folder, \"contains\", num_files, \"objects.\")\n dataset = np.zeros([num_files, size, size, size])\n\n for index, filename in enumerate(os.listdir(folder)):\n print(\"\\nImporting:\", filename)\n dataset[index, :, :, :] = load_off(folder + filename, size)\n\n return dataset",
"def _get_files_and_labels_list(dataset_dir):\n file_list = []\n lib_names = sorted(f.name for f in Path(dataset_dir).iterdir() if re.match(r\"^n[0-9]+$\", f.name))\n class_id = {v: i for i, v in enumerate(lib_names)}\n for lib in Path(dataset_dir).iterdir():\n for img in lib.iterdir():\n file_list.append([str(img), class_id[lib.name]])\n random.seed(0)\n random.shuffle(file_list)\n return file_list",
"def data_reader(input_dir, shuffle=True):\r\n file_paths = []\r\n\r\n for img_file in scandir(input_dir):\r\n if img_file.name.endswith('.jpg') and img_file.is_file():\r\n file_paths.append(img_file.path)\r\n\r\n if shuffle:\r\n # Shuffle the ordering of all image files in order to guarantee\r\n # random ordering of the images with respect to label in the\r\n # saved TFRecord files. Make the randomization repeatable.\r\n shuffled_index = list(range(len(file_paths)))\r\n random.seed(12345)\r\n random.shuffle(shuffled_index)\r\n\r\n file_paths = [file_paths[i] for i in shuffled_index]\r\n\r\n return file_paths",
"def find_nii_files():\n nii_files = []\n for folder in DATASET_FOLDERS:\n for root, dirs, files in os.walk(folder):\n for f in files:\n if f == 'mprage.nii' or f =='anat.nii':\n nii_path = join(root, f)\n nii_files.append(nii_path)\n return nii_files",
"def load_data(dir_name=None):\n\n # extract all files from directory\n output_files = os.listdir(dir_name)\n output_files = [os.path.join(dir_name, file) for file in output_files]\n\n return output_files",
"def read_midi_files(folder: str) -> np:\n files_list = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]\n files_list = sorted(list(filter(lambda file_name: file_name.endswith(\".midi\"), files_list)))\n\n dst_dir = os.path.join(folder, Config().PROCESSED_FOLDER)\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n for file in tqdm(files_list):\n dst_file = os.path.join(dst_dir, os.path.splitext(file)[0]) + '.pkl'\n\n # Skip file if processed file already exists\n if os.path.isfile(dst_file):\n continue\n\n filename = os.path.join(folder, file)\n mid = MidiFile(filename)\n df = extract_track_info(mid)\n\n df = preprocess_df(df, filename)\n midi_arr = convert_to_matrix(df)\n\n with open(dst_file, 'wb') as f:\n pickle.dump(midi_arr, f)",
"def get_samples_from_folder(folder: Path) -> List[SampleReads]:\n\tsamples = list()\n\tfor subfolder in folder.iterdir():\n\t\ttry:\n\t\t\tsamples.append(SampleReads.from_folder(subfolder))\n\t\texcept IndexError:\n\t\t\t# The expected reads do no exist\n\t\t\tlogger.warning(f\"Cannot find the reads for {subfolder}\")\n\t\texcept NotADirectoryError:\n\t\t\t# There was an extra file in the folder. ignore it.\n\t\t\tpass\n\treturn samples",
"def gather_files():\n return glob.glob(\"input/*.json\")",
"def data_reader(input_dir, shuffle=False):\n file_paths = []\n\n for img_file in scandir(input_dir):\n if img_file.name.endswith('.png') and img_file.is_file():\n file_paths.append(img_file.path)\n\n if shuffle:\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(file_paths)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n file_paths = [file_paths[i] for i in shuffled_index]\n\n return file_paths",
"def getFiles(folder):\n\treturn [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]",
"def mp3files():\n\tBase_dir = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))\n\tmp3_source = os.path.join(Base_dir,'raw_data','mp3_files')\n\tmp3list = []\n\tfor paths,dirs,files in scandir.walk(mp3_source):\n\t#for paths,dirs,files in scandir.walk(r'D:\\Audio\\forJarvis'):\n\t\t\"\"\"if want to search mp3 files from all you HDD then \n\t\tprovide all drives path postions instead of D:\\\\Audio\n\t\tadd extra back slash where ever back slash occur. \n\t\t\"\"\"\n\t\tfor file in files:\n\t\t\tif file.endswith('.mp3'):\n\t\t\t\tfullpath =mp3list.append(os.path.join(paths,file))\n\t#print mp3list\n\t#print len(mp3list)\n\treturn mp3list",
"def LoadFileList(self, input_paths):\n files = []\n for path in input_paths:\n if os.path.isdir(path):\n files += glob.glob(os.path.join(path, \"*.proto\"))\n elif os.path.isfile(path):\n files.append(path)\n return files"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads all data saved in a given folder. Searches through all subfolders and finds every folder that contains the file names listed in 'filenames'. Returns them in the list 'loaded_data'. Also searches for the vectorisation settings file according to 'analysis_type' and returns a dict with the settings. | def load_from_dir_root(rootdir):
# Find json file to get the analysis type
for file in os.listdir(rootdir):
if '_settings.json' in file:
analysis_type = file[:-14]
assert analysis_type in filenames_list_dict.keys()
with open(rootdir + '/{}_settings.json'.format(analysis_type)) as json_file:
json_vector_settings_dict = json.load(json_file)
filenames_list = filenames_list_dict[analysis_type]
if not os.path.exists(rootdir):
raise InvalidPathError("{} does not exist!".format(rootdir))
if not os.path.exists(rootdir + '/{}_settings.json'.format(analysis_type)):
raise Exception("{}_settings.json file not found. Maybe the data hasn't been vectorised yet.".format(analysis_type))
loaded_data = []
for root, dir, filenames in os.walk(rootdir):
if all(x in filenames for x in filenames_list):
# print('Loading files from {}...'.format(root), end='')
data = load_npy(root, filenames_list)
# print('done')
loaded_data.append(data)
return loaded_data, json_vector_settings_dict, analysis_type | [
"def _load_folder(self, folder):\n for f in os.listdir(folder):\n self._load_file(os.path.join(folder, f))",
"def loadFiles(self, filenames):\n loadFiles(filenames, self.cache)",
"def load_folder(folder, size):\n\n # create a 4D array with first dimension the number of files\n num_files = len(os.listdir(folder))\n print(folder, \"contains\", num_files, \"objects.\")\n dataset = np.zeros([num_files, size, size, size])\n\n for index, filename in enumerate(os.listdir(folder)):\n print(\"\\nImporting:\", filename)\n dataset[index, :, :, :] = load_off(folder + filename, size)\n\n return dataset",
"def load_data(data_dir):\n # Read data in from files\n images = []\n labels = []\n for folder in range(0, NUM_CATEGORIES):\n full_folder = os.path.join(data_dir, str(folder))\n for filename in os.listdir(full_folder):\n # print(f\"Reading and resizing file {filename} from folder {full_folder}...\")\n img = cv2.imread(os.path.join(full_folder, filename), 1)\n if img is not None:\n res = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA)\n images.append(res)\n labels.append(folder)\n else:\n continue\n # print(f\"ERROR: Issue reading file {filename} from folder {full_folder}!\")\n\n return images, labels",
"def load_files(folder='../MNIST_data/', source_url=None):\n\n\t\tif source_url:\n\t\t\treturn read_data_sets(folder, source_url=source_url, one_hot=False)\n\t\telse:\n\t\t\treturn read_data_sets(folder, one_hot=False)",
"def load_data(data_dir):\n images_result = []\n labels_result = []\n\n for dir_name in os.listdir(data_dir):\n path = os.path.join(data_dir, dir_name)\n directory_id = int(dir_name)\n images_list = os.listdir(path)\n\n print(f\"Loading {path} folder...\")\n for image_file in os.listdir(path):\n image_path = os.path.join(path, image_file)\n image_cv = cv2.imread(image_path)\n image_resized = cv2.resize(image_cv, (IMG_WIDTH, IMG_HEIGHT))\n\n images_result.append(image_resized)\n labels_result.append(directory_id)\n\n return (images_result, labels_result)",
"def loadFiles(search_path, cache):\n for root, dirs, files in os.walk(search_path):\n # Parse all files.\n for file in files:\n if os.path.basename(file).startswith('.'):\n continue # Skipp hidden files.\n path = os.path.join(root, file)\n if getFileType(path) in [FILETYPE_CPP, FILETYPE_DDDOC]:\n parseFile(path, cache)\n # Exclude ignored diretories.\n for ignored in IGNORED_DIRS:\n if ignored in dirs:\n dirs.remove(ignored)",
"def _load_data(self): \n # Every key in self.calcdata['compositions'] is a composition, and each composition contains a list of dict entrees.\n # relaxed_structure, input_structure, magmoms, total_energy. \n \n _is_vasp_calc = lambda fs: 'POSCAR' in fs and 'INCAR' in fs and 'KPOINTS' in fs and 'POTCAR' in fs\n # Load VASP runs from given directories\n \n n_matched = 0\n n_inputs = 0\n new_unassigned_strs = []\n for root,dirs,files in os.walk(self.vaspdir):\n #A calculation directories has only 3 status: \n #accepted: calculation was successful, and already entered into calcdata.mson\n #falied: calculated but not successful, either aborted or can't be read into calcdata.mson\n #For these above two, we don't want to submit a calculation or post-process again.\n #not marked: calculation run not started or not finished yet. Since analyzer is always called\n #after runner, we don't need to worry that analyzer will find unmarked folders.\n\n if _is_vasp_calc(files) and (not 'accepted' in files) and (not 'failed' in files):\n print(\"Loading VASP run in {}\".format(root));\n parent_root = os.path.join(*root.split(os.sep)[0:-1])\n parent_parent_root = os.path.join(*root.split(os.sep)[0:-2])\n with open(os.path.join(parent_parent_root,'composition_by_site')) as compfile:\n composition = json.load(compfile)\n compstring = json.dumps(composition)\n \n if compstring not in self.calcdata['compositions']:\n self.calcdata['compositions'][compstring]=[]\n \n if not os.path.isfile(os.path.join(parent_root,'matrix')):\n print('Warning: matrix presave not found. Will autodetect supercell matrix using structure matcher,\\\n and will suffer from numerical errors!')\n matrix = None\n else:\n with open(os.path.join(parent_root,'matrix')) as mat_file:\n matrix = json.load(mat_file)\n #Check existence of output structure\n try:\n relaxed_struct = Poscar.from_file(os.path.join(root,'CONTCAR')).structure\n except:\n print('Entry {} CONTCAR can not be read. Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n\n input_struct = Poscar.from_file(os.path.join(parent_root,'POSCAR')).structure\n \n #Check uniqueness\n strict_sm = StructureMatcher(stol=0.1, ltol=0.1, angle_tol=1, comparator=ElementComparator())\n _is_unique = True\n for entry in self.calcdata['compositions'][compstring]:\n entry_struct = Structure.from_dict(entry['relaxed_structure'])\n if strict_sm.fit(entry_struct,relaxed_struct):\n _is_unique = False\n break\n if not _is_unique:\n print('Entry {} alredy calculated before.'.format(root))\n open(os.path.join(root,'accepted'),'a').close()\n continue\n n_inputs += 1\n \n # Note: the input_struct here comes from the poscar in upper root, rather than fm.0, so \n # it is not deformed.\n \n # Rescale volume to that of unrelaxed structure, this will lead to a better mapping back. \n # I changed it to a rescaling tensor\n relaxed_lat_mat = np.matrix(relaxed_struct.lattice.matrix)\n input_lat_mat = np.matrix(input_struct.lattice.matrix)\n o2i_deformation = Deformation(input_lat_mat.T*relaxed_lat_mat.I.T)\n relaxed_deformed = o2i_deformation.apply_to_structure(relaxed_struct)\n #print(relaxed_deformed,input_struct)\n \n # Assign oxidation states to Mn based on magnetic moments in OUTCAR, first check existence of OUTCAR\n try:\n Out=Outcar(os.path.join(root,'OUTCAR'))\n except:\n print('Entry {} OUTCAR can not be read. Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n \n # Get final energy from OSZICAR or Vasprun. 
Vasprun is better but OSZICAR is much\n # faster and works fine is you separately check for convergence, sanity of\n # magnetic moments, structure geometry\n with open(os.path.join(root, 'OUTCAR')) as outfile:\n outcar_string = outfile.read()\n if 'reached required accuracy' not in outcar_string:\n print('Entry {} did not converge to required accuracy. Skipping.'.format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n TotE=Oszicar(os.path.join(root, 'OSZICAR')).final_energy;\n # Checking convergence\n Mag = []\n for SiteInd,Site in enumerate(relaxed_struct.sites):\n Mag.append(np.abs(Out.magnetization[SiteInd]['tot']));\n \n \n new_entry = {}\n new_entry['input_structure']=input_struct.as_dict()\n new_entry['relaxed_structure']=relaxed_struct.as_dict()\n new_entry['relaxed_deformed']=relaxed_deformed.as_dict()\n new_entry['total_energy']=TotE\n new_entry['magmoms']=Mag\n new_entry['matrix']=matrix\n \n if os.path.isfile(os.path.join(parent_parent_root,'axis')):\n with open(os.path.join(parent_parent_root,'axis')) as axisfile:\n axis = json.load(axisfile)\n if 'axis' not in new_entry:\n new_entry['axis']=axis\n \n new_unassigned_strs.append((compstring,root,new_entry))\n \n if len(new_unassigned_strs)==0:\n print('No new structures appeared. Calcdata will not be updated.')\n return\n\n #Charge assignment\n if self.is_charged_ce:\n relaxed_deformed_pool = []\n relaxed_strs_pool = []\n mags = []\n roots = []\n energies = []\n comps = []\n inputs = []\n mats = []\n if 'axis' in new_unassigned_strs[0][2]:\n axis = []\n for compstring,root,new_entry in new_unassigned_strs:\n # Out=Outcar(os.path.join(root,'OUTCAR'))\n Mag=new_entry['magmoms']\n relaxed_struct = Structure.from_dict(new_entry['relaxed_structure'])\n relaxed_deformed = Structure.from_dict(new_entry['relaxed_deformed'])\n # Throw out structures where oxidation states don't make charge balanced.\n \n mags.append(Mag)\n roots.append(root)\n relaxed_strs_pool.append(relaxed_struct)\n relaxed_deformed_pool.append(relaxed_deformed)\n comps.append(compstring)\n inputs.append(Structure.from_dict(new_entry['input_structure']))\n energies.append(new_entry['total_energy'])\n mats.append(new_entry['matrix'])\n if 'axis' in new_entry:\n axis.append(new_entry['axis'])\n \n CA = ChargeAssign(relaxed_strs_pool,mags,algo=self.assign_algo)\n relaxed_strs_assigned = CA.assigned_structures\n relaxed_deformed_assigned = CA.extend_assignments(relaxed_deformed_pool,mags)\n \n for i in range(len(inputs)):\n if relaxed_strs_assigned[i] is not None and relaxed_deformed_assigned[i] is not None:\n # Checking whether structure can be mapped to corr function.\n # This is out deformation tolerance. \n try:\n if mats[i] is not None:\n cesup = self.ce.supercell_from_matrix(mats[i])\n corr=cesup.corr_from_structure(relaxed_deformed_assigned[i])\n else:\n corr=self.ce.corr_from_structure(relaxed_deformed_assigned[i])\n except:\n print(\"Entry {} too far from original lattice. 
Skipping.\".format(roots[i]))\n open(os.path.join(roots[i],'failed'),'a').close()\n continue\n\n assigned_entry = {}\n assigned_entry['input_structure']=inputs[i].as_dict()\n assigned_entry['relaxed_structure']=relaxed_strs_assigned[i].as_dict()\n assigned_entry['relaxed_deformed']=relaxed_deformed_assigned[i].as_dict()\n assigned_entry['matrix']=mats[i]\n assigned_entry['total_energy']=energies[i]\n assigned_entry['magmoms']=mags[i]\n if 'axis' in new_unassigned_strs[0][2]:\n assigned_entry['axis']=axis[i]\n self.calcdata['compositions'][comps[i]].append(assigned_entry)\n print('Entry {} accepted!'.format(roots[i]))\n open(os.path.join(roots[i],'accepted'),'a').close()\n n_matched+=1\n\n else:\n print(\"Entry {} can not be assigned. Skipping.\".format(roots[i]))\n open(os.path.join(roots[i],'failed'),'a').close()\n continue\n else:\n print('Doing non charged ce.')\n for compstring,root,new_entry in new_unassigned_strs:\n # Checking whether structure can be mapped to corr function.\n # This is out deformation tolerance. \n try:\n if new_entry['matrix'] is not None:\n cesup = self.ce.supercell_from_matrix(new_entry['matrix'])\n corr = cesup.corr_from_structure(Structure.from_dict(new_entry['relaxed_defromed']))\n else:\n corr = self.ce.corr_from_structure(Structure.from_dict(new_entry['relaxed_defromed']))\n except:\n print(\"Entry {} too far from original lattice. Skipping.\".format(root))\n open(os.path.join(root,'failed'),'a').close()\n continue\n\n self.calcdata['compositions'][compstring].append(new_entry)\n open(os.path.join(root,'accepted'),'a').close()\n n_matched+=1\n # Data already deduplicated!\n\n print('{}/{} structures matched in this run. Parsed vasp data will be saved into {}.'.format(n_matched,n_inputs,self.calc_data_file))",
"def parseAndStore(self,folder, files):\n ps = PorterStemmer()\n stop_words = stopwords.words('english')\n tokenizer = RegexpTokenizer(r'\\w+')\n # print(files)\n file_data = {}\n for file in files:\n # for i in range(1):\n data_with_tf = {}\n with open(folder + file,'rb') as f:\n data = f.read()\n data = data.decode('utf-8','ignore')\n # print(data)\n data = str(data)\n data = data.lower()\n tokens = tokenizer.tokenize(data)\n # print(tokens)\n filtered = [w for w in tokens if w not in stop_words]\n filtered.sort()\n keys = list(set(filtered))\n keys.sort()\n for i in range(len(keys)):\n data_with_tf[keys[i]] = filtered.count(keys[i])\n # print(data_with_tf)\n file_data[file] = data_with_tf\n cwd = os.getcwd()\n with open(cwd +'\\\\' + 'data.txt','wb') as f:\n pickle.dump(file_data,f)\n return True",
"def load_all_results():\n directory = os.getcwd() + \"\\\\result\"\n\n return __load_results(directory)",
"def process_data_all(folders, folder_main_loc=\"data/\", main_file=\"2021_04\", overwrite=False):\n files = [f + \".parquet.gzip\" for f in folders]\n files_exist = np.array([os.path.exists(folder_main_loc + f) for f in files])\n if not overwrite and np.all(files_exist):\n final_df = unduplicate_data(folder_main_loc, main_file, folders)\n return final_df\n # otherwise prooceed\n data = []\n # double check that main_file has been included in list of folders to process\n if main_file not in folders: folders.append(main_file)\n for data_folder in folders:\n folder_path = folder_main_loc + data_folder\n data_folder_files = sorted(os.listdir(folder_path))\n df = process_data_folder(folder_main_loc, data_folder, data_folder_files, save=True)\n print(\"Finished proccessing file {}\".format(data_folder))\n folders.remove(main_file)\n # remove duplicate projects\n final_df = unduplicate_data(folder_main_loc, main_file, folders)\n return final_df",
"def _load_base(self):\n\n # Check if pre-computed \"tables\" exist for faster loading\n fn_prestored = os.path.join(self.path, '__prestored')\n if os.path.isdir(fn_prestored):\n try:\n self.entity2idx = common.json_load(\n os.path.join(fn_prestored, 'entity2idx.json'))\n self.rel2idx = common.json_load(\n os.path.join(fn_prestored, 'rel2idx.json'))\n self.train_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'train_set.json'))]\n self.test_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'test_set.json'))]\n self.valid_set = [tuple(l) for l in common.json_load(\n os.path.join(fn_prestored, 'valid_set.json'))]\n except FileExistsError as e:\n print(e)\n else:\n # load each data_type in order\n\n data = {\n \"train\": list(self._load_data_file(\"train\")),\n \"valid\": list(self._load_data_file(\"valid\")),\n \"test\": list(self._load_data_file(\"test\")),\n }\n\n # Needs to be done over all datasets, as there are some defective\n # datasets like WN18RR or Yago3-10\n self._generate_unique_ids(\n data[\"train\"][0] + data[\"valid\"][0] + data[\"test\"][0],\n data[\"train\"][1] + data[\"valid\"][1] + data[\"test\"][1],\n data[\"train\"][2] + data[\"valid\"][2] + data[\"test\"][2])\n\n for data_type in [\"train\", \"test\", \"valid\"]:\n heads, rels, tails = data[data_type]\n\n if data_type == \"train\":\n self.train_set, self.train_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.train_oog:\n print(self.train_oog)\n elif data_type == \"test\":\n self.test_set, self.test_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.test_oog:\n print(self.test_oog)\n elif data_type == \"valid\":\n self.valid_set, self.valid_oog = self._convert_names_to_ids(\n heads, rels,\n tails)\n if self.valid_oog:\n print(self.valid_oog)\n\n # print(\"If the list are not empty, something is wrong with the data:\", train_oog, valid_oog, test_oog)\n\n # Create folder and dump generated files to preloading\n common.mkdir_p(fn_prestored)\n common.json_dump(os.path.join(fn_prestored, 'entity2idx.json'),\n self.entity2idx)\n common.json_dump(os.path.join(fn_prestored, 'rel2idx.json'),\n self.rel2idx)\n common.json_dump(os.path.join(fn_prestored, 'train_set.json'),\n self.train_set)\n common.json_dump(os.path.join(fn_prestored, 'test_set.json'),\n self.test_set)\n common.json_dump(os.path.join(fn_prestored, 'valid_set.json'),\n self.valid_set)\n\n # For easier access and checking if other data types are added\n self.data_type2array = {\"train\": self.train_set,\n \"test\": self.test_set,\n \"valid\": self.valid_set}\n\n # Set some useful variables\n self.n_entities = len(self.entity2idx)\n self.n_relations = len(self.rel2idx)\n self.number_of_entries = {\"train\": len(self.train_set),\n \"test\": len(self.test_set),\n \"valid\": len(self.valid_set)}",
"def load_folder_contents( self, trans, folder ):\n current_user_roles = trans.get_current_user_roles()\n is_admin = trans.user_is_admin()\n content_items = []\n for subfolder in folder.active_folders:\n if not is_admin:\n can_access, folder_ids = trans.app.security_agent.check_folder_contents( trans.user, current_user_roles, subfolder )\n if (is_admin or can_access) and not subfolder.deleted:\n subfolder.api_type = 'folder'\n content_items.append( subfolder )\n for dataset in folder.datasets:\n if not is_admin:\n can_access = trans.app.security_agent.can_access_dataset( current_user_roles, dataset.library_dataset_dataset_association.dataset )\n if (is_admin or can_access) and not dataset.deleted:\n dataset.api_type = 'file'\n content_items.append( dataset )\n return content_items",
"def load_directory(self, directory, suffix = '.json', train = True):\n\n directory = os.path.abspath(directory)\n\n for root, subdirs, files in os.walk(directory):\n print(\"--------- In:\",root,\"----------\")\n # filter out files that don't end with suffix\n for doc in filter(lambda s:s.endswith(suffix), files):\n print(\"Loading:\", doc)\n jobject = self._from_json(os.path.join(root,doc))\n # check for if json loaded properly\n if jobject:\n # iterate through reviews to feed to analze_document\n for review in jobject['Reviews']:\n self._analyze_document(review, train = True)\n # calculate document frequency in the training documents\n # self._calc_doc_frequency()",
"def read_all_data(dirname='data'):\n \n path = dirname + '/'\n allFiles = os.listdir(path)\n files, names, exts = parser(allFiles)\n cols_parser = lambda x: x.split(':')[0].casefold() != 'unnamed'\n \n DataFrames = {}\n for f, n, e in zip(files, names, exts):\n DataFrames[n] = EXT[e](path + f, usecols=cols_parser)\n return DataFrames",
"def read_all_files_in_folder(folder_name: str, extension=\"csv\", debug=False) \\\n -> Tuple[Functionalisations_t, WorkingChannels_t, Mapping[str, DataRowsSet_t]]:\n all_data = {}\n functionalisations = []\n correct_channels = []\n\n if not os.path.isdir(folder_name):\n print('Directory \"%s\" does not exist!' % folder_name)\n\n for file in glob.glob(os.path.join(folder_name, '*.{}'.format(extension))):\n print('Reading file %s' % file)\n functionalisations, correct_channels, data = read_data_csv(file, debug)\n all_data[file] = data\n\n print('Read %i files' % len(all_data))\n return functionalisations, correct_channels, all_data",
"def load_data(dir_name=None):\n\n # extract all files from directory\n output_files = os.listdir(dir_name)\n output_files = [os.path.join(dir_name, file) for file in output_files]\n\n return output_files",
"def load_files(\n folder: Path,\n fnames: Iterable[Path]) -> Tuple[Dict[Path, str], bool]:\n # Load each file and store its name and content in the `out` dictionary.\n out: Dict[Path, str] = {}\n for fname_rel in fnames:\n # Construct absolute file path.\n fname_abs = folder / fname_rel\n logit.debug(f\"Loading {fname_abs}\")\n\n # Read the file. Abort on error.\n try:\n out[fname_rel] = fname_abs.read_text()\n except FileNotFoundError:\n logit.error(f\"Could not find <{fname_abs}>\")\n return ({}, True)\n\n # Return the read files.\n return (out, False)",
"def load_DL_experiments(save_folder:str, \n experiment_repo:str='/toy_experiments/'):\n path = os.path.abspath('../')+experiment_repo\n #print('full path',path)\n elements=[]\n for file in os.listdir(path+save_folder):\n #print(file)\n if file.endswith(\".npy\"):\n local_path =os.path.join(path+save_folder, file)\n #print('local path', local_path)\n last_piece = local_path.split('/')[-1]\n #print('last piece',last_piece)\n #elements.append(last_piece.split('.')[0])\n pos = last_piece[::-1].find('.')\n elements.append(last_piece[:-pos-1])\n loaded_res={}\n for elem in elements:\n try:\n loaded_res[elem] = np.load(path+save_folder+elem+'.npy')\n except:\n continue #some experiments have pickled data - to handle later\n return loaded_res"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a line, solve for x when y is specified | def find_x_given_y(line, y):
dx = line[0][2] - line[0][0]
dy = line[0][3] - line[0][1]
return np.round(np.array([line[0][0] + (y - line[0][1])*dx/dy, y]))#.astype(np.uint16) | [
"def _solve_line(cls, line, hints):\n line_try = list(line)\n occupied_cnt = sum(1 for tile in line if tile == cls.TILE_OCCUPIED)\n hints_cnt = sum(hints)\n if hints_cnt == occupied_cnt:\n return None\n tries = {\n idx: (cls.TILE_EMPTY, cls.TILE_OCCUPIED)\n for idx, tile in enumerate(line)\n if tile == cls.TILE_UNKNOWN\n }\n intersection = None\n for one_try in itertools.product(*tries.values()):\n new_cnt = sum(1 for tile in one_try if tile == cls.TILE_OCCUPIED)\n if occupied_cnt + new_cnt != hints_cnt:\n continue\n for idx, tile in zip(tries.keys(), one_try):\n line_try[idx] = tile\n if cls._get_hint(line_try) == hints:\n if intersection is None:\n intersection = one_try\n else:\n intersection = tuple(\n i_tile if i_tile == t_tile else cls.TILE_UNKNOWN\n for i_tile, t_tile in zip(intersection, one_try)\n )\n if intersection is None:\n return None\n if all(tile == cls.TILE_UNKNOWN for tile in intersection):\n return None\n else:\n for idx, tile in zip(tries.keys(), intersection):\n line_try[idx] = tile\n return line_try",
"def best_origin(a, b, lineseg, expr):\n a1, b1 = lineseg.points[0]\n\n def x_axis_cut(ls):\n \"\"\"Returns the point where the input line segment\n intersects the x-axis.\n\n Parameters\n ==========\n\n ls :\n Line segment\n \"\"\"\n p, q = ls.points\n if p.y.is_zero:\n return tuple(p)\n elif q.y.is_zero:\n return tuple(q)\n elif p.y/q.y < S.Zero:\n return p.y * (p.x - q.x)/(q.y - p.y) + p.x, S.Zero\n else:\n return ()\n\n def y_axis_cut(ls):\n \"\"\"Returns the point where the input line segment\n intersects the y-axis.\n\n Parameters\n ==========\n\n ls :\n Line segment\n \"\"\"\n p, q = ls.points\n if p.x.is_zero:\n return tuple(p)\n elif q.x.is_zero:\n return tuple(q)\n elif p.x/q.x < S.Zero:\n return S.Zero, p.x * (p.y - q.y)/(q.x - p.x) + p.y\n else:\n return ()\n\n gens = (x, y)\n power_gens = {}\n\n for i in gens:\n power_gens[i] = S.Zero\n\n if len(gens) > 1:\n # Special case for vertical and horizontal lines\n if len(gens) == 2:\n if a[0] == 0:\n if y_axis_cut(lineseg):\n return S.Zero, b/a[1]\n else:\n return a1, b1\n elif a[1] == 0:\n if x_axis_cut(lineseg):\n return b/a[0], S.Zero\n else:\n return a1, b1\n\n if isinstance(expr, Expr): # Find the sum total of power of each\n if expr.is_Add: # generator and store in a dictionary.\n for monomial in expr.args:\n if monomial.is_Pow:\n if monomial.args[0] in gens:\n power_gens[monomial.args[0]] += monomial.args[1]\n else:\n for univariate in monomial.args:\n term_type = len(univariate.args)\n if term_type == 0 and univariate in gens:\n power_gens[univariate] += 1\n elif term_type == 2 and univariate.args[0] in gens:\n power_gens[univariate.args[0]] +=\\\n univariate.args[1]\n elif expr.is_Mul:\n for term in expr.args:\n term_type = len(term.args)\n if term_type == 0 and term in gens:\n power_gens[term] += 1\n elif term_type == 2 and term.args[0] in gens:\n power_gens[term.args[0]] += term.args[1]\n elif expr.is_Pow:\n power_gens[expr.args[0]] = expr.args[1]\n elif expr.is_Symbol:\n power_gens[expr] += 1\n else: # If `expr` is a constant take first vertex of the line segment.\n return a1, b1\n\n # TODO : This part is quite hacky. Should be made more robust with\n # TODO : respect to symbol names and scalable w.r.t higher dimensions.\n power_gens = sorted(power_gens.items(), key=lambda k: str(k[0]))\n if power_gens[0][1] >= power_gens[1][1]:\n if y_axis_cut(lineseg):\n x0 = (S.Zero, b / a[1])\n elif x_axis_cut(lineseg):\n x0 = (b / a[0], S.Zero)\n else:\n x0 = (a1, b1)\n else:\n if x_axis_cut(lineseg):\n x0 = (b/a[0], S.Zero)\n elif y_axis_cut(lineseg):\n x0 = (S.Zero, b/a[1])\n else:\n x0 = (a1, b1)\n else:\n x0 = (b/a[0])\n return x0",
"def closest_point_on_line(line, point):\n a, b, c = line\n slope = a / -b # Slope for the given line.\n\n x, y = point\n\n slope2 = 1.0 / -slope # Slope for a perpendicular line.\n c2 = y - slope2 * x # y intercept for a perpendicular line through point.\n\n A = np.array([\n a, b,\n slope2, -1.0\n ]).reshape(2, 2)\n\n b = np.array([\n -c, -c2\n ]).reshape(2, 1)\n\n res = linalg.solve(A, b)\n\n return res.flatten()",
"def piecewise_linear(x_data: Sequence[Num], y_data: Sequence[Num], x_to_find: Num) -> float:\n\n plt.plot(x_data, y_data)\n\n # This does the piece wise interopolation\n for i, x in enumerate(x_data):\n if x_data[i] <= x_to_find < x_data[i+1]:\n y_found = y_data[i] + (y_data[i+1] - y_data[i])/(x_data[i+1] - x_data[i])*(x_to_find - x_data[i])\n\n plt.plot(x_to_find, y_found, 'r+')\n plt.title(\"Piece-Wise Interpolation\")\n plt.show()\n\n print(f\"Minimum y-value: {min(y_data)}\")\n print(f\"Corresponding x-value: {x_data[y_data.index(min(y_data))]}\")\n\n return y_found",
"def solveLine(self,line):\n\t\tmissing = filter(lambda x: x in line, self.board.vals)\n\t\tmissing = [val for val in self.board.vals if val not in line]\n\t\tif len(missing) == 1:\n\t\t\tpos = line.index(None)\n\t\t\tline[pos] = missing[0]\n\t\t\treturn (line, (pos, missing[0]))\n\t\treturn None",
"def nearest_point_on_line(point, line): \n return line.interpolate(line.project(point))",
"def fitLeastSquareLine(x, y):\n\n return np.polyfit(x, y, 1)",
"def fit_straight_line(x, y, y_err=None):\n\n popt, cov = curve_fit(line, x, y, sigma=y_err)\n m = popt[0]\n c = popt[1]\n return m,c, cov",
"def solve(self,y,a=None,b=None):\n if a==None: a=self.x[0]\n if b==None: b=self.x[-1]\n assert a<b\n return brentq(lambda x:self(x)-y,a=a,b=b)",
"def line_intersection(line1, line2):\n (x1,y1), (x2,y2) = line1\n (u1,v1), (u2,v2) = line2\n (a,b), (c,d) = (x2-x1, u1-u2), (y2-y1, v1-v2)\n e, f = u1-x1, v1-y1\n # Solve ((a,b), (c,d)) * (t,s) = (e,f)\n denom = float(a*d - b*c)\n if MathHelper.near(denom, 0):\n # parallel\n # If collinear, the equation is solvable with t = 0.\n # When t=0, s would have to equal e/b and f/d\n if MathHelper.near(float(e)/b, float(f)/d):\n # collinear\n px = x1\n py = y1\n else:\n return None\n else:\n t = (e*d - b*f)/denom\n # s = (a*f - e*c)/denom\n px = x1 + t*(x2-x1)\n py = y1 + t*(y2-y1)\n return px, py",
"def solve_lineax(\n lin: Callable,\n b: jnp.ndarray,\n lin_t: Optional[Callable] = None,\n symmetric: bool = False,\n nonsym_solver: Optional[lx.AbstractLinearSolver] = None,\n **kwargs: Any\n) -> jnp.ndarray:\n input_structure = jax.eval_shape(lambda: b)\n kwargs.setdefault(\"rtol\", 1e-6)\n kwargs.setdefault(\"atol\", 1e-6)\n if symmetric:\n solver = lx.CG(**kwargs)\n fn_operator = lx.FunctionLinearOperator(\n lin, input_structure, tags=lx.positive_semidefinite_tag\n )\n return lx.linear_solve(fn_operator, b, solver).value\n # In the non-symmetric case, use NormalCG by default, but consider\n # user defined choice of alternative lx solver.\n solver_type = lx.NormalCG if nonsym_solver is None else nonsym_solver\n solver = solver_type(**kwargs)\n fn_operator = CustomTransposeLinearOperator(\n lin, lin_t, input_structure, input_structure\n )\n return lx.linear_solve(fn_operator, b, solver).value",
"def find_y(self, x): \n return self.m*x + self.b",
"def get_line_coefficients(line: Line) -> Optional[tuple[float, float]]:\n (x1, y1), (x2, y2) = line\n\n # Check for vertical line.\n if x2 == x1:\n return None\n\n a = (y2 - y1) / (x2 - x1)\n b = -x1 * (y2 - y1) / (x2 - x1) + y1\n\n return a, b",
"def intersection(line1, line2):\n a = array([[line2[2], -line1[2]],\n [line2[3], -line1[3]]])\n b = array([[line1[0] - line2[0]],\n [line1[1] - line2[1]]])\n co = solve(a, b)\n\n x = line2[0] + co[0][0] * line2[2]\n y = line2[1] + co[0][0] * line2[3]\n return x, y",
"def distPointToLine(point, line):\n\n [xp, yp] = point\n [a, c] = line\n b = -1\n\n return abs((a*xp + b*yp + c) / np.linalg.norm([a, b]))",
"def findVanishingPoint(self, point_lines):\n lns = []\n\n for i in range(0, len(point_lines), 2):\n lns.append(self.__computeLineNormal(point_lines[i][None, :], point_lines[i + 1][None, :]))\n\n # ln1 = self.__computeLineNormal(point_line1[0, None], point_line1[1, None])\n # ln2 = self.__computeLineNormal(point_line2[0, None], point_line2[1, None])\n # ln3 = self.__computeLineNormal(point_line3[0, None], point_line3[1, None])\n\n # ln1 = ln1 / ln1[:, -1]\n # ln2 = ln2 / ln2[:, -1]\n # ln3 = ln3 / ln3[:, -1]\n\n lns = np.vstack(lns)\n # lns = lns / lns[:, -1]\n\n # A = np.vstack((ln1, ln2, ln3))\n A = lns\n vp = la.solve(np.dot(A[:, 0:2].T, A[:, 0:2]), np.dot(A[:, 0:2].T, -A[:, -1]))\n\n return np.vstack((vp[:, None], 1)).T",
"def func_from_line(a: tuple, b: tuple) -> Callable[[int], int]:\n def f(x):\n \"\"\" the line function y = f(x)\"\"\"\n return a[1] + (b[1]-a[1])/(b[0]-a[0])*x - (b[1]-a[1])/(b[0]-a[0])*a[0]\n return f",
"def fp_fit(x, y):\n\tequation = pyeq2.Models_2D.Sigmoidal.FourParameterLogistic()\n\n\tdata = \"\\n\".join(\"{} {}\".format(x1, y1) for x1, y1 in zip(x, y))\n\t\n\tequation.upperCoefficientBounds = [0.25, -0.1, None, 1.25]\n\tequation.lowerCoefficientBounds = [-0.25, None, 0, 0.75]\n\t\n\tpyeq2.dataConvertorService().ConvertAndSortColumnarASCII(data, equation, False)\n\tequation.Solve()\n\t\n\treturn equation.solvedCoefficients, equation.CalculateAllDataFittingTarget(equation.solvedCoefficients)",
"def find_intersection(line_1, line_2):\n det = line_1['a'] * line_2['b'] - line_2['a'] * line_1['b']\n # Lines are parallel if the determinant is zero\n if det == 0:\n return None\n x = (line_2['b'] * line_1['c'] - line_1['b'] * line_2['c']) / float(det)\n y = (line_1['a'] * line_2['c'] - line_2['a'] * line_1['c']) / float(det)\n return (x, y)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extend ``line`` to the bottom of the masked image | def extend_to_bottom(line, y_bottom):
# find the point where the line crosses y_bottom, then keep the segment's upper endpoint
xy_bottom = find_x_given_y(line, y_bottom)
if line[0][1] > line[0][3]:
return np.array([[xy_bottom[0], xy_bottom[1], line[0][2], line[0][3]]])
else:
return np.array([[xy_bottom[0], xy_bottom[1], line[0][0], line[0][1]]]) | [
"def extend_line(line,shape=[640,480],plot=False):\n start=line[0]\n end=line[1]\n dxs,dys=shape[0]-start[0],shape[1]-start[1] #offsets from origin\n deltax=np.float(end[0])-np.float(start[0])\n deltay=np.float(end[1])-np.float(start[1])\n if deltax == 0.0:\n slope = 90.\n else:\n slope = deltay/deltax #*-1 ?\n #make a line with this slope, passing through start and end, that extends over the whole frame. Get endpoints...\n #if dxs >= shape[0]/2 and dys <=shape[1]/2: #look closer to bottom right corner...assume all slopes are +-45 degrees\n xvec=np.arange(0,shape[0],1)\n #x2=np.arange(int(xvec),shape[0],1)\n y2=slope*(xvec - np.float(start[0])) +np.float(start[1])\n #else:\n # x2=np.arange(0,int(np.float(start[0])+np.float(dxs)/np.sqrt(2.)+3),1)\n # y2=slope*(x2 - np.float(start[0])) +np.float(start[1])\n\n #now get endpoints for parts of the line that are within the frame - need to re-do limit on y!\n if y2[0] < y2[-1]:\n xi=np.where(y2 >= 0.)[0][0]\n try:\n xf=np.where(y2 >=shape[1]-1)[0][0]\n except IndexError:\n xf = np.where(y2==y2[-1])[0][0]\n else:\n xf=np.where(y2 >= 0.)[0][-1]\n try:\n xi=np.where(y2 >=shape[1]-1)[0][-1]\n except IndexError:\n xi = np.where(y2==y2[0])[0][0]\n\n extended_line=(int(xi),int(y2[xi])),(int(xf),int(y2[xf]))\n #slopeE=float(int(y2[xf])-int(y2[xi]))/float(int(xf)-int(xi))\n #print slope,slopeE\n if plot:\n s1=extended_line[0]\n e1=extended_line[1]\n fig,ax=plt.subplots()\n ax.plot((start[0],end[0]),(start[1],end[1]))\n ax.plot((s1[0],e1[0]),(s1[1],e1[1]),'r--')\n fig.show()\n\n return extended_line#,xvec,y2",
"def replot_line(self):\n # restore the clean slate background\n self.canvas.restore_region(self.background)\n # just draw the animated artist\n self.axes.draw_artist(self.line)\n # just redraw the axes rectangle\n self.canvas.draw()#blit(self.axes.bbox)",
"def extract_masked(image, linedesc, pad=5, expand=0, background=None):\n assert amin(image) >= 0 and amax(image) <= 1\n if background is None:\n background = amin(image)\n y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \\\n linedesc.bounds[0].stop,linedesc.bounds[1].stop]]\n if pad > 0:\n mask = pad_image(linedesc.mask, pad, cval=0)\n else:\n mask = linedesc.mask\n line = extract(image, y0 - pad, x0 - pad, y1 + pad, x1 + pad)\n if expand > 0:\n mask = filters.maximum_filter(mask, (expand, expand))\n line = where(mask, line, background)\n return line",
"def draw_btm_line(list_of_psb_chr, img_thresh):\n if len(list_of_psb_chr):\n btms = [x.pos_y + x.height for x in list_of_psb_chr]\n btm_clear_line = np.min([int(np.mean(btms)) + 1, img_thresh.shape[0] - 1])\n img_thresh[btm_clear_line, :] = 0\n\n return img_thresh",
"def add_region(mask, poly_line):\n\n c, r = masked_points(poly_line, mask.shape)\n mask[r, c] = 1\n\n return mask",
"def set_line(self, y : int, value : bytearray):\n lineStart, lineEnd = self.find_line(y)\n \n if len(value) != self.lineByteLength - 1:\n raise ValueError(f'Lines must be exactly {self.lineByteLength - 1} bytes long for this PNG !')\n \n self.data[lineStart + 1:lineEnd] = value",
"def __fall_back(self, line):\n line.allx = line.prev_x\n line.ally = line.prev_y\n line.detected = False\n line.position = line.prev_position\n line.detection_counter += 1",
"def add(self, line):\n\n r, theta = line_helper.get_r_and_theta(line)\n low, high = line_helper.get_low_point_and_high_point(line)\n\n if low[1] < self.ymin:\n self.ymin = low[1]\n self.low_point = low\n if high[1] > self.ymax:\n self.ymin = high[1]\n self.high_point = high\n\n self.radii.append(r)\n self.thetas.append(theta)\n\n self.update_means()",
"def draw_line_mask(canvas, x1, y1, x2, y2, colour, threshold):\n dx = x2 - x1\n \"\"\"\n if not dx:\n # Vertical line\n draw_line((x1, y1, x2, y2), fill=col, width=1)\n return\n \"\"\"\n\n dy = y2 - y1\n steep = abs(dx) < abs(dy)\n if steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n dx, dy = dy, dx\n if x2 < x1:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n try:\n gradient = float(dy) / float(dx)\n except ZeroDivisionError:\n gradient = 1.0\n\n # Handle first endpoint\n xend = round(x1)\n yend = y1 + gradient * (xend - x1)\n xgap = rfpart(x1 + 0.5)\n xpxl1 = xend # this will be used in the main loop\n ypxl1 = ipart(yend)\n plot(canvas, xpxl1, ypxl1, steep, colour, rfpart(yend) * xgap, threshold)\n plot(canvas, xpxl1, ypxl1 + 1, steep, colour, fpart(yend) * xgap, threshold)\n intery = yend + gradient # first y-intersection for the main loop\n\n # handle second endpoint\n xend = round(x2)\n yend = y2 + gradient * (xend - x2)\n xgap = fpart(x2 + 0.5)\n xpxl2 = xend # this will be used in the main loop\n ypxl2 = ipart(yend)\n plot(canvas, xpxl2, ypxl2, steep, colour, rfpart(yend) * xgap, threshold)\n plot(canvas, xpxl2, ypxl2 + 1, steep, colour, fpart(yend) * xgap, threshold)\n\n # main loop\n for x in range(int(xpxl1 + 1), int(xpxl2)):\n plot(canvas, x, ipart(intery), steep, colour, rfpart(intery), threshold)\n plot(canvas, x, ipart(intery) + 1, steep, colour, fpart(intery), threshold)\n intery = intery + gradient",
"def line_draw(image):\n img = image.copy()\n \n #read in background for paper appearance\n paper = cv2.imread(\"ink-paper.jpg\", cv2.IMREAD_COLOR)\n\n paper = cv2.resize(paper, (img.shape[1], img.shape[0]))\n\n img = cv2.medianBlur(img, 5)\n edges = cv2.Canny(img, 100 , 125)\n\n c_img, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n \n #iterate through each contour found in the image\n for c in contours:\n #draw contours on image. Can vary intensity of lines\n #c_img = cv2.drawContours(c_img, c, -1, (125,125,0), 4)\n c_img = cv2.drawContours(c_img, c, -1, (255,255,255), 2) \n \n #Invert the line drawing\n c_img = 255 - c_img\n c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)\n\n c_img_blur = cv2.blur(c_img, (5,5))\n \n #convert to BGR to enable adding\n edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n \n edges = np.uint8(edges) \n c_img_blur = np.uint8(c_img_blur)\n \n #add blurred and contoured to paper to create an overlay/blend\n output = cv2.addWeighted(c_img_blur, .35, paper, .65, 0)\n output = np.uint8(output)\n \n return output",
"def line_drawing(image, inverse_image=True):\n threshold = 7\n block_size = 4\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n # Changing last value higher makes lighter, but weird ,changing second to last value makes lines stronger\n if inverse_image:\n image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, threshold, block_size)\n else:\n image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, threshold, block_size)\n # cv.GaussianBlur(frame, (5, 5), -1)\n # image = cv2.medianBlur(image, 3)\n\n return image",
"def prepare_line(line,pad=16):\n line = line * 1.0/np.amax(line)\n line = np.amax(line)-line\n line = line.T\n if pad>0:\n w = line.shape[1]\n line = np.vstack([np.zeros((pad,w)),line,np.zeros((pad,w))])\n return line",
"def shorten_line(line: Line, intersections: list[Matchstick], gw: GameWindow) -> Line:\n # Get the smallest and largest x coordinates of the intersected sticks\n smallest_stick_x = get_min_x(intersections)\n largest_stick_x = get_max_x(intersections)\n\n # All the sticks are on the same row, so they all have the same y coordinates\n y_low = intersections[0].v_pos - gw.stick_length / 2\n y_high = intersections[0].v_pos + gw.stick_length / 2\n\n # Adjust the x and y coordinates\n new_line = chop_y(line, y_low, y_high)\n new_line = chop_x(new_line, smallest_stick_x - gw.h_spacing/3, largest_stick_x + gw.h_spacing/3)\n\n return new_line",
"def test_lineExtension(self):\n\n angle = Angle()\n\n while angle.degrees < 360.0:\n start = PositionValue2D(0.0, 0.0, 0.1, 0.1)\n end = PositionValue2D(2.0, 0.0, 0.1, 0.1)\n end.rotate(angle, start)\n\n line = LineSegment2D(start, end)\n\n line.postExtendLine(2.0)\n self.assertAlmostEqual(line.length.raw, 4.0, 4, '%s' % line.echo(asciiLabel=True))\n\n line.preExtendLine(2.0)\n self.assertAlmostEqual(line.length.raw, 6.0, 4, '%s' % line.echo(asciiLabel=True))\n\n angle.degrees += 9.0",
"def line_height(self, value: LineHeight) -> 'Tailwind':\n self.element.classes('leading-' + value)\n return self",
"def set_line_image_data(image, line_id, image_file_name, image_fh):\n\n base_name = os.path.splitext(os.path.basename(image_file_name))[0]\n line_id = '_' + line_id.zfill(4)\n line_image_file_name = base_name + line_id + '.png'\n image_path = os.path.join(args.out_dir, line_image_file_name)\n imgray = image.convert('L')\n imgray_rev_arr = np.fliplr(imgray)\n imgray_rev = toimage(imgray_rev_arr)\n imgray_rev.save(image_path)\n image_fh.write(image_path + '\\n')",
"def create_final_line(self):\n p = Image(\n c.screen_width - 100,\n 0,\n 100,\n c.screen_height,\n 'images/finish_line.png')\n self.finish_line = p\n self.objects.append(p)",
"def render_gap(img_width, line: Line, gap: Gap) -> Image:\n img = Image.new(\"L\", (img_width, img_width), color=0)\n draw = ImageDraw.Draw(img)\n\n gap_start, gap_end = gap.coords_on_line(line)\n width = line.width + 2\n draw.line([*gap_start, *gap_end], width=width, fill=255)\n\n return img",
"def draw_line(self, index, a, b, c, d):\n for mask_index in range(min(self.mask_count + 1, self.trail_size)):\n cv2.line(self.masks[mask_index], (a,b),(c,d), \\\n self.color[index].tolist(), 2, lineType=cv2.CV_AA)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a new molecule to the molecule database | def __add__(self, molecule):
if not isinstance(molecule, Molecule):
raise ValueError('The passed molecule is not a Molecule object')
self.__list.append(molecule) | [
"def add_calculation(self, molecule, method, basis, cp, tag, optimized):\n\n # check if this model is not already in Models table\n if not self.cursor.execute(\"SELECT EXISTS(SELECT * FROM Models WHERE method=? AND basis=? AND cp=?)\", (method, basis, cp)).fetchone()[0]:\n\n # create entry in Models table\n self.cursor.execute(\"INSERT INTO Models (method, basis, cp) VALUES (?, ?, ?)\", (method, basis, cp))\n \n # get id of this model\n model_id = self.cursor.lastrowid\n\n else:\n\n # get id of this model\n model_id = self.cursor.execute(\"SELECT ROWID FROM Models WHERE method=? AND basis=? AND cp=?\", (method, basis, cp)).fetchone()[0]\n\n # get the SHA1 hash of this molecule\n molecule_hash = molecule.get_SHA1()\n\n # check if this molecule is not already in Molecules table\n if not self.cursor.execute(\"SELECT EXISTS(SELECT * FROM Molecules WHERE name=? AND hash=?)\", (molecule.get_name(), molecule_hash)).fetchone()[0]:\n\n # create entry in Molecules table\n self.cursor.execute(\"INSERT INTO Molecules (name, hash) VALUES (?, ?)\", (molecule.get_name(), molecule_hash))\n \n # get id of this molecule\n molecule_id = self.cursor.lastrowid\n\n # insert molecule's fragments into the table\n for fragment in molecule.get_fragments():\n self.cursor.execute(\"INSERT INTO Fragments (molecule_id, name, charge, spin) VALUES (?, ?, ?, ?)\", (molecule_id, fragment.get_name(), fragment.get_charge(), fragment.get_spin_multiplicity()))\n \n # get id of this fragment\n fragment_id = self.cursor.lastrowid\n\n # insert fragment's atoms into the table\n for atom in fragment.get_atoms():\n self.cursor.execute(\"INSERT INTO Atoms (fragment_id, symbol, symmetry_class, x, y, z) VALUES (?, ?, ?, ?, ?, ?)\", (fragment_id, atom.get_name(), atom.get_symmetry_class(), atom.get_x(), atom.get_y(), atom.get_z())) \n \n else:\n \n # get id of this molecule\n molecule_id = self.cursor.execute(\"SELECT ROWID FROM Molecules WHERE name=? AND hash=?\", (molecule.get_name(), molecule_hash)).fetchone()[0]\n\n # check if the calculation is not already in the Calculations table\n\n if not self.cursor.execute(\"SELECT EXISTS(SELECT * FROM Calculations WHERE molecule_id=? AND model_id=? AND tag=? AND optimized=?)\", (molecule_id, model_id, tag, optimized)).fetchone()[0]:\n \n # create entry in Calculations table\n self.cursor.execute(\"INSERT INTO Calculations (molecule_id, model_id, tag, optimized) VALUES (?, ?, ?, ?)\", (molecule_id, model_id, tag, optimized))\n\n # get id of this calculation\n calculation_id = self.cursor.lastrowid\n\n # add rows to the Energies table\n for energy_index in range(number_of_energies(molecule.get_num_fragments(), cp)):\n # create a job for this energy\n self.cursor.execute(\"INSERT INTO Jobs (status) VALUES (?)\", (\"pending\",))\n\n # get the id of this job\n job_id = self.cursor.lastrowid\n \n # insert row into Energies table for this energy\n self.cursor.execute(\"INSERT INTO Energies (calculation_id, energy_index, job_id) VALUES (?, ?, ?)\", (calculation_id, energy_index, job_id))",
"def load_molecule(i):\n \n with open('/misc/vlgscratch4/BrunaGroup/sulem/chem/data/molecules/dataset.pickle','rb') as file :\n molecules = pickle.load(file)\n \n mol = [pre.molecule_to_instance(molecules[i])]\n \n dir_path = '/misc/vlgscratch4/BrunaGroup/sulem/chem/data/tensors'\n file = 'molecule' + str(i) + '.pickle'\n file_path = join(dir_path, file)\n \n with open(file_path,'wb') as fileout:\n pickle.dump(mol,fileout)\n \n return mol",
"def _molecules(self, line, lineno=0):\n # we need to keep the order here so cannot make it a dict\n # also mol names do not need to be unique\n name, n_mol = line.split()\n self.molecules.append((name, n_mol))",
"def new_compound(self, compound, reference, session):\n # Generate RDKit molecule with precomputed data\n calc_compound = Compound(compound.smiles, name=compound.name)\n \n # Prepare necessary data\n curation_data = atlasdb.CurationData(\n file_name=\"DB{:05d}\".format(self.dataset_id)\n )\n name = self.get_compound_name(compound.name, session)\n origin = self.get_origin(compound, session)\n db_compound = atlasdb.Compound(\n inchikey=calc_compound.inchikey,\n inchi=calc_compound.inchi,\n molecular_formula=calc_compound.formula,\n molecular_weight=calc_compound.mass,\n accurate_mass=calc_compound.accurate_mass,\n m_plus_H=calc_compound.m_plus_h,\n m_plus_Na=calc_compound.m_plus_na,\n smiles=calc_compound.smiles,\n molblock=calc_compound.molblock,\n curation_data=curation_data\n )\n # Add external database ids \n for k, v in {\"pubchem_id\": 1, \"berdy_id\": 4, \"mibig_id\": 5}.items():\n id_ = getattr(compound, k)\n if id_:\n db_compound.db_ids.append(\n atlasdb.ExternalDB(db_code=id_, db_id=v)\n )\n session.add(db_compound)\n\n self.associate_compound_name(db_compound, name, reference, session, new=True)\n self.associate_compound_origin(db_compound, origin, reference, session, new=True)",
"def add():\n updateDB()\n node = getSelectedNode()\n if node != None:\n if node.type().definition() is None:\n hou.ui.displayMessage(\"Not a Digital Asset.\")\n else:\n libraryPath = node.type().definition().libraryFilePath()\n filename = os.path.basename(libraryPath)\n info = getFileInfo(filename)\n if info == None:\n saveOTL(node)\n moveToOtlDir(node, filename)\n addOTL(filename)\n hou.ui.displayMessage(\"Add Successful!\")\n else:\n hou.ui.displayMessage(\"Already Added\")\n else:\n hou.ui.displayMessage(\"Select EXACTLY one node.\")",
"def add_compound(self):\n # Increment through all added compounds\n for insert in self._args.add:\n well, compound = insert[0], insert[1]\n\n # Check if well is already taken\n if well in self.plate.wells:\n warning(f'{well} already has a compound in it and will be skipped.\\n'\n f'To delete the contents of this well use \"kaleido well {well} --delete\"\\n')\n continue\n # Assumption: Cannot add a compound unless it is registered\n # Check to see if the compound is registered first\n exist = exists(compound, self.compounds)\n if not exist or (exist and self.compounds[compound]['state'] == 'stored'):\n warning(f'{compound} is not registered and will be skipped.\\n'\n f'To register this compound use \"kaleido compound {compound} --register\"\\n')\n continue\n\n # Add compound to well\n if not self.plate.add_comp(well, compound):\n continue\n\n self.compounds[compound]['plate.well'].append(f'{self.plate._id}.{well}')\n print(f'Successfully added {compound} to {self.plate._id}.{well}\\n')\n\n self.plates[self.plate._id] = self.plate.__todict__()\n # Write the plate to a file\n write_file(self._args.plate_file, self.plates)\n write_file(self._args.comp_file, self.compounds)\n display(self.plate)",
"def add_database_entry(self, row):\n database = self.open_database_ab()\n pickle.dump(row, database)\n self.close_database(database)",
"def test_set_molecules(self):\n\n descriptor_engine = Descriptors()\n assert descriptor_engine.Molecule == None\n\n descriptor_engine.set_molecule('c1ccccc1')\n isinstance(descriptor_engine.Molecule, Chem.rdchem.Mol)\n\n return",
"def addOeMol(self, ccId, oeMol, missingModelXyz=True, writeIdealXyz=False, skipAnnotations=False):\n ccIdU = str(ccId).strip().upper()\n curContainer = DataContainer(ccIdU)\n #\n rowD = self.__makeChemCompCategory(ccIdU, oeMol, site=\"RCSB\", missingModelXyz=missingModelXyz, skipAnnotations=skipAnnotations)\n aCat = DataCategory(\"chem_comp\", list(rowD.keys()), [rowD])\n curContainer.append(aCat)\n #\n rowDL = self.__makeChemCompAtomCategory(ccIdU, oeMol, writeIdealXyz=writeIdealXyz)\n aCat = DataCategory(\"chem_comp_atom\", list(rowDL[0].keys()), rowDL)\n curContainer.append(aCat)\n #\n rowDL = self.__makeChemCompBondCategory(ccIdU, oeMol)\n if rowDL:\n aCat = DataCategory(\"chem_comp_bond\", list(rowDL[0].keys()), rowDL)\n curContainer.append(aCat)\n #\n if not skipAnnotations:\n rowDL = self.__makeChemCompDescriptorCategory(ccIdU, oeMol)\n aCat = DataCategory(\"pdbx_chem_comp_descriptor\", list(rowDL[0].keys()), rowDL)\n curContainer.append(aCat)\n #\n rowDL = self.__makeChemCompIdentifierCategory(ccIdU, oeMol)\n aCat = DataCategory(\"pdbx_chem_comp_identifier\", list(rowDL[0].keys()), rowDL)\n curContainer.append(aCat)\n #\n rowD = self.__makeChemCompAuditRow(ccIdU)\n aCat = DataCategory(\"pdbx_chem_comp_audit\", list(rowD.keys()), [rowD])\n curContainer.append(aCat)\n #\n self.__containerList.append(curContainer)\n return True",
"def add_material_to_media(media, material):\n\n media.materials.append(material)\n db.session.commit()",
"def add_genome(self, genome):\n assert isinstance(genome, Genome)\n assert type(self.genomes) is list\n\n self.genomes.append(genome)",
"def add_recipe_to_media(media, recipe):\n\n media.recipes.append(recipe)\n db.session.commit()",
"def add_to_database(item):\n\n db.session.add(item)\n db.session.commit()",
"def _insertion_command(cls) -> str:\n return sql.Metacard.insert()",
"def addStarSystem(starSystem):\n\ttry:\n\t\tstarSystem.save()\n\texcept Exception as e:\n\t\tprint(e)\n\t\tpass #return False?",
"def add(self, ocd):\n # individual\n self.opcodes.append(ocd)\n # cluster\n cluster = ocd.opcode.strip().upper()\n self.opcluster.setdefault(cluster, [])\n self.opcluster[cluster].append(ocd)",
"def add_to_db(fname):\n return",
"def add_celestial(self, celestial):\n celestial.universe = self\n self.celestials[celestial.uuid] = celestial",
"def add_item(self, path_, mtime_, hash_, mime_, read_size_, file_size_):\n cur = self.conn.cursor()\n cur.execute('''SELECT i_id FROM %s WHERE path=\"%s\" ''' % (self.name, path_))\n for i in cur:\n return False\n cur.execute('''INSERT INTO %s (i_id, path, mtime, hash, mime, read_size, file_size)\n VALUES (NULL, \"%s\", %f, '%s','%s',%d,%d) ''' % (self.name, path_, mtime_, hash_, mime_, read_size_, file_size_))\n self.conn.commit()\n cur.close()\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read in all the mol2 or SDF files. Returns the list of Molecule objects that were successfully read. | def read_molecule_files(self):
# This list is used as a container for all the molecules read in with RDKit.
# All the molecules are instances of the Molecule class
molid_list = []
# List of molecules that failed to load
mol_error_list_fn = []
logging.info(30 * '-')
# The .mol2 and .sdf file formats are the only ones supported so far
mol_fnames = glob.glob(self.options.directory + "/*.mol2")
mol_fnames += glob.glob(self.options.directory + "/*.sdf")
mol_fnames.sort()
if len(mol_fnames) < 2:
raise IOError('The directory %s must contain at least two mol2/sdf files' % self.options.directory)
print_cnt = 0
mol_id_cnt = 0
for fname in mol_fnames:
# The RDKit molecule object is read in from the mol2/sdf file. The molecule is not sanitized and
# all the hydrogens are kept in place - we are assuming 3D input, correctly charged
# and prepared in the protein active site
if fname.endswith(".mol2"):
rdkit_mol = Chem.MolFromMol2File(fname, sanitize=False, removeHs=False)
else:
rdkit_mol = Chem.MolFromMolFile(fname, sanitize=False, removeHs=False)
# Reading problems
if rdkit_mol == None:
logging.warning('Error reading the file: %s' % os.path.basename(fname))
mol_error_list_fn.append(os.path.basename(fname))
continue
# The RDKit molecule is stored in a Molecule object
mol = Molecule(rdkit_mol, mol_id_cnt, os.path.basename(fname))
mol_id_cnt += 1
# Cosmetic printing and status
if print_cnt < 15 or print_cnt == (len(mol_fnames) - 1):
logging.info('ID %s\t%s' % (mol.getID(), os.path.basename(fname)))
if print_cnt == 15:
logging.info('ID %s\t%s' % (mol.getID(), os.path.basename(fname)))
logging.info(3 * '\t.\t.\n')
print_cnt += 1
molid_list.append(mol)
logging.info(30 * '-')
logging.info('Finished reading input files. %d structures in total....skipped %d\n' % (
len(molid_list), len(mol_error_list_fn)))
if mol_error_list_fn:
logging.warning('Skipped molecules:')
logging.warning(30 * '-')
for fn in mol_error_list_fn:
logging.warning('%s' % fn)
print(30 * '-')
return molid_list | [
"def readin(self):\n \n if self.filename.endswith('.fits'):\n # Assumes Science Verification data\n self.read_SV_fits()\n elif self.filename.endswith('.npz'): \n # Assumes DES Y3 Gold data\n self.read_Y3_2_2_npz()\n else: \n print('Unrecognized file type: ' + self.filename)",
"def ReadMolFromSDF(filename=\"\"):\n molset = Chem.SDMolSupplier(filename)\n return molset",
"def readmol2(self,filename,tag=''):\n self.tag=tag\n self.filename = filename\n data = open(self.filename).read()\n # ATOM section\n start = data.find(\"@<TRIPOS>ATOM\")\n stop = data.find(\"@<TRIPOS>BOND\")\n atoms = data[start+14:stop-2].split(\"\\n\")\n # BOND section\n start = data.find(\"@<TRIPOS>BOND\")\n stop = data.find(\"@<TRIPOS>SUBSTRUCTURE\")\n bonds = data[start+14:stop-1].split(\"\\n\")\n self.parse_mol2lines(atoms)\n #self.parseBonds(bonds)\n #self.createlBondedAtoms()\n return",
"def read_all_raw_files():\n pass",
"def read_mol2(filepath_or_buffer=..., usecols=..., molecule_column=..., molecule_name_column=..., smiles_column=..., skip_bad_mols=..., chunksize=..., **kwargs): # -> Generator[ChemDataFrame, None, None] | ChemDataFrame:\n ...",
"def read_everything():\n\n ### Paths to the fullsed, source and temperature files:\n fullsed_path = '../OldBeAtlas/fullsed_v2/'\n #fullsed_path = '../OldBeAtlas/fullsed/'\n source_path = '../OldBeAtlas/source/'\n temps_path = '../OldBeAtlas/temperatures/'\n\n ### assumed distance [parsecs] for the calculations\n dist_std = 10.\n\n\n ###########################\n \n ### The domain of the power-law grid:\n npar, sigpar, Mpar, obpar, cosipar = domain_PLgrid()\n filepars=[npar,sigpar,Mpar,obpar]\n\n print(\"Reading the OldBeAtlas files...\")\n print(\"\")\n\n files_fullsed=sorted(glob.glob(fullsed_path+'*'))\t\n files_source=sorted(glob.glob(source_path+'*'))\n files_temps=sorted(glob.glob(temps_path+'*'))\n\n files_fullsed_new=[] ### will receive the names of the fullsed\n ### files to be opened.\n\n ### It is assumed that the names of the fullsed files are of the form:\n ### fullsed_mod191_PLn4.0_sig0.05_h072_Rd050.0_Be_M04.80_ob1.10_H0.30_Z0.014_bE_Ell.sed2\n ### or\n ### fullsed_mod01_PLn3.5_sig0.00_h060_Rd050.0_Be_M03.80_ob1.20_H0.77_Z0.014_bE_Ell.sed2\n for i in range(0,len(npar)):\n for j in range(0,len(sigpar)):\n for k in range(0,len(Mpar)):\n for l in range(0,len(obpar)):\n ### Check if there is a fullsed file with some specific\n ### values of n, Sig, M and ob:\n for ifile in xrange(0,len(files_fullsed)):\n if ('PLn{0}_sig{1}_h072_Rd050.0_Be_'\\\n .format(filepars[0][i],filepars[1][j])+\\\n 'M{0}_ob{1}_H0.30_Z0.014_bE_Ell'\\\n .format(filepars[2][k],filepars[3][l]) in \\\n files_fullsed[ifile]) \\\n or ('PLn{0}_sig{1}_h060_Rd050.0_Be_'\\\n .format(filepars[0][i],filepars[1][j])+\\\n 'M{0}_ob{1}_H0.30_Z0.014_bE_Ell'\\\n .format(filepars[2][k],filepars[3][l]) in \\\n files_fullsed[ifile]):\n \n ### elements of 'files_fullsed_new' are = \n ### [ [n,sig,M,ob], \"fullsed file\" ]\n files_fullsed_new.append([[ filepars[0][i],\\\n filepars[1][j],\\\n filepars[2][k],\\\n filepars[3][l]],\\\n files_fullsed[ifile]]) \n\n ### Now that we have a 'files_fullsed_new' list complete, the idea is\n ### to create lists of source and temperature files in such a way that, \n ### for each fullsed file stored in a 'files_fullsed_new' line, \n ### there is a line with the correspondent source file in \n ### 'files_source_new' and a line with the correspondent temp file in \n ### 'files_temps_new'. \n\n ### It is assumed that the names of the source files are of the form:\n ### Be_M03.40_ob1.45_H0.54_Z0.014_bE_Ell.txt\n ### (Notice that the it is contained in the name of the fullsed file.)\n files_source_new=[] ### will receive the names of the source\n ### files to be opened.\n for iffn in xrange(0,len(files_fullsed_new)):\n ### Check if there is a source file whose name is contained in \n ### the name of the specific fullsed file:\n for ifs in xrange(0,len(files_source)):\n if files_source[ifs].replace(source_path,'').replace('.txt','')\\\n in files_fullsed_new[iffn][1]:\n files_source_new.append(files_source[ifs])\n ### (Notice that I have assumed that there is always a source file \n ### associated with a fullsed file. 
That is not the case with the \n ### temperature files below.)\n\n\n ### It is assumed that the names of the temperature files are of the form:\n ### mod126_PLn3.5_sig0.28_h072_Rd050.0_Be_M09.60_ob1.20_H0.30_Z0.014_bE_Ell30_avg.temp\n ### (Notice that the it is contained in the name of the fullsed file.)\n files_temps_new=[] ### will receive the names of the temperature\n ### files to be opened.\n for iffn in xrange(0,len(files_fullsed_new)):\n achei=0 ### Some fullsed files may not have correspondent temp files,\n ### like the ones of purely photospherical models.\n ### Check if there is a temperature file whose name is contained in\n ### the name of the specific fullsed file.\n ### If not, add \"EMPTY\" to the 'files_temps_new' list.\n for ifs in xrange(0,len(files_temps)):\n if files_temps[ifs].replace(temps_path,'').replace(\\\n '30_avg.temp','')\\\n in files_fullsed_new[iffn][1]:\n files_temps_new.append(files_temps[ifs])\n achei=1\n if achei == 0:\n files_temps_new.append('EMPTY')\n\n\n ### Now, building the 'fullsed_contents' list. It will contain the \n ### relevant contents of all available fullsed, source and temperature \n ### files of the grid.\n\n fullsed_contents=[] ### This list will receive the important contents\n ### of all the files\n for ifile in xrange(0,len(files_fullsed_new)):\n\n ### Reading the fullsed, source and temperature files:\n \n fullsedtest=files_fullsed_new[ifile][1]\n f0=open(fullsedtest,'r')\n f0linhas=f0.readlines()\n f0.close()\n\n sourcetest=files_source_new[ifile]\n f1=open(sourcetest,'r')\n f1linhas=f1.readlines()\n f1.close() \n\n tempstest=files_temps_new[ifile]\n if tempstest != 'EMPTY':\n ### OBS: This pyhdust procedure will print \n ### \"'FILE' completely read!\"\n ncr, ncmu, ncphi, nLTE, nNLTE, Rstarz, Raz, betaz, dataz, \\\n pcr, pcmu, pcphi = hdt.readtemp(tempstest)\n abttemp=[\n [dataz[0,i,ncmu/2,0]/Rstarz for i in \\\n xrange(0,len(dataz[0,:,ncmu/2,0]))],\n [dataz[3,i,ncmu/2,0] for i in \\\n xrange(0,len(dataz[3,:,ncmu/2,0]))]\n ]\n else:\n abttemp=[\n [np.nan,np.nan],\n [np.nan,np.nan]\n ]\n\n\n ### Obtaining each element of the 'fullsed_contents' list\n\n nobs=int(f0linhas[3].split()[1]) ### number of different cosi\n nlbd=int(f0linhas[3].split()[0]) ### number of lambdas for each cosi\n contents=[ \n fullsedtest, ### 0: Name of fullsed file\n np.zeros(nobs), ### 1: will receive the cosi's\n np.zeros((nobs,nlbd,3)), ### 2: will receive the SED\n sourcetest, ### 3: Name of source file\n np.zeros(5), ### 4: will receive the \n ### parameters of the star \n ### (source)\n tempstest, ### 5: Name of temperature file\n np.zeros((2,len(abttemp[0]))), ### 6: will receive the temp \n ### profile\n [[],[]]\n ]\n contents[1][:] = np.nan\n contents[2][:] = np.nan\n contents[4][:] = np.nan\n contents[6][:] = np.nan\n\n\n ### Receiving cosi and SED (\"1\" and \"2\")\n for iobs in xrange(0,nobs):\n mu = float(f0linhas[5+iobs*nlbd].split()[0])\n contents[1][iobs] = mu\n for ilbd in xrange(0, nlbd):\n auxi = f0linhas[5+iobs*nlbd+ilbd].split()\n contents[2][iobs, ilbd, 0] = float(auxi[2])\n contents[2][iobs, ilbd, 1] = float(auxi[3])\n contents[2][iobs, ilbd, 2] = float(auxi[7])\n\n\n ### Receiving parameters of the star (source) (\"4\")\n contents[4][0] = float(f1linhas[3].split()[2]) ### M\n contents[4][1] = float(f1linhas[4].split()[2]) ### R_pole\n contents[4][2] = float(f1linhas[5].split()[2]) ### W\n contents[4][3] = float(f1linhas[6].split()[2]) ### L\n contents[4][4] = float(f1linhas[7].split()[2]) ### Beta_GD\n \n ### Receiving the temperature 
profile (\"6\")\n for i in xrange(0,len(contents[6][0,:])):\n contents[6][0,i] = abttemp[0][i]\n contents[6][1,i] = abttemp[1][i]\n \n ### elements of 'fullsed_contents':\n fullsed_contents.append([files_fullsed_new[ifile][0],contents])\n\n print(\"\")\n\n return files_fullsed_new, files_source_new, files_temps_new, fullsed_contents, \\\n fullsed_path, source_path, temps_path, dist_std",
"def read_grism_files(root='COSMOS-3-G141', BASE_PATH='', GRISM_NAME='G141'):\n import threedhst\n import unicorn.analysis\n \n grismCat, SPC = None, None\n \n if not BASE_PATH:\n BASE_PATH = get_grism_path(root)\n \n ##### catalog\n grismCat = threedhst.sex.mySexCat(BASE_PATH+'DATA/'+root+'_drz.cat')\n for col in grismCat.column_names:\n if col.startswith('MAG_F'):\n grismCat.MAG = grismCat[col]\n grismCat.DETECT_FILTER = col\n break\n \n ##### SPC file \n if root+'_2_opt.SPC.fits' == unicorn.analysis.SPC_FILENAME:\n SPC = unicorn.analysis.SPC\n else:\n try:\n try:\n unicorn.analysis.SPC.fits.close()\n except:\n pass\n SPC = threedhst.plotting.SPCFile(root+'_2_opt.SPC.fits',\n axe_drizzle_dir=BASE_PATH+'DRIZZLE_'+GRISM_NAME)\n unicorn.analysis.SPC = SPC\n unicorn.analysis.SPC_FILENAME = SPC.filename\n except:\n SPC = None\n unicorn.analysis.SPC_FILENAME = None\n unicorn.analysis.SPC = None\n \n return grismCat, SPC",
"def read_sdf(filepath_or_buffer=..., usecols=..., molecule_column=..., molecule_name_column=..., smiles_column=..., skip_bad_mols=..., chunksize=..., **kwargs): # -> Generator[ChemDataFrame, None, None] | ChemDataFrame:\n ...",
"def read_omi_eofs(eof1_files, eof2_files):\n\n # observed EOFs from NOAA PSL are saved in individual text files for each doy\n # horizontal resolution of EOFs is 2.5 degree\n EOF1 = xr.DataArray(np.empty([366,17,144]),dims=['doy','lat','lon'],\n coords={'doy':np.arange(1,367,1), 'lat':np.arange(-20,22.5,2.5), 'lon':np.arange(0,360,2.5)})\n EOF2 = xr.DataArray(np.empty([366,17,144]),dims=['doy','lat','lon'],\n coords={'doy':np.arange(1,367,1), 'lat':np.arange(-20,22.5,2.5), 'lon':np.arange(0,360,2.5)})\n nlat = len(EOF1['lat'])\n nlon = len(EOF1['lon'])\n\n for doy in range(len(eof1_files)):\n doystr = str(doy).zfill(3)\n tmp1 = pd.read_csv(eof1_files[doy], header=None, delim_whitespace=True, names=['eof1'])\n tmp2 = pd.read_csv(eof2_files[doy], header=None, delim_whitespace=True, names=['eof2'])\n eof1 = xr.DataArray(np.reshape(tmp1.eof1.values,(nlat, nlon)),dims=['lat','lon'])\n eof2 = xr.DataArray(np.reshape(tmp2.eof2.values,(nlat, nlon)),dims=['lat','lon'])\n EOF1[doy,:,:] = eof1.values\n EOF2[doy,:,:] = eof2.values\n\n return EOF1, EOF2",
"def read():\n print(\"Read Medlars medical abstracts data set\")\n dir = join(dirname(dirname(abspath(__file__))), \"datasets\", \"Medlars\", \"med.all\")\n doc = open(dir)\n V = sp.lil_matrix((16017, 1033))\n term2idx = {}\n idx2term = {}\n n_free = 0\n line = doc.readline()\n for abstract in range(1033):\n ii = int(line.split()[1])\n # omit .W char\n doc.readline()\n line = doc.readline()\n while line != \".I \" + str(ii + 1) and line != \"\":\n for term in line.split():\n term = term.strip().replace(',', '').replace('.', '')\n if term not in term2idx:\n term2idx[term] = n_free\n idx2term[n_free] = term\n n_free += 1\n V[term2idx[term], ii - 1] += 1\n line = doc.readline().strip()\n return V, term2idx, idx2term",
"def readmol(molfile):\n # Open read the file:\n f = open(molfile, \"r\")\n lines = f.readlines()\n f.close()\n\n # Find the line where the species are listed.\n for start in np.arange(len(lines)):\n if lines[start].startswith(\"# ID\"):\n break\n start += 2\n\n # Extract the species info:\n ID, mol, mass, diam = [], [], [], []\n while lines[start].strip() != \"\":\n line = lines[start].split()\n ID .append(line[0])\n mol .append(line[1])\n mass.append(line[2])\n diam.append(line[3])\n start += 1\n\n return (np.asarray(ID, int), np.asarray(mol),\n np.asarray(mass, np.double), np.asarray(diam, np.double))",
"def _load_meds_files(self):\n self.meds_list=[]\n self.meds_meta_list=[]\n\n for i,funexp in enumerate(self.meds_files):\n f = os.path.expandvars(funexp)\n print('band %d meds: %s' % (i,f))\n medsi=meds.MEDS(f)\n medsi_meta=medsi.get_meta()\n\n if i==0:\n nobj_tot=medsi.size\n else:\n nobj=medsi.size\n if nobj != nobj_tot:\n raise ValueError(\"mismatch in meds \"\n \"sizes: %d/%d\" % (nobj_tot,nobj))\n self.meds_list.append(medsi)\n self.meds_meta_list.append(medsi_meta)\n\n self.nobj_tot = self.meds_list[0].size",
"def _readIndexFiles(self):\n if self.haveIndexFiles:\n return\n\n self.log.debug(\"read index files\")\n self.haveIndexFiles = True # just try once\n\n if self.andConfig is None:\n self.andConfig = getConfigFromEnvironment()\n\n self.multiInds = AstrometryNetCatalog(self.andConfig)",
"def read_files(file):\n # TODO Fix search into the dictionary for increased speed\n file_path, filename = rename_to_text(file)\n print('\\n\\n')\n print(file_path)\n print('\\n\\n')\n # find the exposure (L)\n # langmuir.append(langmuir_determination(filename=filename))\n try:\n # read file\n file_read = pd.read_csv(file_path, sep='\\t', header=3)\n\n # remove whitespace\n column_names = [file_read.keys()[i].lstrip() for i in range(0, len(file_read.keys()))]\n # rename columns\n file_read.columns = column_names\n # drop the time column and mse=8\n # file_read = file_read.drop([column_names[0], column_names[-1]], axis=1)\n file_read = file_read.drop([column_names[0]], axis=1)\n temp = file_read[file_read != 0]\n temp = temp.dropna(axis=0)\n\n\n file_read = file_read.dropna(axis=1)\n\n # for the bug in the labview that the temperature cuts out\n temp = file_read[file_read != 0]\n file_read = temp.dropna(axis=0)\n\n # set the index to be temperature\n file_read = file_read.set_index(file_read.keys()[0])\n except IndexError:\n \"except it is a hiden mass spec file!\"\n file_read = pd.read_csv(file_path, header=29)\n file_read = file_read.dropna(axis=1)\n file_read.drop(['Time', 'ms'],axis=1, inplace=True)\n file_read.set_index('Temperature', inplace=True)\n # pseudo code...\n # pd.DataFrame(molecule_area[i], index=langmuir) and append all of them\n\n return file_read, filename",
"def list_from_dico(self):\n rflist = [' --- raw files ---']\n pflist = [' --- processed ---']\n flist = []\n for i,ftype in self.file_dico.items():\n if ftype in self.accept:\n iP = Path(i)\n if ftype in ('fid', 'FID', 'ser') and self.dotd:\n rflist.append(MSfile(iP, iP.relative_to(self.base), ftype)) \n elif self.msh5 and i.endswith('.msh5'):\n pflist.append(MSfile(iP, iP.relative_to(self.base), ftype))\n if self.dotd:\n flist = rflist\n if self.msh5:\n flist += pflist\n return flist",
"def read(filename='Omega2'):\n f=open(filename,'r').read()\n i = 0\n filetype, = unpack('<64s',f[i:i+64*strSize]) ; i += 64*strSize\n version, = unpack('<i',f[i:i+intSize]) ; i += intSize\n comment, = unpack('<1024s',f[i:i+1024*strSize]) ; i += 1024*strSize\n D,N_b,N_q = unpack('<3i',f[i:i+3*intSize]) ; i += 3*intSize\n Omega2 = unpack('<%id' % (N_q*N_b*D),f[i:])\n Omega2 = numpy.array(Omega2)\n Omega2.shape = (N_q,N_b*D)\n filetype = filetype.strip( '\\x00' )\n assert filetype == 'Omega2', \"File %s is not in correct format\" % filename\n return (filetype,version,comment.strip('\\x00')),Omega2",
"def load_sdf_file(file_path):\n moleculeSet = []\n log.info('Loading (SDF): %s',file_path)\n for molecule in Chem.SDMolSupplier(file_path):\n if molecule is None:\n log.error('Invalid molecule detected. (Skipped)')\n continue\n moleculeSet.append(molecule)\n return moleculeSet",
"def get_data(methylation_files, names, window, smoothen=5):\n return flatten(\n [read_mods(f, n, window, smoothen) for f, n in zip(methylation_files, names)]\n )",
"def get_mol_list_from_sdf(sdf_fname):\n suppl = Chem.SDMolSupplier(sdf_fname)\n mols = []\n\n for idx, mol in enumerate(suppl):\n if mol is not None:\n mols.append(mol)\n else:\n fail_sdf_block = suppl.GetItemText(idx)\n raise ValueError(\"Unable to parse the following mol block %s\" %\n fail_sdf_block)\n return mols"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function coordinates the calculation of the similarity score matrices by distributing chunks of the matrices among the allocated processes. | def build_matrices(self):
logging.info('\nMatrix scoring in progress....\n')
# The similarity score matrices are defined instances of the class SMatrix
# which implements a basic class for symmetric matrices
self.strict_mtx = SMatrix(shape=(self.nums(),))
self.loose_mtx = SMatrix(shape=(self.nums(),))
self.true_strict_mtx = SMatrix(shape=(self.nums(),))
# The total number of the effective elements present in the symmetric matrix
l = int(self.nums() * (self.nums() - 1) / 2)
if self.options.parallel == 1: # Serial execution
MCS_map = {}
self.compute_mtx(0, l - 1, self.strict_mtx, self.loose_mtx, self.true_strict_mtx, MCS_map)
for idx in MCS_map:
self.set_MCSmap(idx[0],idx[1],MCS_map[idx])
else:
# Parallel execution
logging.info('Parallel mode is on')
# Number of selected processes
num_proc = self.options.parallel
delta = int(l / num_proc)
rem = l % num_proc
if delta < 1:
kmax = l
else:
kmax = num_proc
proc = []
with multiprocessing.Manager() as manager:
# Shared memory array used by the different allocated processes
# At the moment we're using a combination of Array and Manager, which is nasty
strict_mtx = multiprocessing.Array('d', self.strict_mtx)
loose_mtx = multiprocessing.Array('d', self.loose_mtx)
true_strict_mtx = multiprocessing.Array('d', self.true_strict_mtx)
MCS_map = manager.dict()
# Chopping the indexes redistributing the remainder
for k in range(0, kmax):
spc = delta + int(int(rem / (k + 1)) > 0)
if k == 0:
i = 0
else:
i = j + 1
if k != kmax - 1:
j = i + spc - 1
else:
j = l - 1
# Python multiprocessing allocation
p = multiprocessing.Process(target=self.compute_mtx,
args=(i, j, strict_mtx, loose_mtx, true_strict_mtx, MCS_map, ))
p.start()
proc.append(p)
# End parallel execution
for p in proc:
p.join()
# Copying back the results
self.strict_mtx[:] = strict_mtx[:]
self.loose_mtx[:] = loose_mtx[:]
self.true_strict_mtx[:] = true_strict_mtx[:]
for idx in MCS_map.keys():
self.set_MCSmap(idx[0],idx[1],MCS_map[idx])
return self.strict_mtx, self.loose_mtx | [
"def compute_similarity_matrix(db_iter, sparse_mode=True, **sim_func_args):\n igs = list(db_iter)\n n = len(igs)\n\n set_defaults_sim_func(sim_func_args, igs)\n logging.info(\"Similarity function parameters: %s\", sim_func_args)\n similarity_function = partial(sim_function, **sim_func_args)\n\n # logging.info(\"Start similar_elements function ...\")\n # rows, cols = similar_elements(dd, igs, n, similarity_function)\n\n logging.info(\"Start parallel_sim_matrix function ...\")\n data, rows, cols = sm_sparse(\n np.array(igs), similarity_function, sim_func_args['tol'])\n\n # from icing.externals import neighbors\n # sp = neighbors.radius_neighbors_graph(np.array(igs))\n # rows, cols, _ = map(list, scipy.sparse.find(sp))\n # data = indicator_to_similarity(rows, cols, igs, similarity_function)\n #\n # data = np.array(data, dtype=float)\n # idx = data > 0\n # data = data[idx]\n # rows = np.array(rows, dtype=int)[idx]\n # cols = np.array(cols, dtype=int)[idx]\n\n # tic = time.time()\n # data, rows, cols = similar_elements(dd, igs, n, similarity_function)\n # print(time.time()-tic)\n # rows, cols, _ = map(list, scipy.sparse.find(indicator_matrix))\n # data = jl.Parallel(n_jobs=-1)(jl.delayed(d_func)\n # (igs[i], igs[j]) for i, j in s2)\n\n sparse_mat = scipy.sparse.csr_matrix((data, (rows, cols)),\n shape=(n, n))\n # similarity_matrix = sparse_mat + sparse_mat.T + scipy.sparse.eye(\n # sparse_mat.shape[0])\n similarity_matrix = sparse_mat # connected components works well\n if not sparse_mode:\n similarity_matrix = similarity_matrix.toarray()\n\n return similarity_matrix",
"def populate_score_matrices(self):\n ### FILL IN ###\n #careful to use len_alphabet_a vs. len(align_params.seq_a) for align_test\n #we dont specify seqa, only size of alphabet for testing update_ix, update_m, update_iy\n for i in range(0,len(self.align_params.seq_a)+1):\n for j in range(0,len(self.align_params.seq_b)+1):\n print(\"i j:\",i,j)\n if (i==0 or j==0):\n self.m_matrix.set_score(i,j,0.0)\n self.ix_matrix.set_score(i,j,0.0)\n self.iy_matrix.set_score(i,j,0.0)\n elif(i!=0 and j!=0):\n print(\"i j\",i,j,\"seqa:\",self.align_params.seq_a, \"seqb:\",self.align_params.seq_b)\n print(\"seq_a[i-1]\",self.align_params.seq_a[i-1])\n print(\"seq_b[j-1]\",self.align_params.seq_b[j-1])\n print(self.align_params.match_matrix.get_score(self.align_params.seq_a[i-1],self.align_params.seq_b[j-1]))\n \n \n firstM = self.m_matrix.get_score(i-1,j-1) + (self.align_params.match_matrix.get_score(self.align_params.seq_a[i-1],self.align_params.seq_b[j-1]))\n secondM = self.ix_matrix.get_score(i-1,j-1) + (self.align_params.match_matrix.get_score(self.align_params.seq_a[i-1],self.align_params.seq_b[j-1]))\n thirdM = self.iy_matrix.get_score(i-1,j-1) + (self.align_params.match_matrix.get_score(self.align_params.seq_a[i-1],self.align_params.seq_b[j-1]))\n maxM = max(firstM,secondM,thirdM)\n self.m_matrix.set_score(i,j, maxM)\n if firstM==maxM:\n self.m_matrix.M_pointer_add(i,j,(i-1,j-1))\n if(secondM==maxM):\n self.m_matrix.Ix_pointer_add(i,j,(i-1,j-1))\n if(thirdM==maxM):\n self.m_matrix.Iy_pointer_add(i,j,(i-1,j-1))\n\n firstIx = self.m_matrix.get_score(i-1,j) - self.align_params.dy\n secondIx = self.ix_matrix.get_score(i-1,j) - self.align_params.ey\n maxIx = max(firstIx,secondIx)\n self.ix_matrix.set_score(i,j, maxIx)\n \n if firstIx == maxIx:\n self.m_matrix.M_pointer_add(i,j,(i-1,j))\n if secondIx == maxIx:\n self.m_matrix.Iy_pointer_add(i,j,(i-1,j))\n\n firstIy = self.m_matrix.get_score(i,j-1) - self.align_params.dx\n secondIy = self.iy_matrix.get_score(i,j-1) - self.align_params.ex\n maxIy = max(firstIy,secondIy)\n self.iy_matrix.set_score(i,j, maxIy)\n if firstIy == maxIy:\n self.m_matrix.M_pointer_add(i,j,(i,j-1))\n if secondIy == maxIy:\n self.m_matrix.Iy_pointer_add(i,j,(i,j-1))\n else:\n print(\"should not see this\")\n print(\"----------------\")\n self.m_matrix.print_scores()\n print(\"----------------\")\n self.ix_matrix.print_scores()\n #print(\"maxIx:\",max([max(x) for x in self.ix_matrix.score_matrix]))\n print(\"----------------\")\n self.iy_matrix.print_scores()\n #print(\"maxIy:\",max([max(x) for x in self.iy_matrix.score_matrix]))\n #print(\"M pointers\")\n #self.m_matrix.print_pointers_old()\n #print(\"----------------\")\n #print(\"Ix pointers\")\n #self.ix_matrix.print_pointers_old()\n #print(\"----------------\")\n #print(\"Iy pointers\")\n #self.iy_matrix.print_pointers_old()\n #print(\"----------------\")\n #this is better. all the pointers in one line this is for traceback not all tehe pointers.\n self.m_matrix.print_pointers()",
"def compute_distance_matrix(subgroup_chunk_list: List[List[Token]], tokens_of_subject: List[str],\n spacy_nlp: Language) -> np.ndarray:\n\n vectors = np.array(\n [get_normalized_vector_of_chunk(chunk[:-len(tokens_of_subject)], spacy_nlp) for chunk in subgroup_chunk_list])\n similarity_matrix = np.matmul(vectors, vectors.T)\n\n # take care of antonyms\n antonyms_list = [get_antonyms_of_token_list(chunk[:-len(tokens_of_subject)]) for chunk in subgroup_chunk_list]\n words_list = [set([token.lemma_.lower() for token in chunk[:-len(tokens_of_subject)]]) for chunk in\n subgroup_chunk_list]\n\n for i, cur_antonyms in enumerate(antonyms_list):\n cur_words = words_list[i]\n for j in range(i + 1, len(antonyms_list)):\n nex_antonyms = antonyms_list[j]\n nex_words = words_list[j]\n if (cur_antonyms & nex_words) or (cur_words & nex_antonyms): # intersections\n similarity_matrix[i][j] = 0.0\n similarity_matrix[j][i] = 0.0\n\n return 1 - similarity_matrix",
"def populate_score_matrices(self, align_params):\n\t\tfor i in range(1,len(align_params.seq_a)+1):\n\t\t\tfor j in range(1,len(align_params.seq_b)+1):\n\t\t\t\tself.update(i, j, align_params)",
"def create_similarity_matrix(tested_embeddings):\n return 1-scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(tested_embeddings, 'cosine'))",
"def train(self, n_procs=2):\n sent_lists = np.array_split(self.sents, n_procs-1)\n if len(sent_lists) != n_procs:\n sent_lists = np.array_split(self.sents, n_procs)\n\n tmp_dir = tempfile.mkdtemp()\n tmp_files = [os.path.join(tmp_dir, 'tmp_' + str(i))\n for i in range(len(sent_lists))]\n\n sent_lists = list(zip(sent_lists, tmp_files))\n del self.sents\n\n\n try:\n print('Forking')\n # For debugging\n # tmp_files = map(mpfn, sent_lists)\n \n p = mp.Pool(n_procs)\n tmp_files = p.map(mpfn, sent_lists, 1)\n p.close()\n\n print('Reducing')\n self.matrix = np.zeros(tuple(_shape), dtype=self.dtype)\n\n for filename in tmp_files:\n\n with open(filename, 'rb') as f:\n result = cpickle.load(f)\n\n for k,v in result.items():\n self.matrix[k, :] += v\n\n finally:\n print('Removing {}'.format(tmp_dir))\n shutil.rmtree(tmp_dir)",
"def test_example_4():\n import pwseqdist as pw\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n import multiprocessing\n\n df = pd.read_csv(\"dash.csv\")\n df = df.head(100) # for faster testing\n tr = TCRrep(cell_df = df, \n organism = 'mouse', \n chains = ['alpha','beta'], \n use_defaults=False,\n compute_distances = False,\n cpus = 1,\n db_file = 'alphabeta_gammadelta_db.tsv')\n\n metrics_a = {\n \"cdr3_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_a_aa\" : pw.metrics.nw_hamming_metric }\n\n metrics_b = {\n \"cdr3_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_b_aa\" : pw.metrics.nw_hamming_metric }\n\n weights_a = { \n \"cdr3_a_aa\" : 1,\n \"pmhc_a_aa\" : 1,\n \"cdr2_a_aa\" : 1,\n \"cdr1_a_aa\" : 1}\n\n weights_b = { \n \"cdr3_b_aa\" : 1,\n \"pmhc_b_aa\" : 1,\n \"cdr2_b_aa\" : 1,\n \"cdr1_b_aa\" : 1}\n\n kargs_a = { \n 'cdr3_a_aa' : \n {'use_numba': False},\n 'pmhc_a_aa' : {\n 'use_numba': False},\n 'cdr2_a_aa' : {\n 'use_numba': False},\n 'cdr1_a_aa' : {\n 'use_numba': False}\n }\n kargs_b = { \n 'cdr3_b_aa' : \n {'use_numba': False},\n 'pmhc_b_aa' : {\n 'use_numba': False},\n 'cdr2_b_aa' : {\n 'use_numba': False},\n 'cdr1_b_aa' : {\n 'use_numba': False}\n }\n\n tr.metrics_a = metrics_a\n tr.metrics_b = metrics_b\n\n tr.weights_a = weights_a\n tr.weights_b = weights_b\n\n tr.kargs_a = kargs_a \n tr.kargs_b = kargs_b\n\n tr.compute_distances()\n\n tr.pw_cdr3_b_aa\n tr.pw_beta",
"def optimized(embeddings, args):\n similarities = np.zeros(shape=(len(embeddings), len(embeddings)))\n\n if args.similarity_algo == \"cosine\":\n for i in tqdm(range(len(embeddings))):\n for j in range(len(embeddings)):\n similarities[i][j] = np.dot(embeddings[i], np.transpose(embeddings[j]))\n\n else:\n raise ValueError(\"Invalid similarity algorithm\")\n\n return similarities",
"def compute_similarity_scores(tfidf_model, tfidf_matrix,\r\n tfidf_mids, training_info,\r\n test_info, nb_similars=100):\r\n test_mids = list(test_info['mid'])\r\n mid_recipient_scores = {}\r\n\r\n test_mids_pbar = tqdm_notebook(test_mids)\r\n for test_mid in test_mids_pbar:\r\n query_vector = tfidftools.get_tfidf_vector(\r\n test_mid, test_info, tfidf_model)\r\n similars = find_similar(query_vector, tfidf_matrix,\r\n nb_similars=nb_similars)\r\n\r\n # Get mid in training set corresponding to best matches\r\n best_match_mid = [tfidf_mids[similar_item[0]]\r\n for similar_item in similars]\r\n\r\n # Get corresponding similarity scores\r\n best_match_scores = [similar_item[1] for similar_item in similars]\r\n test_mail_scores = defaultdict(lambda: 0)\r\n for train_mid, train_score in zip(best_match_mid, best_match_scores):\r\n recipients = preprocess.get_recipients(training_info, train_mid)\r\n for recipient in recipients:\r\n test_mail_scores[recipient] += train_score\r\n mid_recipient_scores[test_mid] = test_mail_scores\r\n return mid_recipient_scores",
"def mat_mult_parallel(matIn1, matIn2, sharedMemArr, lastI):\n\n\tif check_dims(matIn1, matIn2) == False:\n\t\tprint_matrix_warning(matIn1, matIn2)\n\t\treturn \t\n\n\tn,m = matIn1.shape\n\t_, r = matIn2.shape\n\n\t# print((n,m,r))\n\n\tfor i in range(n):\n\t\t# print(i,j,k)\n\t\tfor j in range(r):\n\t\t\tsumMat = 0\n\t\t\tfor k in range(m):\n\t\t\t\tsumMat = sumMat + int(matIn1[i][k]*matIn2[k][j])\n\n\t\t\t# print(lastI)\t\t\t\n\t\t\tsharedMemArr[lastI*r + i*r + j] = sumMat",
"def output_similarity_matrix(active_sites):\n # Create empty pairwise matrix \n mat = np.empty([len(active_sites), len(active_sites)])\n # For every pair calculate the RMSD\n for (x,y), value in np.ndenumerate(mat):\n mat[x][y] = compute_similarity(active_sites[x], active_sites[y])\n # Infinite values means proteins had less than 3 similar amino acids, set to none\n mat[np.isinf(mat)] = None\n # Find max value in array for normalization\n max_val = np.nanmax(mat)\n # Make none values max value\n mat[np.isnan(mat)] = max_val\n # Get normalized dissimilarity matrix\n norm_mat = mat/max_val\n # Convert dissimilarity matrix to similarity by subtracting 1\n norm_mat_sim = 1 - norm_mat\n return norm_mat_sim",
"def create_condensed_dist_matrix(n, dist_fn, num_processes=None):\n global _cdm_max_num_processes\n if num_processes is None:\n num_processes = min(multiprocessing.cpu_count(),\n _cdm_max_num_processes)\n\n # Define the global variable _dist_matrix_shared (this must be top-level\n # to be accessible by the top-level function _fill_in_for_j_range())\n logger.debug((\"Setting up shared distance matrix\"))\n global _dist_matrix_shared\n dist_matrix_len = int(n*(n-1)/2)\n _dist_matrix_shared = multiprocessing.sharedctypes.RawArray(\n ctypes.c_float, dist_matrix_len)\n\n # Define the global function _dist_fn (this must be top-level to be\n # used in a multiprocessing Pool)\n global _dist_fn\n _dist_fn = dist_fn\n\n num_pairs = n*(n-1) / 2\n logger.debug((\"Condensed distance matrix has %d entries\"), num_pairs)\n num_entries_per_process = int(num_pairs / num_processes)\n\n # Find out which value of j to start each process with\n logger.debug((\"Assigning ranges in distance matrix to %d processes\"),\n num_processes)\n j_start_for_process = [None for _ in range(num_processes)]\n num_entries_in_process = [0 for _ in range(num_processes)]\n process_num = 0\n j_start_for_process[process_num] = 0\n for j in range(n):\n if num_entries_in_process[process_num] >= num_entries_per_process:\n if process_num < num_processes - 1:\n # Move onto the next process\n process_num += 1\n j_start_for_process[process_num] = j\n # There are j entries associated with j\n num_entries_in_process[process_num] += j\n\n # Make arguments to _fill_in_for_j_range()\n args_for_process = []\n for process_num in range(num_processes):\n j_start = j_start_for_process[process_num]\n if j_start is None:\n # There are more processes than needed; stop filling in args\n break\n if process_num == num_processes - 1:\n j_end = n\n else:\n j_end = j_start_for_process[process_num + 1]\n if j_end is None:\n j_end = n\n args_for_process += [(j_start, j_end, n)]\n\n # Run the pool\n logger.debug((\"Running multiprocessing pool to fill in distance matrix\"))\n pool = multiprocessing.Pool(num_processes)\n pool.map(_fill_in_for_j_range, args_for_process)\n pool.close()\n\n # Convert back to numpy array\n logger.debug((\"Converting shared distance matrix to numpy array\"))\n dist_matrix = np.ctypeslib.as_array(_dist_matrix_shared)\n\n return dist_matrix",
"def sim_matrix_calculation(self):\n\n self.ns = len(self.subject_list)\n self.sim_matrix = np.zeros(shape=(self.ns, self.ns))\n\n for i in range(self.ns):\n print(f'i={i}', flush=True)\n try:\n matrix_file_m1 = np.loadtxt(f'{self.path_m1}/{self.filename_list_m1_final[i]}', dtype=np.double)\n except ValueError:\n matrix_file_m1 = np.loadtxt(f'{self.path_m1}/{self.filename_list_m1_final[i]}', delimiter=',', dtype=np.double)\n\n #Set an empty variable called r1. We will use it to store the data we need for the correlation.\n #r1 = None #The empty variable is made because it will store different values depending on the type of fingerprinting done\n if self.type == 'within':\n submatrix1 = matrix_file_m1[self.nodes][:, self.nodes]\n r1 = submatrix1[np.triu_indices(len(submatrix1), k=1)] #Exclude the diagonal and retain upper triangular (within-network connections are a \"similarity\" graph)\n if self.net == \"random2\":\n print(r1)\n elif self.type == 'between':\n submatrix1 = matrix_file_m1[self.nodes][:, self.between] #Exclude any within-network connections (retain only the rectangle of \"between\" connections)\n r1 = submatrix1.flatten() #The within-network, upper triangle, gives a \"flattened\" array. We also need to flatten our \"between\" network array to execute the correlation properly \n # (otherwise the correlation between individual is exactly the same across all subjects)\n\n z1 = np.arctanh(r1) #Fisher normalize the remaining edges\n\n #If there are cells where the Fisher norm returns \"Inf\", replace the \"Inf\" by 0\n if np.count_nonzero(np.isinf(z1)) > 0:\n fin1 = np.where(np.isinf(z1), 0, z1)\n else:\n fin1 = z1.copy()\n\n for j in range(i, self.ns):\n try:\n matrix_file_m2 = np.loadtxt(f'{self.path_m2}/{self.filename_list_m2_final[j]}', dtype=np.double)\n except ValueError:\n matrix_file_m2 = np.loadtxt(f'{self.path_m2}/{self.filename_list_m2_final[j]}', delimiter=',', dtype=np.double)\n\n #See comments in the first loop for more info.\n #r2 = None\n if self.type == 'within':\n submatrix2 = matrix_file_m2[self.nodes][:, self.nodes]\n r2 = submatrix2[np.triu_indices(len(submatrix2), k=1)]\n if self.type == 'between':\n submatrix2 = matrix_file_m2[self.nodes][:, self.between]\n r2 = submatrix2.flatten()\n\n z2 = np.arctanh(r2)\n\n if np.count_nonzero(np.isinf(z2)) > 0:\n fin2 = np.where(np.isinf(z2), 0, z2)\n else:\n fin2 = z2.copy()\n\n #For Fisher normalized arrays, we compute the Pearson correlation between the two modalities. This results in a similarity matrix where\n # the diagonal is the \"within-individual\" correlation while off-diagonal elements are the \"between-individual\" correlation\n self.sim_matrix[i, j] = np.corrcoef(fin1, fin2)[0, 1] #In the future, will need to change to scipy for this. Otherwise, can't use other correlation methods for FP.\n \n #The sim matrix is computed for the upper triangle only for the within-network\n # We add a final transpose to make a full matrix, as the similarity matrix is\n # completly symmetric.\n self.sim_matrix = self.sim_matrix + np.triu(self.sim_matrix, k=1).T\n\n return None",
"def run(\n self,\n NUMITERS: typing.Optional[int] = 3,\n NH1: typing.Optional[int] = 3,\n NH2: typing.Optional[int] = 3,\n K: typing.Optional[int] = 20,\n N_GENE_CHUNKS: typing.Optional[int] = 1,\n umap: typing.Optional[bool] = True,\n ncpus=os.cpu_count(),\n \n ):\n start_time = time.time()\n\n sam1 = self.sam1\n sam2 = self.sam2\n gnnm = self.gnnm\n gn1 = self.gn1\n gn2 = self.gn2\n gn = self.gn\n smap = self.smap\n\n smap.run(\n NUMITERS=NUMITERS,\n NOPs1=0,\n NOPs2=0,\n NH1=NH1,\n NH2=NH2,\n K=K,\n NCLUSTERS=N_GENE_CHUNKS,\n ncpus=ncpus,\n )\n samap = smap.final_sam\n self.samap = samap\n self.ITER_DATA = smap.ITER_DATA\n\n print(\"Alignment score ---\", _avg_as(samap).mean())\n if umap:\n print(\"Running UMAP on the stitched manifolds.\")\n sc.tl.umap(self.samap.adata,min_dist=0.1,init_pos='random')\n \n try:\n hom_graph = smap.GNNMS_corr[-1]\n samap.adata.uns[\"homology_graph_reweighted\"] = hom_graph\n except:\n pass\n \n samap.adata.uns[\"homology_graph\"] = gnnm\n samap.adata.uns[\"homology_gene_names\"] = gn\n samap.adata.uns[\"homology_gene_names1\"] = gn1\n samap.adata.uns[\"homology_gene_names2\"] = gn2\n\n samap.adata.obs[\"species\"] = pd.Categorical(\n [self.id1] * sam1.adata.shape[0] + [self.id2] * sam2.adata.shape[0]\n )\n \n if umap:\n self.sam1.adata.obsm['X_umap_samap'] = self.samap.adata[self.sam1.adata.obs_names].obsm['X_umap']\n self.sam2.adata.obsm['X_umap_samap'] = self.samap.adata[self.sam2.adata.obs_names].obsm['X_umap'] \n \n self.run_time = time.time() - start_time\n print(\"Elapsed time: {} minutes.\".format(self.run_time / 60))\n return samap",
"def build_similarity_matrix(self, df_responses, agg_strategy, filter_sample_method, mapping_matrix):\n n_items = self.URM_train.shape[1]\n if mapping_matrix is None:\n mapping_matrix = np.repeat(np.reshape(np.arange(0, n_items), newshape=(1, n_items)), repeats=n_items, axis=0)\n matrix_builder = IncrementalSparseMatrix(n_rows=n_items, n_cols=n_items)\n\n for currentItem in range(n_items):\n response_df = df_responses[df_responses.item_id == currentItem].copy()\n self.add_sample_responses_to_matrix_builder(matrix_builder, agg_strategy, filter_sample_method, response_df,\n currentItem, mapping_matrix[currentItem])\n\n return sps.csr_matrix(matrix_builder.get_SparseMatrix())",
"def compute_genome_distances(genomes):\n p = Pool(initializer=init_pool, initargs=(None, genomes))\n genome_edit_dists = p.starmap(genome_distance,\n list(itertools.product(range(len(genomes)), range(len(genomes))))[:0]) # TODO\n p.close()\n if genome_edit_dists: # check if we computed new value or if it should use precomputed value\n print(genome_edit_dists)\n else:\n genome_edit_dists = compute_genome_distances_pre_computed\n\n result = np.full([len(genomes), len(genomes)], 0)\n for genome_edit_dist in genome_edit_dists: # convert list like result to a matrix\n if genome_edit_dist is None:\n continue\n result[genome_edit_dist[0]][genome_edit_dist[1]] = genome_edit_dist[2]\n result[genome_edit_dist[1]][genome_edit_dist[0]] = genome_edit_dist[2]\n return result",
"def similarity_matrix(self):\n queries = all_sounds[:10]\n similarity_matrix = np.zeros([10, 150], dtype=int)\n results = self.get_results()\n for i, query in enumerate(queries):\n result_list = results[query]\n # add query to resultlist\n result_list.insert(i, (query, 5))\n # now only keep the scores\n scores = [x[1] for x in result_list]\n # and append every row to the matrix\n similarity_matrix[i] = scores\n return similarity_matrix, queries, all_sounds",
"def __calculate_matrix_common(self):\n columns = []\n for i in range(len(self.user_to_id)):\n columns.append((self.matrix_users_files * self.matrix_users_files[i]).sum(axis=1))\n\n self.matrix_common = np.column_stack(columns)",
"def res_alignment1():\n min_contig_length=10\n min_fraction_domain=0.2\n a = len(seq_a)\n b = len(seq_b)\n # Check for the basic cases\n if (a == 0) or (b == 0): return [],[],0\n if seq_a == seq_b: return range(a),range(a),1.0\n min_matches = min(min_contig_length,min(a,b))\n misalign_penalty = 100000\n # limit the number of mis-alignments\n max_mis_align = int((1 - min_fraction_domain) * max(a,b))\n # Starting score according to the required similarity\n score = max_mis_align + 1\n # build score matrix\n R = use_dict(row=a,col=b,max_score=score)\n # populate score matrix\n for j in xrange(1,b + 1):\n for i in xrange(1,a + 1):\n # We want to halt when there is not way to get a good score\n not_aligned = (seq_a[i-1].lower() != seq_b[j-1].lower())\n s1 = R[i-1,j-1].score - misalign_penalty * not_aligned\n s2,mc2 = gap_score(R[i-1,j],min_matches)\n s3,mc3 = gap_score(R[i,j-1],min_matches)\n if (s1 >= s2) and (s1 >= s3):\n s = s1\n i_temp,j_temp = i-1,j-1\n match_count = R[(i_temp,j_temp)].match_count + 1\n consecutive_matches = R[(i_temp,j_temp)].consecutive_matches + 1\n elif (s2 >= s1) and (s2 >= s3):\n i_temp,j_temp = i-1,j\n s = s2\n match_count = mc2\n consecutive_matches = 0\n else:\n i_temp,j_temp = i,j-1\n s = s3\n match_count = mc3\n consecutive_matches = 0\n # if i > (0.9 * a):\n # aaa = 10\n # break\n # else:\n # aaa = -10\n R[i,j].score = s\n R[i,j].origin = (i_temp,j_temp)\n R[i,j].match_count = match_count\n R[i,j].consecutive_matches = consecutive_matches"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
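Editor's note on the row above, not part of the dataset: build_matrices stores each similarity matrix as a condensed linear array of the l = n*(n-1)/2 upper-triangle elements and, in parallel mode, hands every worker process one contiguous slice [i, j] of that range. The helper below is a hypothetical, simplified sketch of just that chunking arithmetic; the name chunk_ranges is invented for illustration, and its remainder handling (one extra element for each of the first rem chunks) matches the spc = delta + int(int(rem / (k + 1)) > 0) expression in the code above.

```python
# Hypothetical sketch of the index chopping used by build_matrices above.
def chunk_ranges(l, num_proc):
    """Split the condensed index range [0, l-1] into contiguous (start, end) chunks."""
    delta, rem = divmod(l, num_proc)
    kmax = l if delta < 1 else num_proc      # never spawn more chunks than elements
    ranges, start = [], 0
    for k in range(kmax):
        size = delta + (1 if k < rem else 0)  # first `rem` chunks absorb the remainder
        end = l - 1 if k == kmax - 1 else start + size - 1
        ranges.append((start, end))
        start = end + 1
    return ranges

if __name__ == "__main__":
    n = 7                        # number of molecules
    l = n * (n - 1) // 2         # condensed upper-triangle length
    print(chunk_ranges(l, 4))    # [(0, 5), (6, 10), (11, 15), (16, 20)]
```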
This function returns the size of the square similarity score matrix. | def mat_size(self):
# Length of the linear array
l = self.size
# Total number of elements in the corresponding bi-dimensional symmetric matrix
n = int((1 + math.sqrt(1 + 8 * l)) / 2)
return n | [
"def size(self, matrix):\r\n return matrix.shape",
"def create_similarity_matrix(tested_embeddings):\n return 1-scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(tested_embeddings, 'cosine'))",
"def get_similarity_score(differences):\n\n sum_of_squares = 0\n for diff in differences:\n sum_of_squares += np.power(diff, 2)\n\n similarity_score = 1 / (np.sqrt(sum_of_squares) + 1)\n return similarity_score",
"def corr_matrix_score(matrix):\n total = 0\n for row in matrix:\n total += max(row)\n \n return total/len(matrix)",
"def __len__(self):\n return len(self.__matrix)",
"def get_square_width(self):\r\n return self._square_width",
"def SizeOfStiffnessMatrix(self):\n\t\tpass",
"def test_similarity_shape(self):\n self._test_similarity_shape()\n try:\n self._test_similarity_shape_sparse()\n except TypeError:\n # computation of kernel is not supported on sparse matrices\n pass",
"def similarity_matrix(self):\n queries = all_sounds[:10]\n similarity_matrix = np.zeros([10, 150], dtype=int)\n results = self.get_results()\n for i, query in enumerate(queries):\n result_list = results[query]\n # add query to resultlist\n result_list.insert(i, (query, 5))\n # now only keep the scores\n scores = [x[1] for x in result_list]\n # and append every row to the matrix\n similarity_matrix[i] = scores\n return similarity_matrix, queries, all_sounds",
"def output_similarity_matrix(active_sites):\n # Create empty pairwise matrix \n mat = np.empty([len(active_sites), len(active_sites)])\n # For every pair calculate the RMSD\n for (x,y), value in np.ndenumerate(mat):\n mat[x][y] = compute_similarity(active_sites[x], active_sites[y])\n # Infinite values means proteins had less than 3 similar amino acids, set to none\n mat[np.isinf(mat)] = None\n # Find max value in array for normalization\n max_val = np.nanmax(mat)\n # Make none values max value\n mat[np.isnan(mat)] = max_val\n # Get normalized dissimilarity matrix\n norm_mat = mat/max_val\n # Convert dissimilarity matrix to similarity by subtracting 1\n norm_mat_sim = 1 - norm_mat\n return norm_mat_sim",
"def size(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_size(self)",
"def vector_size_to_square_matrix_size(d, validate_args, name=None):\n if isinstance(d, (float, int, np.generic, np.ndarray)):\n n = (-1 + np.sqrt(1 + 8 * d)) / 2.\n if float(int(n)) != n:\n raise ValueError('Vector length {} is not a triangular number.'.format(d))\n return int(n)\n else:\n with tf.name_scope(name or 'vector_size_to_square_matrix_size') as name:\n n = (-1. + tf.sqrt(1 + 8. * tf.cast(d, dtype=tf.float32))) / 2.\n if validate_args:\n with tf.control_dependencies([\n tf.debugging.Assert(\n tf.math.equal(\n tf.cast(tf.cast(n, dtype=tf.int32), dtype=tf.float32), n),\n data=['Vector length is not a triangular number: ', d]\n )\n ]):\n n = tf.identity(n)\n return tf.cast(n, d.dtype)",
"def regularize(weights: np.matrix) -> int:\n squared_weights = np.square(weights)\n return np.sum(squared_weights)",
"def similarity_score(self) -> float:\n return self.__score",
"def test_can_dynamically_calculate_larger_square_areas(self):\n matrix = [\n [1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 0, 1],\n [1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 0],\n [1, 1, 1, 0, 0, 0],\n ]\n\n result = maximal_square(matrix)\n self.assertEqual(result, 9)",
"def get_num_squares(board, player):\n num_squares = 0\n dim = board.get_dim()\n for row in range(dim):\n for col in range(dim):\n if board.square(row, col) == player:\n num_squares += 1\n return num_squares",
"def get_similarity_matrix(self, rows=None, cols=None, rtol=1e-5):\n rows = rows if rows is not None else range(self.num_structures)\n cols = cols if cols is not None else range(self.num_features)\n\n matrix = self.feature_matrix[rows][:, cols]\n num_structs = len(rows)\n num_corrs = len(cols)\n sim_matrix = np.identity(num_corrs)\n for i in range(num_corrs):\n for j in range(i + 1, num_corrs):\n num_identical = np.sum(\n np.isclose(matrix[:, i], matrix[:, j], rtol=rtol)\n )\n sim_matrix[i, j] = num_identical / num_structs\n sim_matrix[j, i] = num_identical / num_structs\n\n return sim_matrix",
"def size(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixF_size(self)",
"def squarest_grid_size(num_images):\n divisors = sympy.divisors(num_images)\n square_root = math.sqrt(num_images)\n width = 1\n for d in divisors:\n if d > square_root:\n break\n width = d\n return (num_images // width, width)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
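Editor's illustration for the row above, not part of the dataset: mat_size inverts the triangular-number relation l = n*(n-1)/2 that links the condensed array length to the side of the full symmetric matrix, i.e. it takes the positive root of n**2 - n - 2*l = 0. The snippet below uses a made-up standalone function name just to confirm the round trip.

```python
import math

# Hypothetical standalone version of the formula used by mat_size above.
def square_size_from_condensed_length(l):
    # positive root of n**2 - n - 2*l = 0
    return int((1 + math.sqrt(1 + 8 * l)) / 2)

# Round trip: every matrix size n maps to l = n*(n-1)/2 and back to n.
for n in range(2, 12):
    l = n * (n - 1) // 2
    assert square_size_from_condensed_length(l) == n
```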
Get the proper dict item by referencing the index and model information. Since not every model can be optimized by PTQ or NNCF, we need to check that the data actually contains the expected values. For example, if model A can be optimized by both PTQ and NNCF while model B is supported by neither, we have PTQ and NNCF results for A but no PTQ or NNCF results for B. So, when a result is missing, we mark it with the "-" placeholder. | def get_metric_dict(dict_data: Union[List[Dict[str, Any]], None], idx: int, model: str):
if dict_data and len(dict_data) > idx:
if dict_data[idx].get(model) is None:
return "-"
return dict_data[idx][model]
else:
return "-" | [
"def get_result(self, index):\n\t\tif not Util.dic_is_empty(self.case_json['results']):\n\t\t\treturn self.case_json[index]",
"def __getitem__(self, index):\n\n # index is a single number\n if isinstance(index, Number):\n model = self._models[index]\n model_index = self._model_indices[index]\n if model.get_stored_output_values() is None:\n return None\n else:\n output_values = model.get_stored_output_values()\n return output_values[model_index]\n\n # index is a slice\n elif isinstance(index, slice):\n result = []\n for i in range(index.start, index.stop):\n val = self[i]\n if val != None:\n result.append(val)\n else:\n return None\n return result",
"def test_explain_result_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n analyzer_model = {} # Analyzer\n analyzer_model['name'] = 'classic'\n analyzer_model['stopwords'] = ['testString']\n\n index_text_operator_default_field_model = {} # IndexTextOperatorDefaultField\n index_text_operator_default_field_model['analyzer'] = analyzer_model\n index_text_operator_default_field_model['enabled'] = True\n\n index_field_model = {} # IndexField\n index_field_model['name'] = 'testString'\n index_field_model['type'] = 'boolean'\n index_field_model['foo'] = 'asc'\n\n index_definition_model = {} # IndexDefinition\n index_definition_model['default_analyzer'] = analyzer_model\n index_definition_model['default_field'] = index_text_operator_default_field_model\n index_definition_model['fields'] = [index_field_model]\n index_definition_model['index_array_lengths'] = True\n index_definition_model['partial_filter_selector'] = {}\n\n index_information_model = {} # IndexInformation\n index_information_model['ddoc'] = 'testString'\n index_information_model['def'] = index_definition_model\n index_information_model['name'] = 'testString'\n index_information_model['type'] = 'json'\n\n explain_result_range_model = {} # ExplainResultRange\n explain_result_range_model['end_key'] = [{ 'foo': 'bar' }]\n explain_result_range_model['start_key'] = [{ 'foo': 'bar' }]\n\n # Construct a json representation of a ExplainResult model\n explain_result_model_json = {}\n explain_result_model_json['dbname'] = 'testString'\n explain_result_model_json['fields'] = ['testString']\n explain_result_model_json['index'] = index_information_model\n explain_result_model_json['limit'] = 0\n explain_result_model_json['opts'] = {}\n explain_result_model_json['range'] = explain_result_range_model\n explain_result_model_json['selector'] = {}\n explain_result_model_json['skip'] = 0\n\n # Construct a model instance of ExplainResult by calling from_dict on the json representation\n explain_result_model = ExplainResult.from_dict(explain_result_model_json)\n assert explain_result_model != False\n\n # Construct a model instance of ExplainResult by calling from_dict on the json representation\n explain_result_model_dict = ExplainResult.from_dict(explain_result_model_json).__dict__\n explain_result_model2 = ExplainResult(**explain_result_model_dict)\n\n # Verify the model instances are equivalent\n assert explain_result_model == explain_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n explain_result_model_json2 = explain_result_model.to_dict()\n assert explain_result_model_json2 == explain_result_model_json",
"def get_indexed_result():\n try:\n requests_session = get_requests_session()\n data = request.args if request.args else request.json\n if data is None:\n msg = f\"Both index & job_id are required\"\n return Response(json.dumps({\"error\": msg}), 400, headers=standard_headers)\n required_attributes = [\"owner\", \"providerSignature\", \"nonce\", \"index\", \"jobId\"]\n msg, status = check_required_attributes(\n required_attributes, data, \"GET:/getResult\"\n )\n if msg:\n return Response(\n json.dumps({\"error\": msg}), status, headers=standard_headers\n )\n index = data.get(\"index\", None)\n job_id = data.get(\"jobId\", None)\n owner = data.get(\"owner\", None)\n nonce = data.get(\"nonce\", None)\n # verify provider's signature\n msg, status, provider_address = process_provider_signature_validation(\n data.get(\"providerSignature\"), f\"{owner}{job_id}\", nonce\n )\n if msg:\n return Response(\n json.dumps({\"error\": msg}), status, headers=standard_headers\n )\n index = int(index)\n outputs, output_owner = get_sql_job_urls(job_id)\n # check owner & provider\n logger.info(f\"Got {output_owner}\")\n logger.info(f\"Got {outputs}\")\n if owner != output_owner:\n msg = f\"Owner {owner} mismatch for job {job_id}\"\n return Response(json.dumps({\"error\": msg}), 404, headers=standard_headers)\n\n wanted_jobs = get_job_by_provider_and_owner(\n owner=owner, provider=provider_address\n )\n logger.info(f\"Got jobs by owner and provider: {wanted_jobs}\")\n if wanted_jobs is None:\n msg = f\"Provider {provider_address} mismatch for job {job_id}\"\n return Response(json.dumps({\"error\": msg}), 404, headers=standard_headers)\n\n if outputs is None or not isinstance(outputs, list):\n msg = f\"No results for job {job_id}\"\n return Response(json.dumps({\"error\": msg}), 404, headers=standard_headers)\n # check the index\n logger.info(f\"Len outputs {len(outputs)}, index: {index}\")\n if int(index) < 0:\n msg = f\"Negative index {index}\"\n return Response(json.dumps({\"error\": msg}), 404, headers=standard_headers)\n if int(index) >= len(outputs):\n msg = f\"No such index {index} in this compute job\"\n return Response(json.dumps({\"error\": msg}), 404, headers=standard_headers)\n logger.info(f\"Trying: {outputs[index]['url']}\")\n return build_download_response(\n request, requests_session, outputs[index][\"url\"], None\n )\n\n except ApiException as e:\n msg = f\"Error getting the status: {e}\"\n logger.error(msg)\n return Response(json.dumps({\"error\": msg}), 400, headers=standard_headers)\n except Exception as e:\n msg = f\"{e}\"\n logger.error(msg)\n return Response(json.dumps({\"error\": msg}), 400, headers=standard_headers)",
"def _run_and_optimize_model(self, data):\n return {}, None",
"def _update_data(data, model_data, model, model_text):\n idx_missing = np.isnan(data)\n num_missing = np.sum(idx_missing)\n if num_missing > 0 and isinstance(model_data, np.ndarray):\n data[idx_missing] = model_data[idx_missing]\n num_missing_model = np.sum(np.isnan(model_data))\n log.debug(f\"Used {model} for {model_text} for {num_missing-num_missing_model} observations\")\n elif num_missing > 0 and isinstance(model_data, float):\n data = model_data\n log.debug(f\"Used {model} for {model_text}\")\n\n return data",
"def test_search_info_result_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n search_index_info_model = {} # SearchIndexInfo\n search_index_info_model['committed_seq'] = 26\n search_index_info_model['disk_size'] = 0\n search_index_info_model['doc_count'] = 0\n search_index_info_model['doc_del_count'] = 0\n search_index_info_model['pending_seq'] = 26\n\n # Construct a json representation of a SearchInfoResult model\n search_info_result_model_json = {}\n search_info_result_model_json['name'] = 'testString'\n search_info_result_model_json['search_index'] = search_index_info_model\n\n # Construct a model instance of SearchInfoResult by calling from_dict on the json representation\n search_info_result_model = SearchInfoResult.from_dict(search_info_result_model_json)\n assert search_info_result_model != False\n\n # Construct a model instance of SearchInfoResult by calling from_dict on the json representation\n search_info_result_model_dict = SearchInfoResult.from_dict(search_info_result_model_json).__dict__\n search_info_result_model2 = SearchInfoResult(**search_info_result_model_dict)\n\n # Verify the model instances are equivalent\n assert search_info_result_model == search_info_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n search_info_result_model_json2 = search_info_result_model.to_dict()\n assert search_info_result_model_json2 == search_info_result_model_json",
"def evaluate_dataloader_item(dataloader,model,indx):\n etl = enumerate(dataloader)\n with inference_context(model), torch.no_grad():\n found = False\n for idx, inputs in etl:\n if idx==indx:\n outputs = model(inputs)\n return outputs\n if found == False:\n return \"Index not found\"",
"def test_index_result_serialization(self):\n\n # Construct a json representation of a IndexResult model\n index_result_model_json = {}\n index_result_model_json['id'] = 'testString'\n index_result_model_json['name'] = 'testString'\n index_result_model_json['result'] = 'created'\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model = IndexResult.from_dict(index_result_model_json)\n assert index_result_model != False\n\n # Construct a model instance of IndexResult by calling from_dict on the json representation\n index_result_model_dict = IndexResult.from_dict(index_result_model_json).__dict__\n index_result_model2 = IndexResult(**index_result_model_dict)\n\n # Verify the model instances are equivalent\n assert index_result_model == index_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n index_result_model_json2 = index_result_model.to_dict()\n assert index_result_model_json2 == index_result_model_json",
"def prepare_result(self, question, current_filters=None):\n\t\tcurrent_filters = current_filters if current_filters else []\n\t\tresult_summary = {}\n\n\t\t# Calculate and return statistics for choice\n\t\tif question.type in ['simple_choice', 'multiple_choice']:\n\t\t\tanswers = {}\n\t\t\tcomments = []\n\t\t\t[answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids]\n\t\t\tfor input_line in question.user_input_line_ids:\n\t\t\t\tif input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):\n\t\t\t\t\tanswers[input_line.value_suggested.id]['count'] += 1\n\t\t\t\tif input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):\n\t\t\t\t\tcomments.append(input_line)\n\t\t\tresult_summary = {'answers': answers.values(), 'comments': comments}\n\n\t\t# Calculate and return statistics for matrix\n\t\tif question.type == 'matrix':\n\t\t\trows = OrderedDict()\n\t\t\tanswers = OrderedDict()\n\t\t\tres = dict()\n\t\t\tcomments = []\n\t\t\t[rows.update({label.id: label.value}) for label in question.labels_ids_2]\n\t\t\t[answers.update({label.id: label.value}) for label in question.labels_ids]\n\t\t\tfor cell in product(rows.keys(), answers.keys()):\n\t\t\t\tres[cell] = 0\n\t\t\tfor input_line in question.user_input_line_ids:\n\t\t\t\tif input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:\n\t\t\t\t\tres[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1\n\t\t\t\tif input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):\n\t\t\t\t\tcomments.append(input_line)\n\t\t\tresult_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}\n\n\t\t# Calculate and return statistics for free_text, textbox, datetime\n\t\tif question.type in ['free_text', 'textbox', 'datetime','attachment']:\n\t\t\tresult_summary = []\n\t\t\tfor input_line in question.user_input_line_ids:\n\t\t\t\tif not(current_filters) or input_line.user_input_id.id in current_filters:\n\t\t\t\t\tresult_summary.append(input_line)\n\n\t\t# Calculate and return statistics for numerical_box\n\t\tif question.type == 'numerical_box':\n\t\t\tresult_summary = {'input_lines': []}\n\t\t\tall_inputs = []\n\t\t\tfor input_line in question.user_input_line_ids:\n\t\t\t\tif not(current_filters) or input_line.user_input_id.id in current_filters:\n\t\t\t\t\tall_inputs.append(input_line.value_number)\n\t\t\t\t\tresult_summary['input_lines'].append(input_line)\n\t\t\tif all_inputs:\n\t\t\t\tresult_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),\n\t\t\t\t\t\t\t\t\t 'max': round(max(all_inputs), 2),\n\t\t\t\t\t\t\t\t\t 'min': round(min(all_inputs), 2),\n\t\t\t\t\t\t\t\t\t 'sum': sum(all_inputs),\n\t\t\t\t\t\t\t\t\t 'most_common': Counter(all_inputs).most_common(5)})\n\t\treturn result_summary",
"def get_entry(self, model, key, val):\n if key not in model.schema.props:\n raise RuntimeError(f\"{key} is not a part of {model.name}'s schema\")\n if model.schema.props[key].index:\n return self.get_item_from_index(model, key, val)\n elif model.schema.props[key].index_key:\n found = self.get_item_from_index_set(model, key, val, val)\n return found[0] if found else None\n else:\n for obj in self.storage.get_keys_in_model(model):\n if getattr(obj, key) == val:\n return obj\n return None",
"def findBestModel(self):\n self.reggridSearch()",
"def _models_info(self, testcases):\n models = {}\n unknown_models = []\n for testcase in testcases:\n testcase_name = testcase[\"name\"]\n\n if re.search(\"^tempest\\.api\", testcase_name):\n temp = re.findall(\"tempest\\.api\\.[0-9a-zA-Z_]*\",\n testcase_name)\n if len(temp) == 1:\n model = temp[0]\n if models.has_key(model):\n models[model][\"count\"] += 1\n else:\n models[model] = {}\n models[model][\"count\"] = 1\n models[model][\"success\"] = 0\n models[model][\"fail\"] = 0\n models[model][\"skip\"] = 0\n models[model]['fail_cast'] = []\n\n result = testcase[\"result\"]\n if result == \"ok\":\n models[model][\"success\"] += 1\n elif result == \"SKIPPED\":\n models[model][\"skip\"] += 1\n else:\n models[model][\"fail\"] += 1\n models[model]['fail_cast'].append(testcase['testcase'])\n else:\n unknown_models.append(testcase_name)\n elif re.search(\"^tempest\\.sf_scenario\", testcase_name):\n temp = re.findall(\"tempest\\.sf_scenario\",\n testcase_name)\n if len(temp) == 1:\n model = temp[0]\n if models.has_key(model):\n models[model][\"count\"] += 1\n else:\n models[model] = {}\n models[model][\"count\"] = 1\n models[model][\"success\"] = 0\n models[model][\"fail\"] = 0\n models[model][\"skip\"] = 0\n models[model]['fail_cast'] = []\n\n result = testcase[\"result\"]\n if result == \"ok\":\n models[model][\"success\"] += 1\n elif result == \"SKIPPED\":\n models[model][\"skip\"] += 1\n else:\n models[model][\"fail\"] += 1\n models[model]['fail_cast'].append(testcase['testcase'])\n else:\n unknown_models.append(testcase_name)\n else:\n unknown_models.append(testcase_name)\n models_info = {\n \"known\": models,\n \"unkwon\": unknown_models\n }\n return models_info",
"def get_objective_search_results_record(self, objective_search_record_type):\n raise errors.Unimplemented()",
"def get_eqopp_threshold_cache(estimator_index, estimator_name, clf_i,\n valid_data, results, inds):\n print('caching results for {} estimator'.format(estimator_name))\n thr_cache = {}\n inds_0 = valid_data[:, 0] < 0.5\n inds_1 = valid_data[:, 0] > 0.5\n for t1 in THRESHOLDS:\n eqopp_value_1 = get_eqopp_value(\n valid_data, inds_1, t1, '1', estimator_index, clf_i, results)\n for t0 in THRESHOLDS:\n eqopp_value_0 = get_eqopp_value(\n valid_data, inds_0, t0, '0', estimator_index, clf_i,\n results)\n eqopp_distance = abs(eqopp_value_0 - eqopp_value_1)\n utility = get_utility(t0, t1, estimator_index,\n clf_i, results, inds)\n thr_cache[(t0, t1)] = (utility, eqopp_distance)\n return thr_cache",
"def meta_model(row):\n meta_condition = row['ul_pred_tamper'] == 1 and row['sl_pred_tamper'] == 1\n if meta_condition:\n return row['ul_pred_tamper']\n return row['sl_pred_tamper']",
"def get_best_model(\n self, timeout=60 * 15, return_model=True, return_score=True, return_params=True\n ):\n assert any(\n (return_model, return_score, return_params)\n ), \"Must return at least one thing.\"\n failed_before = False\n interval = 60\n start = time.time()\n while (time.time() - start) < timeout:\n status = self.job_status()\n\n # Get all score files.\n score_files = glob(\n os.path.join(self.dir, \"**\", \"scores_*.pickle\"), recursive=True\n )\n if status is None:\n # If no current matching job is found.\n\n if score_files:\n logger.info(\n \"No matching job was found. Scores were found however, so \"\n \"assuming job has finished.\"\n )\n break\n else:\n if failed_before:\n logger.error(\"No job and no scores were found.\")\n raise ValueError(\"No job and no scores were found.\")\n\n failed_before = True\n logger.warning(\n \"No matching job was found, and no scores were found. Trying \"\n f\"again in {interval} seconds.\"\n )\n else:\n logger.info(\n f\"Waiting for job to finish. Current status: {status}. \"\n f\"Completed: {len(score_files)}/{self.n_sets}.\"\n )\n\n time.sleep(interval)\n\n if not status is None:\n logger.warning(\"No results found within the timeout.\")\n return None\n\n # Get all the scores and compute their means to find the score with the\n # highest mean.\n mean_scores = []\n for score_file in score_files:\n with open(score_file, \"rb\") as f:\n mean_scores.append(np.mean(pickle.load(f)))\n\n index = int(\n re.search(\n \"scores_(\\d*)\\.pickle\", score_files[np.argmax(mean_scores)]\n ).group(1)\n )\n\n output = {}\n if return_model:\n with open(\n os.path.join(self.job_results_dir, f\"model_{index}.pickle\"), \"rb\"\n ) as f:\n output[\"model\"] = pickle.load(f)\n if return_score:\n output[\"score\"] = np.max(mean_scores)\n if return_params:\n output[\"params\"] = dict(\n zip(self.param_grid, list(product(*self.param_grid.values()))[index])\n )\n\n if len(output) == 1:\n return list(output.values())[0]\n else:\n return output",
"def model_query(model: db.Model) -> List[dict]:\n result = []\n fields = ['spin_mode', 'basis_set', 'method', 'method_family', 'program', 'version', 'solvation', 'solvent',\n 'embedding', 'periodic_boundaries', 'external_field', 'temperature', 'electronic_temperature']\n for field in fields:\n value = getattr(model, field)\n if value.lower() != \"any\":\n result.append({f\"model.{field}\": value})\n return result",
"def select_model(to_search, captcha_format):\n if captcha_format != 4 and to_search in SPECIFIC_MODEL:\n return SPECIFIC_MODEL[to_search]\n return DEFAULT_MODEL"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the task is an anomaly. | def is_anomaly_task(task: str) -> bool:
return "anomaly" in task | [
"def byass_time_point_status(self):\n return False",
"def is_time_critical(task):\n return task.task.start_after == task.task.end_before",
"def successful(self):\n return not np.isnan(self.time_points.interaction)",
"def high_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks > \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def is_event(self):\n current_time = self.current_time()\n current_event_time = self.events[self.current_event]\n cet = current_event_time\n current_event_time = time(cet.hour, cet.minute, cet.second)\n self.logger.debug(\"current_event_time: {0}\".format(current_event_time))\n fudge_factor = (datetime.combine(date(1,1,1),\n current_event_time) + timedelta(seconds=60)).time()\n self.logger.debug(\"fudge_factor: {0}\".format(fudge_factor))\n status = current_event_time <= current_time <= fudge_factor\n return status",
"def get_anomaly_detection():",
"def recent_failure(self):\n return timezone.now() < self.timestamp + timedelta(minutes=BB_BLOCK_INTERVAL)",
"def is_time_to_act(self):\n if self.act_hack:\n return False\n\n if self.count_to_actuate <= 0:\n return True\n else:\n return False",
"def low_storm_peaks(self):\n\n if (self.postprocessor.sim_storm_peaks < \n self.postprocessor.obs_storm_peaks): \n return True\n\n return False",
"def check_append_wires(self, task: Union[MitTask, TaskGraph]) -> bool:\n sig = inspect.signature(task.run)\n params = list(sig.parameters.values())\n return (\n (task.n_in_wires == self.n_out_wires)\n and (task.n_out_wires == 1)\n and (len(params) == 1)\n and (params[0].annotation == List[QubitPauliOperator])\n and (sig.return_annotation == Tuple[List[QubitPauliOperator]])\n )",
"def in_the_future(self):\n return int(self.time) > time.time() + CHAIN.TIME_TOLERANCE",
"def check_times(self):\r\n if self.in_time and self.out_time and not (self.in_time == self.out_time):\r\n return False\r\n return True",
"def test_check_ontime(self, grading_system):\r\n flag = False\r\n try:\r\n flag = self.check_ontime(grading_system)\r\n except Exception as e:\r\n raise e\r\n finally:\r\n atexit.register(self.report,\r\n **{\"function\": inspect.stack()[0][3], \"result\": True if flag is True else False,\r\n \"done\": False})\r\n assert flag",
"def will_have_storm(self):\n return weather.any_status_is(self.forecast.weathers, \"storm\", self._wc_registry)",
"def timed_out(self):\n return STATE_TASK_RESULT.TIMED_OUT == self._task_result",
"def has_time(self):\n return self.has_time_coord and self.has_time_dim",
"def does_fall_asleep(self):\n return self.times_slept != []",
"def _waitForLiveEpochs(self):\n return not not (self.ourEpoch or self.masterEpoch)",
"def can_complete_event(self):\n s = self.eventshift_set\n now = timezone.now()\n s_future = s.filter(end_time__gte=now)\n if self.completed:\n return False\n if s_future:\n return False\n else:\n return True",
"def bpm_beat_now(self):\n try:\n return bool(self._tempo(self.audio_sample(raw=True))[0])\n except ValueError as e:\n _LOGGER.warning(e)\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize weights to small random numbers | def _initialize_weights(self, size: int) -> 'None':
self.w_ = self.random_generator.normal(loc=0.0, scale=0.01,
size=1 + size)
self.w_initialized = True | [
"def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]",
"def random_weight():\n # We found that random.randrange(-1,2) to work well emperically \n # even though it produces randomly 3 integer values -1, 0, and 1.\n # return random.randrange(-1, 2)\n\n # Uncomment the following if you want to try a uniform distribuiton \n # of random numbers compare and see what the difference is.\n return random.uniform(-1, 1)\n\n # When training larger networks, initialization with small, random\n # values centered around 0 is also common, like the line below:\n # return np.random.normal(0,0.1)",
"def _initialize_weights(self):\n self.weights = np.random.randn(self.number_of_classes,self.input_dimensions+1)",
"def initialize_weights(self,seed=None):\r\n if seed != None:\r\n np.random.seed(seed)\r\n self.weights = np.random.randn(self.weights.shape[0],self.weights.shape[1])",
"def get_random_weights():\r\n return random.uniform(0,100)",
"def init_weights(self):\r\n default_init_weights(self, 1)",
"def initialize_weights(self,seed=None):\r\n if seed != None: # using seed to initialize the weights if the seed is given\r\n np.random.seed(seed)\r\n\r\n self.weights=[] \r\n self.weights=np.random.randn(self.number_of_nodes,self.input_dimensions+1) #initialize the weights using random number\r\n return None",
"def reinit_weights(self):\n self.w = 0.01 * np.random.randn(self.prev_layer.get_shape()[0], self.nodes)",
"def weight_new(weights,stepsize,unused):\n\tnew_weights = np.zeros(len(weights))\n\tfor kk in range(0,len(weights)):\n\t\tif not kk in unused:\n\t\t\tnew_weights[kk] = weights[kk]+stepsize*np.random.normal()\n\t\tif new_weights[kk]<0:\n\t\t\tnew_weights[kk] = 0\n\tnew_weights/=np.sum(new_weights)\n\treturn new_weights",
"def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]",
"def reset_weights(self):\n # TODO: Maybe use xavier initialization instead.\n self.delete_torch_layers()\n weights = np.random.randn(len(self.connections)) * self.weight_init_std\n self.weights = weights.tolist()",
"def init_weights(self):\r\n if self.init_seed:\r\n np.random.seed(self.init_seed)\r\n\r\n weights_list = []\r\n biases_list = []\r\n\r\n for layer in range(self.nb_layers):\r\n new_W = np.random.randn(self.K_list[layer], self.K_list[layer + 1])\r\n new_b = np.zeros(self.K_list[layer + 1])\r\n weights_list.append(new_W)\r\n biases_list.append(new_b)\r\n\r\n self.weights_list = weights_list\r\n self.biases_list = biases_list",
"def randInitializeWeights(layers):\n # numbers in each layer\n nel = (layers[:-1]+1)*layers[1:]\n nel = nel.astype('int')\n \n # the init apmlitudes for each layer\n epsilon_init = np.repeat(efun(layers[:-1], layers[1:]),nel)\n \n # the init weights for each neuron\n w = (2*np.random.uniform(size = sum(nel))-1)*epsilon_init\n \n return w",
"def mutate_weight(self):\n self.weight += np.random.uniform(low = -2.0, high = 2.0)\n return",
"def random_weights_init(self, data):\n self._check_input_len(data)\n if 'numpy' in str(type(data)):\n data = torch.from_numpy(data).float().to(device)\n for i in range(self._xdim * self._ydim):\n rand_i = torch.randint(len(data), size=(1,))[0]\n self._weights[i] = data[rand_i]",
"def sample_task_weights(self):\r\n for i in range(len(self.graph)):\r\n for j in range(len(self.graph)):\r\n a = random.randint(0,1)\r\n if a==0:\r\n self.weights[i][j] = 1\r\n else:\r\n self.weights[i][j] = -1",
"def em_init(self, random_state: np.random.RandomState):\n weights = random_state.dirichlet(np.ones(len(self.children)))\n self.weights = weights.astype(np.float32)",
"def randInitializeWeights(L_in, L_out):\n\n # Initialize W randomly so that we break the symmetry while training the neural network.\n # Sample W from Uniform [-b,b] where b = epsilon_init = np.sqrt(6.0/(L_in+L_out))\n #Note: The first row of W corresponds to the parameters for the bias units\n epsilon_init = np.sqrt(6.0/(L_in+L_out))\n #w = np.random.uniform(-epsilon_init, epsilon_init,(L_out, 1 + L_in))\n w = np.random.uniform(-epsilon_init, epsilon_init,(L_out, L_in))\n bias = np.zeros((L_out,1))\n w = np.hstack((w,bias))\n return w",
"def randInitializeWeights(L_in, L_out):\n epsilon_init = 0.12\n W = np.random.rand(L_out, 1+L_in)*2*epsilon_init-epsilon_init\n return W"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Numerically stable version of log(cosh(x)). Used to avoid `inf` for even moderately large differences. | def _log_cosh(cls, x: Tensor) -> Tensor: # pylint: disable=invalid-name
return x + softplus(-2.0 * x) - np.log(2.0) | [
"def centered_half_cauchy_logp(x, S):\n x = np.atleast_1d(x)\n if sum(x < 0):\n return -np.inf\n return pm.flib.cauchy(x, 0, S) + len(x) * np.log(2)",
"def squasher(x):\n x = np.asarray(x)\n ax = np.abs(x)\n y = 1 + np.log(ax)\n y[ax < 1] = ax[ax < 1]\n return np.sign(x) * y",
"def log_with_zeros(x):\n x = torch.max(x, torch.tensor(1e-10))\n return torch.log(x)",
"def ceillog(n): ## ceil( log_2 ( n )) [Used by LZ.py]\n assert n >= 1\n c = 0\n while 2 ** c < n:\n c += 1\n return c",
"def lgamma(x):\n cof = [ 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 ]\n y = x\n tmp = x + 5.5\n tmp -= ((x + 0.5) * math.log(tmp))\n ser = 1.000000000190015\n for j in range(len(cof)):\n y += 1\n ser += (cof[j] / y)\n return (-tmp + math.log(2.5066282746310005 * ser / x))",
"def log_squasher(self, x):\n if self.config.log_squasher:\n x_abs = np.absolute(x).astype(float)\n x = np.multiply(np.sign(x), np.log1p(x_abs))\n return x",
"def log2(x):\n pass",
"def _severe_log(self, n):\n try:\n return math.log(n, math.e ** 8)\n except ValueError:\n # `n` might be too small\n\n return 0",
"def logsinh(x):\n if numpy.any(x < 0):\n raise ValueError(\"logsinh only valid for positive arguments\")\n return x + numpy.log(1-numpy.exp(-2*x)) - numpy.log(2)",
"def log_up(x: float) -> float:\n return next(math.log(x), LIBM_ERROR_LIMIT)",
"def _log_prior_cauchy(self,f):\n #return -np.sum(np.log((f - self.location)**2 - self.spread**2))\n return -np.sum(np.log(1 + ((f - self.location)/self.spread)**2))",
"def log_down(x: float) -> float:\n return prev(math.log(x), LIBM_ERROR_LIMIT)",
"def make_cdf_monotonic(cdf):\n # laparra's version\n corrected_cdf = cdf.copy()\n for i in range(1, len(corrected_cdf)):\n if corrected_cdf[i] <= corrected_cdf[i-1]:\n if abs(corrected_cdf[i-1]) > 1e-14:\n corrected_cdf[i] = corrected_cdf[i-1] + 1e-14\n elif corrected_cdf[i-1] == 0:\n corrected_cdf[i] = 1e-80\n else:\n corrected_cdf[i] = (corrected_cdf[i-1] +\n 10**(np.log10(abs(corrected_cdf[i-1]))))\n return corrected_cdf\n\n # my version\n # I think actually i need to make sure i is strictly increasing....\n # return np.maximum.accumulate(cdf)",
"def test_log_cumsum(self):\n x = np.random.random(100) * 10.0 + 0.01\n xc = x.cumsum()\n lxc = np.exp(log_cumsum(np.log(x)))\n for a, b in zip(xc, lxc):\n self.assertAlmostEqual(a, b)",
"def mangoldt(n):\n if(n<1 or n!=int(n)):\n raise ValueError(\n \"n must be positive integer\"\n )\n d = 2\n while (d<=n):\n if(n%d == 0):\n if (math.log(n,d)-int(math.log(n,d))==0):\n return math.log(d)\n else:\n return 0\n d += 1\n return 0",
"def cosh(x):\n\ttry:\n\t\tval = np.cosh(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += np.sinh(x.val) * (x.der[key])\n\t\t\tsec_ders[key] += x.sec_der[key]*np.sinh(x.val) + x.der[key]**2*np.cosh(x.val)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.cosh(x)",
"def entropy(s):\n b = bytearray.fromhex(s)\n freqs = [c / len(b) for c in Counter(b).values()]\n return -sum(f * math.log2(f) for f in freqs)",
"def log_cdf_laplace(x, name=\"log_cdf_laplace\"):\n\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n\n # For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.\n lower_solution = -np.log(2.) + x\n\n # safe_exp_neg_x = exp{-x} for x > 0, but is\n # bounded above by 1, which avoids\n # log[1 - 1] = -inf for x = log(1/2), AND\n # exp{-x} --> inf, for x << -1\n safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))\n\n # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used\n # internally by log1p, rather than being done explicitly here.\n upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)\n\n return array_ops.where_v2(x < 0., lower_solution, upper_solution)",
"def log(x, base=e):\n return 1.0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the $\log C_{m}(k)$ term in the von Mises-Fisher loss. Since `log_cmk_exact` diverges for `kappa` >~ 700 (using float64 precision), and since `log_cmk_approx` is inaccurate for small `kappa`, this method automatically switches between the two at `kappa_switch`, ensuring continuity at this point. | def log_cmk(
cls, m: int, kappa: Tensor, kappa_switch: float = 100.0
) -> Tensor: # pylint: disable=invalid-name
kappa_switch = torch.tensor([kappa_switch]).to(kappa.device)
mask_exact = kappa < kappa_switch
# Ensure continuity at `kappa_switch`
offset = cls.log_cmk_approx(m, kappa_switch) - cls.log_cmk_exact(
m, kappa_switch
)
ret = cls.log_cmk_approx(m, kappa) - offset
ret[mask_exact] = cls.log_cmk_exact(m, kappa[mask_exact])
return ret | [
"def logpow(x, m):\n return torch.where(\n torch.eq(x, torch.tensor(0)),\n torch.where(torch.eq(m, torch.tensor(0)), torch.tensor(0.0), torch.tensor(-np.inf)),\n m * torch.log(x),\n )",
"def posdef_logdet(m: np.ndarray) -> float:\n L = np.linalg.cholesky(m)\n return 2 * np.sum(np.log(np.diag(L)))",
"def grad_log_likelihood(kc, cb, eval_request, eval_result, model_params):\n if eval_request.type != KN_RC_EVALGA:\n print(\"*** grad_log_likelihood incorrectly called with eval type %d\" %\n eval_request.type)\n return -1\n params = eval_request.x\n\n np.savetxt(\"current_pars_k.txt\", params)\n\n mus_and_maybe_grad = model_params.mus_and_maybe_grad\n bases_surplus = model_params.bases_surplus\n observed_matching = model_params.observed_matching\n\n ncat_men, ncat_women = bases_surplus.shape[:-1]\n n_prod_categories = ncat_men * ncat_women\n\n mus, _, dmus = mus_and_maybe_grad(params, model_params, gr=True)\n\n grad_loglik = grad_loglik_all_mus(observed_matching, mus)\n\n gradN = grad_loglik[-1]\n gradxy = grad_loglik[:n_prod_categories].reshape(\n (ncat_men, ncat_women)) + gradN\n gradx0 = grad_loglik[n_prod_categories:(\n n_prod_categories + ncat_men)] + gradN\n grad0y = grad_loglik[(n_prod_categories + ncat_men):-1] + gradN\n\n der_muxy = np.einsum('ij,ijk->k', gradxy, dmus.muxy)\n der_mux0 = np.einsum('i,ik->k', gradx0, dmus.mux0)\n der_mu0y = np.einsum('i,ik->k', grad0y, dmus.mu0y)\n\n eval_result.objGrad = -(der_muxy + der_mux0 + der_mu0y)\n\n return 0",
"def get_hill_estimator_one_value(ordered_data, k):\r\n selected_logs = np.log(ordered_data[:k+1])\r\n return 1./k * sum(selected_logs[:-1]) - selected_logs[-1]",
"def get_kl(self, q_mu, q_logsigma, p_mu=None, p_logsigma=None):\n if p_mu is not None and p_logsigma is not None:\n sigma_q_sq = torch.exp(q_logsigma)\n sigma_p_sq = torch.exp(p_logsigma)\n kl = ( sigma_q_sq + (q_mu - p_mu)**2 ) / ( sigma_p_sq + 1e-6 )\n kl = kl - 1 + p_logsigma - q_logsigma\n kl = 0.5 * torch.sum(kl, dim=-1)\n else:\n kl = -0.5 * torch.sum(1 + q_logsigma - q_mu.pow(2) - q_logsigma.exp(), dim=-1)\n # calculate 1 + logsigma_theta - mu_theta.pow(2) - logsigma_theta.exp(), it's D X K\n # calculate sum and dim = -1, it's D\n return kl",
"def getLogFactorial(k):\n return np.sum([log(i) for i in range(1, k+1)])",
"def kl_divergence(mu, log_sigma, device=\"cpu\"):\n return torch.mean(\n -.5 * torch.sum(1. + log_sigma - mu**2 - torch.exp(log_sigma), dim=-1))",
"def f(k):\n return k // a + k // b + k // c - k // LCM(a, b) - k // LCM(b, c) - k // LCM(a, c) + k // LCM(a, b, c=c)",
"def log_compression(chromagram,gamma = 1):\n\n if type(chromagram) != np.ndarray:\n raise TypeError(\"Chromagram must be 2D numpy ndarray.\")\n\n if chromagram.shape[0] != 12:\n raise ValueError(\"Invalid shape of chromagram.\")\n\n if not isinstance(gamma,int) and not isinstance(gamma,float):\n raise TypeError(\"Gamma must be integer or float.\")\n\n smooth = np.log(1+gamma*chromagram)\n\n return smooth/np.linalg.norm(smooth, ord=2, axis=0, keepdims=True)",
"def E_K(E_inv_cm):\n E_hz = E_inv_cm*c # (1/cm)*(cm/s)\n E_ergs = h*E_hz # ergs\n return E_ergs/k # K",
"def get_hill_estimator(ordered_data):\r\n logs = np.log(ordered_data)\r\n logs_cumsum = np.cumsum(logs[:-1])\r\n k_vector = np.arange(1, len(ordered_data))\r\n m1 = (1./k_vector)*logs_cumsum - logs[1:]\r\n return m1",
"def expectation_maximization(self, corpus, convergence):\n # Do not modify this function\n old_ll = -10**210 # approximation to negative infinity\n log_likelihood = -10**209 # higher than old_ll\n\n while log_likelihood-old_ll > convergence:\n old_ll = log_likelihood\n log_likelihood, emitcounts, transcounts = self.expectation(corpus) # E Step\n self.maximization(emitcounts, transcounts) # M Step\n print 'LOG LIKELIHOOD:', log_likelihood,\n print 'DIFFERENCE:', log_likelihood-old_ll\n print 'CONVERGED'\n\n return log_likelihood",
"def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n # print(self.pmf(k))\n e = 2.7182818285\n const = (e ** (-1 * self.lambtha))\n return self.pmf(k) + self.cdf(k - 1)",
"def kl_divergence_loss(mu, logvar):\n # Increase precision (numerical underflow caused negative KLD).\n return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())",
"def compute_kn(self, Knm, maxiter_cg=10, tol=1e-8, Kmm=None):\n # compute the Kmm^{-1} Kmn terms for each batch\n cov_params = self.get_kernel_params()\n if self.whitened_type == 'cholesky':\n if Kmm is None:\n Kmm = self.kernel(self.xinduce, self.xinduce, cov_params)\n I = torch.eye(Kmm.shape[0], dtype=Knm.dtype, device=Knm.device)\n cKmm = torch.cholesky(Kmm + I * self.jitter_val, upper=False)\n kn = torch.triangular_solve(Knm.t(), cKmm, upper=False)[0].t()\n else:\n kfun = lambda x, y: self.kernel(x, y, params=cov_params)\n # return bsz x M matrix\n # first solve d = Kmm^{-1} K_mn using PCG, then solve kn = R^T d\n if Kmm is None:\n Kmm = ToeplitzTensor(xgrids=self.xgrids, kernel=kfun, batch_shape=None, jitter_val=self.jitter_val)\n d0 = Kmm.inv_matmul(Knm, do_precond=True, maxiter=maxiter_cg, tol=tol) # (bsz, M)\n kn = Kmm._matmul_by_RT(d0) # (bsz, M')\n return kn",
"def cohen_kappa(self):\n pm = self.prediction_matrix.float()\n N = self.recorded.sum().float()\n\n p_observed = pm.diag().sum() / N\n p_expected = torch.dot(pm.sum(dim=0), pm.sum(dim=1)) / (N * N)\n\n if p_expected == 1:\n return 1\n else:\n return 1 - (1 - p_observed) / (1 - p_expected)",
"def logm(cls, mat):\n n = mat.shape[-1]\n dim_3_mat = gs.reshape(mat, [-1, n, n])\n logm = HermitianMatrices.apply_func_to_eigvals(\n dim_3_mat, gs.log, check_positive=True\n )\n logm = gs.reshape(logm, mat.shape)\n return logm",
"def log_b_m_x(m, x, myTheta):\n print(\"TODO\")",
"def smooth_kl_divergence(p, q):\n p_sm = smooth_softmax(p)\n q_sm = smooth_softmax(q)\n # This term is: cross_entropy(p, q) - entropy(p)\n kl_sm = T.sum(((T.log(p_sm) - T.log(q_sm)) * p_sm), axis=1, keepdims=True)\n return kl_sm"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate 3D Euclidean distance between predicted and target. | def _forward(self, prediction: Tensor, target: Tensor) -> Tensor:
return torch.sqrt(
(prediction[:, 0] - target[:, 0]) ** 2
+ (prediction[:, 1] - target[:, 1]) ** 2
+ (prediction[:, 2] - target[:, 2]) ** 2
) | [
"def get_distance(self, points_3d):\n return np.linalg.norm(points_3d - self.location, axis=-1)",
"def test_y3(self):\n self.assertEqual(sd.e_distance((0, 0), (3, 0)), 3)",
"def nose_to_target_dist(self):\n return np.linalg.norm(self.nose_to_target())",
"def test_x3(self):\n self.assertEqual(sd.e_distance((0, 0), (3, 0)), 3)",
"def get_distance_to_object(self, target):\n distance = (target.position.x - self.position.x) ** 2\n distance += (target.position.y - self.position.y) ** 2\n return math.sqrt(distance)",
"def euclidean_distance(vects):\n x, y = vects\n return K.sqrt(\n K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))",
"def get_point_distance(points, target):\n if len(points.shape) == 1:\n return la.norm(points - target)\n return la.norm(points - target, axis=1)",
"def h3_distance(h1, h2):\n h1 = _in_scalar(h1)\n h2 = _in_scalar(h2)\n\n d = _cy.distance(h1, h2)\n\n return d",
"def distance_to_3d(pos1, pos2) -> float:\n delta_x, delta_y, delta_z = abs(pos1[0] - pos2[0]), abs(pos1[1] - pos2[1]), abs(pos1[2] - pos2[2])\n delta_xz = pytagoras(delta_x, delta_z)\n return pytagoras(delta_xz, delta_y)",
"def compute_distances(model, prototypes, batch):\n inputs, targets = batch\n\n outputs = model(inputs)\n\n # Calculate euclidean distance in a vectorized way\n diffs = outputs.unsqueeze(1) - prototypes.unsqueeze(0)\n distances = torch.sum(diffs*diffs, -1) * -1 # get negative distances\n\n return distances",
"def get_distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)# + (a.z - b.z) ** 2)",
"def euclidean_distance(observations: np.ndarray, reference: np.ndarray) -> np.ndarray:\n return L2_magnitude(observations - reference, axis=observations.ndim - 1)",
"def uvdist(self):\n return np.sqrt(np.sum(self._uvw**2, axis=-1))",
"def distanceToTarget(self,target):\n if int(self.sat) > 4:\n # convert decimal degrees to radians \n lat1,lon1,lat2,lon2 = map(radians, [self.lat, self.lon, target[0],target[1]])\n\n # haversine formula \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r\n else:\n return None",
"def z_normalized_distance(observations: np.ndarray, reference: np.ndarray) -> np.ndarray:\n return euclidean_distance(z_normalize(observations), z_normalize(reference))",
"def det3(self, *args) -> \"double\":\n return _coin.SbDPMatrix_det3(self, *args)",
"def three_nn_gpu(query_pts, data_pts):\n if not open3d.core.cuda.device_count() > 0:\n raise NotImplementedError\n\n dist2, idx = ml_ops.three_nn(query_pts, data_pts)\n return tf.sqrt(dist2), idx",
"def compute(self, predict, target, **kwargs):\n dims = tuple(range(predict.ndimension())[1:])\n intersect = (predict * target).sum(dims)\n union = (predict + target - predict * target).sum(dims)\n result = 1.0 - intersect / eps_denom(union)\n return result",
"def distance_ellipsoid(x, y, z, v, center, v1, v2, v3, dmax, label, normal):\n shape = (x.size, y.size, z.size) # shape of the outputs\n # build the equation of the ellipsoid\n # then write the second order equation in d\n # a d**2 + b d + c = 0\n # delta = b**2-4ac\n X = x - center[0]\n Y = y - center[1]\n Z = z - center[2]\n v12 = np.cross(v1, v2)\n v23 = np.cross(v2, v3)\n v31 = np.cross(v3, v1)\n d = np.inner(v1, v23) ** 2\n # equation of the ellipsoid\n # cxx XX + cyy YY + czz ZZ + cxy XY + cyz YZ + czx ZX = d\n cxx = v12[0] ** 2 + v23[0] ** 2 + v31[0] ** 2\n cyy = v12[1] ** 2 + v23[1] ** 2 + v31[1] ** 2\n czz = v12[2] ** 2 + v23[2] ** 2 + v31[2] ** 2\n cxy = 2 * (v12[0] * v12[1] + v23[0] * v23[1] + v31[0] * v31[1])\n cyz = 2 * (v12[1] * v12[2] + v23[1] * v23[2] + v31[1] * v31[2])\n czx = 2 * (v12[2] * v12[0] + v23[2] * v23[0] + v31[2] * v31[0])\n a = (\n cxx * v[0] ** 2\n + cyy * v[1] ** 2\n + czz * v[2] ** 2\n + cxy * v[0] * v[1]\n + cyz * v[1] * v[2]\n + czx * v[2] * v[0]\n )\n b = (\n (2 * cxx * v[0] + cxy * v[1] + czx * v[2]) * X\n + (2 * cyy * v[1] + cyz * v[2] + cxy * v[0]) * Y\n + (2 * czz * v[2] + czx * v[0] + cyz * v[1]) * Z\n )\n c = (\n cxx * X**2\n + cyy * Y**2\n + czz * Z**2\n + cxy * X * Y\n + cyz * Y * Z\n + czx * Z * X\n - d\n )\n delta = b**2 - 4 * a * c\n ind = delta >= 0 # wird but it works\n delta[ind] = np.sqrt(delta[ind])\n\n d1 = 1e16 * np.ones(shape)\n d2 = 1e16 * np.ones(shape)\n d1[ind] = (-b[ind] - delta[ind]) / (2 * a)\n d2[ind] = (-b[ind] + delta[ind]) / (2 * a)\n d1[d1 < 0] = 1e16\n d2[d2 < 0] = 1e16\n d = -np.ones(shape)\n d[ind] = np.minimum(d1[ind], d2[ind])\n d[d == 1e16] = -1\n\n alpha = -np.ones(shape)\n border = -np.ones(shape)\n if dmax is None:\n ind = d > 0\n else:\n ind = np.logical_and(d > 0, d <= dmax)\n alpha[ind] = d[ind]\n border[ind] = label[0]\n\n if normal: # compute the normal vector\n # initialization\n normal_vect = np.zeros(tuple(list(shape) + [3]))\n normal_x = normal_vect[:, :, :, 0]\n normal_y = normal_vect[:, :, :, 1]\n normal_z = normal_vect[:, :, :, 2]\n norm_normal = np.ones(shape)\n # coordinates of the point on the ellipsoid\n Xe = X + alpha * v[0]\n Ye = Y + alpha * v[1]\n Ze = Z + alpha * v[2]\n # compute the direction\n normal_x[ind] = 2 * cxx * Xe[ind] + cxy * Ye[ind] + czx * Ze[ind]\n normal_y[ind] = 2 * cyy * Ye[ind] + cxy * Xe[ind] + cyz * Ze[ind]\n normal_z[ind] = 2 * czz * Ze[ind] + czx * Xe[ind] + cyz * Ye[ind]\n # compute the sense (opposite to v)\n sense = np.asarray(\n normal_x * v[0] + normal_y * v[1] + normal_z * v[2] > 0\n ).nonzero()\n normal_x[sense] *= -1\n normal_y[sense] *= -1\n normal_z[sense] *= -1\n # normalization\n norm_normal[ind] = np.sqrt(\n normal_x[ind] ** 2 + normal_y[ind] ** 2 + normal_z[ind] ** 2\n )\n # copy in the output vector\n normal_x /= norm_normal\n normal_y /= norm_normal\n normal_z /= norm_normal\n else:\n normal_vect = None\n return alpha, border, normal_vect"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Used to generate a question for a given category. This function returns the PK of a random question object that fits the question category. | def get_possible_questions_id(q_category):
q_ids = Question.objects.filter(category=q_category).values_list("pk", flat=True)
q_ids = list(q_ids)
if q_ids:
maxi_id = max(q_ids)
else:
return 1
ran = None
while not ran in q_ids:
ran = random.randint(1, maxi_id)
return ran | [
"def get_random_question():\n rand = random.randrange(0, db.session.query(Question).count())\n return db.session.query(Question)[rand]",
"def next_question(): \n return random.choice(models.Question.objects(valid=True))",
"def random_exemption_category():\n return EXEMPTION_CATEGORY[randint(0, len(EXEMPTION_CATEGORY) - 1)]",
"def generate_random_question(self) -> IntegerQuestion:\n # Be careful.\n assert self.lowerbound < self.upperbound, \"Lowerbound must be smaller than upperbound.\"\n \n # Set operands. \n x = random.randint(self.lowerbound, self.upperbound)\n y = random.randint(self.lowerbound, self.upperbound)\n\n # Do not ask questions where the answer could be undefined.\n if self.QuestionClass is DivisionQuestion:\n while y == 0:\n y = random.randint(self.lowerbound, self.upperbound)\n\n return self.QuestionClass(x, y)",
"def createRandom(self):\n \n random_key = ''\n with CQCConnection(self.name) as User:\n for i in range(24):\n q = qubit(User)\n q.H()\n random_key = random_key + str(q.measure())\n return random_key",
"def randomCategory(self):\n self.category = (random.choice([\"colors\",\"animals\",\"others\"]))\n return self.category",
"def pick_random_questions(num_questions):\n print(\"=====pick_random_questions fired...\")\n shuffle(QUESTIONS)\n questions = sample(list(QUESTIONS), k=num_questions)\n\n shuffle(questions)\n return questions",
"def get_question_id(self):\n return # osid.id.Id",
"def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`companies`;\" % self.db_name )\n if count == 0:\n return False\n the_id = random.randint( 1, count[0]['c'] )\n company = self.getByID( the_id )\n return company",
"def get_random_product(category_product_response):\n products = category_product_response.get(\"products\")\n random_product = choice(products)\n return random_product",
"def exam_id(self):\n return random.choice(self['special_exam_ids'])",
"def select_word(category_selection):\r\n\r\n random_word_string = random.choice(wc[list(wc.keys())[category_selection - 1]])\r\n return random_word_string",
"def category_id(category_name: str) -> int:\n return self.categories_df[self.categories_df['name'] == category_name]['id'].values[0]",
"def select_question(questions: list) -> Tuple[str, str]:\n if (len(questions) == 0):\n return None\n return random.choice(questions)",
"def sequential_id(self):\n return random.choice(self['sequential_ids'])",
"def get_random_product_id(db_connect):\n\n query = f\"\"\"\n SELECT ID\n FROM {db_connect.db}.wp_posts\n WHERE post_type='product'\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n\n id = db_connect.select(query)\n return id[0][0]",
"def get_key(self):\n return random.choice(self._keys)",
"def random_pk(model):\n max_id = model.objects.all().aggregate(max_id=Max(\"id\"))['max_id']\n while max_id:\n pk = random.randint(1, max_id)\n try:\n return model.objects.values('id').get(pk=pk)['id']\n except model.DoesNotExist:\n pass",
"def pick_random_recipe() -> int:\n all_recipes = Meal.objects.all()\n recipes_counter = all_recipes.len()\n generared_recipe_num = random.randint(1, recipes_counter)\n return generared_recipe_num"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function takes a target data frame and replaces the tags with their cleaned-up, spaceless versions. | def clean_tags_dataframe(df_targets):
# Make a copy of the dataframe so we don't overwrite the original.
df_targets_cleaned = copy.deepcopy(df_targets)
# Loop through all the cleaned versions of the tags and replace the
# original versions, which have extra whitespace pre-pended to them, with
# the cleaned versions.
for tag in TAGS_SET:
df_targets_cleaned.replace(
to_replace=" "+tag,
value=tag,
inplace=True,
)
# Replace "None" tags with an empty string.
df_targets_cleaned.replace(
to_replace=[None],
value=[""],
inplace=True,
)
return df_targets_cleaned | [
"def clean_description(df):\n df.description = df.description.apply(lambda x: re.sub('<[^<]+?>', '', x))\n return df",
"def remove_semantic_tags(self, semantic_tags):\n _check_semantic_tags(self._dataframe, semantic_tags)\n dt = self._update_cols_and_get_new_dt('remove_semantic_tags', semantic_tags)\n if dt.index is None and self.index is not None:\n dt._dataframe = dt._dataframe.reset_index(drop=True)\n return dt",
"def tidy_data(df):\n\n ##clean up column headings\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')",
"def rename_tags_in_df(df_targets):\n df_targets_renamed = copy.deepcopy(df_targets)\n \n # Rename SPIKES.\n df_targets_renamed.replace(\n to_replace=\"Acclima-Spike\",\n value=\"spike\",\n inplace=True,\n )\n # Rename NOISE.\n noise_tag_list = [\n \"Acclima-Noise\",\n \"Acclima-Diurnal Noise\", \n \"Acclima-FrozenRecovery\", \n \"Acclima-Erratic\",\n ]\n for noise_tag in noise_tag_list:\n df_targets_renamed.replace(\n to_replace=noise_tag,\n value=\"noise\",\n inplace=True,\n )\n return df_targets_renamed",
"def clean_data(df):\n cleaned_categories = create_categories_columns(df[\"categories\"])\n\n # replace old categories with the cleaned one (which itself is a whole dataframe), then remove duplicates\n df = df.drop(columns=[\"categories\"], axis=1)\n df = pd.concat([df, cleaned_categories], sort=False, axis=1)\n df = remove_duplicates(df)\n\n return df",
"def _normalize_sent_tags(sentence_df):\n normalized_tags, begin_tags = [], []\n for col in np.arange(len(sentence_df.columns)):\n word_vals = sentence_df.iloc[:, col]\n\n word_vals = word_vals[word_vals != \"O\"]\n if word_vals.shape[0] == 1:\n normalized_tags.append(word_vals.iloc[0])\n begin_tags.append(word_vals.iloc[0].startswith(\"B\"))\n continue\n verb_words = word_vals[word_vals.isin([\"I-V\", \"B-V\"])]\n if verb_words.shape[0] != 0: # a) - verbo\n normalized_tags.append(verb_words.iloc[0])\n begin_tags.append(False) # Event\n continue\n # b) - ARGM e ARG (o último e mais especifico tem prio)\n arg_words = word_vals[word_vals.str.contains(r\".*[ARG][0-9]|ARGM\")]\n if arg_words.shape[0] != 0:\n normalized_tags.append(arg_words.iloc[-1]) # desempate entre dois ARGM-X diferentes\n begin_tags.append(arg_words.iloc[-1].startswith(\"B\"))\n continue\n else:\n print(\"\\nNORMALIZATION ERROR - MULTIPLE TAG VALUES FOUND FOR WORD.\")\n print(word_vals.values)\n\n return normalized_tags, begin_tags",
"def replace_all_tags(soup, old_tag_name, new_tag_name, old_tag_class=None):\n\tif old_tag_class:\n\t\told_tags = soup.find_all(old_tag_name, class_=old_tag_class)\n\telse:\n\t\told_tags = soup.find_all(old_tag_name)\n\n\tfor old_tag in old_tags:\n\t\treplaceTagWithContents(old_tag, new_tag_name)",
"def clean_data(articles: pd.DataFrame) -> pd.DataFrame:\n articles['authors'] = articles['authors'].apply(fix_separators)\n articles['tags'] = articles['tags'].apply(fix_separators)\n return articles",
"def _replace_tags(self, data, src_data=None):\n\n if src_data is None:\n src_data = data\n\n if isinstance(data, str):\n tag = self._find_tag(data)\n if tag is not None:\n data = data.replace(tag, src_data[tag[1:-1]])\n return data\n\n if isinstance(data, list):\n new_list = list()\n for item in data:\n new_list.append(self._replace_tags(item, src_data))\n return new_list\n\n if isinstance(data, dict):\n counter = 0\n for key, value in data.items():\n tag = self._find_tag(value)\n if tag is not None:\n try:\n data[key] = value.replace(tag, src_data[tag[1:-1]])\n counter += 1\n except KeyError:\n raise PresentationError(\n f\"Not possible to replace the tag {tag}\"\n )\n if counter:\n self._replace_tags(data, src_data)\n return data\n\n raise PresentationError(u\"Replace tags: Not supported data type.\")",
"def clean(self, df):\n df = df.drop(self.__preprocessor.get_non_redundant_entity_attributes(), axis=1)\n df = df.drop(self.__preprocessor.get_redundant_entity_attributes(), axis=1)\n return df",
"def fill_tag(df):\n df['tag'] = np.where(df.tag.isna(), df.up_tag, df.tag)\n return df",
"def correct_up_tag(df):\n manual_tag_exists = df.manual_tag.values != 'no tag'\n df['up_tag'] = np.where(manual_tag_exists, df.manual_tag, df.auto_tag)\n return df",
"def update_tags(self):\n raise NotImplementedError",
"def test_tags_replace(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda s: \"collector\" in s.tags and \"daemon\" in s.tags, ctx.services)[0]\n svc.tags = [\"unlikely_tag_1\", \"unlikely_tag_2\"]\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svcs = filter(lambda s: \"unlikely_tag_1\" in s.tags and \"unlikely_tag_2\" in s.tags, ctx.services)\n self.assertEqual(len(svcs), 1)",
"def _standardize_column_values(dataframe):\n\n # TODO Use None instead of \"-\"; but may affect downstream pipelines that use \"-\" already\n if \"structure.alternate_model\" in dataframe.columns:\n dataframe[\"structure.alternate_model\"].replace(\"\", \"-\", inplace=True)\n if \"ligand.expo_id\" in dataframe.columns:\n dataframe[\"ligand.expo_id\"].replace(0, \"-\", inplace=True)\n if \"ligand_allosteric.expo_id\" in dataframe.columns:\n dataframe[\"ligand_allosteric.expo_id\"].replace(0, \"-\", inplace=True)\n if \"structure.resolution\" in dataframe.columns:\n dataframe[\"structure.resolution\"].replace(0, np.nan, inplace=True)\n\n # In case of drugs\n if \"drug.brand_name\" in dataframe.columns:\n dataframe[\"drug.brand_name\"] = dataframe[\"drug.brand_name\"].apply(\n lambda x: x.split(\";\") if x != \"\" else []\n )\n if \"drug.synonyms\" in dataframe.columns:\n dataframe[\"drug.synonyms\"] = dataframe[\"drug.synonyms\"].apply(\n lambda x: x.split(\"\\t\") if x != \"\" else []\n )\n\n return dataframe",
"def clean_data(self, data: pd.DataFrame) -> pd.DataFrame:",
"def reset_semantic_tags(self, columns=None, retain_index_tags=False):\n columns = _convert_input_to_set(columns, \"columns\")\n cols_not_found = sorted(list(columns.difference(set(self._dataframe.columns))))\n if cols_not_found:\n raise LookupError(\"Input contains columns that are not present in \"\n f\"dataframe: '{', '.join(cols_not_found)}'\")\n if not columns:\n columns = self._dataframe.columns\n dt = self._update_cols_and_get_new_dt('reset_semantic_tags', columns, retain_index_tags)\n if dt.index is None and self.index is not None:\n dt._dataframe = dt._dataframe.reset_index(drop=True)\n return dt",
"def clean_data(df):\n # Resolve categories and expand them to actual columns.\n categories_df = _resolve_categories(df['categories'])\n df = df.drop(columns=['categories'])\n df = pd.concat([df, categories_df], axis=1)\n\n # drop duplicates\n df = _drop_duplicates(df)\n return df",
"def clean_df(df):\n df = df.loc[df.Model.isin([\"Base-P\"])==False]\n df = df.loc[df.Perturbation.isin([\"general_gaussian_noise\"])==False]\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_shot_noise\", \"Perturbation\"] = \"shot noise\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_impulse_noise\", \"Perturbation\"] = \"impulse noise\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_defocus_blur\", \"Perturbation\"] = \"defocus blur\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_glass_blur\", \"Perturbation\"] = \"glass blur\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_motion_blur\", \"Perturbation\"] = \"motion blur\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_zoom_blur\", \"Perturbation\"] = \"zoom blur\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_snow\", \"Perturbation\"] = \"snow\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_frost\", \"Perturbation\"] = \"frost\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_contrast\", \"Perturbation\"] = \"contrast\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_elastic_transform\", \"Perturbation\"] = \"elastic transform\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_pixelate\", \"Perturbation\"] = \"pixelate\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_gaussian_blur\", \"Perturbation\"] = \"gaussian blur\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_spatter\", \"Perturbation\"] = \"spatter\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_speckle_noise\", \"Perturbation\"] = \"speckle noise\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_fog\", \"Perturbation\"] = \"fog\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_brightness\", \"Perturbation\"] = \"brightness\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_jpeg_compression\", \"Perturbation\"] = \"jpeg compr\"\n df.loc[df[\"Perturbation\"]== \\\n \"imagenet2012_corrupted_saturate\", \"Perturbation\"] = \"saturate\"\n return df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replaces 'Acclima-Spike' with 'spike' and noise-related Acclima tags with 'noise'. Returns a dataframe with renamed tags. | def rename_tags_in_df(df_targets):
df_targets_renamed = copy.deepcopy(df_targets)
# Rename SPIKES.
df_targets_renamed.replace(
to_replace="Acclima-Spike",
value="spike",
inplace=True,
)
# Rename NOISE.
noise_tag_list = [
"Acclima-Noise",
"Acclima-Diurnal Noise",
"Acclima-FrozenRecovery",
"Acclima-Erratic",
]
for noise_tag in noise_tag_list:
df_targets_renamed.replace(
to_replace=noise_tag,
value="noise",
inplace=True,
)
return df_targets_renamed | [
"def simplify_tcga_names(data):\n out = data.copy()\n cols = out.columns.str.replace('-', '.')\n cols = cols.str.replace(r'\\.[0-9A-Z]{3}\\.[0-9]{2}$', '')\n out.columns = cols\n\n # following this renaming, we have duplicates in the columns\n # I've checked these and they appear to have been sequenced twice. Pick one arbitrarily\n dupes = out.columns[out.columns.duplicated()]\n if len(dupes) > 0:\n logger.warn(\n \"After relabelling, there are %d duplicate sample names. We'll keep the first instance in each case.\",\n len(dupes)\n )\n out = out.loc[:, ~out.columns.duplicated()]\n return out",
"def tag_junk(issue, replace_nan=False, replace_all=True):\n issue = copy.deepcopy(issue)\n tags = []\n if replace_nan:\n tags.append(np.nan)\n if replace_all:\n tags.extend([\"B\", \"AT\", \"N\", \"CT\", \"CN\", \"OT\", \"PH\", \"MH\", \"BQA\", \"BQN\", \"BQT\", \"NP\", \"SH\"])\n\n for tag in tags:\n issue.tags_df.function.replace(tag, \"JNK\", inplace=True)\n return issue",
"def feature_engineered(df):\n # pre populate with all 0 s\n # df['funny'] = 0\n # df['cool'] = 0\n # df['useful'] = 0\n\n # parse the votes to create features\n # for index, row in df.iterrows():\n # val_dict = row['votes']\n # for key in val_dict:\n # # for any number of votes, we consider it as boolean\n # if val_dict[key] > 0:\n # df.loc[index, key] = 1\n\n # stem, lemmatize, stop word removal of the texts\n df['text'] = df['text'].apply(lambda x: perform_nlp(x.lower()))\n\n # create a review length as an additional feature\n # df['review_length'] = df['text'].apply(lambda x: len(x))\n df['text_feature'] = df['text'].apply(lambda x: get_features(x, 'bow'))\n\n # re define the target variable as 1 or 0\n df['label'] = df['stars'].apply(lambda x: 0 if x <= 3 else 1)\n\n # drop these columns\n del df['votes']\n del df['text']\n del df['stars']\n\n return df",
"def apply_imputation_encoding(df, mapping_table, tracking_flags=True):\n\n import pandas as pd\n import numpy as np\n\n categorical = df.select_dtypes(include=['object','category'])\n\n for feature in mapping_table['feature']:\n\n flag_feature = feature + '_flag_missing'\n tmp_df = mapping_table.loc[mapping_table['feature'] == feature]\n\n if feature in categorical.columns:\n value = tmp_df['mode']\n else:\n value = tmp_df['median']\n\n if tracking_flags == True:\n if tmp_df['create_flag'].item() == 1:\n df[flag_feature] = np.where(df[feature].isnull(), 1, 0)\n\n df[feature] = df[feature] = np.where(df[feature].isnull(), value, df[feature])\n\n return df",
"def _simplify_features(df):\n simple = [col for col in df.columns if 'error' not in col]\n simple_df = df[simple]\n simple_df.columns = [col.replace(' ', '_') for col in simple]\n \n return simple_df",
"def default_preprocessing(df):\n def race(row):\n if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE\n return 'White'\n return 'Non-White'\n\n df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)\n df = df.rename(columns = {'RACEV2X' : 'RACE'})\n\n df = df[df['PANEL'] == 21]\n\n # RENAME COLUMNS\n df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',\n 'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',\n 'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',\n 'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',\n 'POVCAT16' : 'POVCAT', 'INSCOV16' : 'INSCOV'})\n\n df = df[df['REGION'] >= 0] # remove values -1\n df = df[df['AGE'] >= 0] # remove values -1\n\n df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9\n\n df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9\n\n df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',\n 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',\n 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',\n 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',\n 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1\n\n def utilization(row):\n return row['OBTOTV16'] + row['OPTOTV16'] + row['ERTOT16'] + row['IPNGTD16'] + row['HHTOTD16']\n\n df['TOTEXP16'] = df.apply(lambda row: utilization(row), axis=1)\n lessE = df['TOTEXP16'] < 10.0\n df.loc[lessE,'TOTEXP16'] = 0.0\n moreE = df['TOTEXP16'] >= 10.0\n df.loc[moreE,'TOTEXP16'] = 1.0\n\n df = df.rename(columns = {'TOTEXP16' : 'UTILIZATION'})\n return df",
"def concepts_tag_column():\n concs = pd.read_csv(os.path.join(out_dir, 'ddf--concepts.csv'))\n graph = pd.read_excel('../source/graph_settings.xlsx', sheetname='Indicators')\n mappin = pd.read_excel('../source/Gapminder world tag tree.xlsx', skip_footer=4)\n\n measures = concs[concs['concept_type'] == 'measure']\n measures = measures.set_index('concept').drop(['age', 'latitude', 'longitude'])\n\n graph = graph.set_index('ddf_id')\n m = graph.loc[measures.index, ['Menu level1', 'Menu level 2']].copy()\n mappin = mappin.set_index(['tag_name'])\n\n m2 = m.copy()\n\n for k, v in m.iterrows():\n\n if v['Menu level 2'] == 'Water' and v['Menu level1'] == 'Environment':\n m2.loc[k, 'tags'] = 'environment_water'\n continue\n\n if v['Menu level 2'] == 'Water' and v['Menu level1'] == 'Infrastructure':\n m2.loc[k, 'tags'] = 'infrastructure_water'\n continue\n\n if not pd.isnull(v['Menu level 2']):\n m2.loc[k, 'tags'] = mappin.loc[v['Menu level 2'], 'tag_id']\n elif not pd.isnull(v['Menu level1']):\n m2.loc[k, 'tags'] = mappin.loc[v['Menu level1'], 'tag_id']\n else:\n continue\n\n # manually set some tags.\n m2.loc['children_per_woman_total_fertility', 'tags'] = '_root, newborn_infants'\n m2.loc['co2_emissions_tonnes_per_person', 'tags'] = '_root, emissions'\n m2.loc['income_per_person_gdppercapita_ppp_inflation_adjusted', 'tags'] = '_root, incomes_growth'\n m2.loc['child_mortality_0_5_year_olds_dying_per_1000_born', 'tags'] = '_root, mortality'\n m2.loc['life_expectancy_years', 'tags'] = '_root, life_expectancy'\n\n concs = concs.set_index('concept')\n concs['tags'] = m2['tags']\n\n concs['tags'] = concs['tags'].fillna('_none')\n\n # remove concepts from dont panic poverty\n concs = concs.drop(['sg_population', 'sg_gini', 'sg_gdp_p_cap_const_ppp2011_dollar'])\n\n concs.to_csv(os.path.join(out_dir, 'ddf--concepts.csv'), encoding='utf8')",
"def rename_columns(df):\n df = df.rename(columns={'Sample Name':'Sample','Gene Name': 'Target', 'Condition Name': 'Treatment'})\n return df",
"def map_imputation_encoding(df, x):\n \n import pandas as pd\n import numpy as np\n\n im = pd.DataFrame()\n im['type'] = df.dtypes.astype(str)\n im['feature'] = im.index\n im['missing'] = df.isnull().sum()\n\n # Identify logical features and convert to int\n logical = im.loc[(im['type'] == 'bool')]['feature']\n df[logical] = df[logical].astype(int)\n im['type'] = df.dtypes.astype(str) # Update table\n\n numeric = im.loc[im['type'].str.contains('float')]['feature']\n numeric = numeric.append(im.loc[im['type'].str.contains('int')]['feature'])\n\n categorical = im.loc[(im['type'] == 'object')]['feature']\n\n im['mode'] = df[categorical].mode().iloc[0]\n im['median'] = df[numeric].median()\n im.reset_index(drop=True, inplace=True)\n im['create_flag'] = np.where(im['missing'] > 0, 1, 0)\n\n im = im[['feature','median','mode','create_flag']]\n im = im.loc[im['feature'].isin(x)]\n \n return im",
"def replace_all_tags(soup, old_tag_name, new_tag_name, old_tag_class=None):\n\tif old_tag_class:\n\t\told_tags = soup.find_all(old_tag_name, class_=old_tag_class)\n\telse:\n\t\told_tags = soup.find_all(old_tag_name)\n\n\tfor old_tag in old_tags:\n\t\treplaceTagWithContents(old_tag, new_tag_name)",
"def clean_iris(df):\n \n dropcols = ['species_id', 'measurement_id']\n df.drop(columns= dropcols, inplace=True)\n df.rename(columns={'species_name': 'species'}, inplace=True)\n dummy_sp = pd.get_dummies(df[['species']], drop_first=True)\n return pd.concat([df, dummy_sp], axis =1)",
"def tidy_dic():\n #defining path for data\n fname = os.path.join(data_path, \"gardner_time_to_catastrophe_dic_tidy.csv\")\n\n #read csv\n df = pd.read_csv(fname)\n\n # Since just True or False on a plot legend doesn't make much sense, we'll create a column, \n #```tubulin_labeled```, that converts the ```True``` and ```False``` values from the \n #```labeled``` column to ```'labeled tubulin'``` and ```'microtubules'```\n df['tubulin_labeled'] = [\n 'labeled tubulin' if df.labeled[i] else 'microtubules' \n for i in range(len(df.labeled))\n ]\n return df",
"def rename_friendly_name(attributes):\n rename_table = {\n \"Old Sensor Name\": \"New Sensor Name\",\n }\n\n if \"friendly_name\" in attributes and attributes[\"friendly_name\"] in rename_table:\n # print(\"renaming %s to %s\" % (attributes[\"friendly_name\"], rename_table[attributes[\"friendly_name\"]]))\n attributes[\"friendly_name\"] = rename_table[attributes[\"friendly_name\"]]\n\n return attributes",
"def getTagMapping(self,df):\n tags_qid = dict()\n\n en_words = dict()\n for i in words.words():\n en_words[i] = 1\n\n lemmatizer = WordNetLemmatizer()\n ps = PorterStemmer()\n\n for i in range(len(df)):\n text = df['Tags'].iloc[i]\n qid_ = i+1\n text = text.split(\",\")\n for i_ in text:\n i = i_.strip()\n i = i.lower()\n i = lemmatizer.lemmatize(i)\n if(\"-\" in i or len(i.split(\" \"))>1 or len(i)<=2 or (en_words.get(i,0)==0)):\n continue\n else:\n i = ps.stem(i)\n tags_qid[i] = tags_quotes.get(i,[])\n tags_qid[i].append(qid_)\n return tags_qid",
"def replace_in_claws(self, sent):\n for i, (word, tag, biber_tag) in enumerate(sent):\n sent[i][1] = self.claws_replacements_dict.get(tag, tag)\n return sent",
"def rename_columns(df, pheno, pop):\n\n if \"LOG(OR)_SE\" in df.columns:\n df.rename(columns={\"LOG(OR)_SE\": \"SE\"}, inplace=True)\n columns_to_rename = [\"BETA\", \"SE\", \"P\"]\n renamed_columns = [(x + \"_\" + pheno + \"_\" + pop) for x in columns_to_rename]\n df.rename(columns=dict(zip(columns_to_rename, renamed_columns)), inplace=True)\n return df",
"def clean_titanic(df):\n \n \n df[\"is_female\"] = df.sex == \"Female\"\n embarked_dummies = pd.get_dummies(df.embarked, prefix='Embarked', drop_first=True)\n class_dummies = pd.get_dummies(df.pclass, prefix='class', drop_first=True)\n\n dropcols = ['deck', 'age', 'embark_town', 'passenger_id', 'embarked', 'sex', 'pclass', 'class']\n df.drop(columns= dropcols, inplace=True)\n\n return pd.concat([df, embarked_dummies, class_dummies], axis =1)",
"def clean_ratings(inp_df):\n\n temp_df = inp_df[['imdb_rating', 'meta_score']]\n\n temp_imdb = list(mm_scaler.fit_transform(temp_df[['imdb_rating']]))\n temp_meta = list(mm_scaler.fit_transform(temp_df[['meta_score']]))\n\n temp_df = pd.DataFrame({'title': inp_df['title'],\n 'scaled_imdb': temp_imdb,\n 'scaled_meta': temp_meta})\n\n temp_df['scaled_imdb'] = 1 - (temp_df['scaled_imdb'].str[0])\n temp_df['scaled_meta'] = 1 - (temp_df['scaled_meta'].str[0])\n\n temp_df['final_score'] = temp_df['scaled_imdb'] * temp_df['scaled_meta']\n\n return temp_df[['title', 'final_score']]",
"def remove_conflicting(old_df, diff_df):\n diffs = zip(list(diff_df['id']),list(diff_df['antimicrobial']))\n\n # for every difference, set that cell to blank\n for run, mic in diffs:\n old_df.loc[old_df['run']==run, 'MIC_'+mic] = ''\n\n return old_df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instantiates the ElementTree based on the xml_file. | def __init__(self, xml_file):
self.the_etree = ElementTree.parse(xml_file)
self.xml_file = xml_file | [
"def __init__(self, xmlfile):\n\t\tctx = _new_xml(xmlfile)\n\t\tsuper(XMLContext, self).__init__(ctx)",
"def open_xml(self, file_name):\r\n tree = ET.parse(file_name)\r\n root = tree.getroot()\r\n return root",
"def from_file(cls, xml_path):\n try:\n parsed_xml = cls._parse(xml_path)\n except OSError as e:\n raise XmlParser.XmlError(\"Problem reading xml file at {}: {}\".format(xml_path, e))\n return cls(xml_path, parsed_xml)",
"def _get_root_element_from_xml(self, filename):\n # Open file as an ElementTree object\n file = os.path.join(self.document_path, filename)\n try:\n tree = ET.parse(file)\n except ET.ParseError, e:\n log.error(\n \"%s (%s): file %s\" %\n (type(e).__name__, e.message, self.__current_file))\n return\n except IOError, e:\n log.error(\n \"%s (%s): file %s\" %\n (type(e).__name__, e.message, self.__current_file))\n return\n root = tree.getroot()\n return root",
"def parse_xml(xml_file):\n with open(xml_file) as f:\n xml = f.read()\n \n root = objectify.fromstring(xml)\n return root",
"def loading_xml(self):\n\n dom = minidom.parse(self.filepath)\n return dom",
"def from_xml(self, filename):\n # #print(os.getcwd())\n root = ET.parse(filename).getroot()\n for node in root:\n if node.tag == \"persoon\":\n p = Person(node.attrib[\"naam\"], id_=node.attrib[\"id\"])\n for i in node:\n if i.tag == \"head\":\n self.head = p\n elif i.tag == \"geb\":\n p.birth = i.text\n elif i.tag == \"stf\":\n p.dead = i.text\n elif node.tag == \"familie\":\n parents = []\n children = []\n div = False\n for i in node:\n if i.tag in (\"ouder\", \"kind\"):\n try:\n p = Person.all_[i.text]\n except KeyError:\n p = Person(\"ERROR\", id_=i.text)\n if i.tag == \"ouder\":\n parents.append(p)\n else:\n children.append(p)\n elif i.tag == \"divorsed\":\n div = True\n self.families.append(Family(parents, children, div))",
"def fromXmlFile(self, file='case.xml'):\n f = open(file, 'r')\n try:\n xml = f.read()\n finally:\n f.close()\n\n # Populate the entity data variables from the retrieved XML\n self.load(xml)",
"def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')",
"def getTree(file):\n parser = etree.XMLParser(strip_cdata=False)\n tree = etree.parse(file, parser)\n root= tree.getroot()\n return tree ,root",
"def getXMLTree( self ):\n \n try:\n self.tree = ET.parse(self.cdlfilename)\n except Exception, inst:\n print \"Unexpected error opening %s: %s\" % (self.cdlfilename, inst)\n return\n \n doc = self.tree.getroot()\n\n #do something bad to get the namespace (should really be handling these separately for when the asc cdl spec updates).\n try:\n self.ASCCDLNS = str(doc.tag)[str(doc.tag).index(\"{\"):str(doc.tag).index(\"}\")+1]\n except ValueError:\n nuke.tprint(\"badly formatted xml, no namespace. Attempting to continue without namespace. Unlikely to work.\")\n self.ASCCDLNS = \"\"\n \n return",
"def __parseFile(self):\n root = self.template_xml.getroot()\n \n self.templateName = root.find(self.NS+\"name\").text\n \n descriptionElem = root.find(self.NS+\"description\")\n if(descriptionElem is not None):\n self.description = descriptionElem.text\n \n authorElem = root.find(self.NS+\"author\")\n if(authorElem is not None):\n self.author = authorElem.text\n\n previewImageElem = root.find(self.NS+\"previewImage\")\n if(previewImageElem is not None):\n self.previewImageFilename = previewImageElem.get(\"src\")\n\n canvas = root.find(self.NS+\"canvas\")\n self.__parseCanvas(canvas)",
"def FromXml(cls, element):\n\n\t\telementTag = etree.QName(element.tag)\n\t\tif (elementTag.localname != \"ipxactFile\"):\n\t\t\traise PyIpxactException(\"Expected tag 'ipxactFile'.\")\n\t\t\n\t\tfor element2 in element:\n\t\t\telement3 = etree.QName(element2)\n\t\t\tif (element3.localname == \"vlnv\"):\n\t\t\t\tvendor = element2.get(\"vendor\")\n\t\t\t\tlibrary = element2.get(\"library\")\n\t\t\t\tname2 = element2.get(\"name\")\n\t\t\t\tversion = element2.get(\"version\")\n\t\t\t\t\n\t\t\t\tvlnv = Vlnv(vendor, library, name2, version)\n\t\t\telif (element3.localname == \"name\"):\n\t\t\t\tname = element2.text\n\t\t\telif (element3.localname == \"description\"):\n\t\t\t\tdescription = element2.text\n\t\t\telse:\n\t\t\t\traise PyIpxactException(\"Unsupported tag '{0}' in node 'ipxactFile'.\".format(element.localname))\n\t\t\n\t\tipxactFile = cls(vlnv, name, description)\n\t\treturn ipxactFile",
"def __init__(self, xml_config_data):\n super(self.__class__, self).__init__(ET.fromstring(xml_config_data))",
"def _load_ead(self, ead_file_path):\n if not os.path.exists(ead_file_path):\n raise ValueError('The XML file is not available at the given location: %s' % ead_file_path)\n self.ead_tree = lxml.etree.parse(ead_file_path)",
"def from_xml(cls, lib_root, layers, settings, grid, from_file=None):\n name = lib_root.attrib.get('name')\n\n desc_nodes = lib_root.xpath('.//description')\n if desc_nodes:\n description = desc_nodes[0].text\n else:\n description = None\n\n packages = OrderedDict()\n for package_node in lib_root.xpath('.//packages/package'):\n package = Package.from_xml(package_node)\n packages[package.name] = package\n\n symbols = OrderedDict()\n for symbol_node in lib_root.xpath('.//symbols/symbol'):\n symbol = Symbol.from_xml(symbol_node)\n symbols[symbol.name] = symbol\n\n device_sets = OrderedDict()\n for ds_node in lib_root.xpath('.//devicesets/deviceset'):\n device_set = DeviceSet.from_xml(ds_node, packages)\n device_sets[device_set.name] = device_set\n\n return cls(name=name,\n description=description,\n packages=packages,\n symbols=symbols,\n device_sets=device_sets,\n from_file=from_file,\n layers=layers,\n settings=settings,\n grid=grid)",
"def create_from_xml(cls, ci, node):\n name = node.getAttribute(\"name\")\n if not name:\n return None\n wdir = os.path.join(ci.wdir, node.getAttribute(\"wdir\"))\n exe = node.getAttribute(\"exe\")\n if exe:\n cmds = glob.glob(os.path.join(wdir, exe))\n if len(cmds):\n cmd = cmds[0]\n else:\n sys.stderr.write(\"Warning: could not expand test cmd '%s'\" % (exe))\n cmd = exe\n else:\n cmd = node.getAttribute(\"cmd\")\n if not cmd:\n return None\n info = node.getAttribute(\"info\")\n if not info:\n info = name\n xmlresult = node.getAttribute(\"xmlresult\")\n obj = cls(ci, name, info, cmd, wdir, xmlresult)\n if node.getAttribute(\"disabled\") and node.getAttribute(\"disabled\")!=\"0\":\n obj.disabled = True\n return obj",
"def init_parser(self) -> None:\n self.xml_depth = 0\n self.xml_root = None\n self.parser = ET.XMLPullParser((\"start\", \"end\"))",
"def read_xml(fname):\n tree = ET.parse(fname)\n root = tree.getroot()\n \n return tree, root"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find parameters in the _init.xml OM input file that match the keys in change_dict and change them to the value in change_dict. | def change_parameter(self, change_dict):
log = logging.getLogger()
changed = False
# Make a set copy so that any parameters not found can be reported
change_set = set(change_dict)
if not change_set:
return changed
# Make a dictionary to store any attempts to
# change non parameter variables
not_params = {}
# All the variables, including parameters,
# are in element 'ModelVariables'
e_root = self.the_etree.getroot()
e_variables = e_root.find('ModelVariables')
for var in e_variables:  # iterating the element yields its children (getchildren() is gone in Python 3.9+)
## All the variable elements are just called <ScalarVariable> so we
## need to extract the name from the attributes
var_name = var.attrib['name']
if var_name in change_set:
## Check if it is actually a parameter before changing it
if var.attrib['variability'] != 'parameter':
not_params[var_name] = var.attrib['variability']
else:
## Get the value element (Real, Integer or Boolean)
change_val = change_dict[var_name]
change_type = type(change_val)
var_elem = get_value_elem(var, change_type)
if var_elem is None:
raise ValueError('Did not find Real, Integer or Boolean')
try:
current_val = change_type(var_elem.attrib['start'])
except KeyError:
current_val = None
if current_val and current_val == change_val:
log.info('parameter {0} is already equal to {1}'.format(var_name, current_val))
else:
# Print the change details and do it
log.debug('changing parameter {0} from {1} to {2}'.format(var_name, current_val,
str(change_dict[var_name])))
var_elem.attrib['start'] = str(change_dict[var_name])
changed = True
# Remove a found variable from the input set copy
change_set.remove(var_name)
if change_set:
log.warning('Could not find the following parameter variables:')
for var in change_set:
log.warning('{0}, tried to set to {1}'.format(var, change_dict[var]))
if not_params:
log.warning('The following variables are not parameters:')
for var in not_params:
log.warning('{0}, variability is {1}'.format(var, not_params[var]))
return changed | [
"def change_event_params(self, changes_dict):\r\n # print changes_dict\r\n for key, sub_dict in list(changes_dict.items()): # loop through events (key)\r\n for sub_key, val in list(sub_dict.items()): # loop through parameters being changed (sub_key)\r\n if isinstance(sub_key, int): # in this case, it is the layer id of a stratigraphic layer!\r\n self.events[key].layers[sub_key].properties[val['property']] += val['val']\r\n else:\r\n self.events[key].properties[sub_key] += val",
"def updateParams(self,mapName):\n pass",
"def remap_parameters(self, spam):\n if hasattr(self, 'redefine'):\n for key, value in dictitems(self.redefine):\n # Check that the key was an original name\n if key in self.backup_names:\n print(' /|\\ Transforming', key, 'into', value)\n # We recover the indices of the key\n index_to_change = self.backup_names.index(key)+2\n print('/_o_\\ The new variable will be called ' +\n self.ref_names[self.backup_names.index(key)])\n # Recover all indices of all variables present in the\n # remapping\n variable_names = [elem for elem in self.backup_names if\n value.find(elem) != -1]\n indices = [self.backup_names.index(name)+2 for name in\n variable_names]\n # Now loop over all files in spam\n for i in xrange(len(spam)):\n # Assign variables to their values\n for index, name in zip(indices, variable_names):\n exec(\"%s = spam[i][:, %i]\" % (name, index))\n # Assign to the desired index the combination\n exec(\"spam[i][:, %i] = %s\" % (index_to_change, value))\n if hasattr(self, 'to_derive'):\n for key, value in dictitems(self.to_derive):\n print(' /|\\ Creating new parameter', key)\n print('/_o_\\ with formula ' + key + \" = \"+value)\n # Recover all indices of all variables present in the\n # remapping\n variable_names = [elem for elem in self.backup_names if\n value.find(elem) != -1]\n indices = [self.backup_names.index(name)+2 for name in\n variable_names]\n # Now loop over all files in spam\n for i in xrange(len(spam)):\n # For each file expand the dimension of spam by one\n spam[i] = np.hstack([spam[i],np.empty((len(spam[i]),1))])\n # Assign local variables to their values\n for index, name in zip(indices, variable_names):\n exec(\"%s = spam[i][:, %i]\" % (name, index))\n # Assign to to the appended array the combination\n exec(\"spam[i][:,-1] = %s\" % (value))\n\n # If everything was successfull, add the corresponding info\n self.ref_names.append(key)\n self.tex_names.append(io_mp.get_tex_name(key,number=1))\n self.backup_names.append(key)\n self.boundaries.append([None,None])\n N = len(self.scales)\n self.scales = np.vstack([np.hstack([self.scales,np.zeros((N,1))]),np.zeros((N+1,1)).T])\n self.scales[-1,-1]=1\n self.rescales = np.vstack([np.hstack([self.rescales,np.zeros((N,1))]),np.zeros((N+1,1)).T])\n self.rescales[-1,-1]=1\n self.number_parameters +=1\n self.plotted_parameters.append(key)\n self.centers = np.append(self.centers,0)\n if hasattr(self, 'to_reorder'):\n if(len(self.to_reorder)>0):\n indices = [self.backup_names.index(name) for name in self.to_reorder]\n missing_indices = [x for x in np.arange(len(self.backup_names)) if x not in indices]\n indices = np.concatenate([indices,missing_indices],dtype=int)\n self.ref_names = [self.ref_names[i] for i in indices]\n self.tex_names = [self.tex_names[i] for i in indices]\n self.backup_names = [self.backup_names[i] for i in indices]\n self.boundaries = [self.boundaries[i] for i in indices]\n self.scales = self.scales[indices][:,indices]\n self.rescales = self.rescales[indices][:,indices]\n self.centers = self.centers[indices]\n # Re-sort spam (barely any overhead, due to numpy's internal memory views)\n for i in xrange(len(spam)):\n spam[i][:,2:] = spam[i][:,indices+2]\n # Play the same game independently for plotted_parameters\n # since these might be a lot fewer\n indices = [self.plotted_parameters.index(name) for name in self.to_reorder]\n if(len(indices)>0):\n missing_indices = [x for x in np.arange(len(self.plotted_parameters)) if x not in indices]\n indices = np.concatenate([indices,missing_indices])\n self.plotted_parameters = 
[self.plotted_parameters[i] for i in indices]",
"def set_input_params(self, param_dict):\n for k, v in param_dict.items():\n # If v is dictionary, function was called using default values.\n # Set v equal to the default value of that parameter.\n # Must check if default is in the dict, as other dicts exist that are not default values.\n if isinstance(v, dict) and 'default' in v:\n v = v['default']\n if isinstance(v, list):\n v = v[-1]\n if k in self.input_widgets:\n cla = self.input_widgets[k]\n if isinstance(cla, QComboBox):\n idx = cla.findData(v)\n if idx != -1:\n cla.setCurrentIndex(idx)\n elif isinstance(cla, QLineEdit):\n cla.setText(v)\n else:\n cla.setValue(v)",
"def from_dict(cls, param_change_dict: ParamChangeDict) -> ParamChange:\n return cls(\n param_change_dict['name'], param_change_dict['generator_id'],\n param_change_dict['customization_args']\n )",
"def edit_dictionary():",
"def change(self, new_dict):\n self.dict = new_dict",
"def update_from_guess_dict(self,guess_dict):\n\n for name,spec in self.theta.items():\n value = guess_dict[name]\n setattr(self.par,name,value)\n spec['guess']= value",
"def update_starting_parameters(self):\n pass",
"def updateFieldsParams(fieldName, fieldDict):\n # todo - see if this function is needed\n fieldParamsDict = getSetting(\"FieldParams\")\n Configuration.simFieldsParams = fieldParamsDict\n\n fieldName = str(fieldName)\n\n print('CONFIGURATION: fieldName =', fieldName)\n print('CONFIGURATION: fieldDict =', fieldDict)\n\n Configuration.simFieldsParams[fieldName] = fieldDict # do regardless of in there or not\n\n setSetting('FieldParams', Configuration.simFieldsParams)\n\n Configuration.simFieldsParams[fieldName] = fieldDict # do regardless of in there or not",
"def InjectCustomKeys(self, keys, change):\n for key in keys:\n self._dict[str(key)] = change",
"def to_param_mapping(self, changes):\n return NotImplementedError",
"def test_param_changer_changes(self):\n i = Island()\n loc = (1, 1)\n s = Herbivore(i, loc)\n old_param = s.parameters[\"F\"]\n s.param_changer({\"F\" : 20})\n new_param = s.parameters[\"F\"]\n\n assert old_param != new_param\n s.param_changer({\"F\": 10})",
"def _update(self, params: ParamsMap):\n self.cosmo_hash = None\n self.tracers = []\n self._update_source(params)",
"def set_pars(pardic, dsin='dsin.txt', copy_to=None, check_auxiliary=False):\r\n\r\n \r\n\r\n orig_file = open(dsin, 'r')\r\n lines = orig_file.readlines() # list of strings, each ending with '\\n'\r\n orig_file.close()\r\n\r\n # only search the relevant part of the file\r\n for ln, line in enumerate(lines):\r\n if line.find('double initialValue(') > -1:\r\n start=ln\r\n elif line.find('char initialDescription(') > -1:\r\n end=ln\r\n break\r\n \r\n pardic_to_search = pardic.copy()\r\n #pardic_remaining = pardic.copy()\r\n lines_ = lines[start:end]\r\n #foundpar = None\r\n \r\n for linenumber, s in enumerate(lines_):\r\n splitted = s.split()\r\n for par, val in pardic_to_search.iteritems():\r\n if par in splitted:\r\n #print '{} found'.format(par)\r\n # check structure of the file\r\n two_lines = len(splitted) != 8 #True if all in one line\r\n \r\n # first we check that the parameter is not an auxiliary\r\n # parameter (5th value of the 'array line' should be a 1)\r\n if two_lines: index=0\r\n else: index=4\r\n if check_auxiliary and not splitted[index] == '1':\r\n raise ValueError(\"The parameter %s is of type 'auxiliary'.\\n\\\r\n it cannot be set in the dymosim input file. \" % (par))# check if the value to write is in this line, or the previous one\r\n \r\n # now changing the value:\r\n if two_lines: \r\n #We have to change the \r\n # second value of the previous line\r\n prev_splitted = lines[start+linenumber-1].split() \r\n old_value = copy.copy(prev_splitted[1])\r\n prev_splitted[1] = str(val)\r\n prev_splitted.append('\\n')\r\n lines[start+linenumber-1] = ' '.join(prev_splitted)\r\n else:\r\n # all is nicely in one line\r\n old_value = copy.copy(splitted[1])\r\n splitted[1] = str(val)\r\n splitted.append('\\n') \r\n lines[start+linenumber] = ' '.join(splitted)\r\n print '%s found: %s is replaced by %s' % (par,old_value, val)\r\n pardic_to_search.pop(par)\r\n break\r\n \r\n \r\n # Write the file\r\n \r\n if copy_to is None:\r\n copy_to = dsin\r\n \r\n writefile = file(copy_to, 'w')\r\n writefile.writelines(lines)\r\n writefile.close()\r\n \r\n print \"These parameters were NOT found:\\n\"\r\n for i in sorted(pardic_to_search.keys()):\r\n print i",
"def update(self, new_mapping):\n for option, value in new_mapping.items():\n if isinstance(value, dict):\n if hasattr(self, option):\n sublevel_config = getattr(self, option)\n else:\n sublevel_config = DocconvertConfiguration(level=option)\n sublevel_config.update(value)\n value = sublevel_config\n setattr(self, option, value)",
"def _update_source(self, params: ParamsMap):",
"def _update_param_dict(self, dest_dict, ori_dict):\n\n # First append to select parameters\n for param in self.__cfg_append_keys:\n if param in dest_dict and param in ori_dict:\n dest_dict[param].extend(ori_dict[param])\n del ori_dict[param]\n\n # Then override remaining parameters\n dest_dict.update(ori_dict)",
"def update_parameters(node, old_params):\n def default_params_update(definition_params, old_params):\n for key in definition_params:\n if key == 'type':\n continue\n elif (key in ('order', 'label', 'description') and\n key in definition_params and\n old_params.get(key) != definition_params[key]):\n old_params[key] = definition_params[key]\n elif key not in old_params:\n old_params[key] = definition_params[key]\n elif (isinstance(definition_params[key], dict) and\n isinstance(old_params[key], dict)):\n default_params_update(definition_params[key], old_params[key])\n\n try:\n definition_params = node.parameters\n if isinstance(definition_params, ParameterGroup):\n definition_params = definition_params.parameter_dict\n\n # We need to make sure that definition parameters have correct values\n # for 'order'. The call to reorder will fix that, but does so by\n # mutating the parameter dictionary, so we make a copy of the\n # dictionary to avoid unwanted side-effects.\n definition_params = copy.deepcopy(definition_params)\n ParameterRoot(definition_params).reorder()\n except AttributeError:\n definition_params = {}\n\n # Node specific parameter updating if applicable.\n try:\n old_params = node.update_parameters_basic(old_params)\n except NotImplementedError:\n pass\n # And then default parameter updating.\n default_params_update(definition_params, old_params)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
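As a side note on the change_parameter entry above: the ModelVariables/ScalarVariable traversal it performs can be reproduced in a self-contained sketch. The XML fragment and the parameter name 'gain.k' below are invented for illustration and are not taken from a real OpenModelica _init.xml file.

import xml.etree.ElementTree as ET

# Tiny stand-in for an OpenModelica *_init.xml fragment (names and values invented).
XML = """
<fmiModelDescription>
  <ModelVariables>
    <ScalarVariable name="gain.k" variability="parameter"><Real start="2.0"/></ScalarVariable>
    <ScalarVariable name="x" variability="continuous"><Real start="0.0"/></ScalarVariable>
  </ModelVariables>
</fmiModelDescription>
"""

def set_parameters(root, change_dict):
    # Update the 'start' attribute of every matching parameter variable.
    changed = False
    for var in root.find('ModelVariables'):
        name = var.attrib['name']
        if name in change_dict and var.attrib.get('variability') == 'parameter':
            var.find('Real').attrib['start'] = str(change_dict[name])
            changed = True
    return changed

root = ET.fromstring(XML)
print(set_parameters(root, {'gain.k': 3.5}))                         # True
print(root.find('ModelVariables')[0].find('Real').attrib['start'])   # 3.5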
Change an attribute and return True only if new value differs from old. Otherwise don't change and return False. | def change_attrib(elem, name, value):
log = logging.getLogger()
value_type = type(value)
if value_type(elem.attrib[name]) == value:
log.warning('{0} in {1} already equal to {2}'.format(name, str(elem), value))
return False
else:
log.info('Changed {0} in {1} from {2} to {3}'.format(name, str(elem), elem.attrib[name], value))
elem.attrib[name] = str(value)
return True
# end of change_attrib
| [
"def is_attribute_overridden(obj: model.Attribute, new_value: Optional[ast.expr]) -> bool:\n return obj.value is not None and new_value is not None",
"def _set_attr_sub_(self, attr):\n if attr.lower() in self.attributes:\n self.attr_sub = self.attributes.index(attr.lower())\n\n # if attribute is changed, check if main and sub attributes are the same\n if self.attr_main == self.attr_sub:\n self.is_same_attr = True\n else:\n self.is_same_attr = False",
"def lock_unlock_attribute(element, attribute, state):\n\n try:\n cmds.setAttr(\"{}.{}\".format(element, attribute), lock=state)\n return True\n except RuntimeError:\n return False",
"def change_attribute(self, attr, old_value, new_value):\n self.sender.graph_attr_changed(self.source_id_buff, self.time_id, attr, old_value, new_value)\n self.time_id += 1",
"def _attribute_inverter(obj, name, value):\n setattr(obj, name, value)\n return True",
"def test_attribute_noteq(self):\n attr1 = Attribute(\"device\", \"read\")\n attr2 = Attribute(\"device\", \"write\")\n assert attr1 != attr2",
"def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v",
"def _waitModelAttributeToChange(self, modelAttribute, newValue, waitTime=2.0, decrValue=.1):\n assert decrValue > 0\n assert waitTime > decrValue\n\n result = False\n\n while waitTime > 0:\n time.sleep(decrValue)\n waitTime -= decrValue\n if modelAttribute == newValue:\n result = True\n break\n\n return result",
"def match(self, compared_attribute):\n for k, v in self.attribute.items():\n if not (k in compared_attribute and compared_attribute[k] == v):\n return False\n return True",
"def has_changed(self, field):\n if not self.pk:\n return False\n old_value = self.__class__._default_manager.\\\n filter(pk=self.pk).values(field).get()[field]\n return not getattr(self, field) == old_value",
"def check_attribute(array):\n if array[0] == array[1] and array[1] == array[2]:\n return True\n elif array[0] != array[1] and array[1] != array[2] and array[0] != array[2]:\n return True\n else:\n return False",
"def _waitCloudioAttributeToChange(self, cloudioAttribute, newValue, waitTime=2.0, decrValue=.1):\n assert decrValue > 0\n assert waitTime > decrValue\n\n result = False\n\n while waitTime > 0:\n time.sleep(decrValue)\n waitTime -= decrValue\n if cloudioAttribute.getValue() == newValue:\n result = True\n break\n\n return result",
"def _needs_to_track_change(self, instance, value) -> bool:\n try:\n current_value = instance.__dict__[self._name]\n except KeyError:\n return True\n return value != current_value",
"def test_attribute_value(feature, att_name, target_value, check_case):\n \n att_value = FME_utils.feature_get_attribute(feature, att_name, True)\n \n if check_case:\n # Nothing to do\n pass\n else:\n # Adjust the case\n target_value = target_value.lower()\n att_value = att_value.lower()\n \n if att_value == target_value:\n match = True\n else:\n match = False\n \n return match",
"def _has_changed(self, initial, data):\n\n try:\n data = self.to_python(data)\n initial = self.to_python(initial)\n except forms.ValidationError:\n return True\n\n # Only do a geographic comparison if both values are available\n if initial and data:\n data.transform(initial.srid)\n # If the initial value was not added by the browser, the geometry\n # provided may be slightly different, the first time it is saved.\n # The comparison is done with a very low tolerance.\n return not initial.equals_exact(data, tolerance=0.000001)\n else:\n # Check for change of state of existence\n return bool(initial) != bool(data)",
"def has_changed(self):\n for field_name in self._originaldict.keys():\n if getattr(self, field_name) != self._originaldict[field_name]:\n return True\n\n return False",
"def assert_and_test(self, attr, to_set, to_test='NoAttr'):\n # Set attribute :\n obj = 'self.OBJ'\n if isinstance(to_set, str):\n exec(\"{}.{}\".format(obj, attr) + \"='\" + to_set + \"'\")\n else:\n exec(\"{}.{}\".format(obj, attr) + ' = to_set')\n value = eval(\"{}.{}\".format(obj, attr))\n # Test either to_set or to_test :\n value_to_test = to_set if to_test == 'NoAttr' else to_test\n # Test according to data type :\n if isinstance(value_to_test, np.ndarray):\n # Be sure that arrays have the same shape and dtype :\n value = value.reshape(*value_to_test.shape)\n value = value.astype(value_to_test.dtype)\n np.testing.assert_allclose(value, value_to_test)\n else:\n assert value == value_to_test",
"def change_node_attribute(self, node, attr, old_value, new_value):\n self.sender.node_attr_changed(self.source_id_buff, self.time_id, node, attr, old_value, new_value)\n self.time_id += 1",
"def has_changed(self):\n new_value = self.port.value\n return new_value != self.last_read_value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
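A quick check of change_attrib's return-value contract, assuming the function defined above and Python's standard logging are in scope; the element and attribute values here are made up.

import logging
import xml.etree.ElementTree as ET

logging.basicConfig(level=logging.INFO)
elem = ET.Element('ScalarVariable', attrib={'name': 'x', 'valueReference': '7'})

print(change_attrib(elem, 'valueReference', 7))   # False: int('7') == 7, nothing changes
print(change_attrib(elem, 'valueReference', 9))   # True: attribute rewritten to '9'
print(elem.attrib['valueReference'])              # 9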
Search for and return either a Real, Integer, Boolean or String etree element based on the Python type that is intended to be assigned (float, int, bool or str). Returns None if nothing is found that matches. | def get_value_elem(elem, var_type):
if var_type is float or var_type is np.float64:
val_elem = elem.find('Real')
elif var_type is int:
val_elem = elem.find('Integer')
# Allow for assigning an int to a Real
if val_elem is None:
val_elem = elem.find('Real')
elif var_type is bool:
val_elem = elem.find('Boolean')
elif var_type is str:
val_elem = elem.find('String')
else:
raise ValueError('Unrecognized Python type = {0}'.format(var_type))
return val_elem | [
"def __decode_result(element):\n type = element.get('{http://www.w3.org/1999/XMLSchema-instance}type')\n if type is not None:\n try:\n prefix, local = type.split(\":\")\n if prefix == 'xsd':\n type = local\n except ValueError:\n pass\n\n if type == \"integer\" or type == \"int\":\n return int(element.text)\n if type == \"float\" or type == \"double\":\n return float(element.text)\n if type == \"boolean\":\n return element.text == \"true\"\n\n return element.text or \"\"",
"def __return_type(cls, locator):\n Ranorex.Validate.EnableReport = False\n Ranorex.Adapter.DefaultUseEnsureVisible = True\n supported_types = ['AbbrTag', 'AcronymTag', 'AddressTag', 'AreaTag',\n 'ArticleTag', 'AsideTag', 'ATag', 'AudioTag',\n 'BaseFontTag', 'BaseTag', 'BdoTag', 'BigTag',\n 'BodyTag', 'BrTag', 'BTag', 'Button',\n 'ButtonTag', 'CanvasTag', 'Cell', 'CenterTag',\n 'CheckBox', 'CiteTag', 'CodeTag', 'ColGroupTag',\n 'ColTag', 'Column', 'ComboBox', 'CommandTag',\n 'Container', 'ContextMenu', 'DataListTag', 'DateTime',\n 'DdTag', 'DelTag', 'DetailsTag', 'DfnTag',\n 'DirTag', 'DivTag', 'DlTag', 'EmbedTag', 'EmTag',\n 'FieldSetTag', 'FigureTag', 'FontTag', 'Form', 'FormTag',\n 'Link', 'List', 'ListItem', 'MenuBar',\n 'MenuItem', 'Picture', 'ProgressBar',\n 'RadioButton', 'Row', 'ScrollBar', 'Slider',\n 'StatusBar', 'Table', 'TabPage', 'Text', 'TitleBar',\n 'ToggleButton', 'Tree', 'TreeItem', 'Unknown' ]\n \n ele = RanorexLibrary.extract_element(locator)\n \n for item in supported_types:\n if ele.lower() == item.lower():\n return item\n elif ele.lower() == '':\n raise AssertionError(\"No element entered\")\n \n log = logging.getLogger(\"Return type\")\n log.debug(\"Ranorex supports: %s\", dir(Ranorex))\n \n raise AssertionError(\"Element is not supported. Entered element: %s\" %ele)",
"def getElem(type) :\n if type not in list(data.elem_type.values()):\n raise AttributeError(type+'is not available type. See data.elem_type')\n elem = data.dictClass({'type':type})\n if type=='drift' :\n elem.length = 0.1\n elem.n_sckick = 1\n elem.n_map = 1\n elem.pipe_radius = 1.0\n elif type=='quad' :\n elem.length = 0.1\n elem.n_sckick = 1\n elem.n_map = 1\n elem.Kx = 10.0\n elem.file_id = 0\n elem.pipe_radius = 1.0\n elem.misalign_x = 0.0\n elem.misalign_y = 0.0\n elem.rotation_x = 0.0\n elem.rotation_y = 0.0\n elem.rotation_z = 0.0\n elif type=='quad_hardedge' :\n elem.n_map = 1\n elem.Kx = 10.0\n elem.flagEntrance = True\n elif type=='const_focusing' :\n elem.length = 0.1\n elem.n_sckick = 1\n elem.n_map = 1\n elem.kx2 = 0.33333\n elem.ky2 = 0.33333\n elem.kz2 = 0.33333\n elem.pipe_radius = 1.0\n elif type=='solenoid' :\n elem.length = 0.1\n elem.n_sckick = 1\n elem.n_map = 1\n elem.Bz = 0.0\n elem.file_id = 0\n elem.pipe_radius = 1.0\n elem.misalign_x = 0.0\n elem.misalign_y = 0.0\n elem.rotation_x = 0.0\n elem.rotation_y = 0.0\n elem.rotation_z = 0.0\n elif type=='dipole' :\n elem.length = 0.1\n elem.n_sckick = 1\n elem.n_map = 1\n elem.bending_angle = 0.0\n elem.k1 = 0.0\n elem.file_id = 150\n elem.pipe_radius = 1.0\n elem.entrance_angle = 0.0\n elem.exit_angle = 0.0\n elem.entrance_curvature = 0.0\n elem.exit_curvature = 0.0\n elem.fringe_field_integration = 0.5\n elif type=='multipole_thin' :\n elem.n_sckick = 1\n elem.n_map = 1\n elem.KL_dipole = 0.0\n elem.KL_quad = 0.0\n elem.KL_sext = 0.0\n elem.KL_oct = 0.0\n elem.KL_deca = 0.0\n elem.KL_dodeca = 0.0\n elif type=='linear_matrix_map' :\n elem.nonlinear_insert_length = 1.0\n elem.nonlinear_insert_tuneAdvance = 0.3\n elem.tune_advance_x = 0.0\n elem.tune_advance_y = 0.0\n elif type in ['nonlinear_insert','nonlinear_insert_sliced','nonlinear_insert_smooth_focusing'] :\n elem.length = 1.8\n elem.n_sckick = 50\n elem.n_map = 10\n elem.strength_t = 0.4\n elem.transverse_scale_c = 0.01\n elem.pipe_radius = 1.0\n if type in ['nonlinear_insert','nonlinear_insert_sliced']:\n elem.tune_advance = 0.3\n if type == 'nonlinear_insert_sliced':\n elem.total_length = 1.8\n elem.start_position = 0.0\n else:\n elem.betx = 1.5\n elif type in ['TBT_integral','TBT_integral_onMomentum'] :\n elem.strength_t = 0.0\n elem.transverse_scale_c = 0.01\n elem.betx = 1.0\n elem.alfx = 0.0\n elem.file_id = 1000\n elem.pID_begin = 1\n elem.pID_end = 100\n elif type in ['TBT','TBT_multiple_file']:\n elem.file_id = 1000\n elem.pID_begin = 1\n elem.pID_end = 100\n if type == 'TBT_multiple_file':\n elem.n_files = 1\n elif type == 'write_raw_ptcl':\n elem.file_id = 1000\n elem.format_id = 1\n elem.turn = 1\n elem.sample_period = 1\n elif type == 'pipe_override':\n elem.pipe_shape = 'rectangular'\n elem.xmax = 1.0\n elem.ymax = 1.0 \n elif type=='loop':\n elem.turns = 1\n return elem",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLIfElt_search(self, attrname, attrvalue)",
"def valuetype(self, tag):\n if tag in self.tag_filters:\n return self.tag_filters[tag][0]\n return None",
"def element(**args):\n if len(args) != 1:\n raise TypeError(\"This routine accepts a single argument\")\n\n if args.get(\"symbol\"):\n symbol = args[\"symbol\"]\n for e in elements.all():\n if e.symbol() == symbol:\n return e\n return None\n elif args.get(\"atomic_number\"):\n atnum = args[\"atomic_number\"]\n for e in elements.all():\n if e.atomicNumber() == atnum:\n return e\n return None",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLElseIfElt_search(self, attrname, attrvalue)",
"def xml_to_value(conn, element):\n \n if element.tag == 'null':\n return None\n elif element.tag == 'bool':\n return element.get('value') == 'true'\n elif element.tag == 'int':\n return int(element.get('value'))\n elif element.tag == 'double':\n return float(element.get('value'))\n elif element.tag == 'string':\n return element.get('value')\n elif element.tag == 'dict':\n result = {}\n \n for child in element:\n result[child.get('key')] = xml_to_value(conn, child)\n elif element.tag == 'array':\n [xml_to_value(child) for child in element]\n elif element.tag == 'obj':\n return FireworksObj(conn, element.get('value'), cls=element.get('class'))\n elif element.tag == 'void':\n return VOID",
"def _element_check(data):\n if isinstance(data, etree.Element):\n logging.debug(\"attempting to convert to xml string\")\n return etree.tostring(data)\n else:\n return data",
"def guess_type_value(x, none=None):\n try:\n int(x)\n if x[0] == '0' and len(x) > 1:\n return str\n else:\n return int if len(x) < 9 else str\n except ValueError:\n try:\n x = float(x)\n return float\n except ValueError:\n if none:\n if x is None:\n return None\n try:\n if len(x) > 0:\n return str\n else:\n return None\n except Exception:\n return None\n else:\n return str",
"def xml_type(self) -> Optional[str]:\n return xml_type_map.get(self.tag)",
"def _get_xml_node_value (cls, root, *name):\n if len(name) == 1:\n node = root.find(name[0])\n elif len(name) == 2:\n node = root.find(name[0], name[1])\n\n if not node:\n return None\n elif len(node.text) < 1:\n return None\n else:\n return node.text.strip()",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLScxmlElt_search(self, attrname, attrvalue)",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLDataModelElt_search(self, attrname, attrvalue)",
"def from_soup_get_membership_current_type(self, soup):\n\n try:\n _ = soup.find(\"div\", {\"class\": \"ed-panel__info__value ed-panel__info__value_subscription-type\"},).text\n except Exception as e:\n log.warn(\"soup find got exception {}\".format(e))\n _ = None\n return _",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLDataElt_search(self, attrname, attrvalue)",
"def search(self, attrname: 'char const *', attrvalue: 'char const *') -> \"ScXMLElt const *\":\n return _coin.ScXMLValidateElt_search(self, attrname, attrvalue)",
"def _get_element_type(num_nodes):\n ELEMENT_TYPE_DICT = {\n 2: \"BEAM\",\n 3: \"TRI\",\n 4: \"SHELL4\",\n 5: \"SHELL5\",\n 6: \"TRI6\",\n 7: \"TRI7\",\n 8: \"SHELL8\",\n }\n element_type = ELEMENT_TYPE_DICT[num_nodes]\n return element_type",
"def get_exact(self, value):\n for element in self:\n if element.name == value:\n return element"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
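The fallback behaviour of get_value_elem is easiest to see on a throwaway element; this assumes the function above (together with the numpy import it relies on for np.float64) is in scope.

import xml.etree.ElementTree as ET

var = ET.fromstring('<ScalarVariable name="k"><Real start="1.0"/></ScalarVariable>')

print(get_value_elem(var, float).tag)  # Real
print(get_value_elem(var, int).tag)    # Real: falls back because there is no <Integer> child
print(get_value_elem(var, bool))       # None: no <Boolean> child present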
Handles a POST request to register a user's cancionPersonal | def post(self, usuario_actual):
cancion_a_registrar = CancionPersonal(nombre=self.argumentos['nombre'], artistas=self.argumentos['artistas'],
album=self.argumentos['album'], id_usuario=usuario_actual.id_usuario)
errores_validacion_registro = ValidacionCancionPersonal.validar_registro_cancion_personal(cancion_a_registrar)
if errores_validacion_registro is not None:
return errores_validacion_registro, 400
cancion_a_registrar.guardar()
return cancion_a_registrar.obtener_json(), 201 | [
"def post(self,Utilisateur,mdp):\r\n return createUser(login,Utilisateur,mdp,\"\")",
"def test_registration_view_can_save_post_request(self):\n\t\trequest = HttpRequest()\n\t\trequest.method = 'POST'\n\t\trequest.POST['user'] = User.objects.create_user(username='fisherman-bob', password='BoBfish23')\n\t\tresponse = registration(request)\n\t\t\n\t\tself.assertEqual(User.objects.count(), 1)\n\t\tuser = User.objects.first()\n\t\tself.assertEqual(user.username, 'fisherman-bob')\n\t\tself.assertEqual(response.status_code, 200)",
"def onRequestCreateAccount(registerName, password, datas):\n\tINFO_MSG('onRequestCreateAccount: registerName=%s' % (registerName))\n\t\n\tcommitName = registerName\n\t\n\t#Default account name is the name at the time of submission\n\trealAccountName = commitName \n\t\n\t# Here, the request can be submitted to a third-party platform through http or other means, and the data returned by the platform can also be put into datas.\n\t# datas will call back to the client\n\t# If using http access, because interfaces are single-threaded, synchronous http access is easy to get stuck in the main thread, it is recommended to use\n\t\t# Ouroboros.registerReadFileDescriptor()和Ouroboros.registerWriteFileDescriptor()结�\n\t# Ouroboros.urlopen(\"https://www.baidu.com\", onHttpCallback) Asynchronous access. It can also interact with the platform in the same way as sockets.\n\t\n\tOuroboros.createAccountResponse(commitName, realAccountName, datas, Ouroboros.SERVER_SUCCESS)",
"def registro_punto_muestreo(request, cadena_id):\n\n cadena = get_object_or_404(SimuesCadenaCustodia, id=cadena_id)\n mostrar = False\n\n if request.method == 'POST':\n\n form_punto = RegistroPuntoMuestra(request.POST)\n\n if form_punto.is_valid():\n\n # Se crea y se guarda el punto de muestreo\n punto_muestreo = SimuesPuntoMuestreo(nombre=form_punto.cleaned_data['nombre'],\n cadena=cadena,\n zona=form_punto.cleaned_data['zona'],\n datum=form_punto.cleaned_data['datum'],\n gps=form_punto.cleaned_data['gps'],\n tipomuestra=form_punto.cleaned_data['tipo_muestra'],\n tipopunto=form_punto.cleaned_data['tipo_punto'],\n observacion=form_punto.cleaned_data['observacion'],\n fecha_hora=form_punto.cleaned_data['fecha_hora'],\n altitud=form_punto.cleaned_data['altitud'],\n coord_norte=form_punto.cleaned_data['coord_norte'],\n coord_este=form_punto.cleaned_data['coord_este'])\n\n punto_muestreo.created_by = request.user\n punto_muestreo.modified_by = request.user\n\n punto_muestreo.save()\n\n # Mensaje para saber que se guardo con éxito\n messages.success(request, 'El punto de muestreo se grabó satisfactoriamente',\n extra_tags='punto')\n\n mostrar = True\n\n return redirect('cadenas:editar_punto_muestreo', punto_id=punto_muestreo.id)\n\n else:\n\n form_punto = RegistroPuntoMuestra()\n\n return render(request, 'cadenas/registro_punto_muestreo.html', {'form_punto':form_punto,\n 'mostrar':mostrar})",
"def post_user():\n required_data = {\"email\", \"password\"}\n return post(cls, None, None, required_data)",
"def register(request, success_url='/account/register/complete/',\n\t\t\t form_class=RegistrationFormUniqueEmail, profile_callback=None,\n\t\t\t template_name='registration/registration_form.html'):\n\tif request.method == 'POST':\n\t\tform = form_class(request.POST)\n\t\tlogger.debug(\"%s - account-register: username: '%s', first_name: '%s', last_name: '%s', email: '%s', tipo_verificacion: '%s', verificacion: '%s', tipo_request_auth: '%s', request_auth: '%s'\" % (request.META.get('REMOTE_ADDR'), form.data['username'], form.data['first_name'], form.data['last_name'], form.data['email'], form.data['tipo_verificacion'], form.data['verificacion'], form.data['tipo_request_auth'], form.data['request_auth']))\n\t\tif form.is_valid():\n\t\t\t# new_user = form.save(profile_callback=profile_callback)\n\t\t\t# return HttpResponseRedirect(success_url)\n\t\t\tif form.is_request_auth():\n\t\t\t\t# User is not alumno.\n\t\t\t\tsend_email = False\n\t\t\t\tactivation_email = form.cleaned_data['email']\n\t\t\telse:\n\t\t\t\t# User is alumno or has an email account.\n\t\t\t\tif form.is_account():\n\t\t\t\t\t# User has an email account.\n\t\t\t\t\tif settings.CHECK_PADRON.is_registered(form.cleaned_data['verificacion']):\n\t\t\t\t\t\tform.errors['verificacion'] = [_(u'Cuenta ya utilizada. Por favor contactate con nosotros \\\n\t\t\t\t\t\t\t\tsi creés que alguien utilizó tu cuenta.')]\n\t\t\t\t\t\t#logger.error(\"%s - check_padron %s ('%s', '%s', '%s')\" % (request.META.get('REMOTE_ADDR'), 'ALREADY USED', form.cleaned_data['verificacion'], form.data['first_name'], form.data['last_name']))\n\t\t\t\t\telse:\n\t\t\t\t\t\tactivation_email = form.cleaned_data['verificacion'] + '@fi.uba.ar'\n\n\t\t\t\t# Check padron\n\t\t\t\telif form.is_padron():\n\t\t\t\t\t# Check verificacion repetido\n\t\t\t\t\tverificacion_error, log_message = settings.CHECK_PADRON.check_padron(form.cleaned_data['verificacion'], form.data['first_name'], form.data['last_name'])\n\t\t\t\t\tif verificacion_error:\n\t\t\t\t\t\tlogger.error(\"%s - check_padron %s ('%s', '%s', '%s')\" % (request.META.get('REMOTE_ADDR'), log_message, form.cleaned_data['verificacion'], form.data['first_name'], form.data['last_name']))\n\t\t\t\t\t\tform.errors['verificacion'] = [verificacion_error]\n\t\t\t\t\telse:\n\t\t\t\t\t logger.info(\"%s - check_padron %s ('%s', '%s', '%s')\" % (request.META.get('REMOTE_ADDR'), log_message, form.cleaned_data['verificacion'], form.data['first_name'], form.data['last_name']))\n\t\t\t\t\tactivation_email = form.cleaned_data['email']\n\t\t\t\tsend_email = True\n\n\t\t\tif len(form.errors) == 0:\n\t\t\t\t# Register new user\n\t\t\t\tnew_user = RegistrationProfile.objects.create_inactive_user(username=form.cleaned_data['username'],\n\t\t\t\t\t\t\t password=form.cleaned_data['password'], first_name=form.cleaned_data['first_name'],\n\t\t\t\t\t\t\t last_name=form.cleaned_data['last_name'], email=activation_email, send_email=send_email)\n\t\t\t\tlogger.info(\"%s - new_user ('%s', '%s')\" % (request.META.get('REMOTE_ADDR'), new_user.username, new_user.email))\n\t\t\t\tif form.is_account():\n\t\t\t\t\t# Save user's email.\n\t\t\t\t\tnew_user.email = form.cleaned_data['email']\n\t\t\t\t\tnew_user.save()\n\n\t\t\t\tfrom django.core.mail import send_mail\n\t\t\t\tcurrent_site = Site.objects.get_current()\n\n\t\t\t\tsubject = _(u'[%s] New user' % current_site.name)\n\t\t\t\tmessage = render_to_string('registration/new_user_email.html',\n\t\t\t\t\t\t\t\t\t\t { 'new_user': form.cleaned_data,\n\t\t\t\t\t\t\t\t\t\t\t 'site_url': 'http://%s/' % 
current_site.domain,\n\t\t\t\t\t\t\t\t\t\t\t 'site_name': current_site.name, })\n\t\t\t\tsend_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [a[1] for a in settings.MANAGERS])\n\n\t\t\t\tif send_email:\n\t\t\t\t\tsettings.CHECK_PADRON.save_user(form.cleaned_data['verificacion'], form.data['first_name'], form.data['last_name'])\n\t\t\t\treturn render_to_response('registration/registration_complete.html',\n\t\t\t\t\t\t\t\t\t\t { 'send_email': send_email,\n\t\t\t\t\t\t\t\t\t\t\t'activation_email' : activation_email, },\n\t\t\t\t\t\t\t\t\t\t context_instance=RequestContext(request))\n\n\t\t\t\"\"\"\n\t\t\trequest_auth = form.cleaned_data['request_auth']\n\n\t\t\treturn render_to_response('registration_complete.html',\n\t\t\t\t\t\t\t\t\t { 'send_email': send_email },\n\t\t\t\t\t\t\t\t\t context_instance=RequestContext(request))\n\t\t\treturn HttpResponseRedirect(success_url)\n\t\t\t\"\"\"\n\telse:\n\t\tform = form_class()\n\treturn render_to_response(template_name, { 'form': form },\n\t\t\t\t\t\t\t context_instance=RequestContext(request))",
"def on_post(self, req, resp):\n authentication(req, ['admin'])\n resp_dict = {}\n try:\n # have pre-processed by JSONTranslator, post_data is a dict\n post_data = req.context['doc']\n # logger.debug('username:%s, password:%s, data:%s'\n # % (username, password, post_data))\n # logger.debug('env:%s , \\nstream:%s, \\ncontext:, \\ninput:' % (\n # req.env, req.stream.read()))\n except Exception as ex:\n logger.error('error when try to get headers and data, ', ex)\n raise falcon.HTTPBadRequest('bad req',\n 'when read from req, please check if the req is correct.')\n try:\n \"\"\"\n handle_request:\n\n \"\"\"\n status, doctorid, password = doctor.register_doctor(post_data)\n except Exception as ex:\n logger.exception('error when register doctor, ', ex)\n resp_dict['info'] = 'Error when register doctor {}'.format(\n post_data['last_name'])\n resp.status = falcon.HTTP_500\n resp.body = json.dumps(resp_dict, sort_keys=True, indent=4)\n else:\n if status:\n logger.debug('register ok, status positive')\n resp_dict['info'] = 'Register doctor {} success'.format(\n post_data['last_name'])\n resp_dict['doctorid'] = doctorid\n resp_dict['password'] = password\n resp.status = falcon.HTTP_201\n resp.body = json.dumps(resp_dict)\n else:\n logger.exception('return error when try to register doctor, ', ex)\n resp_dict['errinfo'] = 'Error when register doctor {}'.format(\n post_data['last_name'])\n resp.status = falcon.HTTP_400\n resp.body = json.dumps(resp_dict)",
"def registrarRemotamente(self):\n id, nombre, email, version, password = self.obtenerDatosRegistrados()\n id_obtenido, server_id = self.peticionRemota.registrarUsuario(nombre,\n email, password, version)\n modulo_logger.log(logging.INFO, 'ID OBTENIDO: %s, server_id: %s' %\n (id_obtenido, server_id))\n if (int(id_obtenido) > 0):\n self.cursor.execute('update instalacion set id =?, serverid =?, '\n 'passwordnotificada=1', (id_obtenido, server_id,))\n self.conexion_db.commit()\n modulo_logger.log(logging.INFO, 'Se registro correctamente la '\n 'instalacion')\n else:\n modulo_logger.log(logging.ERROR, 'Hubo un error al tratar de '\n 'registrarse remotamente')",
"def test_create_account_using_post(self):\n pass",
"def test_insert_account_and_related_permission_using_post(self):\n pass",
"def register(self, data):\n return self.client().post(\n '/register',\n data=json.dumps(data),\n content_type='application/json'\n )",
"def register_urls(request):\n access_token = MpesaAccessToken.validated_mpesa_access_token\n print(access_token)\n api_url = \"https://sandbox.safaricom.co.ke/mpesa/c2b/v1/registerurl\"\n headers = {\"Authorization\": \"Bearer %s\" % access_token}\n options = {\"ShortCode\": LipanaMpesaPpassword.Business_short_code_1,\n \"ResponseType\": \"Completed\",\n \"ConfirmationURL\": \"https://746501842e36.ngrok.io/api/mobile-money/confirm\",\n \"ValidationURL\": \"https://746501842e36.ngrok.io/api/mobile-money/validate\"}\n response = requests.post(api_url, json=options, headers=headers)\n return HttpResponse(response.text)",
"def registra(self):\r\n lista_de_datos=[]#esta lista ayuda a almacenar temporalmente los datos para posteriormente convertirlos en una tupla\r\n \"\"\"Funcion que llama a las otras funciones\"\"\"\r\n dic=self.solicitar_datos()\r\n set_get_datos(self.obj_equipo, dic)\r\n #covierte los datos de diccionario en una tupla\r\n for valor in dic.values():\r\n lista_de_datos.append(valor)\r\n #convvertir la lista en una tupla\r\n tupla_de_datos=tuple(lista_de_datos)\r\n #llama a la funcion agregar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.agregar_registro(tupla_de_datos)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro agregado correctamente\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no agregado\"+Fore.RESET,Back.RESET)",
"def post(self, resource, **params):\n\n data = params[\"data\"] or None\n\n response = self.client.post(\n resource,\n service_name=\"personalization\",\n params=params,\n signature=self.token,\n data=data,\n )\n return response",
"def company_register(request):\n\n if request.method == \"GET\":\n\n # For testing. Remove this line later!\n if not Plan.objects.all().exists():\n Plan.objects.create(name=\"Test Plan\", price=0)\n\n plan = request.GET.get('p')\n\n company_form = CompanyForm(initial = {'plan': plan }, prefix=\"company\")\n user_form = UserForm(prefix = \"user\")\n\n if request.method == \"POST\":\n company_form = CompanyForm(request.POST, prefix=\"company\")\n user_form = UserForm(request.POST, prefix=\"user\")\n\n if company_form.is_valid() and user_form.is_valid():\n\n company = company_form.save()\n\n try:\n user = user_form.save(commit=False)\n user.company = company\n user.is_admin = True\n user.is_manager = True\n user.save()\n\n except IntegrityError:\n return HttpResponse(\"Error creating company.\\\n Please try again later.\", status=500)\n\n auth_login(request, user)\n return redirect(\"/app\" , {'first_login':True})\n\n context = {\n \"company_form\": company_form,\n \"user_form\": user_form\n }\n\n return render(request, 'registration/company_registration.html', context)",
"def register_user(url, payload):\n resp = requests.post(url, data=payload)\n resp_obj = {\n 'resp_obj': resp,\n 'resp_data': resp.json()\n }\n return resp_obj",
"def test_post_success(self):\n tmp_data = tmp_user_id(self.user.id)\n\n data = {\n 'tmp_user_id': tmp_data['otp'],\n 'otp_code': self.user.otp.get_otp_code()\n }\n response = self.client.post(\n reverse('login_otp'), data=data, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue('token' in response.data)",
"def register(self) -> None:\n # Used for creating account. Submit to /create_account endpoint\n username = input(\"Enter your username. \" +\n \"This will be used to log in to the web service.\\n\")\n email = input(\"Enter your email. \" +\n \"This will be used as your GNUPG username.\\n\")\n password = getpass(\"Enter your password. \" +\n \"This will be used as your GNUPG passphrase.\\n\")\n headers = {\n \"accept\": \"application/json\",\n \"Content-Type\": \"application/json\"\n }\n payload = {\n \"username\": username,\n \"email\": email,\n \"password\": password\n }\n response = requests.post(f\"{self.BASE_URL}/register\", json=payload,\n headers=headers)\n\n self.gpg_service.create_key(passphrase=password, email=email)\n\n if response.status_code == 200:\n print(\"Account created.\\nAttempting to log in...\")\n self.login(username, password)\n\n else:\n print(\"There was a problem creating your account.\")",
"def register(request):\n\tif request.method == \"POST\":\n\t\tform = UserRegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\trequest.session['username'] = form.cleaned_data['userName']\n \t \trequest.session['password'] = form.cleaned_data['userPwd']\n \t\temail = form.cleaned_data['userEmail']\n\t\t\t# Work around - login as admin keystone client for user creation\n\t\t\t# then logged into new user from projects page arrival\n\t\t\tapi.joinTenant('admin', 'admin', 'demo')\n\t\t\t# register user with keystone\n\t\t\tapi.registerUser(request.session['username'], request.session['password'], email)\t\n\t\t\t# login as new user\n\t\t\t#api.login(request.session['username'], request.session['password'])\n\treturn HttpResponseRedirect('/projects/')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles a GET request by returning the user's songs | def get(self, usuario_actual):
cantidad = request.args.get('cantidad')
pagina = request.args.get('pagina')
try:
if cantidad is not None and pagina is not None:
cantidad = int(cantidad)
pagina = int(pagina)
else:
cantidad = 10
pagina = 1
canciones = CancionPersonal.obtener_canciones_de_usuario(usuario_actual.id_usuario, cantidad, pagina)
except ValueError:
canciones = CancionPersonal.obtener_canciones_de_usuario(usuario_actual.id_usuario)
lista_canciones = []
for cancion in canciones:
lista_canciones.append(cancion.obtener_json())
return lista_canciones, 200 | [
"def usuarios_conectados():\n\n global my_user\n print(\"Actualizando clientes conectados.\")\n usuarios = api.get_AllUser()\n lista_usarios = []\n\n for user in usuarios:\n if user['Estado'] == '1':\n # Anadimos todos los users menos el propio.\n if user['Nombre'] != my_user:\n lista_usarios.append(user['Nombre'])\n\n if len(lista_usarios) == 0:\n lista_usarios = ['- Vacio -']\n\n return lista_usarios",
"def solicita_llaves(self, nombre):\r\n # Solicita las llaves de usuario con quien se quiere comunicar\r\n solicitud = {'tipo': \"datos_usuario\", 'nombre': nombre}\r\n paquete = crea_paquete(solicitud)\r\n # envía la solicitud de datos al servidor\r\n self.socket_connection.send(paquete)\r\n # recibe la respuesta\r\n self.socket_connection.recv(2)\r\n a = self.socket_connection.recv(1024)\r\n return json.loads(a)",
"def buscarusuario(): \n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n valor = request.args['patron']\n parametro = request.args['parametro']\n if valor == \"\" : \n administrarusuario()\n if parametro == 'fecha_nac':\n p = db_session.query(Usuario).from_statement(\"SELECT * FROM usuario where to_char(\"+parametro+\", 'YYYY-mm-dd') ilike '%\"+valor+\"%'\").all()\n else:\n p = db_session.query(Usuario).from_statement(\"SELECT * FROM usuario where \"+parametro+\" ilike '%\"+valor+\"%'\").all()\n return render_template('usuario/administrarusuario.html', usuarios = p) \n valor = request.args['patron']\n r = db_session.query(Usuario).filter_by(usuario=valor)\n return render_template('usuario/administrarusuario.html', usuarios = r)\n else:\n flash('Sin permisos para buscar usuarios', 'permiso')\n return render_template('index.html')",
"def get_user_info(self):\n url = self.BASE_URL +'users/1?api_key=' + self.api_key\n req = requests.get(url)\n return req",
"def get(self, *args):\n user = self.__getUser()\n if user or not REQUIRE_AUTH:\n self.doAuthenticatedGet(user, *args)\n else:\n self.redirect(users.create_login_url(self.request.uri))",
"def recargarDominiosPermitidos(self):\n modulo_logger.log(logging.DEBUG, \"Recargando dominios permitidos\")\n self.dominios_permitidos = []\n respuesta = self.cursor.execute(\n 'select url from dominios_permitidos where usuario=?', (self.id, )\n ).fetchall()\n for fila in respuesta:\n self.dominios_permitidos.append(fila[0])",
"def get(self, request, *args, **kwargs):\n enrollInst = enrolledProgramSession.objects.filter(status = enrolledProgramSession.CONTENT_STATUS_ACTIVE).filter(programDefinitionKey__clubKey = self.request.GET.get('clubid')).filter(date = now().date()).select_related('user').order_by(\"firstAccess\", \"sessionTimeBegin\")\n return Response(enrollProgramSerializer(enrollInst, many=True).data)",
"def press_Usuario():\n\n # Devuelve el usuario escogido en la lista de usuarios conectados.\n user_llamar = app.getOptionBox(\"Usuarios conectados: \")\n\n # Obtener la informacion de lA BD para iniciar la comunicacion con el.\n user_llamar_info = api.get_IPandPort(user_llamar)\n if user_llamar_info[\"status_code\"] == \"OK\":\n # Si user conectado.\n if user_llamar_info[\"Estado\"] == '1':\n # Lanzar el cliente y obtener su socket\n client_socket = client.lanzar_cliente(user_llamar_info[\"IP\"],\n user_llamar_info[\"Puerto\"],\n my_user)\n\n # Creamos diccionario con la informacion relevante del usuario al que hay que llamar.\n user_Info = {\"IP\": user_llamar_info['IP'],\n \"Puerto\": user_llamar_info['Puerto'],\n \"Socket\": client_socket,\n \"Nombre\": user_llamar}\n\n # Calculamos la longitud de users_Info, pasa saber en que posicion insertar en la misma.\n long = len(users_Info)\n\n # Insertamos la informacion en la variable global users_Info, en la pos que este vacia.\n num_ventana = -1\n cuenta = 0\n while cuenta < long:\n if users_Info[str(cuenta)] == \"Null\":\n users_Info[str(cuenta)] = user_Info\n num_ventana = cuenta\n break\n\n cuenta += 1\n\n # Si ya tiene 3 chat abiertos.\n else:\n app.infoBox(\"Maximo numeros de chats activos\", \"Ya tienes 3 chats activos.\\n Si deseas hablar con otra persona, cierra uno de tus chats activos.\")\n\n # Si se le asigna un numero de ventana, creamos un hilo para recibir los posibles mensajes y mostramos la ventana del chat.\n if (num_ventana >= 0) and (num_ventana < 3):\n # Creamos un hilo que quede a la escucha de posibles mensajes de la persona con la que se habla.\n client_thread_principal = threading.Thread(target=client.recibir_mensaje,\n args=(client_socket,\n app,\n num_ventana,\n users_Info))\n client_thread_principal.daemon = True\n client_thread_principal.start()\n\n # Mostramos una nueva ventana donde poder llevar a cabo el chat 0, 1 o 2.\n app.showSubWindow(\"Ventana Inicio-Identificado-Client-\"+str(num_ventana))\n\n # Usuario no conectado.\n else:\n app.errorBox(\"usuario no conectado\", \"El usuario no se encuentra conectado en estos momentos\")\n\n else:\n app.errorBox(\"Error al llamar al usuario\", \"Hubo un error a la hora de llamar al usuario \"+user_llamar)",
"def retrieve_cursantes(self, token, carrera, anio=None):\n headers = self.get_headers(token)\n url = app.config['CURSANTES_URL'].format(carrera) \n response = requests.get(url + str(anio) + '/' if anio else url, headers=headers)\n if response.status_code == 200:\n return response.text\n else:\n return []",
"def http_request_coachuser(self, data):\n response_data_json = self._http_request(\n method='POST',\n url_suffix=URL_SUFFIX_COACH_USER,\n json_data=data,\n data=data,\n )\n return response_data_json",
"def user_data(username: str):\n url = f\"https://cas.gmri.org/api/cas/v1/users/?username={username}\"\n\n response = session.get(url)\n\n if response.status_code != 200:\n raise Error404\n return response.json()[0]",
"def read(self, request):\n try:\n imei = request.GET.get('imei', None)\n cn = request.GET.get('cn', None)\n if cn:\n # if center is passed - pull all active users\n # for the respective center\n if cn.lower() == 'all':\n mau = MobileAppUser.active.all()\n else:\n mau = MobileAppUser.active.filter(center__code=cn)\n else:\n mau = MobileAppUser.active.get(imei=imei)\n except MobileAppUser.DoesNotExist:\n return {'error': \"Unknown or inactive imei %s\" % imei}\n return mau",
"def _get_restrictions(self, url_params, username=TEST_USERNAME,\n password=TEST_PASSWORD):\n url = self.URL_BASE + url_params\n client = Client()\n client.login(username=username, password=password)\n return client.get(url)",
"def get(self, username):\n session = Session()\n domain = session['domain']\n if not domain:\n self.redirect('/')\n details = {}\n details['groups'] = self.GetGroups(domain, username)\n details['orgunit'] = self.GetOrgunit(domain, username)\n details['nicknames'] = self.GetNicknames(domain, username)\n data = json.dumps(details)\n logging.debug('Sending data...')\n logging.debug(data)\n self.response.out.write(data)\n logging.debug('Data sent successfully')",
"def list(self, request) -> QuerySet:\n if request.user.has_perm(\"user.can_retrieve_all_users\"):\n return self.get_queryset().all()\n elif request.user.has_perm(\"user.can_retrieve_users_in_school\"):\n #TODO: implment this\n pass \n else:\n raise PermissionError(\"You cannot retrieve users that way.\")",
"def get(self, request: 'Request', format=None) -> Response:\n travels = Viagem.objects.filter(user=request.user).all().order_by('-id')\n serializer = ViagemSerializer(travels, many=True)\n return Response(serializer.data)",
"def staff_api(request):\n\n if request.method == 'GET':\n copy = request.GET.copy()\n params = copy.urlencode()\n scheme = request.is_secure() and 'https' or 'http'\n base_url = scheme + '://' + request.get_host()\n rest_url = base_url + '/api/v2/pages/?' + params\n\n try:\n token = request.META['HTTP_AUTHORIZATION']\n rest_headers = {'Authorization': token}\n rest_response = requests.get(rest_url, headers=rest_headers)\n return JsonResponse(rest_response.json())\n except (KeyError):\n raise PermissionDenied\n raise PermissionDenied",
"def request_api_data(query_char):\n url = 'https://api.pwnedpasswords.com/range/'+ str(query_char)\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(f'error fetching : {res.status_code},check the api and try again')\n return response",
"async def get_user(request, next_id):\n log_request(request)\n head_block = await get_request_block(request)\n conn = await create_connection()\n user_resource = await users_query.fetch_user_resource(\n conn, escape_user_input(next_id)\n )\n conn.close()\n\n return await create_response(conn, request.url, user_resource, head_block)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
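The pagination fallback in the GET handler above reduces to a small piece of pure Python that can be sanity-checked on its own; the helper name and defaults below are invented for the sketch.

def parse_paging(cantidad, pagina, default_size=10):
    # Mirrors the try/except above: explicit values win, missing values get
    # defaults, and unparsable values signal "return everything" with None.
    try:
        if cantidad is not None and pagina is not None:
            return int(cantidad), int(pagina)
        return default_size, 1
    except ValueError:
        return None, None

print(parse_paging('25', '3'))   # (25, 3)
print(parse_paging(None, None))  # (10, 1)
print(parse_paging('abc', '1'))  # (None, None)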
Retrieve the newest object in the target s3 bucket | def get_latest_s3_object(
bucket=os.environ["ARTIFACTS_BUCKET"], prefix="slack-response"
):
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
all = response["Contents"]
return max(all, key=lambda x: x["LastModified"]) | [
"def get_latest(self, bucket, prefix):\n none = datetime.datetime(1,1,1) # always be expired\n \n def p(name, prefix):\n \"\"\"\n A new parser function because the sorted() function can't compare \n datetime objects with None, so instead of None, return a really really\n old datetime object.\n \"\"\"\n return key_parser(name, prefix) or none\n \n keys = [{'dt': p(k.name, prefix), 'string': k.name} for k in bucket.list()]\n latest = sorted(keys, key=lambda x: x['dt'])[-1]\n key = latest['string']\n dt = latest['dt']\n \n assert dt is not none, \"Can't find any dumps in bucket\"\n \n log.info(\"Using latest dump from: {0:%B %d, %Y -- %X}\".format(dt))\n return bucket.get_key(key)",
"def get_object(bucket, key):\n return ObjectStore.get_object(bucket, key)",
"def download_file(key):\n return s3_bucket.Object(key).get()",
"def get_object(bucket_name, object_key):\n try:\n s3_client = S3Utils.get_S3_client()\n response = s3_client.get_object(Bucket=bucket_name, Key=object_key)\n return response\n \n except ClientError as e:\n logging.error(e)\n return None",
"def get_latest_blob():\n storage_client = storage.Client()\n bucket_name = config.UNPROCESSED_BUCKET_NAME\n bucket = storage_client.lookup_bucket(bucket_name)\n\n if bucket is None:\n logger.critical(\"Bucket does not exist. Exiting program.\")\n return None\n\n blobs = list(storage_client.list_blobs(bucket_name))\n logger.debug(f\"blobs {blobs}\")\n latest_blob = max(blobs, key=lambda x: x.updated, default=None)\n\n return latest_blob",
"def get_s3_object_and_read(obj, iteration=0):\n try:\n return obj.get()[\"Body\"].read()\n except Exception: # pylint: disable=broad-except\n if iteration < settings.MAX_S3_GET_ITERATIONS:\n return get_s3_object_and_read(obj, iteration+1)\n else:\n raise",
"def download_object_from_s3(object_key, bucket, file_name=None):\n\n # If file_name was not specified, use object_key\n if file_name is None:\n file_name = object_key\n\n # Download the object\n try:\n response = s3_client.download_file(bucket, object_key, file_name)\n except ClientError as e:\n logging.error(e)\n return e\n return response",
"def get_object(bucket=None, key=None, uri=None, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n return s3_resource.Bucket(bucket).Object(key=key).get()",
"def test_last_modified(self):\n bucket = b'testBucket'\n key = b'testKey'\n before_time = time.time()\n self.put_test_object(bucket.decode('utf-8'), key.decode('utf-8'))\n\n time.sleep(0.05)\n rec = self.sink.fetch()\n after_time = time.time()\n self.assertGreaterEqual(float(rec.last_modified), before_time)\n self.assertLessEqual(float(rec.last_modified), after_time)",
"def get_file_from_object_storage(client, bucket_name, file_to_get):\n\n print('Get file {} from bucket {}'.format(file_to_get, bucket_name))\n object_to_get = get_object_storage_filename(file_to_get)\n\n client.fget_object(bucket_name=bucket_name,\n object_name=object_to_get,\n file_path=file_to_get)",
"def _get_json_file_and_etag_from_s3(self, key: str) -> Tuple[Union[dict, list], str]:\n response = self._s3_client.get_object(Bucket=self.s3_bucket_name, Key=key)\n return json.loads(response[\"Body\"].read().decode(\"utf-8\")), response[\"ETag\"]",
"def retrieve(self, bucket, key, gzipped=True):\n object = boto3.resource('s3').Object(bucket, key)\n body = object.get()['Body']\n try:\n raw = body.read()\n if gzipped:\n return gzip.decompress(raw)\n else:\n return raw\n finally:\n body.close()",
"def download(source_bucket, source_object_key, tmp):\n # TODO\n pass",
"def get_blob_meta(objecturl, logprefix=\"\", **kwargs):\n bucketname, keyname = s3_split_url(objecturl)\n logprefix = logprefix + \" \" if logprefix else logprefix\n logger.debug(\"%sfetching meta for URL: %s\", logprefix, objecturl)\n s3 = boto3.client('s3')\n try:\n # if 'RequestPayer' not in kwargs:\n # kwargs['RequestPayer'] = 'requester'\n\n head_res = s3.head_object(Bucket=bucketname, Key=keyname, **kwargs)\n except ClientError as clierr:\n if clierr.response['Error']['Code'] == '404':\n raise NoSuchFile(objecturl)\n logger.error(\"%scould not fetch URL (%s): %s\", logprefix, repr(clierr.response['Error']['Code']), objecturl,\n exc_info=clierr)\n raise\n return head_res",
"def get_key(self, key, bucket_name=None):\n if not bucket_name:\n (bucket_name, key) = self.parse_s3_url(key)\n \n obj = self.get_resource_type('s3').Object(bucket_name, key)\n obj.load()\n return obj",
"def get(cls, bucket_id):\n return cls.query.filter_by(id=bucket_id, deleted=False).one_or_none()",
"def read_object(bucket=None, key=None, uri=None, amt=None, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n return get_object(bucket, key, uri, s3_resource=s3_resource)['Body'].read(amt)",
"def download_bucket(bucket_name):\n\n s3 = boto3.resource('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n\n bucket_obj = s3.Bucket(bucket_name)\n \n all_files = []\n\n for key in bucket_obj.objects.all():\n all_files.append(key.key)\n\n for key in all_files:\n download = s3.Bucket(bucket_name).download_file(key, key)\n print(\"{0} file/object downloaded from {1} bucket as {2}.\\n\\nFile/object can be found here: {3}\".format(key, bucket_name, key, os.getcwd()))\n print()\n\n print(\"The entire {} bucket has been downloaded!\".format(bucket_name))",
"def load_object(bucket_name, key, file):\r\n s3_client.download_file(bucket_name, key, file)\r\n with open(file, \"rb\") as f:\r\n object = pickle.load(f)\r\n return object"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs a controller that copies 'owner' labels to pods owned by deployments. | def main():
def handle_item(item):
"""
Updates the given item by copying its 'owner' label to its pod template.
This requires that the item have a nonempty 'owner' label.
Args:
item: A Kubernetes object with a metadata field and a spec.template.metadata field.
"""
owner = item.metadata.labels.get('owner')
if not owner:
raise Rejection("Label 'owner' missing from {}:{}".format(
item.metadata.namespace, item.metadata.name), 'MissingOwner')
# Update the item's template. All deployments should have a template with labels; we will
# update the 'owner' label iff it's not present.
# If the label is present and doesn't match the deployment's label, raise an error, since we
# don't want to figure out if it's used in the deployment's selector before mutating.
template_metadata = item.spec.template.metadata
if 'owner' not in template_metadata.labels:
# Set the template's owner label.
template_metadata.labels['owner'] = owner
elif template_metadata.labels['owner'] != owner:
raise Rejection(
'Template label owner={} does not match Deployment label owner={}'.format(
owner, template_metadata.labels['owner']), 'MismatchedOwner')
# Return the updated / validated item.
return item
def build_initializer(api_client):
# Build the controller.
deployment_controller = SimpleResourceController(
ResourceHandler.deployment_handler(api_client), handle_item)
        # The name here should match what you've configured in your InitializerConfiguration.
return InitializerController('owner.propagate.example', [deployment_controller])
main_loop(build_initializer) | [
"def create_dc_pods(request):\n class_instance = request.node.cls\n\n def finalizer():\n \"\"\"\n Delete multiple dc pods\n \"\"\"\n if hasattr(class_instance, \"dc_pod_objs\"):\n for pod in class_instance.dc_pod_objs:\n delete_deploymentconfig_pods(pod_obj=pod)\n\n request.addfinalizer(finalizer)\n\n class_instance.dc_pod_objs = [\n helpers.create_pod(\n interface_type=class_instance.interface,\n pvc_name=pvc_obj.name,\n do_reload=False,\n namespace=class_instance.namespace,\n sa_name=class_instance.sa_obj.name,\n dc_deployment=True,\n replica_count=class_instance.replica_count,\n )\n for pvc_obj in class_instance.pvc_objs\n ]\n\n for pod in class_instance.dc_pod_objs:\n helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)",
"def ClaimPodResources(self, podname, ns, uid):\n cfgstr = self.k8s.GetCfgAnnotations(podname, ns)\n if cfgstr == False:\n self.logger.error(f'Couldn\\'t find pod resources for {ns}.{podname}')\n return\n\n cfgtype = self.k8s.GetCfgType(podname, ns)\n tcfg = self.GetCfgParser(cfgtype, cfgstr)\n\n top = tcfg.CfgToTopology(True)\n if top is not None: # Start removing pod's resources from node\n n = self.k8s.GetPodNode(podname, ns)\n if not n:\n self.logger.error('Pulled pod\\'s config, but it wasn\\'t assigned a node!')\n return\n\n if n not in self.nodes:\n self.logger.error(f'Pod is mapping to node {n} but that node isn\\'t in the current node list. Skipping')\n return\n\n if self.nodes[n].PodPresent(podname, ns):\n self.logger.error(f'Pod {ns}.{podname} already scheduled on node {n}! Cannot add again')\n return \n\n # Passed all the tests. Now remove the resources from the cluster\n self.logger.info(f'Taking node resources from {n}')\n if not self.nodes[n].RemoveResourcesFromTopology(top):\n self.logger.error(\"Failed removing resources\")\n return\n\n # (alloc, free) = self.k8s.GetNodeHugepageResources(n) \n # if not self.nodes[n].SetHugepages(alloc, free):\n # self.logger.error(f'Error while parsing allocatable resources for node {n}')\n\n self.nodes[n].AddScheduledPod(podname, ns, top)\n self.pod_state[(ns, podname)] = {'state': PodStatus.POD_STATUS_SCHEDULED, 'time': time.time(), 'uid': uid}",
"def generate_controller(args):\n controller_template = os.path.join(dirname(abspath(__file__)), 'templates/controller.py')\n test_template = os.path.join(dirname(abspath(__file__)), 'templates/unittest.py')\n controller_name = args.get('<controller>')\n current_path = os.getcwd()\n\n logger.info('Start generating controller.')\n\n if not controller_name:\n logger.warning('Controller name cannot be empty.')\n return\n\n # controller file\n with open(controller_template, 'r') as template_file:\n controller_file_path = os.path.join(current_path, 'application/controllers',\n controller_name + '.py')\n with open(controller_file_path, 'w+') as controller_file:\n for line in template_file:\n new_line = line.replace('#{controller}', controller_name)\n controller_file.write(new_line)\n logger.info(\"New: %s\" % controller_file_path)\n\n # test file\n with open(test_template, 'r') as template_file:\n test_file_path = os.path.join(current_path, 'tests',\n 'test_%s.py' % controller_name)\n with open(test_file_path, 'w+') as test_file:\n for line in template_file:\n new_line = line.replace('#{controller}', controller_name) \\\n .replace('#{controller|title}', controller_name.title())\n test_file.write(new_line)\n logger.info(\"New: %s\" % test_file_path)\n\n # template dir\n template_dir_path = os.path.join(current_path, 'application/templates/%s' % controller_name)\n _mkdir_p(template_dir_path)\n logger.info(\"New: %s\" % template_dir_path + \"/\")\n\n # css dir\n css_dir_path = os.path.join(current_path, 'application/static/css/%s' % controller_name)\n _mkdir_p(css_dir_path)\n logger.info(\"New: %s/\" % css_dir_path)\n\n # js dir\n js_dir_path = os.path.join(current_path, 'application/static/js/%s' % controller_name)\n _mkdir_p(js_dir_path)\n logger.info(\"New: %s/\" % js_dir_path)\n\n # form file\n _generate_form(controller_name)\n\n logger.info('Finish generating controller.')",
"def _get_deployment_name_from_owners( self, owners, namespace ):\n replicaset = None\n owner = self._get_managing_controller( owners, 'ReplicaSet' )\n\n # check if we are owned by a replicaset\n if owner:\n name = owner.get( 'name', None )\n if name is None:\n return None\n replicaset = self._replicasets.lookup( namespace, name )\n\n if replicaset is None:\n return None\n\n return replicaset.deployment_name",
"def run_on_kubernetes(config, namespace='default'):\n # read local config\n load_kube_config()\n c = Configuration()\n Configuration.set_default(c)\n\n # create client and create pod on default namespace\n core_v1 = core_v1_api.CoreV1Api()\n spec = _generate_cluster_spec(config, kubernetes=True)\n core_v1.create_namespaced_pod(body=spec, namespace=namespace)\n print('Pod created.')",
"def deploy_ocs_via_operator(self, image=None):\n ui_deployment = config.DEPLOYMENT.get(\"ui_deployment\")\n live_deployment = config.DEPLOYMENT.get(\"live_deployment\")\n arbiter_deployment = config.DEPLOYMENT.get(\"arbiter_deployment\")\n\n if ui_deployment and ui_deployment_conditions():\n self.deployment_with_ui()\n # Skip the rest of the deployment when deploy via UI\n return\n else:\n logger.info(\"Deployment of OCS via OCS operator\")\n self.label_and_taint_nodes()\n\n if not live_deployment:\n create_catalog_source(image)\n\n if config.DEPLOYMENT.get(\"local_storage\"):\n setup_local_storage(storageclass=self.DEFAULT_STORAGECLASS_LSO)\n\n logger.info(\"Creating namespace and operator group.\")\n run_cmd(f\"oc create -f {constants.OLM_YAML}\")\n\n # Create Multus Networks\n if config.ENV_DATA.get(\"is_multus_enabled\"):\n create_public_net = config.ENV_DATA[\"multus_create_public_net\"]\n create_cluster_net = config.ENV_DATA[\"multus_create_cluster_net\"]\n interfaces = set()\n if create_public_net:\n interfaces.add(config.ENV_DATA[\"multus_public_net_interface\"])\n if create_cluster_net:\n interfaces.add(config.ENV_DATA[\"multus_cluster_net_interface\"])\n worker_nodes = get_worker_nodes()\n node_obj = ocp.OCP(kind=\"node\")\n platform = config.ENV_DATA.get(\"platform\").lower()\n if platform != constants.BAREMETAL_PLATFORM:\n for node in worker_nodes:\n for interface in interfaces:\n ip_link_cmd = f\"ip link set promisc on {interface}\"\n node_obj.exec_oc_debug_cmd(node=node, cmd_list=[ip_link_cmd])\n\n if create_public_net:\n logger.info(\"Creating Multus public network\")\n public_net_data = templating.load_yaml(constants.MULTUS_PUBLIC_NET_YAML)\n public_net_data[\"metadata\"][\"name\"] = config.ENV_DATA.get(\n \"multus_public_net_name\"\n )\n public_net_data[\"metadata\"][\"namespace\"] = config.ENV_DATA.get(\n \"multus_public_net_namespace\"\n )\n public_net_config_str = public_net_data[\"spec\"][\"config\"]\n public_net_config_dict = json.loads(public_net_config_str)\n public_net_config_dict[\"master\"] = config.ENV_DATA.get(\n \"multus_public_net_interface\"\n )\n public_net_config_dict[\"ipam\"][\"range\"] = config.ENV_DATA.get(\n \"multus_public_net_range\"\n )\n public_net_config_dict[\"type\"] = config.ENV_DATA.get(\n \"multus_public_net_type\"\n )\n public_net_config_dict[\"mode\"] = config.ENV_DATA.get(\n \"multus_public_net_mode\"\n )\n public_net_data[\"spec\"][\"config\"] = json.dumps(public_net_config_dict)\n public_net_yaml = tempfile.NamedTemporaryFile(\n mode=\"w+\", prefix=\"multus_public\", delete=False\n )\n templating.dump_data_to_temp_yaml(public_net_data, public_net_yaml.name)\n run_cmd(f\"oc create -f {public_net_yaml.name}\")\n\n if create_cluster_net:\n logger.info(\"Creating Multus cluster network\")\n cluster_net_data = templating.load_yaml(\n constants.MULTUS_CLUSTER_NET_YAML\n )\n cluster_net_data[\"metadata\"][\"name\"] = config.ENV_DATA.get(\n \"multus_cluster_net_name\"\n )\n cluster_net_data[\"metadata\"][\"namespace\"] = config.ENV_DATA.get(\n \"multus_cluster_net_namespace\"\n )\n cluster_net_config_str = cluster_net_data[\"spec\"][\"config\"]\n cluster_net_config_dict = json.loads(cluster_net_config_str)\n cluster_net_config_dict[\"master\"] = config.ENV_DATA.get(\n \"multus_cluster_net_interface\"\n )\n cluster_net_config_dict[\"ipam\"][\"range\"] = config.ENV_DATA.get(\n \"multus_cluster_net_range\"\n )\n cluster_net_config_dict[\"type\"] = config.ENV_DATA.get(\n \"multus_cluster_net_type\"\n )\n cluster_net_config_dict[\"mode\"] = 
config.ENV_DATA.get(\n \"multus_cluster_net_mode\"\n )\n cluster_net_data[\"spec\"][\"config\"] = json.dumps(cluster_net_config_dict)\n cluster_net_yaml = tempfile.NamedTemporaryFile(\n mode=\"w+\", prefix=\"multus_public\", delete=False\n )\n templating.dump_data_to_temp_yaml(\n cluster_net_data, cluster_net_yaml.name\n )\n run_cmd(f\"oc create -f {cluster_net_yaml.name}\")\n\n disable_addon = config.DEPLOYMENT.get(\"ibmcloud_disable_addon\")\n managed_ibmcloud = (\n config.ENV_DATA[\"platform\"] == constants.IBMCLOUD_PLATFORM\n and config.ENV_DATA[\"deployment_type\"] == \"managed\"\n )\n if managed_ibmcloud:\n ibmcloud.add_deployment_dependencies()\n if not live_deployment:\n create_ocs_secret(self.namespace)\n if config.DEPLOYMENT.get(\"create_ibm_cos_secret\", True):\n logger.info(\"Creating secret for IBM Cloud Object Storage\")\n with open(constants.IBM_COS_SECRET_YAML, \"r\") as cos_secret_fd:\n cos_secret_data = yaml.load(cos_secret_fd, Loader=yaml.SafeLoader)\n key_id = config.AUTH[\"ibmcloud\"][\"ibm_cos_access_key_id\"]\n key_secret = config.AUTH[\"ibmcloud\"][\"ibm_cos_secret_access_key\"]\n cos_secret_data[\"data\"][\"IBM_COS_ACCESS_KEY_ID\"] = key_id\n cos_secret_data[\"data\"][\"IBM_COS_SECRET_ACCESS_KEY\"] = key_secret\n cos_secret_data_yaml = tempfile.NamedTemporaryFile(\n mode=\"w+\", prefix=\"cos_secret\", delete=False\n )\n templating.dump_data_to_temp_yaml(\n cos_secret_data, cos_secret_data_yaml.name\n )\n exec_cmd(f\"oc create -f {cos_secret_data_yaml.name}\")\n if managed_ibmcloud and live_deployment and not disable_addon:\n self.deploy_odf_addon()\n return\n self.subscribe_ocs()\n operator_selector = get_selector_for_ocs_operator()\n subscription_plan_approval = config.DEPLOYMENT.get(\"subscription_plan_approval\")\n ocs_version = version.get_semantic_ocs_version_from_config()\n if ocs_version >= version.VERSION_4_9:\n ocs_operator_names = [\n defaults.ODF_OPERATOR_NAME,\n defaults.OCS_OPERATOR_NAME,\n defaults.MCG_OPERATOR,\n ]\n # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=2075422\n ocp_version = version.get_semantic_ocp_version_from_config()\n if live_deployment and (\n (\n ocp_version == version.VERSION_4_10\n and ocs_version == version.VERSION_4_9\n )\n or (\n ocp_version == version.VERSION_4_11\n and ocs_version == version.VERSION_4_10\n )\n ):\n ocs_operator_names.remove(defaults.MCG_OPERATOR)\n else:\n ocs_operator_names = [defaults.OCS_OPERATOR_NAME]\n\n if ocs_version >= version.VERSION_4_10:\n ocs_operator_names.append(defaults.ODF_CSI_ADDONS_OPERATOR)\n\n channel = config.DEPLOYMENT.get(\"ocs_csv_channel\")\n is_ibm_sa_linked = False\n\n for ocs_operator_name in ocs_operator_names:\n package_manifest = PackageManifest(\n resource_name=ocs_operator_name,\n selector=operator_selector,\n subscription_plan_approval=subscription_plan_approval,\n )\n package_manifest.wait_for_resource(timeout=300)\n csv_name = package_manifest.get_current_csv(channel=channel)\n csv = CSV(resource_name=csv_name, namespace=self.namespace)\n if managed_ibmcloud and not live_deployment:\n if not is_ibm_sa_linked:\n logger.info(\"Sleeping for 60 seconds before applying SA\")\n time.sleep(60)\n link_all_sa_and_secret_and_delete_pods(\n constants.OCS_SECRET, self.namespace\n )\n is_ibm_sa_linked = True\n csv.wait_for_phase(\"Succeeded\", timeout=720)\n # create storage system\n if ocs_version >= version.VERSION_4_9:\n exec_cmd(f\"oc apply -f {constants.STORAGE_SYSTEM_ODF_YAML}\")\n\n ocp_version = version.get_semantic_ocp_version_from_config()\n if 
managed_ibmcloud:\n config_map = ocp.OCP(\n kind=\"configmap\",\n namespace=self.namespace,\n resource_name=constants.ROOK_OPERATOR_CONFIGMAP,\n )\n config_map.get(retry=10, wait=5)\n config_map_patch = (\n '\\'{\"data\": {\"ROOK_CSI_KUBELET_DIR_PATH\": \"/var/data/kubelet\"}}\\''\n )\n logger.info(\"Patching config map to change KUBLET DIR PATH\")\n exec_cmd(\n f\"oc patch configmap -n {self.namespace} \"\n f\"{constants.ROOK_OPERATOR_CONFIGMAP} -p {config_map_patch}\"\n )\n\n # Modify the CSV with custom values if required\n if all(\n key in config.DEPLOYMENT for key in (\"csv_change_from\", \"csv_change_to\")\n ):\n modify_csv(\n csv=csv_name,\n replace_from=config.DEPLOYMENT[\"csv_change_from\"],\n replace_to=config.DEPLOYMENT[\"csv_change_to\"],\n )\n\n # create custom storage class for StorageCluster CR if necessary\n if self.CUSTOM_STORAGE_CLASS_PATH is not None:\n with open(self.CUSTOM_STORAGE_CLASS_PATH, \"r\") as custom_sc_fo:\n custom_sc = yaml.load(custom_sc_fo, Loader=yaml.SafeLoader)\n # set value of DEFAULT_STORAGECLASS to mach the custom storage cls\n self.DEFAULT_STORAGECLASS = custom_sc[\"metadata\"][\"name\"]\n run_cmd(f\"oc create -f {self.CUSTOM_STORAGE_CLASS_PATH}\")\n\n # Set rook log level\n self.set_rook_log_level()\n\n # creating StorageCluster\n if config.DEPLOYMENT.get(\"kms_deployment\"):\n kms = KMS.get_kms_deployment()\n kms.deploy()\n\n if config.ENV_DATA[\"mcg_only_deployment\"]:\n mcg_only_deployment()\n return\n\n cluster_data = templating.load_yaml(constants.STORAGE_CLUSTER_YAML)\n # Figure out all the OCS modules enabled/disabled\n # CLI parameter --disable-components takes the precedence over\n # anything which comes from config file\n if config.ENV_DATA.get(\"disable_components\"):\n for component in config.ENV_DATA[\"disable_components\"]:\n config.COMPONENTS[f\"disable_{component}\"] = True\n logger.warning(f\"disabling: {component}\")\n\n # Update cluster_data with respective component enable/disable\n for key in config.COMPONENTS.keys():\n comp_name = constants.OCS_COMPONENTS_MAP[key.split(\"_\")[1]]\n if config.COMPONENTS[key]:\n if \"noobaa\" in key:\n merge_dict(\n cluster_data,\n {\n \"spec\": {\n \"multiCloudGateway\": {\"reconcileStrategy\": \"ignore\"}\n }\n },\n )\n else:\n merge_dict(\n cluster_data,\n {\n \"spec\": {\n \"managedResources\": {\n f\"{comp_name}\": {\"reconcileStrategy\": \"ignore\"}\n }\n }\n },\n )\n\n if arbiter_deployment:\n cluster_data[\"spec\"][\"arbiter\"] = {}\n cluster_data[\"spec\"][\"nodeTopologies\"] = {}\n cluster_data[\"spec\"][\"arbiter\"][\"enable\"] = True\n cluster_data[\"spec\"][\"nodeTopologies\"][\n \"arbiterLocation\"\n ] = self.get_arbiter_location()\n cluster_data[\"spec\"][\"storageDeviceSets\"][0][\"replica\"] = 4\n\n cluster_data[\"metadata\"][\"name\"] = config.ENV_DATA[\"storage_cluster_name\"]\n\n deviceset_data = cluster_data[\"spec\"][\"storageDeviceSets\"][0]\n device_size = int(config.ENV_DATA.get(\"device_size\", defaults.DEVICE_SIZE))\n\n logger.info(\n \"Flexible scaling is available from version 4.7 on LSO cluster with less than 3 zones\"\n )\n zone_num = get_az_count()\n if (\n config.DEPLOYMENT.get(\"local_storage\")\n and ocs_version >= version.VERSION_4_7\n and zone_num < 3\n and not config.DEPLOYMENT.get(\"arbiter_deployment\")\n ):\n cluster_data[\"spec\"][\"flexibleScaling\"] = True\n # https://bugzilla.redhat.com/show_bug.cgi?id=1921023\n cluster_data[\"spec\"][\"storageDeviceSets\"][0][\"count\"] = 3\n cluster_data[\"spec\"][\"storageDeviceSets\"][0][\"replica\"] = 1\n\n # set 
size of request for storage\n if self.platform.lower() == constants.BAREMETAL_PLATFORM:\n pv_size_list = helpers.get_pv_size(\n storageclass=self.DEFAULT_STORAGECLASS_LSO\n )\n pv_size_list.sort()\n deviceset_data[\"dataPVCTemplate\"][\"spec\"][\"resources\"][\"requests\"][\n \"storage\"\n ] = f\"{pv_size_list[0]}\"\n else:\n deviceset_data[\"dataPVCTemplate\"][\"spec\"][\"resources\"][\"requests\"][\n \"storage\"\n ] = f\"{device_size}Gi\"\n\n # set storage class to OCS default on current platform\n if self.DEFAULT_STORAGECLASS:\n deviceset_data[\"dataPVCTemplate\"][\"spec\"][\n \"storageClassName\"\n ] = self.DEFAULT_STORAGECLASS\n\n # StorageCluster tweaks for LSO\n if config.DEPLOYMENT.get(\"local_storage\"):\n cluster_data[\"spec\"][\"manageNodes\"] = False\n cluster_data[\"spec\"][\"monDataDirHostPath\"] = \"/var/lib/rook\"\n deviceset_data[\"name\"] = constants.DEFAULT_DEVICESET_LSO_PVC_NAME\n deviceset_data[\"portable\"] = False\n deviceset_data[\"dataPVCTemplate\"][\"spec\"][\n \"storageClassName\"\n ] = self.DEFAULT_STORAGECLASS_LSO\n lso_type = config.DEPLOYMENT.get(\"type\")\n if (\n self.platform.lower() == constants.AWS_PLATFORM\n and not lso_type == constants.AWS_EBS\n ):\n deviceset_data[\"count\"] = 2\n # setting resource limits for AWS i3\n # https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.6/html-single/deploying_openshift_container_storage_using_amazon_web_services/index#creating-openshift-container-storage-cluster-on-amazon-ec2_local-storage\n if (\n ocs_version >= version.VERSION_4_5\n and config.ENV_DATA.get(\"worker_instance_type\")\n == constants.AWS_LSO_WORKER_INSTANCE\n ):\n deviceset_data[\"resources\"] = {\n \"limits\": {\"cpu\": 2, \"memory\": \"5Gi\"},\n \"requests\": {\"cpu\": 1, \"memory\": \"5Gi\"},\n }\n if (ocp_version >= version.VERSION_4_6) and (\n ocs_version >= version.VERSION_4_6\n ):\n cluster_data[\"metadata\"][\"annotations\"] = {\n \"cluster.ocs.openshift.io/local-devices\": \"true\"\n }\n count = config.DEPLOYMENT.get(\"local_storage_storagedeviceset_count\")\n if count is not None:\n deviceset_data[\"count\"] = count\n\n # Allow lower instance requests and limits for OCS deployment\n # The resources we need to change can be found here:\n # https://github.com/openshift/ocs-operator/blob/release-4.5/pkg/deploy-manager/storagecluster.go#L88-L116\n if config.DEPLOYMENT.get(\"allow_lower_instance_requirements\"):\n none_resources = {\"Requests\": None, \"Limits\": None}\n deviceset_data[\"resources\"] = deepcopy(none_resources)\n resources = [\n \"mon\",\n \"mds\",\n \"rgw\",\n \"mgr\",\n \"noobaa-core\",\n \"noobaa-db\",\n ]\n if ocs_version >= version.VERSION_4_5:\n resources.append(\"noobaa-endpoint\")\n cluster_data[\"spec\"][\"resources\"] = {\n resource: deepcopy(none_resources) for resource in resources\n }\n if ocs_version >= version.VERSION_4_5:\n cluster_data[\"spec\"][\"resources\"][\"noobaa-endpoint\"] = {\n \"limits\": {\"cpu\": 1, \"memory\": \"500Mi\"},\n \"requests\": {\"cpu\": 1, \"memory\": \"500Mi\"},\n }\n else:\n local_storage = config.DEPLOYMENT.get(\"local_storage\")\n platform = config.ENV_DATA.get(\"platform\", \"\").lower()\n if local_storage and platform == \"aws\":\n resources = {\n \"mds\": {\n \"limits\": {\"cpu\": 3, \"memory\": \"8Gi\"},\n \"requests\": {\"cpu\": 1, \"memory\": \"8Gi\"},\n }\n }\n if ocs_version < version.VERSION_4_5:\n resources[\"noobaa-core\"] = {\n \"limits\": {\"cpu\": 2, \"memory\": \"8Gi\"},\n \"requests\": {\"cpu\": 1, \"memory\": \"8Gi\"},\n }\n 
resources[\"noobaa-db\"] = {\n \"limits\": {\"cpu\": 2, \"memory\": \"8Gi\"},\n \"requests\": {\"cpu\": 1, \"memory\": \"8Gi\"},\n }\n cluster_data[\"spec\"][\"resources\"] = resources\n\n # Enable host network if enabled in config (this require all the\n # rules to be enabled on underlaying platform).\n if config.DEPLOYMENT.get(\"host_network\"):\n cluster_data[\"spec\"][\"hostNetwork\"] = True\n\n cluster_data[\"spec\"][\"storageDeviceSets\"] = [deviceset_data]\n\n if managed_ibmcloud:\n mon_pvc_template = {\n \"spec\": {\n \"accessModes\": [\"ReadWriteOnce\"],\n \"resources\": {\"requests\": {\"storage\": \"20Gi\"}},\n \"storageClassName\": self.DEFAULT_STORAGECLASS,\n \"volumeMode\": \"Filesystem\",\n }\n }\n cluster_data[\"spec\"][\"monPVCTemplate\"] = mon_pvc_template\n # Need to check if it's needed for ibm cloud to set manageNodes\n cluster_data[\"spec\"][\"manageNodes\"] = False\n\n if config.ENV_DATA.get(\"encryption_at_rest\"):\n if ocs_version < version.VERSION_4_6:\n error_message = \"Encryption at REST can be enabled only on OCS >= 4.6!\"\n logger.error(error_message)\n raise UnsupportedFeatureError(error_message)\n logger.info(\"Enabling encryption at REST!\")\n cluster_data[\"spec\"][\"encryption\"] = {\n \"enable\": True,\n }\n if ocs_version >= version.VERSION_4_10:\n cluster_data[\"spec\"][\"encryption\"] = {\n \"clusterWide\": True,\n }\n if config.DEPLOYMENT.get(\"kms_deployment\"):\n cluster_data[\"spec\"][\"encryption\"][\"kms\"] = {\n \"enable\": True,\n }\n\n if config.DEPLOYMENT.get(\"ceph_debug\"):\n setup_ceph_debug()\n cluster_data[\"spec\"][\"managedResources\"] = {\n \"cephConfig\": {\"reconcileStrategy\": \"ignore\"}\n }\n if config.ENV_DATA.get(\"is_multus_enabled\"):\n public_net_name = config.ENV_DATA[\"multus_public_net_name\"]\n public_net_namespace = config.ENV_DATA[\"multus_public_net_namespace\"]\n cluster_net_name = config.ENV_DATA[\"multus_cluster_net_name\"]\n cluster_net_namespace = config.ENV_DATA[\"multus_cluster_net_namespace\"]\n selector_data = {}\n if create_public_net:\n public_selector_data = {\n \"public\": f\"{public_net_namespace}/{public_net_name}\"\n }\n selector_data.update(public_selector_data)\n if create_cluster_net:\n cluster_selector_data = {\n \"cluster\": f\"{cluster_net_namespace}/{cluster_net_name}\"\n }\n selector_data.update(cluster_selector_data)\n cluster_data[\"spec\"][\"network\"] = {\n \"provider\": \"multus\",\n \"selectors\": selector_data,\n }\n\n # Enable in-transit encryption.\n if config.ENV_DATA.get(\"in_transit_encryption\"):\n if \"network\" not in cluster_data[\"spec\"]:\n cluster_data[\"spec\"][\"network\"] = {}\n\n if \"connections\" not in cluster_data[\"spec\"][\"network\"]:\n cluster_data[\"spec\"][\"network\"][\"connections\"] = {}\n\n cluster_data[\"spec\"][\"network\"][\"connections\"] = {\n \"encryption\": {\"enabled\": True}\n }\n\n # Use Custom Storageclass Names\n if config.ENV_DATA.get(\"custom_default_storageclass_names\"):\n storageclassnames = config.ENV_DATA.get(\"storageclassnames\")\n\n keys_to_update = [\n constants.OCS_COMPONENTS_MAP[\"cephfs\"],\n constants.OCS_COMPONENTS_MAP[\"rgw\"],\n constants.OCS_COMPONENTS_MAP[\"blockpools\"],\n constants.OCS_COMPONENTS_MAP[\"cephnonresilentpools\"],\n ]\n\n cluster_data.setdefault(\"spec\", {}).setdefault(\"managedResources\", {})\n\n for key in keys_to_update:\n if storageclassnames.get(key):\n cluster_data[\"spec\"][\"managedResources\"][key] = {\n \"storageClassName\": storageclassnames[key]\n }\n\n if cluster_data[\"spec\"].get(\"nfs\"):\n 
cluster_data[\"spec\"][\"nfs\"] = {\n \"storageClassName\": storageclassnames[\"nfs\"]\n }\n\n if cluster_data[\"spec\"].get(\"encryption\"):\n cluster_data[\"spec\"][\"encryption\"] = {\n \"storageClassName\": storageclassnames[\"encryption\"]\n }\n\n cluster_data_yaml = tempfile.NamedTemporaryFile(\n mode=\"w+\", prefix=\"cluster_storage\", delete=False\n )\n templating.dump_data_to_temp_yaml(cluster_data, cluster_data_yaml.name)\n run_cmd(f\"oc create -f {cluster_data_yaml.name}\", timeout=1200)\n if config.DEPLOYMENT[\"infra_nodes\"]:\n _ocp = ocp.OCP(kind=\"node\")\n _ocp.exec_oc_cmd(\n command=f\"annotate namespace {config.ENV_DATA['cluster_namespace']} \"\n f\"{constants.NODE_SELECTOR_ANNOTATION}\"\n )",
"def pod(action=None, name=None, datacenter=None):\n base_url = '%s/zones' % (server)\n r = None\n if action == 'list':\n r = call('get', '%s' % base_url)\n elif action == 'create':\n if not datacenter:\n print 'Missing datacenter_id to create'\n sys.exit(1)\n\n datacenter_id = fetch_id('datacenters', datacenter)\n r = call('post', '%s/zones' %\n (server),\n data=json.dumps({'name': name, 'datacenter_id': datacenter_id})\n )\n elif action == 'delete':\n r = call('delete', '%s/%s' %\n (base_url, fetch_id('zones', name))\n )\n elif action == 'info':\n r = call('get', '%s/by-name/%s' % (base_url, name))\n else:\n baker.usage(sys._getframe().f_code.co_name)\n sys.exit(1)\n pretty_output(r)",
"def add_labels(controller):\n labels = lists(models(MarathonLabel, controller=just(controller)))\n return labels.map(lambda _: controller)",
"def check_admin_console_pods(args=None, ):\n ocutil.namespace = args.namespace2\n logger.info('Namespace: %s', args.namespace2)\n return check_deployment_replicas(args.deployment2)",
"def submitTo(self, controller):\n self.controller = controller",
"def add_owner_to_channels(cls, channels, owners):\n for channel in channels:\n try:\n channel['owner'] = owners[channel['owner']]\n except TypeError:\n pass",
"def do_host_device_label_assign(cc, args):\n attributes = utils.args_array_to_list_dict(args.attributes[0])\n parameters = [\"overwrite=\" + str(args.overwrite)]\n host = ihost_utils._find_ihost(cc, args.hostnameorid)\n device = pci_device.find_device(cc, host, args.nameorpciaddr)\n attributes.append({'pcidevice_uuid': device.uuid})\n new_device_labels = cc.device_label.assign(attributes, parameters)\n for p in new_device_labels.device_labels:\n uuid = p['uuid']\n if uuid is not None:\n try:\n device_label = cc.device_label.get(uuid)\n except exc.HTTPNotFound:\n raise exc.CommandError('Host device label not found: %s' % uuid)\n _print_device_label_show(device_label)",
"def _assign_owner_role(self) -> dict:\n print('Assigning Owner role...')\n\n count = 5\n\n #\n # This operation can fail if the service principal is not finished\n # being created on the application\n #\n while True:\n try:\n return self._run_az([\n 'role', 'assignment', 'create',\n '--assignee', self._selected_application['appId'],\n '--role', 'Owner',\n '--resource-group', self._selected_resource_group['name']\n ])\n except Exception as e:\n if count:\n print(self.format_error(\n 'Role assignment failed, trying again...'))\n time.sleep(5)\n count -= 1\n else:\n raise e",
"def delegate_command_to_pod(args: str, pod: str, container: str):\n # logger.info(\"Delegate command to pod: %s, %s, %s\", args, pod, container)\n args = f'kubectl exec -c {container} {pod} -- bash -c \"{args}\"'\n return run_command(args)",
"def _apply_object(obj: Mapping):\n data = yaml.dump(obj)\n subprocess.run(f\"kubectl apply -f -\", shell=True, check=True, text=True, input=data)",
"def update_controller():\n update_items(\n inst, config_entry, mikrotik_controller, async_add_entities, tracked\n )",
"async def owner(ctx):\n pass",
"def post(self, pod):\n if self.from_pods:\n raise exception.OperationNotPermitted\n\n pod_obj = objects.Pod(pecan.request.context,\n **pod.as_dict())\n new_pod = pecan.request.rpcapi.pod_create(pod_obj)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('pods', new_pod.uuid)\n return Pod.convert_with_links(new_pod)",
"def make_pod_from_template(temjson, kdd, docker2run):\n\n gname = cvt_kdd_to_gname( kdd )\n\n temjson[\"metadata\"][\"name\"] = gname\n\n temjson[\"spec\"][\"containers\"][0][\"name\"] = gname\n\n temjson[\"spec\"][\"containers\"][0][\"image\"] = docker2run\n\n temjson[\"spec\"][\"containers\"][0][\"args\"][0] = kdd\n\n return temjson"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the given item by copying its 'owner' label to its pod template. This requires that the item have a nonempty 'owner' label. | def handle_item(item):
owner = item.metadata.labels.get('owner')
if not owner:
raise Rejection("Label 'owner' missing from {}:{}".format(
item.metadata.namespace, item.metadata.name), 'MissingOwner')
# Update the item's template. All deployments should have a template with labels; we will
# update the 'owner' label iff it's not present.
# If the label is present and doesn't match the deployment's label, raise an error, since we
# don't want to figure out if it's used in the deployment's selector before mutating.
template_metadata = item.spec.template.metadata
if 'owner' not in template_metadata.labels:
# Set the template's owner label.
template_metadata.labels['owner'] = owner
elif template_metadata.labels['owner'] != owner:
raise Rejection(
'Template label owner={} does not match Deployment label owner={}'.format(
owner, template_metadata.labels['owner']), 'MismatchedOwner')
# Return the updated / validated item.
return item | [
"def main():\n\n def handle_item(item):\n \"\"\"\n Updates the given item by copying its 'owner' label to its pod template.\n\n This requires that the item have a nonempty 'owner' label.\n\n Args:\n item: A Kubernetes object with a metadata field and a spec.template.metadata field.\n \"\"\"\n owner = item.metadata.labels.get('owner')\n if not owner:\n raise Rejection(\"Label 'owner' missing from {}:{}\".format(\n item.metadata.namespace, item.metadata.name), 'MissingOwner')\n\n # Update the item's template. All deployments should have a template with labels; we will\n # update the 'owner' label iff it's not present.\n # If the label is present and doesn't match the deployment's label, raise an error, since we\n # don't want to figure out if it's used in the deployment's selector before mutating.\n\n template_metadata = item.spec.template.metadata\n\n if 'owner' not in template_metadata.labels:\n # Set the template's owner label.\n template_metadata.labels['owner'] = owner\n elif template_metadata.labels['owner'] != owner:\n raise Rejection(\n 'Template label owner={} does not match Deployment label owner={}'.format(\n owner, template_metadata.labels['owner']), 'MismatchedOwner')\n\n # Return the updated / validated item.\n return item\n\n def build_initializer(api_client):\n # Build the controller.\n deployment_controller = SimpleResourceController(\n ResourceHandler.deployment_handler(api_client), handle_item)\n # The name here should match what you've configurd in your InitializerConfiguration.\n return InitializerController('owner.propagate.example', [deployment_controller])\n\n main_loop(build_initializer)",
"def modify(username, template, summary, owner, email=None):\n if owner:\n email = lookup_email_addr(owner)\n update_meta(template,\n username=username,\n owner=owner,\n email=email,\n summary=summary)",
"def putToOPML(self, owner):\n PutToOPML(owner)",
"def replacePod(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'body', 'ca_cert', 'cert', 'key']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method replacePod\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/namespaces/{namespaces}/pods/{name}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'PUT'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = '*/*,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n if ('body' in params):\n bodyParam = params['body']\n \n\n postData = (formParams if formParams else bodyParam)\n\n ca = params.get('ca_cert')\n cert = params.get('cert')\n key = params.get('key')\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, ca, cert, key, headerParams, files=files)\n\n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'V1beta3_Pod')\n return responseObject",
"def check_for_owner_tag(self, ec2item):\n tag = \"EC2 instance has no owner tag\"\n severity = 3\n tags = ec2item.config.get('tags', {})\n if 'owner' not in tags:\n # TODO: remove below exception for mrjobs INFRA-3453\n if 'aws:elasticmapreduce:job-flow-id' not in tags:\n self.add_issue(severity, tag, ec2item, notes=None)",
"def _update_template_fcp_raw_usage(self, raw_usage, raw_item):\n (fcp_id, template_id, path_id, assigner_id, connections,\n reserved, wwpn_npiv, wwpn_phy, chpid, state, owner,\n tmpl_id) = raw_item\n if not raw_usage.get(template_id, None):\n raw_usage[template_id] = {}\n if not raw_usage[template_id].get(path_id, None):\n raw_usage[template_id][path_id] = []\n # remove path_id from raw data, keep the last templ_id to\n # represent from which template this FCP has been allocated out.\n return_raw = (fcp_id, template_id, assigner_id, connections,\n reserved, wwpn_npiv, wwpn_phy, chpid, state,\n owner, tmpl_id)\n raw_usage[template_id][path_id].append(return_raw)\n return raw_usage",
"def update_item(self, xblock, user_id, allow_not_found=False, force=False, **kwargs):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def upsert(self, kind: VersionedDataKind, item: dict):",
"def update_PODtemplate_styles(pod_template, event):\n style_template = pod_template.get_style_template()\n if not style_template or pod_template.odt_file.contentType != 'application/vnd.oasis.opendocument.text':\n return\n style_odt = style_template.odt_file\n style_template_file = create_temporary_file(style_odt, 'style_template.odt')\n _update_template_styles(pod_template, style_template_file.name)\n logger.info('\"{}\" => updated'.format(pod_template.Title()))",
"def create_owner(owner_info):\n return insert('owner', owner_info.keys(), owner_info.values())",
"def make_pod_from_template(temjson, kdd, docker2run):\n\n gname = cvt_kdd_to_gname( kdd )\n\n temjson[\"metadata\"][\"name\"] = gname\n\n temjson[\"spec\"][\"containers\"][0][\"name\"] = gname\n\n temjson[\"spec\"][\"containers\"][0][\"image\"] = docker2run\n\n temjson[\"spec\"][\"containers\"][0][\"args\"][0] = kdd\n\n return temjson",
"def patchPod(self, **kwargs):\n\n allParams = ['name', 'namespaces', 'body', 'ca_cert', 'cert', 'key']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method patchPod\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/namespaces/{namespaces}/pods/{name}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'PATCH'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json-patch+json,application/merge-patch+json,application/strategic-merge-patch+json,'\n\n \n\n \n\n \n if ('name' in params):\n replacement = str(self.apiClient.toPathValue(params['name']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'name' + '}',\n replacement)\n \n if ('namespaces' in params):\n replacement = str(self.apiClient.toPathValue(params['namespaces']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'namespaces' + '}',\n replacement)\n \n\n \n\n \n if ('body' in params):\n bodyParam = params['body']\n \n\n postData = (formParams if formParams else bodyParam)\n\n ca = params.get('ca_cert')\n cert = params.get('cert')\n key = params.get('key')\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, ca, cert, key, headerParams, files=files)",
"def post(self, pod):\n if self.from_pods:\n raise exception.OperationNotPermitted\n\n pod_obj = objects.Pod(pecan.request.context,\n **pod.as_dict())\n new_pod = pecan.request.rpcapi.pod_create(pod_obj)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('pods', new_pod.uuid)\n return Pod.convert_with_links(new_pod)",
"def update_note(ip, ensure_empty=True):\n ip_json = client.call('SoftLayer_Network_Subnet_IpAddress', 'getObject', id=ip.id)\n old_ip = IP(ip.id, ip_json)\n if not old_ip.network and not old_ip.broadcast and not old_ip.gateway and not old_ip.reserved and (\n not ensure_empty or not old_ip.note):\n logger.info('Assigning note \"{note}\" to ip \"{ip}\"'.format(note=ip.note, ip=ip.address))\n client.call('SoftLayer_Network_Subnet_IpAddress', 'editObject', {'id': ip.id, 'note': ip.note}, id=ip.id)\n else:\n raise Exception('IP \"{ip}\" is not available for assignment'.format(ip=ip.address))",
"def put_in_pouch(self, item):\r\n self.pouch_contents.append(item)",
"def update(name, spec, **_):\n if \"ingress\" in spec:\n utils.create_or_update('placement/ingress.yml.j2',\n name=name, spec=spec)",
"def update(self, POST) -> str:\n # Renaming the label has no immediate effect on item__id's in\n # data and choice tables. However, the user must ensure that\n # new uploaded data take into account the new label names.\n # Get names of current items\n item_id = int(POST['id'])\n new_label = str(POST['label'])\n try:\n current_label = self.labels_by_ids[item_id]\n except:\n d = { 'item_id': item_id, 'new_label': new_label }\n self.context['item_list_message'] = self.message.get('item_update_fail_ID',d)\n else:\n d = { 'current_label': current_label, 'new_label': new_label }\n if new_label in self.labels:\n self.context['item_list_message'] = self.message.get('item_update_failure',d)\n else:\n item = self.model.objects.get(pk=item_id)\n item.label = new_label\n item.save()\n self.context['item_list_message'] = self.message.get('item_update_success',d)\n self.set_list_context()\n return self.context",
"def transfer_sample_template_owner(\n self, sample_template_id: Union[int, str], new_owner: str\n ):\n st_id = Id(sample_template_id)\n return self._do_transfer_owner(\"sampleTemplates\", st_id, new_owner)",
"def get_owner_pi(context):\n assert interfaces.IOwned.providedBy(context), \\\n \"Not an Owned (parliamentary) Item: %s\" % (context)\n return dbutils.get_user(context.owner_id)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens the REACTORS.ratdb in the /ReacDB/db directory and grabs the relevant reactor information from the ratdb file. Information includes the reactor's licensed MWt, latitude and longitude, and reactor type (if available). Longitude and latitude are rounded to two decimal places for more conservative distance accuracy. | def parseRATDB(reacname):
MWt = 'none'
longlat = ['none', 'none']
    f = open(ratdbpath, 'r')
    beginparse = False
    parsing = False
    # Scan forward until the line naming the requested reactor is found.
    while not beginparse:
        stuff = f.readline()
        if stuff == '':
            # Reached end of file without finding the reactor; give up gracefully.
            print("Reactor " + reacname + " not found in REACTORS.ratdb")
            break
        if stuff.find(reacname) != -1:
            beginparse = True
            parsing = True
    while parsing:
parseline = f.readline()
if parseline == '':
print("Reached the end of the data block or file. stopping")
break
line_pieces = parseline.split(":")
if line_pieces[0] == 'power_therm':
MWt = round(float(line_pieces[1].rstrip(",\n").lstrip()),1)
elif line_pieces[0] == 'longitude':
longlat[0] = round(float(line_pieces[1].rstrip(",\n").lstrip()),2)
elif line_pieces[0] == 'latitude':
longlat[1] = round(float(line_pieces[1].rstrip(",\n").lstrip()),2)
if (MWt != 'none') and ('none' not in longlat):
parsing = False
    f.close()
    print(MWt, longlat)
    return MWt, longlat | [
"def R_ob(filename):\n \n data = apr3read(filename)\n time_d = data['timedates']\n lon_gate = data['lon_gate']\n lat_gate = data['lat_gate']\n alt_gate = data['alt_gate']\n Z_Ku = data['Ku']\n Z_Ka = data['Ka']\n Z_W = data['W']\n Z_DFR1 = data['DFR_1']\n Z_DFR2 = data['DFR_2']\n Z_DFR3 = data['DFR_3']\n plane = data['alt_plane']\n rrange = data['range']\n lon_ = data['lon']\n lat_ = data['lat']\n\n time = {}\n pitch = {}\n roll = {}\n drift = {}\n fields = {}\n Ku = {}\n Ka = {} \n W = {}\n DFR1 = {}\n DFR2 = {}\n DFR3 = {}\n _range = {}\n metadata = {}\n longitude = {}\n latitude = {}\n altitude = {}\n sweep_number = {}\n sweep_mode = {}\n fixed_angle = {}\n sweep_start_ray_index = {}\n sweep_end_ray_index = {}\n rays_per_sweep = {}\n azimuth = {} \n elevation = {}\n gate_altitude = {}\n gate_longitude = {}\n gate_latitude = {}\n\n metadata['info'] = 'This is radar data from the APR3 insturment aboard the NASA DC-8 during the OLYMPEX Field Campain. This data contains matched Ku, Ka and W band radar data'\n projection = 'pyart_aeqd'\n Ku['data'] = Z_Ku\n Ka['data'] = Z_Ka\n W ['data'] = Z_W\n DFR1['data'] = Z_DFR1\n DFR2['data'] = Z_DFR2\n DFR3['data'] = Z_DFR3\n _range['data'] = rrange\n fields['Ku'] = Ku\n fields['Ka'] = Ka\n fields['W'] = W\n fields['DFR1'] = DFR1\n fields['DFR2'] = DFR2\n fields['DFR3'] = DFR3\n \n time['data'] = time_d\n time['units'] = 'EPOCH_UNITS'\n ##\n #\n #THIS IS WHERE THE ISSUE LIES.....plane radar is moving...ground radar is not...\n #Pyart can deal? probs not. \n longitude['data'] = lon_\n latitude['data'] = lat_\n altitude['data'] = plane\n #\n #\n ##\n gate_altitude['data'] = alt_gate\n gate_longitude['data'] =lon_gate\n gate_latitude['data'] = lat_gate\n sweep_number['data'] = np.arange(0,24,1)\n sweep_mode['data'] = [np.nan]\n fixed_angle['data'] = [np.nan]\n pitch['data'] = data['pitch']\n roll['data'] = data['roll']\n drift['data'] = data['drift']\n scan_type = 'apr3 scan'\n sweep_start_ray_index['data'] = [0]\n sweep_end_ray_index['data'] = [23]\n rays_per_sweep['data'] = [time_d.shape[1]]\n azimuth['data'] = [np.nan]\n elevation['data'] = [np.nan]\n ngates = int(_range['data'].shape[0])\n nrays = int(time_d.shape[1])\n nsweeps = int(time_d.shape[0])\n APR = Radar(time, _range, fields, metadata, scan_type, latitude, longitude, altitude, sweep_number,\n sweep_mode, fixed_angle, sweep_start_ray_index, sweep_end_ray_index, azimuth, elevation,\n altitude, target_scan_rate=None, rays_are_indexed=None, ray_angle_res=None,\n scan_rate=None, antenna_transition=None, instrument_parameters=None,\n radar_calibration=None, rotation=None, tilt=None, roll=roll, drift=drift,\n heading=None, pitch=pitch, georefs_applied=None)\n \n APR.gate_altitude.update(gate_altitude)\n APR.gate_longitude.update(gate_longitude)\n APR.gate_latitude.update(gate_latitude)\n \n return APR",
"def get_recipydb():\n return os.path.join(get_recipy_dir(), RECIPYDB)",
"def watersheds_gdb_reader():\n\n #watersheds_gdb = 'WRIWatersheds.gdb'\n # watersheds_gdb = 'AQID_Watwershed_Jan2020/AQID_Watwershed_Jan2020.shp'\n # watersheds = geopandas.read_file(watersheds_gdb)\n # watersheds.set_index(\"aqid\",inplace=True)\n \n #pfaf_id, areakm2\n watersheds_gdb = 'Watersheds_032020/wastershed_prj_latlon.shp'\n watersheds = geopandas.read_file(watersheds_gdb)\n watersheds.rename(columns={\"pfaf_id\": \"aqid\"},inplace=True)\n watersheds.set_index(\"aqid\",inplace=True)\n\n return watersheds",
"def retrieve_angles(db, res):\n\n # Retrieve angle values\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n df = pd.read_sql(f\"\"\"SELECT chain_id, nt_name, alpha, beta, gamma, delta, epsilon, zeta, chi \n FROM (\n SELECT chain_id FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE chain.rfam_acc = 'unmappd' AND structure.resolution <= {res} AND issue = 0\n ) AS c NATURAL JOIN nucleotide\n WHERE nt_name='A' OR nt_name='C' OR nt_name='G' OR nt_name='U';\"\"\", conn)\n\n # convert to degrees\n j = (180.0/np.pi)\n torsions = df.iloc[:, 0:2].merge(\n df.iloc[:, 2:9].applymap(lambda x: j*x if x <= np.pi else j*x-360.0, na_action='ignore'), \n left_index=True, right_index=True\n )\n return torsions",
"def get_rinfo(rindex):\n credentials_path = '/glade/u/home/jdubeau/github/rda-ML-resource/dsrqst-creds.json'\n credentials = json.load(open(credentials_path))\n\n host_name = 'rda-db.ucar.edu'\n db_name = 'dssdb'\n\n conn = mysql.connector.connect(user=credentials['user'],\n password=credentials['password'],\n host=host_name,\n database=db_name)\n curs = conn.cursor()\n curs.execute(\"SELECT rinfo FROM dsrqst WHERE rindex = %s\",\n (rindex,))\n\n rinfo = curs.fetchone()[0]\n\n conn.close()\n return rinfo",
"def refresh(self):\n print(\"Copy to\", self.ro_dbpath)\n\n if self.ro_dbpath:\n if (\n not self.ro_dbpath.is_file()\n or self.ro_dbpath.stat().st_mtime < self.dbpath.stat().st_mtime\n ):\n shutil.copyfile(self.dbpath, self.ro_dbpath)\n self.engine = None\n\n if not self.engine:\n self.engine = create_engine(\n \"sqlite://\", creator=self.connect, connect_args={\"readonly\": True}\n )\n # options={ \"mode\": \"ro\"})\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n logging.info(\"Connected to Zotero SQL database\")\n\n self.fields = {}\n for row in self.session.query(dbz.FieldsCombined):\n self.fields[row.fieldName] = row.fieldID",
"def __init__(self, myshp, mydbf, west_lon, east_lon, south_lat, north_lat):\n self.reader = shapefile.Reader(shp = myshp, dbf = mydbf)\n\n# The following four coordinate inputs must be obtained outside of the scope of the program. Go to\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# Then, call the method bounding_box to see the bounding UTM coordinates of the\n# data collected. Enter the bounding coordinates into the calculator to find the\n# corresponding latitude and longitude values. These will be used later to\n# integrate with other parts of the code.\n\n# This step is necessary because accurately location constrained data\n# acquisition is not always possible.\n\n self.west_lon = west_lon\n self.east_lon = east_lon\n self.south_lat = south_lat\n self.north_lat = north_lat\n\n# Get shapes objects which include information such as location and grid size\n self.shapes = self.reader.shapes()\n# Get record objects containing wind data\n self.records = self.reader.records()\n# Get number of datum\n self.length = len(self.shapes)",
"def get_dialogue_restr(dialogue_file, db):\n c = sqlite3.connect(db)\n curs = c.cursor()\n\n with open(dialogue_file, \"r\") as f:\n dialogues = pickle.load(f)\n\n dial_to_rests = collections.defaultdict(set)\n\n # Get restr. candidates from api_calls\n for idx, dial in enumerate(dialogues):\n dial = dial[::-1]\n for _, system in dial:\n tokens = system.split()\n api_call = []\n # Found an api_call\n if tokens[0] == \"api_call\":\n for t in tokens[1:]:\n if t in attr_names:\n api_call.append(\"%\")\n else:\n api_call.append(t)\n\n api_call = tuple(api_call)\n curs.execute(\"SELECT * FROM Restaurants WHERE cuisine LIKE ? \"\n \"and location LIKE ? and price LIKE ?\", api_call)\n api_response = curs.fetchall()\n rests = set([entry[0] for entry in api_response])\n\n # Update which restaurants map for given dialogue\n dial_to_rests[idx] = rests\n break\n\n # Get restr. candidates by string-matching from set of all restaurants\n all_restr = get_all_restaurants(db)\n for idx, dial in enumerate(dialogues):\n dial_text = reduce(lambda m,n: m + \" \" + n[0] + \" \" + n[1], dial, \"\")\n dial_restr = set()\n for restr in all_restr:\n if restr == \"ask\": continue\n restr_clean = \" \".join(restr.split(\"_\"))\n if restr_clean in dial_text or restr in dial_text:\n dial_restr.add(restr)\n\n dial_to_rests[idx].update(dial_restr)\n\n\n return dial_to_rests",
"def run():\n with open('directTDoA_knownpoints.db') as h:\n global my_info1, my_info2, my_info3\n i = 3 # skip the 3x comment lines at start of the text file database\n lines = h.readlines()\n my_info1 = []\n my_info2 = []\n my_info3 = []\n while i < sum(1 for _ in open('directTDoA_knownpoints.db')):\n inforegexp = re.search(r\"(.*),(.*),(.*)\", lines[i])\n my_info1.append(inforegexp.group(1))\n my_info2.append(inforegexp.group(2))\n my_info3.append(inforegexp.group(3))\n i += 1",
"def get_current_races_data():\n\tdatabase_filepath = get_database_filepath()\n\tcurrent_races_filepath = database_filepath + \"current_races.dat\"\n\tf = open(current_races_filepath, 'r')\n\tcurr_state_name = \"\"\n\tcurr_bp_dict = {}\n\tcurr_rp_dict = {}\n\tcurr_gp_dict = {}\n\tall_race_data_dict = {}\n\tfor line in f:\n\t\tif line != \"\\n\" or line != \"\":\n\t\t\tendl_index = line.index('\\n')\n\t\t\tclean_line = line[:endl_index]\n\t\t\tline_split_list = clean_line.split('; ')\n\t\t\tline_dat_name = line_split_list[0]\n\t\t\tif line_dat_name == \"State\":\n\t\t\t\tcurr_state_name = line_split_list[1]\n\t\t\telif line_dat_name == \"rp\":\n\t\t\t\tcurr_rp_dict = ast.literal_eval(line_split_list[1])\n\t\t\telif line_dat_name == \"bp\":\n\t\t\t\tcurr_bp_dict = ast.literal_eval(line_split_list[1])\n\t\t\telif line_dat_name == \"gp\":\n\t\t\t\tcurr_gp_dict = ast.literal_eval(line_split_list[1])\n\t\t\t\tnew_poll_obj = data_structures.StatePollData(curr_state_name)\n\t\t\t\tnew_poll_obj.red_poll_dict_list = curr_rp_dict\n\t\t\t\tnew_poll_obj.blue_poll_dict_list = curr_bp_dict\n\t\t\t\tnew_poll_obj.general_poll_dict_list = curr_gp_dict\n\t\t\t\tall_race_data_dict[curr_state_name] = new_poll_obj\n\treturn all_race_data_dict",
"def build_astronomia_db():\n\tdb = PqDbController()\n\tdb.connect_db()\n\tprint('Setting up Astronomia Schema')\n\tsetup_path = os.path.join('astronomia', 'astronomia_setup')\n\tdb.apply_sql_folder_and_list_to_db(setup_path, [\n\t\t'astronomia_schema.pgsql',\n\t\t'delta_t_data.pgsql',\n\t\t'planet_data.pgsql',\n\t\t'astronomia_constants_table.pgsql',\n\t\t'nutation_constants.pgsql',\n\t])\n\tprint('Loading VSOP87B Data')\n\tdelta_t_path = os.path.join('astronomia', 'astronomia_data', 'delta_t.json')\n\tdb.apply_vsop87_deltat_json_file(delta_t_path)\n\teath_data_path = os.path.join('astronomia', 'astronomia_data', 'earth.json')\n\tdb.apply_vsop87_planet_json_file(eath_data_path, 'earth')\n\t# Load types\n\tprint('Loading Astronomia Types')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'astronomia_types'))\n\t# Load Functions\n\tprint('Loading Astronomia - Base')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'base'))\n\tprint('Loading Astronomia - Constants')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'constants'))\n\tprint('Loading Astronomia - Coordinates')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'coordinates'))\n\tprint('Loading Astronomia - Delta T')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'delta_t'))\n\tprint('Loading Astronomia - Interpolation')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'interpolation'))\n\tprint('Loading Astronomia - Julian')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'julian'))\n\tprint('Loading Astronomia - Moonphase')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'moonphase'))\n\tprint('Loading Astronomia - Nutation')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'nutation'))\n\tprint('Loading Astronomia - Ordinal Dates')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'ordinal_dates'))\n\tprint('Loading Astronomia - Planet Position')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'planet_position'))\n\tprint('Loading Astronomia - Precession')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'precession'))\n\tprint('Loading Astronomia - Sexagesimal')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'sexagesimal'))\n\tprint('Loading Astronomia - Solar')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'solar'))\n\tprint('Loading Astronomia - Solstice')\n\tdb.apply_sql_folder_to_db(os.path.join('astronomia', 'solstice'))",
"def load_thermo_db(dbpath):\n with open(dbpath,'r') as f:\n lines = f.readlines()\n \n db = {}\n \n for line in lines:\n d = ast.literal_eval(line.strip())\n db[d['Name']] = d\n \n return db",
"def read_crater_list(cfg=CFG):\n df = pd.read_csv(cfg.crater_csv_in, names=cfg.crater_cols, header=0)\n\n # Convert units, mandatory columns\n df[\"diam\"] = df[\"diam\"] * 1000 # [km -> m]\n df[\"rad\"] = df[\"diam\"] / 2\n df[\"age\"] = df[\"age\"] * 1e9 # [Gyr -> yr]\n df[\"age_low\"] = df[\"age_low\"] * 1e9 # [Gyr -> yr]\n df[\"age_upp\"] = df[\"age_upp\"] * 1e9 # [Gyr -> yr]\n\n # Define optional columns\n if \"psr_area\" in df.columns:\n df[\"psr_area\"] = df.psr_area * 1e6 # [km^2 -> m^2]\n else:\n # Estimate psr area as 90% of crater area\n df[\"psr_area\"] = 0.9 * np.pi * df.rad**2\n return df",
"def read_GWinfos():\n filename = 'infos_recoveredGWsources_sim20160929.dat'\n lines = []\n\n print 'Reading {} content...'.format(filename)\n\n with open(filename, 'r') as datafile:\n\n for line in datafile:\n\n if line.startswith('#'): \n continue\n\n GPStime, date, m1, m2, D, SNR, RA, dec, iota, skymap, Egw = line.split(';')\n\n m1 = float(m1)\n m2 = float(m2)\n D = float(D)\n SNR = float(SNR)\n RA = float(RA) \n dec = float(dec)\n Egw = float(Egw)\n iota = float(iota)\n\n data = (GPStime, date, m1, m2, D, SNR, RA, dec, iota, skymap, Egw)\n lines.append(data)\n\n return numpy.array(lines,\n dtype=[('GPStime', '|S20'), ('date', '|S30'), ('mass1', float), ('mass2', float),\n ('distances', float), ('SNR', float), ('RA', float),\n ('dec', float), ('inclination', float), ('file_skymap', '|S40'), ('Egw', float)])",
"def read_regulondb(self, filename, gene_identifier=\"name\", sep=\"\\t\",\n comment=\"#\", encoding=\"utf-8\", mode=\"rb\", **kw_args):\n # choose the column for the gene identifier\n gene_identifier = gene_identifier.lower()\n if gene_identifier == \"name\":\n idn = itemgetter(1)\n elif gene_identifier == \"blattner\":\n idn = itemgetter(2)\n elif gene_identifier == \"regulondb\":\n idn = itemgetter(0)\n else:\n raise PyOrganismError(\"unrecognised gene identifier '%s'\",\n gene_identifier)\n # read information from the file\n kw_args[\"encoding\"] = encoding\n kw_args[\"mode\"] = mode\n with open_file(filename, **kw_args) as (file_h, ext):\n information = file_h.readlines()\n # parse the information\n genes = list()\n phantom_num = 0\n warnings = parser_warning\n for line in information:\n line = line.strip()\n if line == \"\" or line.startswith(comment):\n continue\n partners = line.split(sep)\n if len(partners) > 4 and idn(partners) and partners[3] and partners[4]:\n name = idn(partners)\n if name == \"Phantom Gene\":\n phantom_num += 1\n name = \"%s %d\" % (name, phantom_num)\n # record the identifier, start-, end-position, other information\n genes.append([name, int(partners[3]),\n int(partners[4])] + partners[5:])\n else:\n warnings(line)\n warnings = LOGGER.warn\n LOGGER.warn(\"%d phantom genes included\", phantom_num)\n name = itemgetter(0)\n start = itemgetter(1)\n end = itemgetter(2)\n # assume the maximal end position of the genes is the total length\n genome_length = end(max(genes, key=end))\n length = len(genes)\n self.i2name = dict(itertools.izip(xrange(length),\n (name(gene) for gene in genes)))\n self.distances = numpy.zeros((length, length), dtype=int)\n for i in xrange(length - 1):\n gene_u = genes[i]\n start_u = start(gene_u)\n end_u = end(gene_u)\n for j in xrange(i + 1, length):\n gene_v = genes[j]\n start_v = start(gene_v)\n end_v = end(gene_v)\n # assuming a circular genome here,\n # since RegulonDB is only for E. coli\n # compute difference between start and end points (overlap)\n diff_1 = abs(start_u - end_v)\n diff_1 = min(diff_1, genome_length - diff_1)\n diff_2 = abs(start_v - end_u)\n diff_2 = min(diff_2, genome_length - diff_2)\n diff_3 = abs(start_u - start_v)\n diff_3 = min(diff_3, genome_length - diff_3)\n diff_4 = abs(end_v - end_u)\n diff_4 = min(diff_4, genome_length - diff_4)\n # we only use the UR triangle of the distances matrix\n self.distances[i, j] = min(diff_1, diff_2, diff_3, diff_4)",
"def download_gpcrdb_residues(name, directory=None, show=False, scrape=False): \n # Fetch data from GPCRdb\n if scrape: \n # scrape info from the website HTML (outdated, for test purposes only)\n residue_info = scrape_residue_info(name)\n else: \n # retrieve info via the GPCRdb API\n residue_info = get_residue_info(name)\n # Write information to CSV file\n if directory is not None:\n Path(directory).mkdir(parents=True, exist_ok=True)\n out_filename = Path(directory).joinpath('gpcrdb-residues_'+name+'.csv')\n np.savetxt(out_filename, residue_info, delimiter=',', fmt='%s', \n header='Part, SeqID, Code, GPCRdbID')\n # Print information to screen\n if show:\n for res in residue_info:\n print('%6s %4s %1s %s'%(res[0],res[1],res[2],res[3]))\n return residue_info",
"def get_db(path: str, filename=ROI_DS, file_format=ROI_DRIVER) -> gpd.GeoDataFrame:\n _f = os.path.join(path, ROI_DS)\n if os.path.isfile(_f):\n ds = gpd.read_file(_f, driver=ROI_DRIVER)\n ds.file = _f\n else:\n ds = gpd.GeoDataFrame({'name': [],'aoi':[], 'geometry': []})\n ds.crs = ROI_PROJ\n ds.file = _f\n # ds['geometry'] = None\n # ds['name'] = None\n # ds.to_file(_f, driver=ROI_FORMAT)\n return ds",
"def read_db_coords(json_fname='default'):\n\n if json_fname == 'default':\n read_dir = op.join(op.abspath(__file__).replace('config.py', ''))\n # read_dir = op.join(op.abspath(__package__), 'config')\n json_fname = op.join(read_dir, 'db_coords.json')\n\n if op.exists(json_fname):\n print('Loading database coordinates...')\n with open(json_fname, 'r') as open_file:\n coords = json.load(open_file)\n db_fs = coords['db_fs']\n db_bv = coords['db_bv']\n db_mne = coords['db_mne']\n return db_fs, db_bv, db_mne\n else:\n raise FileExistsError('Database coordinates file do not exist, please set them using '\n '\\'setup_db_coords()\\' function, specifying the folder that '\n 'contains BrainVISA and FreeSurfer databases, and the project name')",
"def read_database(self):\n root_path = os.path.join(self.clrmamepro, \"datfiles\", \"NoIntro\")\n files = os.listdir(root_path)\n from models.system import System\n platform = System(self.system)\n db = \"\"\n for fname in files:\n no_intro_name = fname.split(\" (\")\n if no_intro_name[0] == platform.nointro_db:\n db = os.path.join(root_path, fname)\n msg = \"Using the database found at {}\".format(db)\n logger.info(msg)\n\n with open(db, mode=\"r\") as fd:\n doc = xmltodict.parse(fd.read())\n\n output = {}\n roms = []\n data = doc[\"datafile\"][\"game\"]\n for rom_info in data:\n output[\"name\"] = rom_info[\"@name\"]\n output[\"crc\"] = rom_info[\"rom\"][\"@crc\"].zfill(8).upper()\n output[\"md5\"] = rom_info[\"rom\"][\"@md5\"]\n output[\"sha1\"] = rom_info[\"rom\"][\"@sha1\"]\n roms.append(dict(output))\n\n languages = ['Cs', 'Da', 'De', 'El', 'En', 'Es', 'Fi', 'Fr', 'Hu', 'It', 'Ja', 'Ko', 'Nl', 'No', 'Pl', 'Pt',\n 'Ru', 'Sv', 'Tr']\n regions = ['Asia', 'Australia', 'Brazil', 'Canada', 'China', 'Denmark', 'Europe', 'Finland', 'France',\n 'Germany', 'Greece', 'Hong Kong', 'Italy', 'Japan', 'Korea', 'Mexico', 'Netherlands', 'Norway',\n 'Russia', 'Spain', 'Sweden', 'Taiwan', 'USA', 'Unknown', 'World']\n bad_dump = \"[b]\"\n bios = \"[BIOS]\"\n\n for rom in roms:\n temp1 = rom[\"name\"].replace(bad_dump, \"(b)\")\n temp2 = temp1.replace(\")\", \"\")\n rom_list = temp2.split(\" (\")\n rom[\"short_desc\"] = rom_list[0]\n if bios in rom_list[0]:\n rom[\"bios\"] = True\n if len(rom_list[1:]) > 1:\n for i in rom_list[1:]:\n if i == \"b\":\n rom[\"bad_dump\"] = True\n elif i in regions:\n rom[\"region\"] = i\n elif i == \"Demo\":\n rom[\"demo\"] = True\n elif \"Kiosk\" in i:\n rom[\"Kiosk\"] = True\n elif i == \"Unl\":\n rom[\"unl\"] = True\n elif \"Rev\" in i:\n rom[\"rev\"] = i\n elif \"NDSi Enhanced\" in i:\n rom[\"NDSi Enhanced\"] = True\n elif \"SGB Enhanced\" in i:\n rom[\"SGB Enhanced\"] = True\n elif \"GB Compatible\" in i:\n rom[\"GB Compatible\"] = True\n elif \"Wii Virtual Console\" in i:\n rom[\"Wii Virtual Console\"] = True\n elif \"iQue\" in i:\n rom[\"iQue\"] = True\n elif \"Proto\" in i:\n rom[\"prototype\"] = True\n elif \"Beta\" in i:\n rom[\"beta\"] = True\n elif i in languages:\n rom[\"language\"] = [i]\n elif \",\" in i:\n stuff = [x.strip() for x in i.split(',')] # Split the string and remove whitespace\n for j in stuff:\n if j in languages:\n rom[\"language\"] = stuff\n elif j in regions:\n rom[\"region\"] = stuff\n elif len(rom_list[1:]) == 1:\n if \",\" in rom_list[1]:\n stuff = rom_list[1].split(\", \")\n if stuff[0] in regions:\n rom[\"region\"] = stuff\n elif rom_list[1] in regions:\n rom[\"region\"] = rom_list[1]\n return roms"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the threshold needed based on truncation_quantile | def test_truncate(self, truncation_quantile, space, monkeypatch):
# Test that trial within threshold is not replaced
lineages = build_lineages_for_exploit(space, monkeypatch)
trials = self.get_trials(lineages, TrialStub(objective=50))
trials = sorted(trials, key=lambda trial: trial.objective.value)
threshold_index = int(truncation_quantile * len(trials))
good_trial = trials[threshold_index - 1]
selected_trial = trials[-1]
# Add non completed trials and shuffle the list to test it is filtered and sorted properly
lots_of_trials = trials + space.sample(20, seed=2)
numpy.random.shuffle(lots_of_trials)
exploit = self.constructor(
truncation_quantile=truncation_quantile, candidate_pool_ratio=0.2
)
if truncation_quantile > 0.0:
def mocked_choice(choices, *args, **kwargs):
raise RuntimeError("Should not be called")
rng = RNGStub()
rng.choice = mocked_choice
trial = exploit._truncate(
rng,
good_trial,
lots_of_trials,
)
assert trial is good_trial
if truncation_quantile < 1.0:
bad_trial = trials[threshold_index]
def mocked_choice(choices, *args, **kwargs):
return -1
rng = RNGStub()
rng.choice = mocked_choice
trial = exploit._truncate(
rng,
bad_trial,
lots_of_trials,
)
assert trial is selected_trial | [
"def threshold_percentile(validation_loss, percentile):\n thres = np.percentile(validation_loss, percentile)\n return thres",
"def test_find_percentile():\n array = np.arange(10) + 1\n perc = backgrounds.find_percentile(array, 0.6)\n assert perc == 6\n\n perc = backgrounds.find_percentile(array, 0.2)\n assert perc == 2",
"def set_threshold(validation_loss, criterion_dict):\n criterion = list(criterion_dict.keys())[0]\n\n if criterion == 'iqr':\n thres = threshold_median_iqr(validation_loss, criterion_dict[criterion])\n elif criterion == 'percentile':\n thres = threshold_percentile(validation_loss, criterion_dict[criterion])\n return thres",
"def halfChange(threshold):\n return lambda iterationNumber, corrections, values, datasetSize: numpy.sum([(numpy.absolute(x) < threshold).all() for x in corrections if x is not None], dtype=numpy.dtype(float)) / numpy.sum([x is not None for x in corrections], dtype=numpy.dtype(float)) < 0.5",
"def test_quantile_boundaries_detection(self):\n n_cuts = 3\n binarizer = FeaturesBinarizer(method='quantile', n_cuts=n_cuts,\n detect_column_type=\"column_names\",\n remove_first=False)\n # only for the two continuous features\n boundaries_0 = binarizer._get_boundaries(self.columns[0],\n self.features[:, 0], fit=True)\n np.testing.assert_array_almost_equal(\n boundaries_0,\n np.array([-np.inf, 0.009021, 0.271109, 0.473155, np.inf]))\n\n boundaries_1 = binarizer._get_boundaries(self.columns[1],\n self.features[:, 1], fit=True)\n np.testing.assert_array_almost_equal(\n boundaries_1,\n np.array([-np.inf, -0.718759, -0.191478, 0.445833, np.inf]))",
"def test_quantile(self, dut):\n for p in [0.01, 0.1, 0.2, 0.5, 0.8, 0.9, 0.99]:\n t = dut.quantile(p)\n approximated_p = dut.cdf(t)\n epsilon = (\n 0.0001 if not dut.contains_point_masses() else dut.pdf(t))\n self.assertAlmostEqual(p, approximated_p, delta=epsilon,\n msg=(\"Failed quantile check at p=%f t=%f\" %\n (p, t)))",
"def above_quantile_threshold(X, source_col=None, quantile_threshold=None, new_colname=None):\n # Create new column name if none specified\n if not new_colname:\n new_colname = source_col + '_above_' + str(quantile_threshold) + '_quantile'\n if not source_col:\n raise 'No source column to compute quantile threshold from specified'\n new_colname = source_col + '_above_' + str(quantile_threshold) + '_quantile'\n if not quantile_threshold:\n raise 'No source column to quantile threshold specified. Should be float in range 0-1, eg .75' \n \n # New column is array with 1 where source col is above specified quantile\n new_col = np.where(X[source_col] > X[source_col].quantile(quantile_threshold), 1, 0)\n return X.assign(**{new_colname: new_col})",
"def get_outlier_threshold(y_key, temp_df, num_outliers):\n series_size = temp_df[y_key].shape[0]\n if series_size > num_outliers:\n thresh = sorted(temp_df[y_key].to_list())[-num_outliers]\n else:\n thresh = 0\n return thresh",
"def threshold(src, thresh, maxval, type, dst=...) -> Tuple[retval, dst]:\n ...",
"def remove_movies_with_less_votes(dataframe, percentile):\n print(\"Filtering the movies with less vote count than the percentile = {} ...\".format(percentile))\n\n all_votes_count = dataframe[dataframe[\"vote_count\"].notnull()]['vote_count'].astype('int')\n votes_count_limit = all_votes_count.quantile(percentile)\n\n print(\" The {0} percentile of all vote counts: {1}\".format(percentile, votes_count_limit))\n print(\" The movies count before filtering: {0}\".format(len(dataframe)))\n\n dataframe = dataframe[dataframe[\"vote_count\"] > votes_count_limit]\n print(\" The movies after filtering: {0}\".format(len(dataframe)))\n return dataframe",
"def t_val_from_t_percentile(t_percentile, df, one_tailed = 0):\n return round(t.isf(t_percentile, df), 3) if one_tailed else round(t.isf(t_percentile/2., df), 3)",
"def threshold(self, upper=None, lower=None, binarize=False):\n\n b = self.copy()\n if isinstance(upper, str) and upper[-1] == \"%\":\n upper = np.percentile(b.data, float(upper[:-1]))\n if isinstance(lower, str) and lower[-1] == \"%\":\n lower = np.percentile(b.data, float(lower[:-1]))\n\n if upper and lower:\n b.data[(b.data < upper) & (b.data > lower)] = 0\n elif upper:\n b.data[b.data < upper] = 0\n elif lower:\n b.data[b.data > lower] = 0\n if binarize:\n b.data[b.data != 0] = 1\n return b",
"def extract_extreme_events(df, quantile=0.9, pad=True):\n extracted = df.apply(lambda row: row > row.quantile(quantile), axis=1)\n\n # pad the extracted extreme event sequence if so requested\n if pad:\n extracted.insert(0, 0, True)\n extracted.insert(len(extracted.columns), 999999999999, True)\n\n return extracted",
"def query_above(scale, loc, is_above, threshold):\n pr_above = 1 - Laplace(scale, loc).cdf(threshold)\n if is_above:\n return pr_above\n else:\n return 1 - pr_above",
"def test_detect_outliers():\n # Make random numbers, with two outliers\n b = np.random.rand(1,1000)\n st = np.std(b)\n b[0][0] = b[0][0] + 5 * st\n b[0][1] = b[0][1] - 5 * st\n thresh = 2.5\n idx = ut.detect_outliers(b,thresh)\n npt.assert_equal(sum(x),2)",
"def __standardise_cutoff(cutoff, type_hist='quartile'):\n cutoff = np.asarray(cutoff)\n if cutoff is None:\n return DEFAULT_CUTOFF\n if len(cutoff) > 2:\n cutoff = np.unique([np.min(cutoff), np.max(cutoff)])\n if len(cutoff) < 2:\n return DEFAULT_CUTOFF\n if cutoff[0] > cutoff[1]:\n cutoff[0], cutoff[1] = cutoff[1], cutoff[0]\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n if type_hist == 'quartile':\n cutoff[0] = np.min([cutoff[0], 0.24])\n cutoff[1] = np.max([cutoff[1], 0.76])\n else:\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff",
"def test_twonumberlow(self):\n x = pf.PercentileFinder([5,6])\n t = x.getPercentile(1)\n self.assertEqual(t, 5)",
"def test_strict_thresholding():\n\n # Generate test dataset\n test_dset_size = (100, 100)\n test_hdim_1_pt = 50.0\n test_hdim_2_pt = 50.0\n test_hdim_1_sz = 10\n test_hdim_2_sz = 10\n test_amp = 10\n test_data = np.zeros(test_dset_size)\n test_data = tbtest.make_feature_blob(\n test_data,\n test_hdim_1_pt,\n test_hdim_2_pt,\n h1_size=test_hdim_1_sz,\n h2_size=test_hdim_2_sz,\n amplitude=test_amp,\n )\n test_data_iris = tbtest.make_dataset_from_arr(test_data, data_type=\"iris\")\n\n # All of these thresholds will be met\n thresholds = [1, 5, 7.5]\n\n # The second n_min threshold can never be met\n n_min_thresholds = [0, test_data.size + 1, 0]\n\n # This will detect 2 features (first and last threshold value)\n features = feat_detect.feature_detection_multithreshold_timestep(\n test_data_iris,\n 0,\n dxy=1,\n threshold=thresholds,\n n_min_threshold=n_min_thresholds,\n strict_thresholding=False,\n )\n assert len(features) == 1\n assert features[\"threshold_value\"].item() == thresholds[-1]\n\n # Since the second n_min_thresholds value is not met this will only detect 1 feature\n features = feat_detect.feature_detection_multithreshold_timestep(\n test_data_iris,\n 0,\n dxy=1,\n threshold=thresholds,\n n_min_threshold=n_min_thresholds,\n strict_thresholding=True,\n )\n assert len(features) == 1\n assert features[\"threshold_value\"].item() == thresholds[0]",
"def premier_quartile(data_frame,colonne):\n return data_frame[colonne].quantile(q=0.25)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test encrypt for simple encryption and decryption | def test_encrypt_decrypt(self):
s = scrypt.encrypt(self.input, self.password, 0.1)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_constructor_encrypt_decrypt(self):\n plain_text = (\n \"Do not ask what you can do for yourselves.\\n\"\n \"Ask what you can do for your country.\")\n cypher_text = CoreClientFileTests._cut._encrypt(\n bytes(plain_text, 'utf-8'))\n decrypted_text = str(\n CoreClientFileTests._cut._decrypt(cypher_text), 'utf-8')\n self.assertEqual(plain_text, decrypted_text)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def test_encrypt_input_and_password_as_keywords(self):\n s = scrypt.encrypt(password=self.password, input=self.input)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def test_statictext(self):\n cypher = CypherAES('password')\n\n cyphertext = cypher.encrypt_text(text)\n retext = cypher.decrypt_text(cyphertext).decode()\n self.assertEqual(retext.rstrip(), text, 'Input text doesn''t correspond to output text')",
"def test_comparison(key: bytes, plaintext: bytes):\n cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())\n enc_cryptography = cipher.encryptor().update(plaintext)\n dec_cryptography = cipher.decryptor().update(enc_cryptography)\n assert dec_cryptography == plaintext\n enc = aes.aes_encrypt(plaintext, key)\n assert enc_cryptography == enc\n dec = aes.aes_decrypt(enc, key)\n assert dec_cryptography == dec",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_randomtext(self):\n cypher = CypherAES('password')\n ratext = GenPasswd2(length=100000)\n cyphertext = cypher.encrypt_text(ratext)\n retext = cypher.decrypt_text(cyphertext).decode()\n self.assertEqual(retext.rstrip(), ratext, 'Input text doesn''t correspond to output text')",
"def test_xor_decript(self):\n expected_text = 'I love cats! I want to pet all the cats in the world. I wish every cat could be my friend. MEOW!'\n encryption_key = 'cat'\n\n text = xor_decrypt(\n get_cipher('assets/test_cipher.txt'), encryption_key)\n\n self.assertEqual(text, expected_text)",
"def test_aes_encryption():\n key = bytearray(binascii.unhexlify(\"F4C020A0A1F604FD343FAC6A7E6AE0F9\"))\n plain_text = bytearray(binascii.unhexlify(\"F295B9318B994434D93D98A4E449AFD8\"))\n nb = 4\n nk = 4\n nr = 10\n states = aes.create_states(plain_text, nb)\n key_exp = rijndael.expand_keys(key, nb, nk, nr)\n states_enc = aes.encrypt(states, key_exp, nb, nk, nr)\n result = aes.create_cipher_text(states_enc, nb)\n result_ref = bytearray(binascii.unhexlify(\"52E418CBB1BE4949308B381691B109FE\"))\n assert(result == result_ref)",
"def encrypt(self, password, assoc=None):",
"def encrypt(self, unencrypted, secret, tenant):",
"def test_encrypt_with_spaces():\n output = rot13.encrypt(\"space space\")\n assert output == \"fcnpr fcnpr\"",
"def test_file_content_encryption(self):\n\n given = \"Hello, World!\"\n expected = b\"Hello, World!\"\n\n encryptor = FileEncryption()\n encryptor.set_key(self.encryption_key)\n encryptor.set_iv(self.iv_key)\n\n actual = encryptor.encrypt_file_content(given)\n\n self.assertNotEqual(given, actual)\n\n actual = encryptor.decrypt_file_content(actual)\n\n self.assertEqual(expected, actual)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt accepts input and password as keyword arguments | def test_encrypt_input_and_password_as_keywords(self):
s = scrypt.encrypt(password=self.password, input=self.input)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_encrypt_missing_input_keyword_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(password=self.password))",
"def test_encrypt_raises_error_on_invalid_keyword(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input,\n self.password, nonsense=\"Raise error\"))",
"def test_encrypt_missing_password_positional_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input))",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def verifyPlaintextPassword(password):",
"def encrypt(self, password, assoc=None):",
"def test_randomtext(self):\n cypher = CypherAES('password')\n ratext = GenPasswd2(length=100000)\n cyphertext = cypher.encrypt_text(ratext)\n retext = cypher.decrypt_text(cyphertext).decode()\n self.assertEqual(retext.rstrip(), ratext, 'Input text doesn''t correspond to output text')",
"def ap_input_is_password(input_text):\n return (ap_input_is_secured_data(input_text)\n or ap_input_matches_condition(input_text))",
"def test_statictext(self):\n cypher = CypherAES('password')\n\n cyphertext = cypher.encrypt_text(text)\n retext = cypher.decrypt_text(cyphertext).decode()\n self.assertEqual(retext.rstrip(), text, 'Input text doesn''t correspond to output text')",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test_encrypt_long_input(self):\n s = scrypt.encrypt(self.longinput, self.password, 0.1)\n self.assertEqual(len(s), 128 + len(self.longinput))",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def test_password_verifier_works(password):\n (input, result) = password\n print '\\n'\n print 'Inputs->' , input\n print 'Request->', result\n assert check_password(input) == result",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def encrypt_vigenere(plaintext: str, keyword: str) ->str:\n\n ciphertext= \"\"\n\n def encrypt(a:str,b:str)->str:\n\n keyS = 0\n ret = \"\"\n\n if (b.islower()):\n keyS = ord(b)-97\n elif (b.isupper()):\n keyS = ord(b)-65\n\n if (a.islower()):\n ret = chr(97+(ord(a)-97+keyS)%26)\n elif(a.isupper()):\n ret = chr(65+(ord(a)-65+keyS)%26)\n return ret\n\n for a in range(len(plaintext)):\n ciphertext += encrypt(plaintext[a],keyword[a % len(keyword)])\n \n return ciphertext",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt raises TypeError if the input keyword argument is missing | def test_encrypt_missing_input_keyword_argument(self):
self.assertRaises(TypeError, lambda: scrypt.encrypt(password=self.password)) | [
"def test_encrypt_missing_password_positional_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input))",
"def test_encrypt_raises_error_on_invalid_keyword(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input,\n self.password, nonsense=\"Raise error\"))",
"def test_encrypt_input_and_password_as_keywords(self):\n s = scrypt.encrypt(password=self.password, input=self.input)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def check_encryption(value):\n value = value.lower()\n if value not in ['ssl', 'tls', 'starttls', 'none']:\n raise ArgumentTypeError(f'{value} is an unknown encryption. Use can use ssl, tls, starttls or none instead.')\n return value",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def encrypt(self, password, assoc=None):",
"def test_no_infinite_encryption_cycle_on_empty_source():\n aws_encryption_sdk.EncryptionSDKClient().encrypt(source=b\"\", key_provider=fake_kms_key_provider())",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def encrypt():\n cipher = get_cipher()\n\n message = cipher.get_user_message()\n encrypted_message = cipher.encryption(message)\n print(f'Your encrypted message is: {encrypted_message}')\n\n input('Enter anything to continue')",
"def encrypt_vigenere(plaintext: str, keyword: str) ->str:\n\n ciphertext= \"\"\n\n def encrypt(a:str,b:str)->str:\n\n keyS = 0\n ret = \"\"\n\n if (b.islower()):\n keyS = ord(b)-97\n elif (b.isupper()):\n keyS = ord(b)-65\n\n if (a.islower()):\n ret = chr(97+(ord(a)-97+keyS)%26)\n elif(a.isupper()):\n ret = chr(65+(ord(a)-65+keyS)%26)\n return ret\n\n for a in range(len(plaintext)):\n ciphertext += encrypt(plaintext[a],keyword[a % len(keyword)])\n \n return ciphertext",
"def test_constructor_encrypt_decrypt(self):\n plain_text = (\n \"Do not ask what you can do for yourselves.\\n\"\n \"Ask what you can do for your country.\")\n cypher_text = CoreClientFileTests._cut._encrypt(\n bytes(plain_text, 'utf-8'))\n decrypted_text = str(\n CoreClientFileTests._cut._decrypt(cypher_text), 'utf-8')\n self.assertEqual(plain_text, decrypted_text)",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def test_decrypt_maxtime_keyword_argument(self):\n m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt raises TypeError if the second positional argument (password) is missing | def test_encrypt_missing_password_positional_argument(self):
self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input)) | [
"def test_encrypt_missing_input_keyword_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(password=self.password))",
"def encrypt(self, password, assoc=None):",
"def test_encrypt_raises_error_on_invalid_keyword(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input,\n self.password, nonsense=\"Raise error\"))",
"def test_encrypt_input_and_password_as_keywords(self):\n s = scrypt.encrypt(password=self.password, input=self.input)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def test_constructor_encrypt_decrypt(self):\n plain_text = (\n \"Do not ask what you can do for yourselves.\\n\"\n \"Ask what you can do for your country.\")\n cypher_text = CoreClientFileTests._cut._encrypt(\n bytes(plain_text, 'utf-8'))\n decrypted_text = str(\n CoreClientFileTests._cut._decrypt(cypher_text), 'utf-8')\n self.assertEqual(plain_text, decrypted_text)",
"def encrypt(self, unencrypted, secret, tenant):",
"def test_check_pw_failure(dbtransaction, auth_env):\n from .. security import check_password\n password = 'not secret'\n assert check_password(password) is False",
"def verifyPlaintextPassword(password):",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def test_check_pw_success(dbtransaction, auth_env):\n from .. security import check_password\n password = 'muniri'\n assert check_password(password)",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def check_encryption(value):\n value = value.lower()\n if value not in ['ssl', 'tls', 'starttls', 'none']:\n raise ArgumentTypeError(f'{value} is an unknown encryption. Use can use ssl, tls, starttls or none instead.')\n return value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt accepts maxtime as a keyword argument | def test_encrypt_maxtime_key(self):
s = scrypt.encrypt(self.input, self.password, maxtime=0.01)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_decrypt_maxtime_keyword_argument(self):\n m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def set_max_time(self, time):\n raise NotImplementedError",
"def test_verify_jwt_with_jwt_lasting_gt_max_time(self, m_j_decode):\n expected_msg = 'exceeds the maximum'\n claims = self._jwt_auth_signer._generate_claims(self._example_aud)\n claims['iat'] = claims['exp'] - datetime.timedelta(minutes=61)\n for key in ['iat', 'exp']:\n claims[key] = claims[key].strftime('%s')\n m_j_decode.return_value = claims\n a_jwt = self._jwt_auth_signer.generate_jwt(self._example_aud)\n verifier = self._setup_jwt_auth_verifier(self._public_key_pem)\n with self.assertRaisesRegex(ValueError, expected_msg):\n verifier.verify_jwt(a_jwt, self._example_aud)",
"def test_encrypt_long_input(self):\n s = scrypt.encrypt(self.longinput, self.password, 0.1)\n self.assertEqual(len(s), 128 + len(self.longinput))",
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def list_maxtime(self, list_maxtime):\n if (type(list_length) == type(120000)) and (list_length > 0) :\n out = \"t{}\\n\".format(int(list_maxtime))\n self.transfer_ESP32(out)\n else:\n print(\"max time must be an integer larger than zero\")",
"def test_request_timeout_success():\n from FireEyeNX import get_request_timeout\n\n request_timeout = '5'\n\n request_timeout_int = get_request_timeout(request_timeout)\n assert request_timeout_int == int(request_timeout)",
"def setMaximumTime(self, account, acl, equipment, maxtime):\n\n acl.assertIsAdministrator(account)\n\n maxtime = to_int(maxtime)\n\n if not maxtime:\n maxtime = None\n\n if maxtime != self.max_booking_time:\n item = equipment._getFromDB()\n item.constraints.max_booking_time = maxtime\n self.max_booking_time = maxtime\n item.put()",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def test_max_iterations(max_iterations):\n stop = NoImprovement(max_iterations)\n assert_equal(stop.max_iterations, max_iterations)",
"def setLimit(self, time):\r\n\t\tself.limit = int(time)",
"def validate_session_timelimit(self, value):\n return (\n None\n if value is None\n else max(0, min(self.instance.account.tier.session_timelimit_max, value))\n )",
"def _one_test_max_crypto_domains(self, faked_cpc, faked_adapter,\n exp_max_domains):\n\n cpc = self.client.cpcs.find(name=faked_cpc.name)\n adapter = cpc.adapters.find(name=faked_adapter.name)\n\n # Exercise code to be tested\n max_domains = adapter.maximum_crypto_domains\n\n assert max_domains == exp_max_domains",
"def block(max_number_of_txns, exp_time):\n blk = {'transactions':[transaction(randrange(2, max_txt_length)) for i in range(randrange(1, max_number_of_txns))], 'time':exp_time}\n return blk",
"def maxitmess():\n\n return 'Maximum number of iterations has been exceeded'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt accepts a maxmem of less than 1 megabyte of storage to use for the V array | def test_encrypt_maxmem_undersized(self):
s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_max_memory_settings(self):\n\n # 4-qubit quantum circuit\n shots = 100\n circuit = QuantumVolume(4, 1, seed=0)\n circuit.measure_all()\n system_memory = int(psutil.virtual_memory().total / 1024 / 1024)\n\n # Test defaults\n opts = self.backend_options_parallel()\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertGreaterEqual(max_mem_result, int(system_memory / 2),\n msg=\"Default 'max_memory_mb' is too small.\")\n self.assertLessEqual(max_mem_result, system_memory,\n msg=\"Default 'max_memory_mb' is too large.\")\n\n # Test custom value\n max_mem_target = 128\n opts = self.backend_options_parallel()\n opts['max_memory_mb'] = max_mem_target\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertEqual(max_mem_result, max_mem_target,\n msg=\"Custom 'max_memory_mb' is not being set correctly.\")",
"def test_enough_memory(self):\n import psutil\n mem = psutil.virtual_memory()\n mem_total_gib = mem.total * (2**-30)\n self.assertGreaterEqual(mem_total_gib, self.minimum_mem_gib,\n msg=\"The total available memory of this system\"\n \" is {} GiB. For most practical \"\n \"application more than {} GiB will be \"\n \"needed to run mpunet optimally. \"\n \"For testing purposes, you may disregard \"\n \"this error.\".format(mem_total_gib,\n self.minimum_mem_gib))",
"def test_key_large(self):\n key = \"\".join([\"Key:{} \".format(i) for i in range(4096)]).encode(\"ascii\")\n expected = 1765856722 % 100000 # Int is from online hash tool\n p = HashedPartitioner(self.T1, self.parts)\n part = p.partition(key, self.parts)\n self.assertEqual(expected, part)",
"def max_memory(self) -> int:\n raise NotImplementedError",
"def test_marginalize_memory_in_parallel(self):\n memory = [hex(ii) for ii in range(15)]\n res = marginal_memory(memory, indices=[0], parallel_threshold=1)\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(15)])",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_get_disk_capacity(self):\n self.assertEqual(\"536870912\",\n self.helper.get_disk_capacity(self.blank_vmdk))\n\n self.assertEqual(\"1073741824\",\n self.helper.get_disk_capacity(self.input_vmdk))",
"def testMemory():\n DB = PDatabase(server='localhost', username='farrell',\n password='123', project='novo')\n #DB = PDatabase(local='large.fs')\n\n print DB\n db = DB.db\n print db.cacheSize()\n for k in DB.getRecs()[:50]:\n #print k\n r=DB[k]\n r.name\n if db.cacheSize()>500:\n db.cacheMinimize()\n\n print db.cacheSize()\n return",
"def test_maxMsgSize():\n nt.assert_equal(PsiInterface.maxMsgSize(), CIS_MSG_MAX)",
"def check_vmem_for_reference(ref):\n refpath = os.path.join(ref, \"fasta\", \"genome.fa\")\n if not os.path.exists(refpath):\n hostname = socket.gethostname()\n martian.exit(\"Your reference does not contain the expected files, or they are not readable. Please check your reference folder on {}.\".format(hostname))\n refsize = os.path.getsize(refpath) / 1e9\n vmem_gb = int(np.ceil(refsize)) + 4\n return vmem_gb",
"def test_marginalize_memory(self):\n memory = [hex(ii) for ii in range(8)]\n res = marginal_memory(memory, indices=[0])\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(8)])",
"def set_max_mem(max_mem):\n\n JobServer._set_max_mem(max_mem)",
"def calc_chunksize(expected_mb):\n\n expected_mb = limit_es(expected_mb)\n zone = int(math.log10(expected_mb))\n expected_mb = 10**zone\n chunksize = csformula(expected_mb)\n # XXX: Multiply by 8 seems optimal for sequential access\n return chunksize * 8",
"def test_8_large_readv(self, sftp):\n kblob = bytes().join([struct.pack('>H', n) for n in range(512)])\n try:\n with sftp.open('%s/hongry.txt' % sftp.FOLDER, 'wb') as f:\n f.set_pipelined(True)\n for n in range(1024):\n f.write(kblob)\n if n % 128 == 0:\n sys.stderr.write('.')\n sys.stderr.write(' ')\n\n assert sftp.stat('%s/hongry.txt' % sftp.FOLDER).st_size == 1024 * 1024\n \n with sftp.open('%s/hongry.txt' % sftp.FOLDER, 'rb') as f:\n data = list(f.readv([(23 * 1024, 128 * 1024)]))\n assert len(data) == 1\n data = data[0]\n assert len(data) == 128 * 1024\n \n sys.stderr.write(' ')\n finally:\n sftp.remove('%s/hongry.txt' % sftp.FOLDER)",
"def libgen_test_allocate_maximum_buffers_success():\n out = BuiltIn().run_keyword(\"Testing Allocate Buffers For 256K Number Of Buffer Handles\", \"0\")\n \n if out.count('ILDMXTestCli test allocate maximum number (262143) of buffers successfully.') != 1:\n return 'failure: allocate maximum number of buffers not succeeded'\n\n return 'success'",
"def limit_mem():\n K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt accepts a maxmem of more than 1 megabyte of storage to use for the V array | def test_encrypt_maxmem_in_normal_range(self):
s = scrypt.encrypt(self.input,
self.password,
0.01,
self.ten_megabytes)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_max_memory_settings(self):\n\n # 4-qubit quantum circuit\n shots = 100\n circuit = QuantumVolume(4, 1, seed=0)\n circuit.measure_all()\n system_memory = int(psutil.virtual_memory().total / 1024 / 1024)\n\n # Test defaults\n opts = self.backend_options_parallel()\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertGreaterEqual(max_mem_result, int(system_memory / 2),\n msg=\"Default 'max_memory_mb' is too small.\")\n self.assertLessEqual(max_mem_result, system_memory,\n msg=\"Default 'max_memory_mb' is too large.\")\n\n # Test custom value\n max_mem_target = 128\n opts = self.backend_options_parallel()\n opts['max_memory_mb'] = max_mem_target\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertEqual(max_mem_result, max_mem_target,\n msg=\"Custom 'max_memory_mb' is not being set correctly.\")",
"def test_enough_memory(self):\n import psutil\n mem = psutil.virtual_memory()\n mem_total_gib = mem.total * (2**-30)\n self.assertGreaterEqual(mem_total_gib, self.minimum_mem_gib,\n msg=\"The total available memory of this system\"\n \" is {} GiB. For most practical \"\n \"application more than {} GiB will be \"\n \"needed to run mpunet optimally. \"\n \"For testing purposes, you may disregard \"\n \"this error.\".format(mem_total_gib,\n self.minimum_mem_gib))",
"def test_key_large(self):\n key = \"\".join([\"Key:{} \".format(i) for i in range(4096)]).encode(\"ascii\")\n expected = 1765856722 % 100000 # Int is from online hash tool\n p = HashedPartitioner(self.T1, self.parts)\n part = p.partition(key, self.parts)\n self.assertEqual(expected, part)",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def max_memory(self) -> int:\n raise NotImplementedError",
"def test_maxMsgSize():\n nt.assert_equal(PsiInterface.maxMsgSize(), CIS_MSG_MAX)",
"def test_get_disk_capacity(self):\n self.assertEqual(\"536870912\",\n self.helper.get_disk_capacity(self.blank_vmdk))\n\n self.assertEqual(\"1073741824\",\n self.helper.get_disk_capacity(self.input_vmdk))",
"def test_marginalize_memory_in_parallel(self):\n memory = [hex(ii) for ii in range(15)]\n res = marginal_memory(memory, indices=[0], parallel_threshold=1)\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(15)])",
"def test_8_large_readv(self, sftp):\n kblob = bytes().join([struct.pack('>H', n) for n in range(512)])\n try:\n with sftp.open('%s/hongry.txt' % sftp.FOLDER, 'wb') as f:\n f.set_pipelined(True)\n for n in range(1024):\n f.write(kblob)\n if n % 128 == 0:\n sys.stderr.write('.')\n sys.stderr.write(' ')\n\n assert sftp.stat('%s/hongry.txt' % sftp.FOLDER).st_size == 1024 * 1024\n \n with sftp.open('%s/hongry.txt' % sftp.FOLDER, 'rb') as f:\n data = list(f.readv([(23 * 1024, 128 * 1024)]))\n assert len(data) == 1\n data = data[0]\n assert len(data) == 128 * 1024\n \n sys.stderr.write(' ')\n finally:\n sftp.remove('%s/hongry.txt' % sftp.FOLDER)",
"def test_encrypt_long_input(self):\n s = scrypt.encrypt(self.longinput, self.password, 0.1)\n self.assertEqual(len(s), 128 + len(self.longinput))",
"def libgen_test_allocate_maximum_buffers_success():\n out = BuiltIn().run_keyword(\"Testing Allocate Buffers For 256K Number Of Buffer Handles\", \"0\")\n \n if out.count('ILDMXTestCli test allocate maximum number (262143) of buffers successfully.') != 1:\n return 'failure: allocate maximum number of buffers not succeeded'\n\n return 'success'",
"def testMemory():\n DB = PDatabase(server='localhost', username='farrell',\n password='123', project='novo')\n #DB = PDatabase(local='large.fs')\n\n print DB\n db = DB.db\n print db.cacheSize()\n for k in DB.getRecs()[:50]:\n #print k\n r=DB[k]\n r.name\n if db.cacheSize()>500:\n db.cacheMinimize()\n\n print db.cacheSize()\n return",
"def check_vmem_for_reference(ref):\n refpath = os.path.join(ref, \"fasta\", \"genome.fa\")\n if not os.path.exists(refpath):\n hostname = socket.gethostname()\n martian.exit(\"Your reference does not contain the expected files, or they are not readable. Please check your reference folder on {}.\".format(hostname))\n refsize = os.path.getsize(refpath) / 1e9\n vmem_gb = int(np.ceil(refsize)) + 4\n return vmem_gb",
"def set_max_mem(max_mem):\n\n JobServer._set_max_mem(max_mem)",
"def calc_chunksize(expected_mb):\n\n expected_mb = limit_es(expected_mb)\n zone = int(math.log10(expected_mb))\n expected_mb = 10**zone\n chunksize = csformula(expected_mb)\n # XXX: Multiply by 8 seems optimal for sequential access\n return chunksize * 8",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt accepts a maxmemfrac keyword argument of 1/16 of total memory for the V array | def test_encrypt_maxmemfrac_keyword_argument(self):
s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,
maxtime=0.01)
m = scrypt.decrypt(s, self.password)
self.assertEqual(m, self.input) | [
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_max_memory_settings(self):\n\n # 4-qubit quantum circuit\n shots = 100\n circuit = QuantumVolume(4, 1, seed=0)\n circuit.measure_all()\n system_memory = int(psutil.virtual_memory().total / 1024 / 1024)\n\n # Test defaults\n opts = self.backend_options_parallel()\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertGreaterEqual(max_mem_result, int(system_memory / 2),\n msg=\"Default 'max_memory_mb' is too small.\")\n self.assertLessEqual(max_mem_result, system_memory,\n msg=\"Default 'max_memory_mb' is too large.\")\n\n # Test custom value\n max_mem_target = 128\n opts = self.backend_options_parallel()\n opts['max_memory_mb'] = max_mem_target\n result = execute(circuit, self.SIMULATOR, shots=shots,\n **opts).result()\n max_mem_result = result.metadata.get('max_memory_mb')\n self.assertEqual(max_mem_result, max_mem_target,\n msg=\"Custom 'max_memory_mb' is not being set correctly.\")",
"def test_too_large_packing_fraction():\n\n nmm.Material.from_library(\"eurofer\", packing_fraction=1.1)",
"def memory_util(self) -> float:\n return self.current_memory / self.max_memory",
"def calc_chunksize(expected_mb):\n\n expected_mb = limit_es(expected_mb)\n zone = int(math.log10(expected_mb))\n expected_mb = 10**zone\n chunksize = csformula(expected_mb)\n # XXX: Multiply by 8 seems optimal for sequential access\n return chunksize * 8",
"def csformula(expected_mb):\n\n # For a basesize of 8 KB, this will return:\n # 8 KB for datasets <= 1 MB\n # 1 MB for datasets >= 10 TB\n basesize = 8 * 1024 # 8 KB is a good minimum\n return basesize * int(2**math.log10(expected_mb))",
"def test_marginalize_memory_in_parallel(self):\n memory = [hex(ii) for ii in range(15)]\n res = marginal_memory(memory, indices=[0], parallel_threshold=1)\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(15)])",
"def protecc_ram(pctg: float = 0.5):\n soft, hard = resource.getrlimit(resource.RLIMIT_AS)\n resource.setrlimit(resource.RLIMIT_AS, (int(get_free_memory() * 1024 * pctg), hard))",
"def model_elastic_modulus(T):\n return 2.25e6",
"def test_marginalize_memory(self):\n memory = [hex(ii) for ii in range(8)]\n res = marginal_memory(memory, indices=[0])\n self.assertEqual(res, [bin(ii % 2)[2:] for ii in range(8)])",
"def testCreateMemDecBlockedFormat(self):\n if test_util.IsMklEnabled():\n s0 = np.ones((1, 8188, 4092, 1), dtype=np.uint8).astype(np.float32)\n s1 = array_ops.strided_slice(\n s0, [0, 1, 1, 0], [0, -1, -1, 0], [1, 1, 1, 1],\n begin_mask=9,\n end_mask=9)\n s2 = array_ops.slice(s1, [0, 0, 0, 0], [-1, -1, -1, 1])\n s3_1 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])\n s3_2 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])\n filter4_1 = constant_op.constant([[[[1.18, -0.51]]]])\n s4_1 = nn_ops.conv2d(\n s3_1, filter4_1, strides=[1, 1, 1, 1], padding=\"VALID\")\n filter4_2 = constant_op.constant([[[[1.38, -0.11]]]])\n s4_2 = nn_ops.conv2d(\n s3_2, filter4_2, strides=[1, 1, 1, 1], padding=\"VALID\")\n s5_1 = array_ops.slice(s4_1, [0, 6, 6, 0], [-1, 1, 1, -1])\n s5_2 = array_ops.slice(s4_2, [0, 6, 6, 0], [-1, 1, 1, -1])\n x_concat = array_ops.concat([s5_1, s5_2], 3)\n self.evaluate(\n x_concat\n ) # This test is only meant to check the creation is not crashed",
"def getMaxLVSize(pe):\n return pe*64",
"def test_repval_large_vm(self):\n elem = DataElement(0x00080054, 'AE', 'a\\\\' * 1000 + 'a')\n assert len(elem.repval) < 100",
"def enlarge(n):\n return n*5",
"def euler69():\n # At first I tried to bruteforce this. My algorithm was O(n²) and\n # was taking too long, so I decided to look at the data. I\n # perceived that everytime my program reached a maximum n/phi(n),\n # the value for 'n' was a product of consecutive primes. I decided\n # to just calculate the phi for the product of the consecutive\n # primes and found the answer quickly.\n primes = [2,3,5,7,11,13,17,19,23]\n maxim = 0\n n = 1\n for p in primes:\n n *= p\n if n > 1000000:\n break\n phi_n = phi(n)\n ratio = n / phi_n\n if ratio > maxim:\n maxim = ratio\n print(n, phi_n, ratio) \n print(\"maxim =\", maxim)",
"def test_enough_memory(self):\n import psutil\n mem = psutil.virtual_memory()\n mem_total_gib = mem.total * (2**-30)\n self.assertGreaterEqual(mem_total_gib, self.minimum_mem_gib,\n msg=\"The total available memory of this system\"\n \" is {} GiB. For most practical \"\n \"application more than {} GiB will be \"\n \"needed to run mpunet optimally. \"\n \"For testing purposes, you may disregard \"\n \"this error.\".format(mem_total_gib,\n self.minimum_mem_gib))",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test encrypt accepts long input for encryption | def test_encrypt_long_input(self):
s = scrypt.encrypt(self.longinput, self.password, 0.1)
self.assertEqual(len(s), 128 + len(self.longinput)) | [
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_decrypt_maxtime_keyword_argument(self):\n m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def test_encrypt_signed_integers(self):\n numbers = [0, 1, -1, 10, '1', '-10550']\n\n for num in numbers:\n result = self.runner.invoke(cli, ['encrypt', self.public_keyfile.name, \"--\", str(num)])\n assert result.exit_code == 0",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def test_encrypt_input_and_password_as_keywords(self):\n s = scrypt.encrypt(password=self.password, input=self.input)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password **do not** yield the same hash\n self.assertFalse(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password yields the same hash\n self.assertTrue(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def testLoginPassword128Long(self):\n self.assertEquals(UserModel.ERR_BAD_PASSWORD, self.users.login(\"user1\", \"abcdefghijklmnopqrstuvwxyz\n abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy\"))",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def big_encrypt(rsa_key, plaintext):\n length = 64\n chunks = (plaintext[0+i:length+i] for i in range(0, len(plaintext), length))\n cipherchunks = [encrypt(rsa_key, chunk) for chunk in chunks]\n return cipherchunks"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that encrypt raises TypeError if an invalid keyword argument is used | def test_encrypt_raises_error_on_invalid_keyword(self):
self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input,
self.password, nonsense="Raise error")) | [
"def test_encrypt_missing_input_keyword_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(password=self.password))",
"def test_encrypt_missing_password_positional_argument(self):\n self.assertRaises(TypeError, lambda: scrypt.encrypt(self.input))",
"def test_encrypt_input_and_password_as_keywords(self):\n s = scrypt.encrypt(password=self.password, input=self.input)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def test_encryption():\n expected=\"Ymnx nx 956 u~ymts htzwxj\"\n actual=encrypt(\"This is 401 python course\",5)\n assert expected==actual",
"def test_encrypt_with_three_rails(self):\n plaintext = \"NO MORE SEGRETS\"\n assert_that(\"NMEEE OO GT RSRS\", equal_to(encrypt(plaintext, 3)))",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test_encrypt_with_two_rails(self):\n plaintext = \"HELLO WORLD\"\n assert_that(\"HLOWRD EL OL\", equal_to(encrypt(plaintext, 2)))",
"def check_encryption(value):\n value = value.lower()\n if value not in ['ssl', 'tls', 'starttls', 'none']:\n raise ArgumentTypeError(f'{value} is an unknown encryption. Use can use ssl, tls, starttls or none instead.')\n return value",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def encrypt_vigenere(plaintext: str, keyword: str) ->str:\n\n ciphertext= \"\"\n\n def encrypt(a:str,b:str)->str:\n\n keyS = 0\n ret = \"\"\n\n if (b.islower()):\n keyS = ord(b)-97\n elif (b.isupper()):\n keyS = ord(b)-65\n\n if (a.islower()):\n ret = chr(97+(ord(a)-97+keyS)%26)\n elif(a.isupper()):\n ret = chr(65+(ord(a)-65+keyS)%26)\n return ret\n\n for a in range(len(plaintext)):\n ciphertext += encrypt(plaintext[a],keyword[a % len(keyword)])\n \n return ciphertext",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def encrypt_vigenere(plaintext, keyword):\n ciphertext = ''\n key = ''\n while len(key) < len(plaintext):\n key += keyword\n for index in range(len(plaintext)):\n if plaintext[index].isalpha():\n shift = ord(key[index])\n shift -= ord('a') if 'a' <= plaintext[index] <= 'z' else ord('A')\n code = ord(plaintext[index]) + shift\n if 'a' <= plaintext[index] <= 'z' and code > ord('z'):\n code -= 26\n elif 'A' <= plaintext[index] <= 'Z' and code > ord('Z'):\n code -= 26\n ciphertext += chr(code)\n else:\n ciphertext += plaintext[index]\n return ciphertext",
"def test_validate_on_invalid_name(self):\n args = (enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n kwargs = {'name': 0}\n\n self.assertRaises(TypeError, SymmetricKey, *args, **kwargs)",
"def test_constructor_encrypt_decrypt(self):\n plain_text = (\n \"Do not ask what you can do for yourselves.\\n\"\n \"Ask what you can do for your country.\")\n cypher_text = CoreClientFileTests._cut._encrypt(\n bytes(plain_text, 'utf-8'))\n decrypted_text = str(\n CoreClientFileTests._cut._decrypt(cypher_text), 'utf-8')\n self.assertEqual(plain_text, decrypted_text)",
"def test_validate_on_invalid_length(self):\n args = (enums.CryptographicAlgorithm.AES, 'invalid', self.bytes_128a)\n\n self.assertRaises(TypeError, SymmetricKey, *args)",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_validate_on_invalid_value(self):\n args = (enums.CryptographicAlgorithm.AES, 128, 0)\n\n self.assertRaises(TypeError, SymmetricKey, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test decrypt function accepts maxtime keyword argument | def test_decrypt_maxtime_keyword_argument(self):
m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)
self.assertEqual(m, self.input) | [
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt2(benchmark):\n benchmark(td.decrypt2, _enc_data)",
"def test_decryption():\n expected=\"This is python course\"\n actual=decrypt('Ymnx nx u~ymts htzwxj',5)\n assert expected==actual",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))",
"def test_decrypt_with_two_rails(self):\n ciphertext = \"HLOWRD EL OL\"\n assert_that(\"HELLO WORLD\", equal_to(decrypt(ciphertext, 2)))",
"def test_decrypt2_old(benchmark):\n benchmark(td.decrypt2_old, _enc_data)",
"def test_decrypt_with_three_rails(self):\n ciphertext = \"NMEEE OO GT RSRS\"\n assert_that(\"NO MORE SEGRETS\", equal_to(decrypt(ciphertext, 3)))",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def decrypt_price(cipher: Union[str, bytes], e_key: Union[str, bytes], i_key: Union[str, bytes],\n max_timedelta_seconds: int = None) -> int:\n # Decode inputs\n cipher = DoubleClickCrypto._decode_base64(cipher)\n e_key = DoubleClickCrypto._decode_base64(e_key)\n i_key = DoubleClickCrypto._decode_base64(i_key)\n\n price, iv = DoubleClickCrypto._decrypt(cipher, 16, 8, e_key, i_key)\n price = int.from_bytes(price, \"big\")\n\n # Check timestamp if max_timedelta_seconds is provided\n if max_timedelta_seconds is not None:\n sec = int.from_bytes(iv[0:4], \"big\", signed=False)\n usec = int.from_bytes(iv[4:8], \"big\", signed=False)\n timestamp = datetime.fromtimestamp(sec + usec / 1_000_000)\n timedelta = datetime.now() - timestamp\n if abs(timedelta.total_seconds()) > max_timedelta_seconds:\n raise StaleResponseException(\"Time delta is too large\")\n\n return price",
"def solitaire_decrypt(txt, deck, seed_arg):\n\n\ttxt = text_to_num(txt)\n\tlength = len(txt)\n\tkey = keystream_mod.solitaire_keystream(deck, length, seed_arg)\n\tkey = text_to_num(key)\n\ttxt_key_sums = subtr_lists(txt, key)\n\tdec_txt = int_to_text(txt_key_sums)\n\tprint(dec_txt)",
"def test_partial_decryption(self):\n\n tprk = self.tSetUp.generate_private_key(0, self.trustees[0].private_key)\n text_to_encrypt_dir = os.path.join(os.path.dirname(__file__), \n \"TestThresholdPrivateKey.resources\")\n text_to_encrypt = os.path.join(text_to_encrypt_dir, \"text_to_encrypt\")\n text_encrypted = self.tpkey.encrypt_text(text_to_encrypt)\n \n # Decrypt the file created with our public key must be fine\n tprk.generate_partial_decryption(text_encrypted)\n \n # Create another ThresholdEcryptuonSetUp with other 1024 bits\n # cryptosys to create a cypthertext that cant be decrypted\n second_cryptosys_file = os.path.join(os.path.dirname(__file__), \n \"TestThresholdEncryptionSetUp.resources\",\n \"test1024bits_second.pvcryptosys\")\n # Load the cryptosystem from file\n second_cryptosys = EGCryptoSystem.from_file(second_cryptosys_file) \n secondtSetUp = ThresholdEncryptionSetUp(second_cryptosys, \n self.num_trustees, self.threshold)\n # Adding the keys from trustees for 2ndsetUp\n for i in range(self.num_trustees):\n secondtSetUp.add_trustee_public_key(i, self.trustees[i].public_key)\n secondcommitments = []\n # Generate commitmes for trustees for 2ndsetUp\n for i in range(self.num_trustees):\n secondcommitments.append(secondtSetUp.generate_commitment()) \n # Adding the secont trustees commitments \n for i in range(self.num_trustees):\n secondtSetUp.add_trustee_commitment(i, secondcommitments[i])\n # Generate secon cryptosis publickey\n secondtpkey = secondtSetUp.generate_public_key()\n # Encrypt the file with the secon cryptosis publickey\n secondtext_encrypted = secondtpkey.encrypt_text(text_to_encrypt)\n \n \n # Try to decryp something created with other ThresholdEcryptuonSetUp \n # must raise IncompatibleCiphertextError\n \n self.assertRaises(IncompatibleCiphertextError, \n tprk.generate_partial_decryption, secondtext_encrypted)\n\n\n # Create another ThresholdEcryptuonSetUp with other 512 bits\n # cryptosys to create a cypthertext that cant be decrypted\n third_cryptosys_file = os.path.join(os.path.dirname(__file__), \n \"TestThresholdEncryptionSetUp.resources\",\n \"test512bits.pvcryptosys\")\n # Load the cryptosystem from file\n third_cryptosys = EGCryptoSystem.from_file(third_cryptosys_file) \n thirdtSetUp = ThresholdEncryptionSetUp(third_cryptosys, \n self.num_trustees, self.threshold)\n # Adding the keys from trustees for 2ndsetUp\n for i in range(self.num_trustees):\n thirdtSetUp.add_trustee_public_key(i, self.trustees[i].public_key)\n thirdcommitments = []\n # Generate commitmes for trustees for 2ndsetUp\n for i in range(self.num_trustees):\n thirdcommitments.append(thirdtSetUp.generate_commitment()) \n # Adding the secont trustees commitments \n for i in range(self.num_trustees):\n thirdtSetUp.add_trustee_commitment(i, thirdcommitments[i])\n # Generate secon cryptosis publickey\n thirdtpkey = thirdtSetUp.generate_public_key()\n # Encrypt the file with the secon cryptosis publickey\n thirdtext_encrypted = thirdtpkey.encrypt_text(text_to_encrypt)\n \n \n # Try to decryp something created with other ThresholdEcryptuonSetUp \n # must raise IncompatibleCiphertextError\n \n self.assertRaises(IncompatibleCiphertextError, \n tprk.generate_partial_decryption, thirdtext_encrypted)",
"def decrypt(self, ciphertext): # pragma: no cover\n pass",
"def decrypt(self, private_key, cipher_text):",
"def decrypt():\n cipher = get_cipher()\n\n message = cipher.get_user_message()\n decrypted_message = cipher.decryption(message)\n\n print(f'Your decrypted message is: {decrypted_message}')\n\n input('Enter anything to continue')",
"def advapi32_CryptDecrypt(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hKey\", \"hHash\", \"Final\", \"dwFlags\", \"pbData\", \"pdwDataLen\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def otp_decrypt(ct, key):\n res = binary_to_string(\"0b\"+xor_compare(hex_to_binary(ct), hex_to_binary(key)))\n return res"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test decrypt function accepts maxmem keyword argument | def test_decrypt_maxmem_keyword_argument(self):
m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)
self.assertEqual(m, self.input) | [
"def test_decrypt_maxtime_keyword_argument(self):\n m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxmem_in_normal_range(self):\n s = scrypt.encrypt(self.input,\n self.password,\n 0.01,\n self.ten_megabytes)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt2(benchmark):\n benchmark(td.decrypt2, _enc_data)",
"def test_decryption():\n expected=\"This is python course\"\n actual=decrypt('Ymnx nx u~ymts htzwxj',5)\n assert expected==actual",
"def test_encrypt_maxmemfrac_keyword_argument(self):\n s = scrypt.encrypt(self.input, self.password, maxmemfrac=0.0625,\n maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_with_two_rails(self):\n ciphertext = \"HLOWRD EL OL\"\n assert_that(\"HELLO WORLD\", equal_to(decrypt(ciphertext, 2)))",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def test_decrypt2_old(benchmark):\n benchmark(td.decrypt2_old, _enc_data)",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def advapi32_RtlDecryptMemory(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"Memory\", \"MemorySize\", \"OptionFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def test_decrypt_with_three_rails(self):\n ciphertext = \"NMEEE OO GT RSRS\"\n assert_that(\"NO MORE SEGRETS\", equal_to(decrypt(ciphertext, 3)))",
"def decrypt(self, ciphertext): # pragma: no cover\n pass",
"def solitaire_decrypt(txt, deck, seed_arg):\n\n\ttxt = text_to_num(txt)\n\tlength = len(txt)\n\tkey = keystream_mod.solitaire_keystream(deck, length, seed_arg)\n\tkey = text_to_num(key)\n\ttxt_key_sums = subtr_lists(txt, key)\n\tdec_txt = int_to_text(txt_key_sums)\n\tprint(dec_txt)",
"def decrypt(self, n, key):\n if key is None:\n return\n m = n - 1\n if m % 7:\n self.bytes[n - 8:n] = ldecrypt(key, self.bytes[n - 8:n])\n idx = m - m % 7\n for dummy in xrange(m / 7):\n idx -= 7\n self.bytes[idx: idx + 8] = ldecrypt(key, self.bytes[idx: idx + 8])",
"def decrypt(self, private_key, cipher_text):",
"def advapi32_CryptDecrypt(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hKey\", \"hHash\", \"Final\", \"dwFlags\", \"pbData\", \"pdwDataLen\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def decrypt_into(self, src: ReadableBuffer, dest: WriteableBuffer) -> None:\n ...",
"def test_decrypt_raises_error_on_too_little_time(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n self.assertRaises(scrypt.error,\n lambda: scrypt.decrypt(s, self.password, .01))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test decrypt function raises scrypt.error if insufficient time is allowed for ciphertext decryption | def test_decrypt_raises_error_on_too_little_time(self):
s = scrypt.encrypt(self.input, self.password, 0.1)
self.assertRaises(scrypt.error,
lambda: scrypt.decrypt(s, self.password, .01)) | [
"def test_decrypt_maxtime_keyword_argument(self):\n m = scrypt.decrypt(maxtime=1.0, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_encrypt_maxtime_key(self):\n s = scrypt.encrypt(self.input, self.password, maxtime=0.01)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt2(benchmark):\n benchmark(td.decrypt2, _enc_data)",
"def decrypt(self, ciphertext): # pragma: no cover\n pass",
"def test04(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.long_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.long_message)",
"def test_decryption():\n expected=\"This is python course\"\n actual=decrypt('Ymnx nx u~ymts htzwxj',5)\n assert expected==actual",
"def test_encrypt_decrypt(self):\n s = scrypt.encrypt(self.input, self.password, 0.1)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt2_old(benchmark):\n benchmark(td.decrypt2_old, _enc_data)",
"def test_decrypt_with_two_rails(self):\n ciphertext = \"HLOWRD EL OL\"\n assert_that(\"HELLO WORLD\", equal_to(decrypt(ciphertext, 2)))",
"def test_decrypt_maxmem_keyword_argument(self):\n m = scrypt.decrypt(maxmem=self.ten_megabytes, input=self.ciphertext, password=self.password)\n self.assertEqual(m, self.input)",
"def test_decrypt_with_three_rails(self):\n ciphertext = \"NMEEE OO GT RSRS\"\n assert_that(\"NO MORE SEGRETS\", equal_to(decrypt(ciphertext, 3)))",
"def decrypt(self, ciphertext):\n\n raise TypeError(\"decrypt() not allowed for SIV mode.\"\n \" Use decrypt_and_verify() instead.\")",
"def decrypt(self, private_key, cipher_text):",
"def test03(self):\n\t\tengine = SecretEngine(key=self.key)\n\t\tencrypted = engine.encrypt(self.short_message)\n\t\tself.assertEqual(engine.decrypt(encrypted),self.short_message)",
"def test_decrypt_derived_key(self, mock_derive_key):\n mock_derive_key.return_value = sentinel.derived_key\n test = aws_encryption_sdk.internal.crypto.decrypt(\n algorithm=self.mock_algorithm,\n key=sentinel.key,\n encrypted_data=EncryptedData(\n VALUES['iv'],\n VALUES['ciphertext'],\n VALUES['tag']\n ),\n associated_data=sentinel.aad,\n message_id=sentinel.message_id\n )\n mock_derive_key.assert_called_with(\n source_key=sentinel.key,\n algorithm=self.mock_algorithm,\n message_id=sentinel.message_id\n )\n self.mock_cryptography_cipher.assert_called_with(\n sentinel.encryption_algorithm,\n sentinel.encryption_mode,\n backend=sentinel.crypto_backend\n )\n assert self.mock_cryptography_cipher_instance.decryptor.called\n self.mock_decryptor.authenticate_additional_data.assert_called_with(\n sentinel.aad\n )\n self.mock_decryptor.update.assert_called_with(VALUES['ciphertext'])\n assert self.mock_decryptor.finalize.called\n assert test, VALUES['plaintext']",
"def decrypt(ciphertext, key):\n return encrypt(ciphertext, key, strict=False, verbose=False)",
"def test_encrypt_maxmem_undersized(self):\n s = scrypt.encrypt(self.input, self.password, 0.01, self.one_byte)\n m = scrypt.decrypt(s, self.password)\n self.assertEqual(m, self.input)",
"def test_partial_decryption(self):\n\n tprk = self.tSetUp.generate_private_key(0, self.trustees[0].private_key)\n text_to_encrypt_dir = os.path.join(os.path.dirname(__file__), \n \"TestThresholdPrivateKey.resources\")\n text_to_encrypt = os.path.join(text_to_encrypt_dir, \"text_to_encrypt\")\n text_encrypted = self.tpkey.encrypt_text(text_to_encrypt)\n \n # Decrypt the file created with our public key must be fine\n tprk.generate_partial_decryption(text_encrypted)\n \n # Create another ThresholdEcryptuonSetUp with other 1024 bits\n # cryptosys to create a cypthertext that cant be decrypted\n second_cryptosys_file = os.path.join(os.path.dirname(__file__), \n \"TestThresholdEncryptionSetUp.resources\",\n \"test1024bits_second.pvcryptosys\")\n # Load the cryptosystem from file\n second_cryptosys = EGCryptoSystem.from_file(second_cryptosys_file) \n secondtSetUp = ThresholdEncryptionSetUp(second_cryptosys, \n self.num_trustees, self.threshold)\n # Adding the keys from trustees for 2ndsetUp\n for i in range(self.num_trustees):\n secondtSetUp.add_trustee_public_key(i, self.trustees[i].public_key)\n secondcommitments = []\n # Generate commitmes for trustees for 2ndsetUp\n for i in range(self.num_trustees):\n secondcommitments.append(secondtSetUp.generate_commitment()) \n # Adding the secont trustees commitments \n for i in range(self.num_trustees):\n secondtSetUp.add_trustee_commitment(i, secondcommitments[i])\n # Generate secon cryptosis publickey\n secondtpkey = secondtSetUp.generate_public_key()\n # Encrypt the file with the secon cryptosis publickey\n secondtext_encrypted = secondtpkey.encrypt_text(text_to_encrypt)\n \n \n # Try to decryp something created with other ThresholdEcryptuonSetUp \n # must raise IncompatibleCiphertextError\n \n self.assertRaises(IncompatibleCiphertextError, \n tprk.generate_partial_decryption, secondtext_encrypted)\n\n\n # Create another ThresholdEcryptuonSetUp with other 512 bits\n # cryptosys to create a cypthertext that cant be decrypted\n third_cryptosys_file = os.path.join(os.path.dirname(__file__), \n \"TestThresholdEncryptionSetUp.resources\",\n \"test512bits.pvcryptosys\")\n # Load the cryptosystem from file\n third_cryptosys = EGCryptoSystem.from_file(third_cryptosys_file) \n thirdtSetUp = ThresholdEncryptionSetUp(third_cryptosys, \n self.num_trustees, self.threshold)\n # Adding the keys from trustees for 2ndsetUp\n for i in range(self.num_trustees):\n thirdtSetUp.add_trustee_public_key(i, self.trustees[i].public_key)\n thirdcommitments = []\n # Generate commitmes for trustees for 2ndsetUp\n for i in range(self.num_trustees):\n thirdcommitments.append(thirdtSetUp.generate_commitment()) \n # Adding the secont trustees commitments \n for i in range(self.num_trustees):\n thirdtSetUp.add_trustee_commitment(i, thirdcommitments[i])\n # Generate secon cryptosis publickey\n thirdtpkey = thirdtSetUp.generate_public_key()\n # Encrypt the file with the secon cryptosis publickey\n thirdtext_encrypted = thirdtpkey.encrypt_text(text_to_encrypt)\n \n \n # Try to decryp something created with other ThresholdEcryptuonSetUp \n # must raise IncompatibleCiphertextError\n \n self.assertRaises(IncompatibleCiphertextError, \n tprk.generate_partial_decryption, thirdtext_encrypted)",
"def AESDecrypt(shared_key, enc_assertion):\n try:\n iv = enc_assertion[:16]\n enc_msg = enc_assertion[16:]\n cipher = AES.new(shared_key, AES.MODE_CBC, iv)\n return cipher.decrypt(enc_msg)\n except Exception as e:\n sys.exit('Failed to decrypt assertion: %s(%s)\\n' % (e.__class__.__name__, e))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash takes keyword valid buflen | def test_hash_buflen_keyword(self):
h64 = scrypt.hash(self.input, self.salt, buflen=64)
h128 = scrypt.hash(self.input, self.salt, buflen=128)
self.assertEqual(len(h64), 64)
self.assertEqual(len(h128), 128) | [
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_table_hash_key_word():\n hash = HT()\n assert hash._hash_key('key') == 329\n assert hash._hash_key('Key') == 297",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def check_hash_len(hash):\n if len(hash) != 40 + 1:\n raise ProtocolError(\"Invalid hash len!\")",
"def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def test_cases_for_hash(self,\n alg: crypto_knowledge.Algorithm\n ) -> Iterator[test_case.TestCase]:\n calc = self.CALCULATE[alg.expression]\n if calc is None:\n return # not implemented yet\n\n short = b'abc'\n hash_short = calc(short)\n long = (b'Hello, world. Here are 16 unprintable bytes: ['\n b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a'\n b'\\x80\\x81\\x82\\x83\\xfe\\xff]. '\n b' This message was brought to you by a natural intelligence. '\n b' If you can read this, good luck with your debugging!')\n hash_long = calc(long)\n\n yield self.one_test_case(alg, 'hash_empty', '', [calc(b'')])\n yield self.one_test_case(alg, 'hash_valid_one_shot', '',\n [short.hex(), hash_short])\n for n in [0, 1, 64, len(long) - 1, len(long)]:\n yield self.one_test_case(alg, 'hash_valid_multipart',\n '{} + {}'.format(n, len(long) - n),\n [long[:n].hex(), calc(long[:n]),\n long[n:].hex(), hash_long])",
"def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1, sample_hash2])\n expected = sha1(linealHash('name', 'version') + sample_hash1 \\\n + sample_hash2).hexdigest()\n self.assertEqual(a, expected, \"With inputs, expected lineal hash to be\"\n \" H(linealHash + input1hash + input2hash)\")",
"def hash_verification(hash): \n md5 = re.findall(r'^[a-fA-F0-9]{32}$',hash)\n sha1 = re.findall(r'^[a-fA-F0-9]{40}$',hash)\n sha256 = re.findall(r'^[a-fA-F0-9]{64}$',hash)\n if md5 or sha1 or sha256:\n return True",
"def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98",
"def test_hashable(self):\n\n test = 'test'\n\n result = hashiter(test)\n\n self.assertEqual(result, hash(test))",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_00():\n hs1 = hashlib.sha256()\n hs2 = hashlib.sha256()\n\n # 해쉬는 바이너리로 진행해야 한다\n hs1.update(b\"Nobody inspects\")\n hs2.update(b\"the spammish repetition\")\n\n # 결과는 바이너리로 출력된다\n print(hs1.digest())\n print(hs2.digest(), \"\\n\\n\")\n\n \"\"\"바이너리 스트링 길이 체크 (테스트)\"\"\"\n ss1 = str(hs1.digest()).split(\"\\\\\")\n ss2 = str(hs2.digest()).split(\"\\\\\")\n\n # 리스트 스트링의 갯수 체크\n print(ss1)\n print(ss2)\n\n print(len(ss1))\n print(len(ss2), \"\\n\\n\")\n\n # 바이너리를 핵사로 변경하여 출력 ... 당연히 길이는 동일함!\n print(\"hs1=\", hs1.hexdigest())\n print(\"hs1.digest_siz=\", hs1.digest_size)\n print(\"hs2.digest_siz=\", hs2.digest_size, \"\\n\\n\")\n\n print(\"hs2=\", hs2.hexdigest())\n print(\"hs1.block_size=\", hs1.block_size)\n # hash comparison\n print(\"hs2.block_size=\", hs2.block_size)",
"def test_basic(self):\n a = linealHash('name', 'version')\n expected = sha1(sha1('name').hexdigest() + 'version').hexdigest()\n self.assertEqual(a, expected)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash accepts valid N in position 3 | def test_hash_n_positional(self):
h = scrypt.hash(self.input, self.salt, 256)
self.assertEqual(len(h), 64) | [
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def check_hash_len(hash):\n if len(hash) != 40 + 1:\n raise ProtocolError(\"Invalid hash len!\")",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def hash_verification(hash): \n md5 = re.findall(r'^[a-fA-F0-9]{32}$',hash)\n sha1 = re.findall(r'^[a-fA-F0-9]{40}$',hash)\n sha256 = re.findall(r'^[a-fA-F0-9]{64}$',hash)\n if md5 or sha1 or sha256:\n return True",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_rehashing():\n count = 3\n rehashed = rehash('example', count)\n assert len(rehashed) == count\n assert len(set(rehashed)) == count",
"def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def hash32(value): # -> int:\n ...",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.atom1, self.atom2, self.atom3, self.atom4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.atom1, self.atom2, self.atom3, self.atom4}), 4)",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def knot_hash(input_string):\n # Read lengths as ASCII characters\n lst = list(range(0,256))\n lengths = [ord(ch) for ch in input_string]\n lengths += [17, 31, 73, 47, 23]\n curr, skip = 0, 0\n for i in range(64):\n lst, curr, skip = hash_round(lst, lengths, curr, skip)\n # Get dense hash by taking blocks of numbers\n dense = []\n for i in range(16):\n xor = 0\n for j in range(16):\n xor ^= lst[i*16+j]\n dense.append(xor)\n # Convert 16 numbers to hex string\n knothash = ''.join([hex(num)[2:].zfill(2) for num in dense])\n return(knothash)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash takes keyword valid N | def test_hash_n_keyword(self):
h = scrypt.hash(N=256, password=self.input, salt=self.salt)
self.assertEqual(len(h), 64) | [
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_rehashing():\n count = 3\n rehashed = rehash('example', count)\n assert len(rehashed) == count\n assert len(set(rehashed)) == count",
"def test_hash_table_hash_key_word():\n hash = HT()\n assert hash._hash_key('key') == 329\n assert hash._hash_key('Key') == 297",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.atom1, self.atom2, self.atom3, self.atom4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.atom1, self.atom2, self.atom3, self.atom4}), 4)",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.mol1, self.mol2, self.mol3])), 2)\n\n # Test set behavior\n self.assertEqual(len({self.mol1, self.mol2, self.mol3}), 2)",
"def test_hashable(self):\n\n test = 'test'\n\n result = hashiter(test)\n\n self.assertEqual(result, hash(test))",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def hashFunctionTest():\n m = 128\n h = HashFunction(m)\n print(h)\n\n count = [0] * m\n for i in range(m*2):\n count[h.h(random.randint(-10000,10000))] += 1\n print count",
"def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98",
"def test_cases_for_hash(self,\n alg: crypto_knowledge.Algorithm\n ) -> Iterator[test_case.TestCase]:\n calc = self.CALCULATE[alg.expression]\n if calc is None:\n return # not implemented yet\n\n short = b'abc'\n hash_short = calc(short)\n long = (b'Hello, world. Here are 16 unprintable bytes: ['\n b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a'\n b'\\x80\\x81\\x82\\x83\\xfe\\xff]. '\n b' This message was brought to you by a natural intelligence. '\n b' If you can read this, good luck with your debugging!')\n hash_long = calc(long)\n\n yield self.one_test_case(alg, 'hash_empty', '', [calc(b'')])\n yield self.one_test_case(alg, 'hash_valid_one_shot', '',\n [short.hex(), hash_short])\n for n in [0, 1, 64, len(long) - 1, len(long)]:\n yield self.one_test_case(alg, 'hash_valid_multipart',\n '{} + {}'.format(n, len(long) - n),\n [long[:n].hex(), calc(long[:n]),\n long[n:].hex(), hash_long])",
"def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.bond1, self.bond2, self.bond3, self.bond4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.bond1, self.bond2, self.bond3, self.bond4}), 4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash takes keyword valid r | def test_hash_r_keyword(self):
h = scrypt.hash(r=16, password=self.input, salt=self.salt)
self.assertEqual(len(h), 64) | [
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_hash_table_hash_key_word():\n hash = HT()\n assert hash._hash_key('key') == 329\n assert hash._hash_key('Key') == 297",
"def hash_verification(hash): \n md5 = re.findall(r'^[a-fA-F0-9]{32}$',hash)\n sha1 = re.findall(r'^[a-fA-F0-9]{40}$',hash)\n sha256 = re.findall(r'^[a-fA-F0-9]{64}$',hash)\n if md5 or sha1 or sha256:\n return True",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hashable(self):\n\n test = 'test'\n\n result = hashiter(test)\n\n self.assertEqual(result, hash(test))",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_basic(self):\n a = linealHash('name', 'version')\n expected = sha1(sha1('name').hexdigest() + 'version').hexdigest()\n self.assertEqual(a, expected)",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98",
"def matchHashedText(hashedText, providedText):\n _hashedText, salt = hashedText.split(':')\n print(_hashedText)\n return _hashedText == hashlib.sha256(salt.encode() + providedText.encode()).hexdigest()",
"def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True",
"def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1, sample_hash2])\n expected = sha1(linealHash('name', 'version') + sample_hash1 \\\n + sample_hash2).hexdigest()\n self.assertEqual(a, expected, \"With inputs, expected lineal hash to be\"\n \" H(linealHash + input1hash + input2hash)\")",
"def test_hash_id(self):\n self.assertEqual(hash_id(self.id1, self.id2, self.salt, self.length), \"2Y7W5d\")",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash_handles_collisions(): ## 4\n ht = Hashtable()\n ht.add('Farah', 'what ever you want')\n ht.add('Ahmad', 'not what ever you want')\n actual = ht.find('Farah')\n assert actual == 'what ever you want'\n actual = ht.find('Ahmad')\n assert actual == 'not what ever you want'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash accepts valid p in position 5 | def test_hash_p_positional(self):
h = scrypt.hash(self.input, self.salt, 256, 8, 2)
self.assertEqual(len(h), 64) | [
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def hash_verification(hash): \n md5 = re.findall(r'^[a-fA-F0-9]{32}$',hash)\n sha1 = re.findall(r'^[a-fA-F0-9]{40}$',hash)\n sha256 = re.findall(r'^[a-fA-F0-9]{64}$',hash)\n if md5 or sha1 or sha256:\n return True",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def check_hash_len(hash):\n if len(hash) != 40 + 1:\n raise ProtocolError(\"Invalid hash len!\")",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_password_hash(self):\n u = User(nickname='pass', email='pass@pass.com')\n u.make_a_hash('passwordofpass')\n assert u.check_password('passwordofpass')",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def is_valid_info_hash(info_hash):\n valid = True\n try:\n if len(info_hash) != 40:\n valid = False\n except TypeError:\n valid = False\n return valid"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash takes keyword valid p | def test_hash_p_keyword(self):
h = scrypt.hash(p=4, password=self.input, salt=self.salt)
self.assertEqual(len(h), 64) | [
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_hash_for_paragraph(self):\n self.assertEqual(hash_for_paragraph('Abc 123 More.'),\n hash_for_paragraph(' abc123 mOrE'))\n random_term = uuid.uuid4().hex\n self.assertTrue(hash_for_paragraph(random_term) > 10000,\n msg=\"Hashed too small: {0}\".format(random_term))",
"def test_hash_table_hash_key_word():\n hash = HT()\n assert hash._hash_key('key') == 329\n assert hash._hash_key('Key') == 297",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def hash_verification(hash): \n md5 = re.findall(r'^[a-fA-F0-9]{32}$',hash)\n sha1 = re.findall(r'^[a-fA-F0-9]{40}$',hash)\n sha256 = re.findall(r'^[a-fA-F0-9]{64}$',hash)\n if md5 or sha1 or sha256:\n return True",
"def test_hashable(self):\n\n test = 'test'\n\n result = hashiter(test)\n\n self.assertEqual(result, hash(test))",
"def test_hash_table_hash_key_single():\n hash = HT()\n assert hash._hash_key('b') == 98",
"def test_hash_handles_collisions(): ## 4\n ht = Hashtable()\n ht.add('Farah', 'what ever you want')\n ht.add('Ahmad', 'not what ever you want')\n actual = ht.find('Farah')\n assert actual == 'what ever you want'\n actual = ht.find('Ahmad')\n assert actual == 'not what ever you want'",
"def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if len(hashsum) != len(hashd.hexdigest()):\n return False\n return True",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1, sample_hash2])\n expected = sha1(linealHash('name', 'version') + sample_hash1 \\\n + sample_hash2).hexdigest()\n self.assertEqual(a, expected, \"With inputs, expected lineal hash to be\"\n \" H(linealHash + input1hash + input2hash)\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error on illegal parameter value (p = 0) | def test_hash_raises_error_on_p_equals_zero(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, p=0)) | [
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def test_password_hash(self):\n u = User(nickname='pass', email='pass@pass.com')\n u.make_a_hash('passwordofpass')\n assert u.check_password('passwordofpass')",
"def test_hashpw(self):\n salt = '$2a$04$e9aKlXB7x0Uacbi7tRyKA.'\n should_be = ('$2a$04$e9aKlXB7x0Uacbi7tRyKA.ZE1qLWlAKZiMV9P2q8bI.'\n 'azUYj2EcFS')\n\n hashed = bcrypt.hashpw('abc', salt)\n\n eq_(hashed, should_be)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error on illegal parameter value (p < 0) | def test_hash_raises_error_on_negative_p(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, p=-1)) | [
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_password_hash(self):\n u = User(nickname='pass', email='pass@pass.com')\n u.make_a_hash('passwordofpass')\n assert u.check_password('passwordofpass')",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def test_hashpw(self):\n salt = '$2a$04$e9aKlXB7x0Uacbi7tRyKA.'\n should_be = ('$2a$04$e9aKlXB7x0Uacbi7tRyKA.ZE1qLWlAKZiMV9P2q8bI.'\n 'azUYj2EcFS')\n\n hashed = bcrypt.hashpw('abc', salt)\n\n eq_(hashed, should_be)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error on illegal parameter value (r = 0) | def test_hash_raises_error_on_r_equals_zero(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, r=0)) | [
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_password_hash(self):\n u = User(nickname='pass', email='pass@pass.com')\n u.make_a_hash('passwordofpass')\n assert u.check_password('passwordofpass')",
"def test_00():\n hs1 = hashlib.sha256()\n hs2 = hashlib.sha256()\n\n # 해쉬는 바이너리로 진행해야 한다\n hs1.update(b\"Nobody inspects\")\n hs2.update(b\"the spammish repetition\")\n\n # 결과는 바이너리로 출력된다\n print(hs1.digest())\n print(hs2.digest(), \"\\n\\n\")\n\n \"\"\"바이너리 스트링 길이 체크 (테스트)\"\"\"\n ss1 = str(hs1.digest()).split(\"\\\\\")\n ss2 = str(hs2.digest()).split(\"\\\\\")\n\n # 리스트 스트링의 갯수 체크\n print(ss1)\n print(ss2)\n\n print(len(ss1))\n print(len(ss2), \"\\n\\n\")\n\n # 바이너리를 핵사로 변경하여 출력 ... 당연히 길이는 동일함!\n print(\"hs1=\", hs1.hexdigest())\n print(\"hs1.digest_siz=\", hs1.digest_size)\n print(\"hs2.digest_siz=\", hs2.digest_size, \"\\n\\n\")\n\n print(\"hs2=\", hs2.hexdigest())\n print(\"hs1.block_size=\", hs1.block_size)\n # hash comparison\n print(\"hs2.block_size=\", hs2.block_size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error on illegal parameter value (r < 1) | def test_hash_raises_error_on_negative_r(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, r=-1)) | [
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def test_password_hash(self):\n u = User(nickname='pass', email='pass@pass.com')\n u.make_a_hash('passwordofpass')\n assert u.check_password('passwordofpass')",
"def test_basic(self):\n a = linealHash('name', 'version')\n expected = sha1(sha1('name').hexdigest() + 'version').hexdigest()\n self.assertEqual(a, expected)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error when parameters r multiplied by p over limit (2 ** 30) | def test_hash_raises_error_r_p_over_limit(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29)) | [
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_password_hashing_checking(self):\n\n pw = 'Te#%ghTdkk'\n hashed = hash_password(pw)\n self.assertTrue(check_password(hashed, pw))",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def secure_hash(s, iterations=10000):\n if s is None:\n return None\n hash_result = SHA256.new(data=str(s).encode()).hexdigest()\n for i in range(iterations):\n hash_result = SHA256.new(data=hash_result.encode()).hexdigest()\n return hash_result",
"def hash(x):\r\n return (randint(1, 5*c)*x + randint(1, 5*c)) % c",
"def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password **do not** yield the same hash\n self.assertFalse(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))",
"def test_long_password(self):\n\n # Create a password with a 72 bytes length\n password = 'A' * 72\n pw_hash = self.flask_bcrypt.generate_password_hash(password)\n # Ensure that a longer password yields the same hash\n self.assertTrue(self.flask_bcrypt.check_password_hash(pw_hash, 'A' * 80))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error when parameter N is not a power of two {2, 4, 8, 16, etc} | def test_hash_raises_error_n_not_power_of_two(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, N=3)) | [
"def test_hash_raises_error_n_under_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=1))\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=-1))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash(self):\n value = divisors.PrimeFactorization({17: 3, 23: 1, 41: 2})\n self.assertEqual(hash(value), 17 ** 3 * 23 * 41 ** 2)",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def hashFunctionTest():\n m = 128\n h = HashFunction(m)\n print(h)\n\n count = [0] * m\n for i in range(m*2):\n count[h.h(random.randint(-10000,10000))] += 1\n print count",
"def hash_v1(string: str, nbins: int) -> int:\n output_bin: int = 7\n for char in string.encode('utf-8'):\n output_bin = (output_bin*31 + char) % nbins\n return output_bin",
"def hash32(value): # -> int:\n ...",
"def test_hash(self):\n with pytest.raises(TypeError, match=r\"unhashable\"):\n hash(DataElement(0x00100010, 'PN', 'ANON'))",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test hash raises scrypt error when parameter N under limit of 1 | def test_hash_raises_error_n_under_limit(self):
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, N=1))
self.assertRaises(scrypt.error,
lambda: scrypt.hash(self.input, self.salt, N=-1)) | [
"def test_hash_raises_error_n_not_power_of_two(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, N=3))",
"def test_hash_n_positional(self):\n h = scrypt.hash(self.input, self.salt, 256)\n self.assertEqual(len(h), 64)",
"def test_hash_n_keyword(self):\n h = scrypt.hash(N=256, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_r_p_over_limit(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=2, p=2 ** 29))",
"def test_hash_p_positional(self):\n h = scrypt.hash(self.input, self.salt, 256, 8, 2)\n self.assertEqual(len(h), 64)",
"def test_hash_r_keyword(self):\n h = scrypt.hash(r=16, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_p_keyword(self):\n h = scrypt.hash(p=4, password=self.input, salt=self.salt)\n self.assertEqual(len(h), 64)",
"def test_hash_raises_error_on_negative_r(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=-1))",
"def test_hash_buflen_keyword(self):\n h64 = scrypt.hash(self.input, self.salt, buflen=64)\n h128 = scrypt.hash(self.input, self.salt, buflen=128)\n self.assertEqual(len(h64), 64)\n self.assertEqual(len(h128), 128)",
"def test_hash_raises_error_on_r_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, r=0))",
"def test_hash_raises_error_on_p_equals_zero(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=0))",
"def test_hash_raises_error_on_negative_p(self):\n self.assertRaises(scrypt.error,\n lambda: scrypt.hash(self.input, self.salt, p=-1))",
"def secure_hash(s, iterations=10000):\n if s is None:\n return None\n hash_result = SHA256.new(data=str(s).encode()).hexdigest()\n for i in range(iterations):\n hash_result = SHA256.new(data=hash_result.encode()).hexdigest()\n return hash_result",
"def secure_hash(s, iterations=10000):\n hash_result = SHA256.new(data=str(s)).hexdigest()\n for _ in xrange(iterations):\n hash_result = SHA256.new(data=hash_result).hexdigest()\n return hash_result",
"def test_hash_function(self):\n\n # Both values must be the same, if not the hash does\n # not work.\n hashed_value1 = WordFilter.hash(\"256\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"256\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with 384 hash.\n hashed_value1 = WordFilter.hash(\"384\", \"Hello World!\")\n hashed_value2 = WordFilter.hash(\"384\", \"Hello World!\")\n\n self.assertNotEqual(hashed_value1, \"Hello World!\")\n self.assertEqual(hashed_value1, hashed_value2)\n\n # Test values with invalid inputs\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", None)\n with self.assertRaises(ValueError):\n WordFilter.hash(\"256\", 12)\n with self.assertRaises(ValueError):\n WordFilter.hash(None, \"hello\")",
"def check_hash_len(hash):\n if len(hash) != 40 + 1:\n raise ProtocolError(\"Invalid hash len!\")",
"def test_init_error_too_long(self):\n with self.assertRaises(ValueError):\n TransactionHash(\n b'JVMTDGDPDFYHMZPMWEKKANBQSLSDTIIHAYQUMZOK'\n b'HXXXGJHJDQPOMDOMNRDKYCZRUFZROZDADTHZC99999'\n )",
"def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)",
"def test_00():\n hs1 = hashlib.sha256()\n hs2 = hashlib.sha256()\n\n # 해쉬는 바이너리로 진행해야 한다\n hs1.update(b\"Nobody inspects\")\n hs2.update(b\"the spammish repetition\")\n\n # 결과는 바이너리로 출력된다\n print(hs1.digest())\n print(hs2.digest(), \"\\n\\n\")\n\n \"\"\"바이너리 스트링 길이 체크 (테스트)\"\"\"\n ss1 = str(hs1.digest()).split(\"\\\\\")\n ss2 = str(hs2.digest()).split(\"\\\\\")\n\n # 리스트 스트링의 갯수 체크\n print(ss1)\n print(ss2)\n\n print(len(ss1))\n print(len(ss2), \"\\n\\n\")\n\n # 바이너리를 핵사로 변경하여 출력 ... 당연히 길이는 동일함!\n print(\"hs1=\", hs1.hexdigest())\n print(\"hs1.digest_siz=\", hs1.digest_size)\n print(\"hs2.digest_siz=\", hs2.digest_size, \"\\n\\n\")\n\n print(\"hs2=\", hs2.hexdigest())\n print(\"hs1.block_size=\", hs1.block_size)\n # hash comparison\n print(\"hs2.block_size=\", hs2.block_size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
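The scrypt records above all exercise parameter validation in py-scrypt's hash(). As a minimal sketch of how those constraints fit together (parameter names and limits are taken from the snippets themselves; the concrete N, r, p and input values below are illustrative assumptions):

import scrypt  # py-scrypt, the library exercised by the test records above

password, salt = b"secret passphrase", b"random salt"

# Constraints the tests above exercise: N must be a power of two greater than 1,
# r and p must be positive, and r * p must stay below 2 ** 30.
digest = scrypt.hash(password, salt, N=2 ** 14, r=8, p=1, buflen=64)
assert len(digest) == 64  # buflen controls the output length, as in the buflen test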
Returns the list of nodes (dict of attributes) for the given object attributes from the csv file, and the parent node's attributes | def getNodes(object_attributes, parent_node_attributes):
query = object_attributes[objects_config['query_field']]
parameter_info = object_attributes[objects_config['parameter_1']]
# TODO: need a better way to encode parameter in the csv
parameter_name = unicode(string.split(parameter_info, ':')[0])
if not parameter_name in parent_node_attributes:
print 'Error: parent_node_attributes doesn\'t have key ' + parameter_name
sys.exit(0)
query = replaceStr(query, '?', str(parent_node_attributes[parameter_name]))
print query
header, records = getQueryResult(query)
nodes = []
for record in records:
nodes.append(dict(zip(header, record)))
return nodes | [
"def _process_csv(filename):\n import csv\n\n node_dict, neighbor_dict = {}, {}\n\n with open(filename, \"r\") as csv_file:\n for row in csv.DictReader(csv_file):\n node = EuclideanNode(\n node_type=row['NodeType'],\n name=row['Name'],\n floor=row['Floor'],\n coord=eval(row['Coordinates'])\n )\n node_dict[row['Name']] = node\n neighbor_dict[row['Name']] = eval(row['Neighbors'])\n return node_dict, neighbor_dict",
"def from_csv(self, csv_file):\n self.node_dict, neighbor_dict = self._process_csv(csv_file)\n\n for name, node in self.node_dict.items():\n for neighbor_name in neighbor_dict[name]:\n neighbor = self.node_dict[neighbor_name]\n distance = funcs.euclidean_dist_nodes(node, neighbor)\n self.add_edge(node, neighbor, weight=distance)\n self.add_node(node)\n return self",
"def load_neos(neo_csv_path):\n\n \"\"\" A list for keeping all the `NearEarthObject`s created from each CSV row \"\"\"\n neo_list = []\n\n with open(neo_csv_path, 'r') as neo_file_obj:\n reader = csv.DictReader(neo_file_obj)\n\n \"\"\" Reading each row in the CSV file, creating `NearEarthObject` and adding to the list \"\"\"\n for entry in reader:\n neo_list.append(NearEarthObject(**entry))\n\n return neo_list",
"def load_neos(neo_csv_path) -> list:\n with open(neo_csv_path, 'r') as neo_file:\n neos_info = csv.DictReader(neo_file)\n neos_objects = [NearEarthObject(**neo) for neo in neos_info]\n\n return neos_objects",
"def read_kg_data(csv_file):\n print(f\"Started a model builder for data from: {csv_file}\")\n df = pd.read_csv(csv_file)\n df.columns = [\"h\", \"r\", \"t\"]\n entities = list(set(df[\"h\"].tolist() + df[\"t\"].tolist()))\n relations = list(set(df[\"r\"].tolist()))\n return entities, relations",
"def __init__(self, path_to_csv):\n with open(path_to_csv) as csvfile:\n reader = csv.reader(csvfile)\n t = BSTree()\n for row in reader:\n traffic = row[0]\n protocol = row[1]\n ip = row[3]\n ports = row[2]\n if '-' in ports:\n nports = ports.split('-')\n port1 = int(nports[0])\n portn = int(nports[1])\n for p in range(port1, portn):\n n = Node(p, traffic, protocol, ip)\n t.insert(n)\n else:\n n = Node(int(ports), traffic, protocol, ip)\n t.insert(n)\n self.tree = t",
"def load_taxonomy(path_out: str) -> Dict[int, List[int]]:\n taxonomy = {}\n path_tax = os.path.join(path_out, 'hierarchy/taxonomy.csv')\n with open(path_tax, 'r', encoding='utf8') as f:\n csv_reader = csv.reader(f, delimiter=',')\n for row in csv_reader:\n node_id = int(row[0])\n child_ids = [int(nid) for nid in row[1:6]]\n taxonomy[node_id] = child_ids\n return taxonomy",
"def load_neos(neo_csv_path=\"data/neos.csv\"):\n neos = []\n with open(neo_csv_path, 'r') as infile:\n reader = csv.DictReader(infile)\n for line in reader:\n neos.append(line)\n print('loaded NEO data')\n neo_collection = []\n for neo in neos:\n neo_collection.append(NearEarthObject(\n designation=neo['pdes'],\n name=neo['name'],\n diameter=neo['diameter'],\n hazardous=neo['pha']))\n print('created NearEarthObject collection')\n return neo_collection",
"def __init__(self, path_nodes, path_names, focal_id):\n\n print(\"Generating tree.\")\n self.phyl_nodes = []\n self.focal_id = focal_id\n self.gene_2_hits = {}\n self.ph_node_id_2_PS = {}\n self.ph_node_id_2_species = {}\n self.gene_2_min_PS = {}\n self.node_id_2_PS = {}\n self.rep_2_members = {}\n\n node_2_name = {}\n with open(path_names) as in_file:\n for line in in_file:\n lines_splitted = line.split()\n if line.strip() == \"\":\n continue\n if len(lines_splitted) < 2:\n raise Exception(\"Invalid names file format. Line: \" + line)\n try:\n node_2_name[int(lines_splitted[0])] = lines_splitted[1].strip()\n except ValueError:\n print(\"Node id-s must be parsable to integer. Line: \" + line)\n\n parent_2_children = {}\n root = True\n root_id = \"\"\n\n with open(path_nodes) as input:\n for line in input:\n if line.strip() == \"\":\n continue\n line_splitted = line.split()\n if len(lines_splitted) < 2:\n raise Exception(\"Invalid nodes file format. Line: \" + line)\n try:\n child_id = int(line_splitted[0])\n parent_id = int(line_splitted[1])\n except ValueError:\n print(\"Node id-s must be parsable to integer. Line: \" + line)\n if root:\n root_id = parent_id\n root = False\n\n if parent_id not in parent_2_children:\n parent_2_children[parent_id] = []\n\n parent_2_children[parent_id].append(child_id)\n\n name = root_id\n if root_id in node_2_name:\n name = node_2_name[root_id]\n else:\n print(\"ID: \" + int(root_id) + \" is not in names file. Names set to id value.\")\n\n node = Node(root_id, is_root=True, name=name)\n self.root = node\n self.__add_nodes_from_child_2_parent_dict(node, parent_2_children, node_2_name)\n\n # set phylostratum nodes to all nodes on the path from focal species to root\n current_node = self.get_node(focal_id)\n while not current_node.is_root:\n self.phyl_nodes.append(current_node)\n current_node.is_phyl_node = True\n current_node = current_node.parent\n\n current_node.is_phyl_node = True\n self.phyl_nodes.append(current_node)\n # set phylostratum numbers\n PS = 1\n current_node = self.root\n while True:\n if current_node.is_phyl_node:\n self.ph_node_id_2_PS[current_node.id] = PS\n current_node.phylostratum = PS\n PS += 1\n phyl_node = None\n for c in current_node.children:\n if c.is_phyl_node:\n if phyl_node:\n raise ValueError(\"Invalid format of phylogeny. Each phyl node must contain one and only one\"\n \"phyl child.\")\n phyl_node = c\n # breaks loop when last phyl node is reached\n if not phyl_node:\n break\n current_node = phyl_node\n\n # set species from side branch to each phyl node\n self.ph_node_id_2_species = self.get_side_phyl_branch_leafs()\n\n # set species 2 PS\n for ph_node in self.ph_node_id_2_species:\n for spec in self.ph_node_id_2_species[ph_node]:\n PS = self.ph_node_id_2_PS[ph_node]\n spec.phylostratum = PS\n self.node_id_2_PS[spec.id] = PS",
"def get_nodes(dataset):\n file_path = f'{dataset}/rating.csv'\n df = pd.read_csv(file_path)\n users = df['user_id'].unique()\n # items = df[['item_id', 'category_id']].drop_duplicates()\n # items = items.values\n items = df['item_id'].unique()\n return users, items",
"def parser_file(file_in, header=False):\n df = pd.read_csv(file_in, sep=SEPARATOR)\n try:\n df = df.sort_values(by=['score'], ascending=False)\n\n except Exception as e:\n\n print('cannot sort ', file_in)\n\n\n\n try:\n ids = df['node,layer'].values\n except:\n #print('WARNING: cannot select \\\"node,layer\\\" perform a replace operation if needed')\n ids = df['node'].values\n\n return ids",
"def load_node_meta(file_path):\n nmeta = pd.read_csv(file_path, sep=\"\\t\")\n nmeta.columns = ['Node', 'Term', 'Definition', 'Vocabulary']\n nmeta.index = nmeta['Node']\n return nmeta",
"def load_csv_nx(file_path = 'data/graph.csv', start_position = 0, end_position = 1, weight_positon = 2):\r\n G = nx.Graph()\r\n # with open(file_path) as f:\r\n f = open(file_path) \r\n \r\n # f = codecs.open(file_path,'r','utf-8')\r\n \r\n for line in f:\r\n if \"start\" in line:\r\n continue\r\n start = line.strip().split(\",\")[start_position].decode('utf-8')\r\n end = line.strip().split(\",\")[end_position].decode('utf-8')\r\n weight = line.strip().split(\",\")[weight_positon].decode('utf-8')\r\n \r\n \r\n\r\n G.add_edge(start, end, weight = int(weight))\r\n\r\n return G",
"def build_nodes(graph, record):\n\n if record.name == 'node':\n nid = None\n for attr in record.attr:\n if attr[0] == 'id':\n nid = attr[1]\n break\n\n if nid is not None:\n graph.add_node(nid, **record.to_dict({}))\n else:\n logging.error(\"GML import, skipping node without 'id'\")\n\n for child_record in record:\n build_nodes(graph, child_record)",
"def import_from_csv(self, csv_file):\n reader = csv.reader(csv_file)\n\n self.variable_labels = next(reader, None)[1:]\n self.element_labels = []\n self.data = []\n\n data_mode = True\n for row in reader:\n if not any(row):\n if data_mode:\n data_mode = False\n continue\n else:\n if data_mode:\n self.element_labels.append(row[0])\n self.data.append([int(i) for i in row[1:]])\n else:\n self.weights = [int(i) for i in row[1:]]\n self.neg_min = [int(i) for i in next(reader, None)[1:]]\n self.pos_max = [int(i) for i in next(reader, None)[1:]]\n break",
"def extract_as_nodes(as_number, input_dir, output_dir, as_nodes_file=AS_NODES_FILE):\n as_nodes = set(())\n file_path = os.path.join(input_dir, as_nodes_file)\n with open(file_path, 'r', encoding=\"utf8\") as myfile:\n file_path_out = os.path.join(output_dir, as_number+NODES_FILE_OUT)\n with open(file_path_out, 'w', encoding=\"utf8\") as out_file:\n reader = csv.reader(myfile)\n next(reader, None) # skip the headers\n for line in reader:\n if line[1] == as_number:\n node = line[0]\n out_file.write(node+'\\n')\n as_nodes.add(node)\n return as_nodes",
"def load_from_file_csv(cls):\n\n file = \"{}.csv\".format(cls.__name__)\n\n list_objs = []\n Headers = []\n with open(file, 'r', newline='') as file:\n from_csv = csv.reader(file)\n count = 0\n for row in from_csv:\n if count == 0:\n Headers = row\n count += 1\n else:\n dict = {}\n for i in range(len(row)):\n dict[Headers[i]] = int(row[i])\n list_objs.append(cls.create(**dict))\n return list_objs",
"def run(cls, row, reader):\n\n cls._parse_keys(row, reader.line_num)\n cls._parse_relationships(row, reader.line_num)",
"def generate_csv_files(file_path):\n\n file_name = file_path.split(\"/\")[-1]\n # Read json file created by LND describegraph command on the mainnet.\n json_data = load_json(file_path)\n # Parse data into a networkx MultiGraph obj.\n G = load_graph(json_data)\n\n with open('LN_nodes_'+file_name[3:13]+'_.csv', 'a+', newline='', encoding='utf8', errors='ignore') as file_object:\n csv_file = csv.writer(file_object)\n nodes = list(G.nodes(data=True))\n csv_file.writerow(['id', 'label', 'weight'] + list(reversed(list(nodes[0][1].keys()))))\n for node in nodes:\n csv_file.writerow([node[0], node[1]['alias'], node[1]['capacity']/1e8] + list(reversed(list(node[1].values()))))\n\n file_object.close()\n\n with open('LN_edges_'+file_name[3:13]+'_.csv', 'a+', newline='', encoding='utf8', errors='ignore') as file_object:\n csv_file = csv.writer(file_object)\n edges = list(G.edges(data=True))\n keys = ['id', 'label', 'source', 'target', 'weight', 'capacity', 'max_htlc'] + \\\n list(edges[0][2]['node1_policy'].keys())\n csv_file.writerow(keys)\n for edge in edges:\n edge_data = list(edge[2].values())\n direction1_values = [edge[2]['channel_id']+\"_1\", edge[2]['channel_id']+\"_1\"] +\\\n edge_data[3:5] + [edge_data[5]/1e8 , edge_data[5], edge_data[10]] + \\\n list(edge[2]['node1_policy'].values())\n direction2_values = [edge[2]['channel_id']+\"_2\", edge[2]['channel_id']+\"_2\"] + [edge_data[4], \\\n edge_data[3], edge_data[5]/1e8, edge_data[5], edge_data[10]] + list(edge[2]['node2_policy'].values())\n csv_file.writerow(direction1_values)\n csv_file.writerow(direction2_values)\n\n file_object.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
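The getNodes record above turns each query result row into an attribute dictionary by zipping it with the header row. A minimal, self-contained sketch of that pattern; the header and rows below are made-up stand-ins for getQueryResult()'s output, not the CSV schema from the record:

# Illustrative header and result rows.
header = ["id", "name", "parent_id"]
records = [(1, "alpha", None), (2, "beta", 1)]

# Same zip-to-dict pattern as the record: one dict of attributes per row.
nodes = [dict(zip(header, row)) for row in records]
print(nodes)  # [{'id': 1, 'name': 'alpha', 'parent_id': None}, {'id': 2, 'name': 'beta', 'parent_id': 1}]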
Find the ses class for the given element node. Create a new ses node if not exist, link the element node to it. | def linkToSES(node, attributes, graph_db):
ses_class = attributes[elements_config['ses_class']]
if ses_class is None or ses_class == '':
print 'Error: node ' + attributes + ' doesn\'t have ses class'
sys.exit(0)
ses_node = graph_db.get_or_create_indexed_node(
'SES', # index name
'ses_class', # index key
ses_class, # index value
{elements_config['ses_class']: ses_class}
)
ses_node.set_labels('ses_class')
graph_db.create(rel(node, "belongs to", ses_node)) | [
"def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname",
"def assign_class(otsession_id):\r\n return 200",
"def EnterClassType(self, node):\n nodes = [node]\n seen = set()\n while nodes:\n cur_node = nodes.pop(0)\n if cur_node in seen:\n continue\n seen.add(cur_node)\n for prefix, cls in self._Lookup(cur_node):\n if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):\n if cls.type.cls:\n cls = cls.type.cls\n else:\n nodes.append(cls.type)\n if isinstance(cls, pytd.Class):\n node.cls = cls\n return\n else:\n logging.warning(\"Couldn't resolve %s: Not a class: %s\",\n prefix + node.name, type(cls))",
"def get_or_create_sop_class(self, sop_class_identifier):\n return self.get_sop_class('sop_class_identifier',\n sop_class_identifier) or \\\n self.add_sop_class({'sop_class_identifier': sop_class_identifier})",
"def get_class_by_name(classname):\n\n # The empty string is accepted for compatibility\n # with old default arguments.\n if classname is None or classname == '':\n classname = 'Node'\n\n # Get the class object corresponding to `classname`.\n if classname not in class_name_dict:\n raise TypeError(\"there is no registered node class named ``%s``\"\n % (classname,))\n\n return class_name_dict[classname]",
"def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)",
"def find(element, node_name):\n if \"/\" in node_name:\n node_name = \"//ns:\".join(node_name.split(\"/\"))\n return element.find(\n \".//ns:{0}\".format(node_name), namespaces={\"ns\": XHTML_NAMESPACE}\n )",
"def _find(cls, node: etree._Entity, expr: str) -> etree._Entity:\n return node.find(expr, namespaces=cls._NS)",
"def find_by_class(tag_name, classes, ctx):\n return find_by(tag_name, {\"class\": classes}, ctx)",
"def Visit(self, node):\n mapping = self._mapping\n\n # Build a visitor that performs the old_class -> new_class mapping:\n class Visitor(visitors.Visitor):\n visits_all_node_types = True\n name_to_class = mapping\n for name, new_cls in mapping.iteritems():\n\n def Visit(self, node):\n # Python doesn't allow us to build this as a closure, so we have to\n # use the clunky way of retrieving the replacement class.\n cls = self.name_to_class.get(node.__class__.__name__)\n if cls is not None:\n return cls(*node)\n else:\n return node\n locals()[\"Visit\" + name] = Visit\n return node.Visit(Visitor())",
"def import_node(cls, type_id, toolkit_id, class_name):\n if type_id not in cls.registered_nodes:\n module_name = \"toolkits.\" + toolkit_id + \".\" + class_name\n module = importlib.import_module(module_name)\n cls.register_node(type_id, getattr(module, class_name))\n\n return cls.registered_nodes[type_id]",
"def parse_class(element):\n assert element.tag == 'class'\n style_class = {\n 'name': element.get('type'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag != 'category':\n continue\n style_class['entries'].append(parse_category(child))\n return style_class",
"def find_by_class(self, tag, class_, soup=None):\n soup = soup or self.soup\n return soup.find(tag, attrs={'class': class_})",
"def fromElement(Class, element):\n stanza = Class()\n stanza.parseElement(element)\n return stanza",
"def assign_class_session(self, class_session, subject, \n start_date = None):\n query = StudentsClass.all(keys_only=True)\n query.filter(\"class_session =\", class_session)\n query.ancestor(self)\n key = query.get()\n if (not key):\n students_class = StudentsClass.create(self, class_session,\n subject, start_date)\n if students_class:\n self.update_active_classes_cache()\n logging.info(\n \"**Assigned class session. Student: %s Class: %s\"\\\n %(self.short_name(), unicode(class_session)))\n else:\n students_class = db.get(key) \n logging.info(\n \"--Class session already assigned. Student: %s Class: %s\"\\\n %(self.short_name(), class_session.name))\n return students_class",
"def convert_element(self, el, pse=None):\n if el in list(self._store_elements.keys()):\n return self._store_elements[el]\n\n if isinstance(el, string_types): # as symbol\n element = Atom(el, pse=pse).element\n elif isinstance(el, Atom):\n element = el.element\n el = el.element.Abbreviation\n elif isinstance(el, ChemicalElement):\n element = el\n el = el.Abbreviation\n else:\n raise ValueError(\"Unknown static type to specify a element\")\n\n self._store_elements[el] = element\n if hasattr(self, \"species\"):\n if element not in self.species:\n self._species.append(element)\n self.set_species(self._species)\n return element",
"def find(self, tag: str) -> \"Element\":",
"def get_element_class(cls, name):\n return Element",
"def get_enclosing_namespace(\n node: ASTNode,\n class_node_callback: Optional[Callable[[ClassNode], None]] = None\n) -> NamespaceNode:\n parent_node = node.parent\n while not isinstance(parent_node, NamespaceNode):\n assert parent_node is not None, \\\n \"Can't find enclosing namespace for '{}' known as: '{}'\".format(\n node.full_export_name, node.native_name\n )\n if class_node_callback:\n class_node_callback(cast(ClassNode, parent_node))\n parent_node = parent_node.parent\n return parent_node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
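The linkToSES record above depends on get-or-create semantics: a single SES node per ses_class value, reused across calls, with each element node linked to it by a "belongs to" relationship. A database-free sketch of that behaviour, using a plain dict in place of the Neo4j legacy index (all names below are illustrative assumptions):

ses_index = {}   # stands in for the indexed 'SES' nodes keyed by ses_class
links = []       # stands in for the "belongs to" relationships

def link_to_ses(element_node, ses_class):
    if not ses_class:
        raise ValueError("element node has no ses class")
    # get-or-create: reuse the node for this ses_class if one already exists
    ses_node = ses_index.setdefault(ses_class, {"ses_class": ses_class})
    links.append((element_node["name"], "belongs to", ses_node["ses_class"]))
    return ses_node

link_to_ses({"name": "weight"}, "Mass")
link_to_ses({"name": "payload"}, "Mass")    # reuses the existing "Mass" node
link_to_ses({"name": "height"}, "Length")
print(len(ses_index), links)                # 2 distinct SES nodes, 3 links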
Create in Neo4j the object node and its standalone element nodes Returns the object node reference in Neo4j | def createNode(node_attributes, object_id, objects, elements, graph_db):
index_field = objects[object_id][objects_config['index_field']]
if index_field in node_attributes:
object_node = graph_db.get_or_create_indexed_node(
'ID',
'index_field',
node_attributes[index_field],
node_attributes
#{index_field: node_attributes[index_field]}
)
#object_node.set_properties(node_attributes)
else:
object_node, = graph_db.create(node(node_attributes))
object_node.add_labels(objects[object_id][objects_config['label_field']])
# if this object has standalone elements
for field_name, value in node_attributes.items():
if (object_id, field_name) in elements:
element_attributes = elements[(object_id, field_name)]
element_attributes[field_name] = value
element_node, = graph_db.create(node(element_attributes))
# label the nodes as elements
element_node.add_labels("Element")
graph_db.create(rel(object_node, "has element", element_node))
# link the element node to a ses concept
linkToSES(element_node, element_attributes, graph_db)
return object_node | [
"def get_node_by_object(self, object: object):\n data = self.database.select(self.TABLE_NAME,\n {'target_id' : object.id,\n 'parent_type': object.object_type.value})\n\n return self.get_node(data[0]['id'])",
"def generateSiblingFlatStructureforObject(self,node,objectName):\n if node.name == objectName:\n return [node]\n lNodes = [] \n lsubNodes = []\n for child in node.getObjects():\n lsubNodes = self.generateFlatStructureforObject(child,objectName)\n if lsubNodes:\n # append?\n lNodes.extend(lsubNodes)\n return lNodes",
"def replace_node_names_with_node_objects(self):\n for node in self.nodes:\n for key, node_to in node.edges_to.items():\n node.edges_to[key] = self.get_node_by_name(node_to)\n\n node.parent = self.get_node_by_name(node.parent)",
"def createTree(root_node_attributes, root_object_id, objects, elements, graph_db):\n root_node = createNode(\n root_node_attributes, \n root_object_id, \n objects, \n elements, \n graph_db\n ) \n \n if 'child_id_field' in objects[root_object_id]:\n child_ids = objects[root_object_id]['child_id_field'] \n print root_object_id, child_ids\n\n for child_id in child_ids:\n child_nodes_attributes = getNodes(objects[child_id], root_node_attributes)\n for child_node_attributes in child_nodes_attributes:\n child_node = createTree(\n child_node_attributes, \n child_id, \n objects,\n elements, \n graph_db\n )\n # child_node.add_labels(objects[child_id][objects_config['label_field']])\n graph_db.create(rel(root_node, \"has child\", child_node)) \n \n return root_node",
"def CreateNode(position, size, objects):\r\n return OctNode(position, size, objects)",
"def createNodeInContainer(container, nodetypecategory, nodetypename, nodename,\n exact_node_type):\n return (hou.Node(),hou.Node())",
"def make_node(self):\n self.parent().make_node(self.node, custom=self.custom)",
"def create_node(**kwargs):",
"def _exportNode(self):\n output = self._doc.createElement(\"object\")\n for nodename in (\"order\", \"hidden\"):\n skins = getattr(self.context, \"_\" + nodename)\n for skin in sorted(skins):\n for name in sorted(skins[skin]):\n node = self._doc.createElement(nodename)\n node.setAttribute(\"skinname\", skin)\n node.setAttribute(\"manager\", name)\n for viewlet in skins[skin][name]:\n child = self._doc.createElement(\"viewlet\")\n child.setAttribute(\"name\", viewlet)\n node.appendChild(child)\n output.appendChild(node)\n return output",
"def createnodes(self):\n i = 0\n for j in range(0, self.width):\n for k in range(0, self.height):\n n = node.Node(j, k, i)\n self.nodes[int(n.guid)] = n\n i += 1\n for nds in self.nodes:\n self.nodes[nds].walkable = True\n self.nodes[nds].neighbors = helpers.get_neighbors(self.nodes[nds], self)",
"def _gen_node(cls, object, index, point, nodeType = 0, link = 0):\r\n\r\n #execute ecotect instruction\r\n arg_str = p2e._base._util._convert_args_to_string(\"add.node\", object._eco_id, index, \r\n point[0], \r\n point[1], \r\n point[2], \r\n nodeType, link)\r\n val = p2e._app.Request(arg_str)\r\n eco_id = p2e._base._util._convert_str_to_type(val, int)\r\n \r\n return eco_id",
"def createNode(nodeIdentifier, owner, config):",
"def _add_node_to_graph(obj: ObjectWithContext, graph: Graph) -> Node:\n thing_node = Node(\n label=obj.label(),\n properties=obj.node_properties(),\n )\n graph.add_node(thing_node)\n return thing_node",
"def populate_obj(self, obj):\n for node in self.schema_instance.children:\n setattr(obj, node.name, self.data[node.name])",
"def __createNewNode(self, nodeName, attributes={}):\n node = ETS.HierarchicalNode(nodeName)\n node.add(\"grid\", factory.returnInstance(\"GridEntity\"))\n for key, attribute in attributes.items():\n node.add(key,attribute)\n\n return node",
"def addNodeSet(self, name, object=None, parent=None, mouseBinding={},\\\n hasChildren=False, firstExpand_cb=None, nodeClass=Node):\n \n if (type(object) is not list) or \\\n (type(name) is not list) or \\\n (type(hasChildren) is not list):\n warn(\"List of children needed, non-list type found\")\n return None\n \n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding)\n\n num = len(name)\n nodeList=[]\n for i in range(num):\n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding[i])\n node = nodeClass(name[i], object[i], \\\n hasChildren=hasChildren[i], firstExpand_cb=firstExpand_cb)\n nodeList.append(node)\n node.tree = self\n try:\n hash(object[i])\n node.objectKey = object[i]\n except TypeError:\n node.objectKey = self.objIndex\n self.objIndex +=1\n\n ## if type(object) is not types.InstanceType:\n ## node.objectKey = self.objIndex\n ## self.objIndex +=1\n ## else:\n ## node.objectKey = object\n\n if self.obj2Node:\n self.objToNode[node.objectKey] = node\n\n self.numberOfNodes += 1\n node.uniqueID = self.numberOfNodes\n node.tag = [str(node.uniqueID)]\n \n # if parent given as a string, find the Node obj of the parent\n if type(parent) is bytes:\n input=parent\n parent = self.findNodeFromName(parent)\n if parent is None:\n node.parentFullname = None\n warn( \"error in addNode, check name of parent: \"+ input) \n return\n else:\n node.parentFullname = input\n elif parent in self.objToNode:\n parent = self.objToNode[parent]\n elif not isinstance(parent, Node) and parent is not None:\n raise RuntimeError('bad parent')\n\n # if parent is given as None,we have a new root node\n # The new root is added to the end(bottom) of the tree\n if parent is None:\n node.parentFullname = None\n h = 0\n for r in self.roots:\n if r.name == name :\n warn(\"The node with name\"+name + \"already exists\")\n return\n h += r.height\n # calc the Y offset of current node\n node.y += h * OFFSET + self.offy \n node.x += self.offx\n self.roots.append(node)\n else:\n assert isinstance(parent, Node)\n if parent.parentFullname != None:\n node.parentFullname = parent.parentFullname + '|' + \\\n parent.name\n else:\n node.parentFullname = parent.name\n\n node.parent = parent\n \n if parent is not None:\n # check duplicated node\n # FIXME ... this is expensive\n## for c in parent.children:\n## if c.name == node.name:\n## print \"The node with name\", name, \"already exists\"\n## return \n\n for node in nodeList:\n node.x = parent.x + OFFSET\n parent.children.append(node)\n if parent.expanded:\n parent.increaseParentHeight(offset=num)\n parent.inserted = True\n self.updateY()\n if parent.inserted:\n parent.draw_new_insert(num=num, mode = 'batch')\n parent.inserted = False\n parent.draw()\n else:\n for i in range(num):\n self.draw_new_root(nodeList[i])\n \n bb = self.canvas.bbox(tkinter.ALL)\n self.canvas.configure(scrollregion=(0, 0,bb[2]+OFFSET, bb[3]+OFFSET))\n \n return nodeList",
"def __createOVALUnameObject ():\n name = \"uname_object\"\n \n if name not in testsHash[\"obj\"]:\n objectId = __getNewId (\"object\");\n object = __createXMLElement(\"uname_object\",\n attrs={\"id\":objectId, \n \"version\":\"1\",\n \"xmlns\":\"http://oval.mitre.org/XMLSchema/oval-definitions-5#unix\"})\n objects.append (object)\n\n testsHash[\"obj\"][name] = objectId\n \n return (testsHash[\"obj\"][name])",
"def create_node(self, label_name: str, properties: List[Property] = None) -> Node:",
"def addNode(self, name, object=None, parent=None, mouseBinding={},\\\n hasChildren=False, firstExpand_cb=None, nodeClass=Node):\n # the '|' is not allowed as name of the node\n if name.find('|')!=-1:\n warn( \"No '|' is allowed in node name \")\n return\n\n if self.mouseBinding is not None:\n mouseBinding.update(self.mouseBinding)\n\n node = nodeClass(name, object, mouseBinding=mouseBinding, \\\n hasChildren=hasChildren, firstExpand_cb=firstExpand_cb)\n\n node.tree = self\n try:\n hash(object)\n node.objectKey = object\n except TypeError:\n node.objectKey = self.objIndex\n self.objIndex +=1\n \n## if type(object) is not types.InstanceType:\n## node.objectKey = self.objIndex\n## self.objIndex +=1\n## else:\n## node.objectKey = object\n \n if self.obj2Node:\n self.objToNode[node.objectKey] = node\n \n self.numberOfNodes += 1\n node.uniqueID = self.numberOfNodes\n node.tag = [str(node.uniqueID)]\n \n # if parent is given as None,we have a new root node\n # The new root is added to the end(bottom) of the tree\n if parent is None:\n node.parentFullname = None\n h = 0\n for r in self.roots:\n if r.name == name :\n warn( \"The node with name\"+ name + \"already exists\")\n return\n h += r.height\n # calc the Y offset of current node\n node.y += h * OFFSET + self.offy \n node.x += self.offx\n self.roots.append(node)\n self.draw_new_root(node)\n \n else:\n # if parent given as a string, find the Node obj of the parent\n if type(parent) is bytes:\n input=parent\n parent = self.findNodeFromName(parent)\n if parent is None:\n node.parentFullname = None\n warn( \"error in addNode, check name of parent:\"+ input)\n return \n elif parent in self.objToNode:\n parent = self.objToNode[parent]\n elif not isinstance(parent, Node):\n raise RuntimeError('bad parent')\n #else:\n # # only Node type is accepted.\n # assert isinstance(parent, Node)\n\n if parent.parentFullname != None:\n node.parentFullname = parent.parentFullname + '|' + parent.name\n else:\n node.parentFullname = parent.name\n \n node.parent = parent \n # check duplicated node\n # FIXME ... this is expensive\n## for c in parent.children:\n## if c.name == node.name:\n## print \"The node with name\", name, \"already exists\"\n## return \n node.x = parent.x + OFFSET\n parent.children.append(node)\n if parent.expanded:\n parent.increaseParentHeight()\n parent.inserted = True\n self.updateY()\n if parent.inserted:\n parent.draw_new_insert()\n parent.inserted = False\n # FIXME erasing the parent is very expensif, we only need to\n # draw from node to end of children and move everything below\n # parent down\n parent.draw() \n \n bb = self.canvas.bbox(tkinter.ALL)\n self.canvas.configure(\n scrollregion=(0, 0,bb[2]+OFFSET, bb[3]+OFFSET))\n \n return node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create in Neo4j the root node and its all descendants (recursively) Returns the root node reference in Neo4j | def createTree(root_node_attributes, root_object_id, objects, elements, graph_db):
root_node = createNode(
root_node_attributes,
root_object_id,
objects,
elements,
graph_db
)
if 'child_id_field' in objects[root_object_id]:
child_ids = objects[root_object_id]['child_id_field']
print root_object_id, child_ids
for child_id in child_ids:
child_nodes_attributes = getNodes(objects[child_id], root_node_attributes)
for child_node_attributes in child_nodes_attributes:
child_node = createTree(
child_node_attributes,
child_id,
objects,
elements,
graph_db
)
# child_node.add_labels(objects[child_id][objects_config['label_field']])
graph_db.create(rel(root_node, "has child", child_node))
return root_node | [
"def insert_root(cls):\n if cls.objects.exists():\n return False, None\n\n root = cls(root=None, parent=None, height=0,)\n try:\n with transaction.atomic():\n root.save()\n root.root = root\n root.save()\n return True, root\n except IntegrityError:\n return False, None",
"def root(self,):\n return self.parent.root() if self.parent else self",
"def _create_tree(self, nodes):\n\n if len(nodes) == 1:\n self.root = nodes[0].data\n return self.root\n\n next_level = len(nodes)\n tree_level = []\n for i in range(0, next_level, 2):\n combined = sha3(nodes[i].data + nodes[i + 1].data)\n next_node = Node(combined, nodes[i], nodes[i + 1])\n tree_level.append(next_node)\n\n self.tree.append(tree_level)\n self._create_tree(tree_level)",
"def encode(self, root):\n if root is None:\n return None\n\n def build(n):\n res = TreeNode(n.val)\n r = None\n for c in (n.children or []):\n n2 = build(c)\n if r is None:\n res.right, r = n2, n2\n else:\n r.left = n2\n r = n2\n return res\n return build(root)",
"def make_node(self):\n self.parent().make_node(self.node, custom=self.custom)",
"def clone_decorated(self, root):\n if root is None:\n return None\n\n newnode = TreeNode(root.val)\n newnode.left = self.clone_decorated(root.left)\n newnode.right = self.clone_decorated(root.right)\n\n return newnode",
"def add_root(self, e):\n if self.root() is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root",
"def set_root(self,node) :\n if not node is None:\n node.parent = None\n self.__root = node",
"def getRoot(self) -> HuffNode:\n return self._root",
"def getRoot(self):\n\n return self.root_node",
"def getRootNode(self) -> \"SoNode *\":\n return _coin.SoProtoInstance_getRootNode(self)",
"def assemble_tree(node_list: list) -> object:\n tree = BinarySearchTree()\n tree.root = BinarySearchTreeNode(node_list[0])\n index = 1\n while index <= len(node_list)-1:\n tree.insert_node(BinarySearchTreeNode(node_list[index]), tree.root)\n index += 1\n return tree",
"def get_root(entity):\n parent = get_parent(entity)\n if parent:\n return get_root(parent)\n return entity",
"def root_ref(self):\n self.__reference = self.__client\n self.set_ref(self.root)",
"def build(root, get_children, get_edge_weight=default_edge_weight):\n graph = networkx.DiGraph()\n stack = [root]\n while len(stack) > 0:\n node = stack.pop()\n for child in get_children(node):\n graph.add_edge(node, child, weight=get_edge_weight(node, child))\n stack.append(child)\n return graph",
"def _build_tree_dynamic(self):\n node_id = 0\n fractions = dice_fractions(self.fixed_k)\n\n c = Components(self.proc_affinity_matrix)\n #Build the bottom level\n components, comp_mat = c.get_components(fractions.next(), \n self.proc_affinity_matrix,\n strongly_connected=True)\n for component in components:\n base_mat = self.base_affinity_matrix[component,:][:,component]\n proc_mat = self.proc_affinity_matrix[component,:][:,component]\n keys = self.key_list[component]\n n = Node(component, keys, base_mat, proc_mat, node_id)\n self.nodes[node_id] = n\n node_id += 1\n\n node_offset = temp_offset = 0\n for fraction in fractions:\n temp_offset += len(components) \n c = Components(comp_mat)\n components, comp_mat = c.get_components(fraction, comp_mat, True)\n for component in components:\n instances = []\n for instance in component:\n idx = instance + node_offset\n self.nodes[idx]._parent = node_id\n instances += self.nodes[idx].list_of_instances\n base_mat = self.base_affinity_matrix[instances,:][:,instances]\n proc_mat = self.proc_affinity_matrix[instances,:][:,instances]\n \n keys = self.key_list[instances]\n n = Node(instances, keys, base_mat, proc_mat, node_id)\n n._children = list(asanyarray(component) + node_offset) \n self.nodes[node_id] = n\n node_id += 1\n \n node_offset = temp_offset\n self.root_id = node_id - 1",
"def _add_root(self,e):\n if self._root is not None:\n raise ValueError('Root is not Empty')\n self._root = self._Node(e)\n self._size = 1\n return self._make_position(self._root)",
"def __init__(self, root):\n if not isinstance(root, TreeNode):\n root = TreeNode(root)\n self.root = root",
"def resetRoot(self):\n self._rootID = self.model.changes.get(self._leafID).getRoot()\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
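The createTree record above recurses over each child object definition, builds the child subtree, and links it back to its parent with a "has child" relationship. A database-agnostic sketch of that recursion; the nested spec format and the create_node/link callbacks are assumptions for illustration, not the record's CSV-driven configuration:

def create_tree(spec, create_node, link):
    """Create the current node, then recurse into each child spec and link it."""
    node = create_node(spec["attributes"])
    for child_spec in spec.get("children", []):
        child = create_tree(child_spec, create_node, link)
        link(node, "has child", child)
    return node

# Usage with in-memory stand-ins for the graph operations.
relationships = []
root = create_tree(
    {"attributes": {"name": "root"},
     "children": [{"attributes": {"name": "branch"},
                   "children": [{"attributes": {"name": "leaf"}}]}]},
    create_node=lambda attrs: dict(attrs),
    link=lambda parent, rel_type, child: relationships.append(
        (parent["name"], rel_type, child["name"])),
)
print(root["name"], relationships)  # root [('branch', 'has child', 'leaf'), ('root', 'has child', 'branch')]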
Load the indexes listed in this dataset's image set file. | def _load_image_set_index(self):
image_set_file = os.path.join(self._data_path,self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index | [
"def _load_image_set_index(self):\r\n # Example path to image set file:\r\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\r\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main','train.txt')\r\n assert os.path.exists(self._data_path), \\\r\n 'Path does not exist: {}'.format(self._data_path)\r\n with open(image_set_file) as f:\r\n image_index = [x.strip() for x in f.readlines()]\r\n return image_index",
"def _read_indexes(self):\n p = os.path.join(self.path, '_indexes')\n for ind in os.listdir(p):\n if ind.endswith('.py'):\n self.add_index('path:' + ind, create=False)",
"def _readIndexFiles(self):\n if self.haveIndexFiles:\n return\n\n self.log.debug(\"read index files\")\n self.haveIndexFiles = True # just try once\n\n if self.andConfig is None:\n self.andConfig = getConfigFromEnvironment()\n\n self.multiInds = AstrometryNetCatalog(self.andConfig)",
"def load_indexers():\n global WORDS_SET, W2I, TAGS_SET, T2I, CHARS_SET, C2I, PREFIX_SIZE, SUFFIX_SIZE, P2I, S2I\n global I2W, I2T, I2C, P2I, S2I\n W2I = {word : i for i, word in enumerate(WORDS_SET)}\n I2W = {i : word for word, i in W2I.iteritems()}\n T2I = {tag : i for i, tag in enumerate(TAGS_SET)}\n I2T = {i : word for word, i in T2I.iteritems()}\n C2I = {tag : i for i, tag in enumerate(CHARS_SET)}\n I2C = {i : word for word, i in C2I.iteritems()}\n\n # initialize prefixes and suffixes\n prefixes = {word[:PREFIX_SIZE] for word in WORDS_SET}\n suffixes = {word[-SUFFIX_SIZE:] for word in WORDS_SET}\n P2I = {word[:PREFIX_SIZE]:i for i, word in enumerate(prefixes)}\n S2I = {word[-SUFFIX_SIZE:]:i for i, word in enumerate(suffixes)}",
"def loadImageList(self):\n listFiles = os.listdir(self.data_path)\n exclude_files = ['.DS_Store', 'conf.json', 'README.txt']\n listNames = [f for f in listFiles if f not in exclude_files]\n nListNames = range(1,len(listNames)+1)\n dictImgNames = dict(zip(nListNames, listNames))\n return dictImgNames",
"def load_ind(env, i):\n inds_group = env.data_file[inds_group_key]\n data = inds_group[ind_key(env, i)]\n if data is None:\n raise IndexError(f\"Individual {i} is not stored in data.\")\n return ind_from_hdf(env, data)",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n for root, folders, files in os.walk(\n DATA_PATH + '/miniImageNet/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(\n DATA_PATH + '/miniImageNet/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n class_name = root.split('/')[-1]\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset,\n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n for root, folders, files in os.walk(\n DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(\n DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n alphabet = root.split('/')[-2]\n class_name = '{}.{}'.format(alphabet, root.split('/')[-1])\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset,\n 'alphabet': alphabet,\n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def _load_dataset_info_(self):\n image_index = []\n event_index = []\n\n for fn in self.files:\n datafile = self.files[fn]\n \n # All events in the same file have the same particle type\n particle_type = datafile.root._v_attrs.particle_type\n class_name = PARTICLE_ID_TO_CLASS_NAME[particle_type]\n \n # Each row of each datafile is a different event\n for row in datafile.root.Event_Info.iterrows():\n # skip the row if the event associated does not pass the filter\n if not self._filter_event(fn, row): continue\n \n event_img_idxs = []\n for tel_type in self._selected_tel_types:\n try:\n img_rows_tel = row[tel_type + '_indices']\n except KeyError:\n logging.warning(f\"No such telescope {tel_type} in file {fn}\")\n continue\n \n img_idxs = []\n for img_row in img_rows_tel: \n # If the image was not triggered or does not pass the filter\n if img_row == 0 or not self._filter_img(fn, tel_type, img_row): \n if self._keep_telescope_position:\n img_idxs.append(-1)\n continue\n \n # Compute image statistics\n record = datafile.root._f_get_child(tel_type)[img_row]\n energy_trace = record['image_charge']\n min_energy = np.min(energy_trace)\n max_energy = np.max(energy_trace)\n total_energy = np.sum(energy_trace)\n \n img_idxs.append(len(image_index))\n \n image_index.append({\n 'filename': fn,\n 'tel_type': tel_type,\n 'img_row': img_row,\n 'event_index': len(event_index),\n 'class_name': class_name,\n 'min_energy': min_energy,\n 'max_energy': max_energy,\n 'total_energy' : total_energy\n })\n \n # Add global image indices to the event indices\n event_img_idxs += img_idxs\n \n # If there is at least one non-dummy image associated to this event\n # add it to the event index\n if len([idx for idx in event_img_idxs if idx != -1]) >= self._min_triggers_per_event:\n event_index.append({\n 'filename': fn, \n 'image_indices': event_img_idxs, \n 'class_name': class_name\n })\n \n # Create pandas dataframes\n self._image_index_df = pd.DataFrame(image_index)\n self._event_index_df = pd.DataFrame(event_index)",
"def get_index_files(self):\n return",
"def open_files(self):\n with open(data_dir+index_file,'rb') as f:\n\t index_raw= pickle.load(f)\n val_list = [6,13,20,34,41]\n\t index_data = []\n\t for a in index_raw:\n\t\tif self.istrain and a[0] not in val_list:\n\t\t index_data.append(a)\n\t\telif not self.istrain and a[0] in val_list:\n\t\t index_data.append(a)\n\t index_data = index_data\n\tprint len(index_data)\t\n\tinput_list = []\n\ttarget_list = []\n\n target_data = h5py.File(data_dir+target_file,'r') \n input_data = h5py.File(data_dir+input_file,'r')\n\t#for i in range(len(input_data.keys())):\n\t# input_list.append(input_data[vid_dict[i]][:])\n # target_list.append(target_data[vid_dict[i]][:])\n\t# print i\n\t#with open(data_dir+'data','w') as f:\n\t# data={'input':input_list,'target': target_list}\n\t# pickle.dump(data,f)\n return index_data,target_data, input_data",
"def createIndex(self):\n log.info(\"-------------------------------->\")\n stats = {\n 'total_labels': 0\n ,'total_annotations': 0\n ,'total_images': 0\n ,'total_unique_images': 0\n ,\"total_label_per_img\": defaultdict(list)\n ,\"total_img_per_label\": defaultdict()\n ,\"label_per_img\": defaultdict(list)\n ,\"total_annotation_per_label\": defaultdict()\n }\n\n unique_images = set()\n anns, cats, imgs = {}, {}, {}\n imgToAnns, catToImgs, cat_lblid_to_id, catToAnns = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n imgToAnns[ann['img_id']].append(ann)\n anns[ann['ant_id']] = ann\n stats['total_annotations'] += 1\n\n # catToImgs[ann['lbl_id']].append(ann['img_id'])\n if 'categories' in self.dataset:\n catToImgs[ann['lbl_id']].append(ann['img_id'])\n catToAnns[ann['lbl_id']].append(ann['ant_id'])\n\n if 'images' in self.dataset:\n for img in self.dataset['images']:\n imgs[img['img_id']] = img\n stats['total_images'] += 1\n _ann = imgToAnns[img['img_id']]\n\n\n # if 'annotations' in self.dataset and 'categories' in self.dataset:\n # for ann in self.dataset['annotations']:\n # # catid = cat_lblid_to_id[ann['lbl_id']]\n # # catToImgs[catid].append(ann['img_id'])\n # catToImgs[ann['lbl_id']].append(ann['img_id'])\n # catToAnns[ann['lbl_id']].append(ann['ant_id'])\n\n ## categories and labels are synonymous and are used to mean the same thing\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['lbl_id']] = cat\n # cats[cat['id']] = cat\n # self.cat_lblid_to_id[cat['lbl_id']] = cat['id']\n stats['total_labels'] += 1\n stats['total_annotation_per_label'][cat['lbl_id']] = len(catToAnns[cat['lbl_id']])\n stats['total_img_per_label'][cat['lbl_id']] = len(catToImgs[cat['lbl_id']])\n\n log.info('index created!')\n log.info(\"stats: {}\".format(stats))\n\n # create class members\n self.anns = anns\n self.imgToAnns = imgToAnns\n self.catToImgs = catToImgs\n self.catToAnns = catToAnns\n self.imgs = imgs\n self.cats = cats\n self.minstats = stats",
"def get_indexes(self):\n url = \"%s/index\" % self.database.URL\n response = self.connection.session.get(url, params = {\"collection\": self.name})\n data = response.json()\n for indx in data[\"indexes\"]:\n self.indexes[indx[\"type\"]][indx[\"id\"]] = Index(collection = self, infos = indx)\n\n return self.indexes",
"def load_all_images(self):\n self._load_train_images()\n self.load_test_images()",
"def merge_indexes(index_files):\n index = {}\n for f in index_files:\n print f\n part_index = pickle.load(file(f))\n index.update(part_index)\n\n return index",
"def load_multiple_images(self, filepath_list):\n self.image = Image.from_multiples(filepath_list)",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))",
"def gather_images(datasets, batch_img_paths):\r\n n_batch = len(batch_img_paths)\r\n\r\n images = [[] for d in datasets]\r\n image_idx = [[] for d in datasets]\r\n\r\n for img_path in batch_img_paths:\r\n\r\n img_path_idx = index_by_path(datasets, img_path) \r\n\r\n for j, path_idx in enumerate(img_path_idx):\r\n\r\n images[j].extend(load_dataset_images(datasets[j][path_idx[0]], path_idx[1], 1))\r\n image_idx[j].append(path_idx[0]) # the model/dataset that the image is mapped to\r\n\r\n return images, image_idx",
"def GetOriginalImages(self, IDlist):\n\n\t\t# * * * OLD FIXED-DIM VERSION * * *\n\n\t\tif self.data_loaded:\n\n\t\t\tfor ii in IDlist:\n\t\t\t\tpass\n\t\t\t\t# i = sample; % 39 when digit =1\n\t\t\t\t#\n\t\t\t\t# %% Add original image\n\t\t\t\t#\n\t\t\t\t# imagesc(reshape(X0(i,:), self.imR,[]));\n\n\t\t\treturn\n\n\t\telse:\n\t\t\traise IOError, \"Can't get image until data is loaded successfully\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the default path where PASCAL VOC is expected to be installed. | def _get_default_path(self):
return os.path.join(datasets.ROOT_DIR, 'data', 'VOCdevkit' + self._year) | [
"def _get_default_path(self):\n return os.path.join('/mnt/saturn/datasets/MSCOCO');",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'ftdata')",
"def default_catalog_path(self) -> Path:\n\n try:\n return self._default_catalog_path\n except AttributeError:\n pass\n with ir.path(\"pulsaroftheday.catalogs.atnf.data\", \"psrcat.db\") as db:\n self._default_catalog_path = db\n return self._default_catalog_path",
"def default_config_path():\n return os.path.join(get_config_home(), 'config')",
"def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')",
"def make_default_data_path():\n return os.path.relpath(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"..\", \"data\"))",
"def def_report_path():\n if os.name == 'nt':\n return(getwindoc())\n else:\n return(os.getenv(\"HOME\"))",
"def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR",
"def get_default_installation_dir():\n if sys.platform == \"win32\":\n install_path = os.path.expandvars(r'%PROGRAMW6432%\\dynatrace\\oneagent')\n conf_path = os.path.expandvars(r'%programdata%\\dynatrace\\oneagent\\agent\\config\\ruxitagentproc.conf')\n else:\n install_path = '/opt/dynatrace/oneagent'\n conf_path = '/var/lib/dynatrace/oneagent/agent/config/ruxitagentproc.conf'\n try:\n with open (conf_path, 'r') as conf_file:\n prefix = 'libraryPath64 '\n for line in conf_file:\n if line.startswith(prefix):\n lib_path = Path(line[len(prefix)+1:-1])\n install_path = lib_path.parent.parent.parent.parent\n break\n except OSError as e:\n pass\n logging.debug(\"Setting installation root dir to %s\", install_path)\n return install_path",
"def _output_directory_default(self):\n return os.getcwd()",
"def _fname_geotxt_default(self):\n dir_detector = os.path.abspath(os.path.dirname(__file__))\n return '%s/../%s' % (dir_detector, self._path_geo_default)",
"def get_local_dotrecipyrc():\n return os.path.join(os.getcwd(), DOTRECIPYRC)",
"def default_plugin_path(self):\n return self._default_plugin_path",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_im_path)",
"def default_config():\n path = os.path.join(googkit_root(), DEFAULT_CONFIG)\n if not os.path.exists(path):\n msg = 'Default config file is not found: {path}'.format(path=path)\n raise GoogkitError(msg)\n\n return path",
"def pines_dir_check():\n home_dir = Path(os.path.expanduser('~/Documents/'))\n default_pines_dir = Path(os.path.join(home_dir, 'PINES_analysis_toolkit/'))\n if os.path.exists(default_pines_dir):\n return default_pines_dir\n else:\n print('ERROR...I have not set this up to work for directories other than ~/Documents/PINES_analysis_toolkit.')\n # TODO: Config file for non-Documents path?\n return",
"def defaultClientPath():\n clientName=\"astrometryDotNetClient.py2\"\n myName=sys.argv[0]\n clientPath=pathlib.Path(myName).absolute().parent/clientName\n return str(clientPath)",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_path)",
"def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path",
"def _get_default_data_dir_name():\n return _get_path(DATA_DIR)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the next states of the system. | def generate_states(self, current_state, no=10):
future_states = []
emitted_states=[]
x=[]
for i in range(no):
next_state = self.next_state(current_state)
#print("Next state is",next_state)
emitted_states.append(self.next_emitted_state(next_state))
future_states.append(next_state)
current_state = next_state
x=emitted_states
return [x[-1], current_state] | [
"def calculate_next_state(self):\n self.current_step = self.current_step + 1\n self.current_state = self.game.next_state(current_state=self.current_state, actions=self.next_action)",
"def next_state(self, state, move):\n\n pass",
"def next_states(self,state,args={}):\n res = self.possible_results(state,args)\n effects = []\n states = []\n ps = []\n for r in res:\n for e,p in zip(r.effects,r.probabilities):\n s = copy.deepcopy(state)\n loc = {'_nextstate_':s,'_state_':state,'_args_':args}\n for i,expr,ass in zip(e.items,e.expressions,e.assignment_expressions):\n #s[i] = expr.evaluate(state,args)\n try:\n exec(ass.parsed_expr,self.context.__dict__,loc)\n except Exception as e:\n print \"Error evaluating expression\",ass.source_string\n raise\n effects.append(e.name)\n states.append(s)\n ps.append(p)\n #TODO: normalize ps?\n return (effects,states,ps)",
"def generate_recent_states():\n\n states = []\n\n # Add generated states into the list\n states.extend(generate_won_states())\n states.extend(generate_best_states())\n\n return states",
"def _initialize_state_generators(self):\n suffix = \"_handler_generator\"\n shg_names = [name for name in dir(self)\n if name.endswith(suffix)\n and not name.startswith(\"_\")]\n for name in shg_names:\n shg = getattr(self, name)\n print( shg )\n setattr(self, name[:-len(suffix)], shg().__next__)",
"def next_states(self, s):\n next_states = {}\n for x, next_s in self._delta[s].items():\n if next_s not in next_states:\n next_states[next_s] = []\n next_states[next_s].append(x)\n return next_states",
"def estimate_next_state(self):\n return self.__transition_function(self.__state)",
"def get_next_state(self, index_next_state):\n raise NotImplementedError()",
"def __generate_state() -> str:\n return ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16))",
"def next_state(self):\r\n s = max(self.states)\r\n self.states.remove(s)\r\n return s[1]",
"def gen_next_state(self, direction):\r\n # Find the current zero-location (blank space).\r\n zero_row = self.zero_location[0]\r\n zero_col = self.zero_location[1]\r\n\r\n # Store the zero location values for our swap tile calculations.\r\n swap_row = zero_row\r\n swap_col = zero_col\r\n\r\n # Find the value in the appropriate direction.\r\n if direction == 'up':\r\n swap_row -= 1\r\n if direction == 'down':\r\n swap_row += 1\r\n if direction == 'left':\r\n swap_col -= 1\r\n if direction == 'right':\r\n swap_col += 1\r\n\r\n # Move the zero-location in the direction specified,\r\n # swapping with the number in the location it moves to.\r\n new_puzzle = np.copy(self.puzzle)\r\n new_puzzle[zero_row, zero_col], new_puzzle[swap_row, swap_col] = (\r\n new_puzzle[swap_row, swap_col], new_puzzle[zero_row, zero_col]\r\n )\r\n\r\n # Create the new state.\r\n path_cost = self.g_cost + 1\r\n predecessor_state = self\r\n next_state = PuzzleState(new_puzzle, path_cost, predecessor_state)\r\n\r\n # Set the predecessor's direction being moved.\r\n next_state.action_from_predecessor = direction\r\n\r\n return next_state",
"def allStates():",
"def random_state(self, state):\n pass",
"def generate_state(start, diff, state_size, state_name):\n values = []\n increment = float(1) / state_size\n for iteration in range(int(state_size)):\n # Get a value between start + diff\n sample = start + diff * increment * iteration\n values.append(sample)\n\n return {\n \"state_name\": state_name,\n \"values\": values\n }",
"def generate(self, n):\n # TODO: fill in\n # due at checkpoint date (ungraded)\n # Use the sample_from_dist helper function in this file\n\n # Initialize the first state based on the '#' state\n current = sample_from_dist(self.transitions['#'])\n\n # Initialize the list and generate up to n number of symbols\n # Each time using the current state to get the next state based on probability\n symbolsSample = []\n for x in range(0, n):\n symbolsSample.append(sample_from_dist(self.emissions[current]))\n # Use the dictionary of the current transition state to get the next one\n current = sample_from_dist(self.transitions[current])\n return symbolsSample",
"def wizard(self) :\n\n\t\t# Variables\n\t\tprint(\"Complete list of state variables, separated by commas :\")\n\t\tself.states = input().replace(\" \", \"\").split(\",\")\n\t\tself.N_states = len(self.states)\n\t\tself.states_map = { s : idx for s, idx in zip(self.states, range(self.N_states)) }\n\n\t\t# Initial condition for each variable\n\t\tprint(\"\\nInitial conditions (integers) :\")\n\t\tself.initconds = { s : int(input(\"%s : \" % s)) for s in self.states }\n\n\t\t# Parameters\n\t\tprint(\"\\nComplete list of parameters, separated by commas :\")\n\t\tparams = input().replace(\" \", \"\").split(\",\")\n\n\t\t# Value of each parameter\n\t\tprint(\"\\nValues of parameters :\")\n\t\tself.parameters = { p : input(\"%s : \" % p) for p in params }\n\n\t\t# State transitions\n\t\tevent = []\n\t\tself.events = []\n\t\tprint(\"\\nEvents, as \\\"<rate>, <state_change>, ...\\\" lists, with commas between state changes and X+1, Y-1 as example changes :\")\n\t\twhile True :\n\n\t\t\t# Grab user input of one event\n\t\t\tevent = input().split(\",\")\n\t\t\tif event == [\"\"] : # if they hit Enter\n\t\t\t\tbreak # stop reading in events\n\n\t\t\tthisevent = {}\n\t\t\tfor e in event[1:] :\n\t\t\t\tif \"+\" in e :\n\t\t\t\t\tst, quant = e.split(\"+\")\n\t\t\t\t\tquant = int(quant)\n\t\t\t\telif \"-\" in e :\n\t\t\t\t\tst, quant = e.split(\"-\")\n\t\t\t\t\tquant = -int(quant)\n\t\t\t\telse :\n\t\t\t\t\traise helpers.InvalidModel(\"The syntax of this event was not recognised.\")\n\t\t\t\tthisevent[st.strip()] = quant\n\n\t\t\tself.events.append([event[0].strip(), thisevent])\n\n\t\t# Model variables\n\t\tself.build()",
"def getStates():",
"def generateSystem(num_points, odes, initial_state, parameters, dt):\n # Create matrix to store results [n_states x data_length)\n results = np.zeros([initial_state.shape[0], num_points])\n\n # Store Initial State\n results[:, 0] = initial_state\n\n # Initalize state as initial_state\n state = initial_state\n\n # Calculate Remaining States\n for point in range(num_points-1):\n # Calculate next State from current State using Runge-Kutta 4th order method\n state = rk4_singleStep(odes, state, parameters, dt)\n\n # Append state to results\n results[:, point+1] = state\n\n return results",
"def next_state(current_state=0, state_scale=None, action=0):\n if state_scale is None:\n state_scale = np.arange(0, 100, 0.1)\n\n if isinstance(current_state, int) or isinstance(current_state, float):\n current_state = initial_state(state_scale)\n\n all_states = list(prd(current_state, state_scale))\n transition_out = transition_distribution(all_states[:, 0],\n all_states[:, 1])\n transition_out = transition_out.reshape((len(current_state), -1))\n return(transition_out)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creating a closure around the provider with the custom formatting to be applied on the provider output. | def formatter(provider: typing.Callable[..., payload.ColumnMajor]) -> typing.Callable[..., typing.Any]:
@functools.wraps(provider)
def wrapper(*args, **kwargs) -> typing.Any:
"""Wrapped provider with custom formatting.
Args:
*args: Original args.
**kwargs: Original kwargs.
Returns:
Formatted data.
"""
return self.format(provider(*args, **kwargs))
return wrapper | [
"def wrapper(*args, **kwargs) -> typing.Any:\n return self.format(provider(*args, **kwargs))",
"def create_print_wrapper(f):\n def new_func(*args, **kwargs):\n response_format = kwargs.pop('format')\n response = f(*args, **kwargs)\n echo(format_output(response, response_format))\n return response\n\n new_func = update_wrapper(new_func, f)\n insert_click_param(new_func,\n Option(['--format'],\n type=Choice(['json_pp', 'json', 'yaml', 'column']),\n default='column',\n help='Specify how responses should be formatted and echoed to the terminal.'))\n return new_func",
"def format_options(self, ctx, formatter):\n ...",
"def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)",
"def formatter(self): # type: () -> Formatter\n return self._formatter",
"def __init__(self, orig_formatter=None):\n self.orig_formatter = orig_formatter",
"def make_style(self, opts=(), **kwargs):\n if len(kwargs) == 0 and len(opts) == 0:\n return lambda text: text\n return lambda text: self.colorize(text, opts, **kwargs)",
"def _create_formatter(self, level, fmt):\n color = ''\n reset = ''\n\n if sys.stdout.isatty():\n color_name = self.config['COLOR'].get(level.upper())\n\n if color_name:\n color = getattr(colorama.Fore, color_name.upper(), '')\n\n if color:\n reset = colorama.Fore.RESET\n\n return logging.Formatter(fmt.format(color=color, reset=reset))",
"def inject(self, span, format, carrier):\n pass",
"def _additional_formatting(self, line):\n return line",
"def perform_additional_formatting(self, charter: LineChart) -> None:\n pass",
"def prettify(indent=0, width=80, compact=True):\n\n def decorate(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n result = func(*args, **kwargs)\n\n print(t.format_function_header(func, args, kwargs))\n print(t.format_return_value(result, indent, width, compact))\n print(t.BLUE_LINES)\n\n return result\n\n return inner\n\n return decorate",
"def _get_formatter(self, attribute):\n\n entry = self._numeric_format.get(attribute, None)\n if isinstance(entry, string_types):\n fmt_str = '{0:' + entry + '}'\n return fmt_str.format\n elif callable(entry):\n return entry\n else:\n return str",
"def setFormatter(self, fmt):\r\n pass",
"def currency_custom_handler(currency):\n pass",
"def __call__(self, x):\n if self.is_mol(x):\n return PrintAsImageString(x)\n if callable(self.orig_formatter):\n return self.orig_formatter(x)\n return self.default_formatter(x)",
"def _format_callee(func_width):\n # Unintentional, but both the summary and callee lines\n # contain the same layout of cells. Keeping the function\n # calls separate in case I ever need to change 'em.\n return MergedStats._format_summary(func_width)",
"def __format__(self, format_spec):\n return \"[Formatted point: {}, {}, {}]\".format(self.x, self.y, format_spec)",
"def GetEventFormatterHelper(cls, identifier):\n identifier = identifier.lower()\n return cls._custom_formatter_helpers.get(identifier)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapped provider with custom formatting. | def wrapper(*args, **kwargs) -> typing.Any:
return self.format(provider(*args, **kwargs)) | [
"def formatter(provider: typing.Callable[..., payload.ColumnMajor]) -> typing.Callable[..., typing.Any]:\n\n @functools.wraps(provider)\n def wrapper(*args, **kwargs) -> typing.Any:\n \"\"\"Wrapped provider with custom formatting.\n\n Args:\n *args: Original args.\n **kwargs: Original kwargs.\n\n Returns:\n Formatted data.\n \"\"\"\n return self.format(provider(*args, **kwargs))\n\n return wrapper",
"def inject(self, span, format, carrier):\n pass",
"def __init__(self, orig_formatter=None):\n self.orig_formatter = orig_formatter",
"def format_options(self, ctx, formatter):\n ...",
"def custom_template_formatter(self):\n return self.FORMATTER_DELIMITER.join(self.custom_template_formatters)",
"def setFormatter(self, fmt):\r\n pass",
"def formatter(self): # type: () -> Formatter\n return self._formatter",
"def _additional_formatting(self, line):\n return line",
"def __format__(self, format_spec):\n return \"[Formatted point: {}, {}, {}]\".format(self.x, self.y, format_spec)",
"def perform_additional_formatting(self, charter: LineChart) -> None:\n pass",
"def special_format_field(self, obj, format_spec):\n raise NotImplementedError()",
"def format(cls):\n return CaseSensitiveStringParameter(FORMAT, \"Format of result.\",\n FORMATS._fields,\n default=FORMATS.json, # @UndefinedVariable\n allow_multiple=False)",
"def format(self, value):\r\n return value",
"def provider(self, *args, **kwargs):\n kwargs[\"injector\"] = self\n return provider(*args, **kwargs)",
"def _get_formatter(self, attribute):\n\n entry = self._numeric_format.get(attribute, None)\n if isinstance(entry, string_types):\n fmt_str = '{0:' + entry + '}'\n return fmt_str.format\n elif callable(entry):\n return entry\n else:\n return str",
"def getFormat(self): # real signature unknown; restored from __doc__\n pass",
"def get_format_string(self) -> str:\n pass",
"def patched_to_html(self, *args, **kwargs):\n frame = None\n if self.__class__.__name__ == \"DataFrameRenderer\":\n fmt = self.fmt\n elif self.__class__.__name__ == \"DataFrameFormatter\":\n fmt = self\n else:\n raise ValueError(f\"patched_to_html: unexpected class {self.__class__.__name__}\")\n frame = fmt.frame\n if not check_rdk_attr(frame, RDK_MOLS_AS_IMAGE_ATTR):\n return orig_to_html(self, *args, **kwargs)\n orig_formatters = fmt.formatters\n try:\n formatters = orig_formatters or {}\n if not isinstance(formatters, dict):\n formatters = {col: formatters[i] for i, col in enumerate(self.columns)}\n else:\n formatters = dict(formatters)\n formatters.update(MolFormatter.get_formatters(frame, formatters))\n fmt.formatters = formatters\n res = orig_to_html(self, *args, **kwargs)\n # in pandas 0.25 DataFrameFormatter.to_html() returns None\n if (res is None and not hasattr(html_formatter_class, \"get_result\")\n and hasattr(self, \"buf\") and hasattr(self.buf, \"getvalue\")):\n res = self.buf.getvalue()\n should_inject = res and InteractiveRenderer and InteractiveRenderer.isEnabled()\n if should_inject:\n res = InteractiveRenderer.injectHTMLFooterAfterTable(res)\n # in pandas 0.25 we need to make sure to update buf as return value will be ignored\n if hasattr(self, \"buf\") and isinstance(self.buf, StringIO):\n self.buf.seek(0)\n self.buf.write(res)\n return res\n finally:\n fmt.formatters = orig_formatters",
"def setReturnFormat(self, format):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper for creating the reader actor spec for given query. | def actor(handler: typing.Callable[..., typing.Any], spec: 'frame.Query') -> task.Spec:
return extract.Reader.Actor.spec(
handler, extract.Statement.prepare(spec, source.extract.ordinal, lower, upper)
) | [
"def _from_spec(self, spec):",
"def _make_query(self):\n return RtuOverTcpQuery()",
"def _construct_input_spec(self):",
"def create(question: str) -> Asker:\n if question.startswith(\"/imagine\"):\n return ImagineAsker()\n return TextAsker()",
"def newActor(actor_name):\n actor={'name': None, 'movies': None, 'details': None,'vote_avg': None}\n actor['name']=actor_name\n actor['movies']=lt.newList(datastructure='SINGLE_LINKED')\n actor['details'] = lt.newList(datastructure='SINGLE_LINKED')\n return actor",
"def retrieve_actor(form, actor_name, wanted_actors=None, wanted_target=None):\n actor_data = {}\n target_data = {}\n\n protoform_data = {}\n\n protoform_data_dict = {\n \"uniprotid\": \"UniprotAC\",\n \"hgnc_symbol\": \"HgncSymbol\",\n \"synonyms\": \"Synonyms\",\n \"location\": \"Location\",\n }\n for k, v in protoform_data_dict.items():\n if form[actor_name + v] != \"\":\n protoform_data[k] = form[actor_name + v]\n\n protoform_data[\"regions\"], actors_in_regions, target_in_regions =\\\n retrieve_regions(\n form, actor_name, wanted_actors, wanted_target)\n protoform_data[\"sites\"], actors_in_sites, target_in_sites =\\\n retrieve_sites(\n form, actor_name, wanted_actors, wanted_target)\n protoform_data[\"residues\"], target_in_residues = retrieve_residues(\n form, actor_name, wanted_target)\n protoform_data[\"states\"], target_in_states = retrieve_states(\n form, actor_name, wanted_target)\n\n if target_in_regions is not None:\n target_data[\"in_regions\"] = target_in_regions\n elif target_in_sites is not None:\n target_data[\"in_sites\"] = target_in_sites\n elif target_in_residues is not None:\n target_data = {\"target\": target_in_residues}\n elif target_in_states is not None:\n target_data = {\"target\": target_in_states}\n\n if wanted_actors is not None:\n for wanted_actor, wanted_actor_id in wanted_actors.items():\n actor_data[wanted_actor] = {}\n if wanted_actor in actors_in_regions.keys() and\\\n len(actors_in_regions[wanted_actor]) > 0:\n actor_data[wanted_actor][\"in_regions\"] =\\\n actors_in_regions[wanted_actor]\n elif wanted_actor in actors_in_sites.keys() and\\\n len(actors_in_sites[wanted_actor]) > 0:\n actor_data[wanted_actor][\"in_sites\"] =\\\n actors_in_sites[wanted_actor]\n return protoform_data, actor_data, target_data",
"def build_param_extractor(self, param_spec):\n reader = partial(self.READER_MAP[param_spec['in']], param_name=param_spec['name'])\n kwargs = {}\n if 'content' in param_spec:\n content_type = next(iter(param_spec['content'].keys()))\n if content_type not in self.CONTENT_DECODER_MAP:\n raise UnsupportedContentTypeError(content_type)\n schema_spec = param_spec['content'][content_type]\n kwargs['decoder'] = self.CONTENT_DECODER_MAP[content_type]()\n kwargs['schema'] = self.schema_builder.build(schema_spec)\n else:\n kwargs['decoder'] = PlainDecoder()\n kwargs['schema'] = self.schema_builder.build(param_spec['schema'])\n kwargs['required'] = param_spec.get('required', False)\n return Extractor(read_data=reader, **kwargs)",
"def _create_authenticator(spec: Union[str, Dict[str, Any]]) -> Authenticator:\n log = logging.getLogger(__name__)\n\n if isinstance(spec, str):\n log.debug(\"Creating authenticator: %s\", spec)\n return get_callable(spec, __name__)\n\n log.debug(\"Creating authenticator using factory: %s\", spec['factory'])\n factory = get_callable(spec['factory'], __name__) # type: Callable[..., Authenticator]\n options = spec.get('options', {})\n return factory(**options)",
"def create_agent(name, model, attention):\n\n from snakeai.agent import DeepQNetworkAgent, HumanAgent, RandomActionAgent\n\n if name == 'human':\n return HumanAgent()\n elif name == 'dqn':\n if model is None:\n raise ValueError('A model file is required for a DQN agent.')\n return DeepQNetworkAgent(model=model, memory_size=-1, num_last_frames=4, attention=attention)\n elif name == 'random':\n return RandomActionAgent()\n\n raise KeyError(f'Unknown agent type: \"{name}\"')",
"def __query_spec(self):\n spec = self.__spec\n if not self.__is_command and \"$query\" not in self.__spec:\n spec = SON({\"$query\": self.__spec})\n if self.__ordering:\n spec[\"$orderby\"] = self.__ordering\n if self.__explain:\n spec[\"$explain\"] = True\n if self.__hint:\n spec[\"$hint\"] = self.__hint\n if self.__comment:\n spec[\"$comment\"] = self.__comment\n if self.__snapshot:\n spec[\"$snapshot\"] = True\n if self.__max_scan:\n spec[\"$maxScan\"] = self.__max_scan\n if self.__max_time_ms:\n spec[\"$maxTimeMS\"] = self.__max_time_ms\n return spec",
"def from_spec(cls, spec, *args, **kwds):\n mo = cls.pattern.fullmatch(spec)\n if mo is None:\n raise ValueError(f'Invalid informat {spec}')\n name = mo['name'].upper()\n bytestring = name.encode('ascii')\n if len(bytestring) > 8:\n raise ValueError(f'ASCII-encoded name {bytestring} longer than 8 characters')\n length = int(mo.group('w'))\n try:\n decimals = int(mo.group('d'))\n except (TypeError, IndexError):\n decimals = 0\n form = cls(*args, name=name, length=length, decimals=decimals, **kwds)\n LOG.debug(f'Parsed {form!r} from {spec!r}')\n return form",
"def node_from_query(self, query):\r\n if query:\r\n querynode = desc_http_query(query)\r\n query_params = query.split('&')\r\n for p in query_params:\r\n querynode += desc_http_queryparam(p, p)\r\n return querynode",
"def agent_creator(agent_type):\n# print(\"creating agent of type {}\".format(str(agent_type)))\n if \"Robot\" in str(agent_type):\n return Robot()\n if \"Person\" in str(agent_type):\n return Person()\n if \"ORCAAgent\" in str(agent_type):\n return ORCAAgent()\n else:\n raise ValueError(\"agent type {} does not exist\".format(str(agent_type)))",
"def build_query(self, query_string: str) -> Query:\n raise EntityMapperNotImplemented",
"def test_get_actor_assistant(self):\r\n with self.client as c:\r\n with self.app_context:\r\n c.post(\r\n \"/actors\",\r\n data=json.dumps(self.actor),\r\n headers=TestActorResources.headers_producer,\r\n )\r\n results = c.get(\r\n \"/actors/1\", headers=TestActorResources.headers_assistant,\r\n )\r\n\r\n data = json.loads(results.data)\r\n\r\n self.assertEqual(data[\"actor\"][\"name\"], self.actor[\"name\"])",
"def _make_query(self, cls, value) -> AttributeQuery:\n return AttributeQuery(cls(attribute=self.attribute, value=value))",
"def _create_input_spec(self, input_shape):\n dim = input_shape[self.axis]\n self.input_spec = tf.keras.layers.InputSpec(\n ndim=len(input_shape), axes={self.axis: dim})",
"def test_get_actor_producer(self):\r\n with self.client as c:\r\n with self.app_context:\r\n c.post(\r\n \"/actors\",\r\n data=json.dumps(self.actor),\r\n headers=TestActorResources.headers_producer,\r\n )\r\n results = c.get(\r\n \"/actors/1\", headers=TestActorResources.headers_producer,\r\n )\r\n\r\n data = json.loads(results.data)\r\n\r\n self.assertEqual(data[\"actor\"][\"name\"], self.actor[\"name\"])",
"def handle_actor_and_get_request():\n\n attr_dict = request.args.to_dict()\n # print(attr_dict)\n actors_matching_query = and_get_request_helper(attr_dict, ACTORS, \"actor\")\n return make_response(jsonify(actors_matching_query),\n 200 if len(actors_matching_query) > 0 else 400)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the slicer instance of this feed, that is able to split the loaded dataset columnwise. This default slicer is plain positional sequence slicer. | def slicer(
cls, schema: typing.Sequence['series.Column'], columns: typing.Mapping['series.Column', parser.Column]
) -> typing.Callable[[payload.ColumnMajor, typing.Union[slice, int]], payload.ColumnMajor]:
return extract.Slicer(schema, columns) | [
"def slicing(self, name, slicer, axis='y'):\n if self._is_array(name):\n raise NotImplementedError('Cannot slice codes from arrays!')\n if 'rules' not in self._meta['columns'][name]:\n self._meta['columns'][name]['rules'] = {'x': {}, 'y': {}}\n if not isinstance(slicer, list): slicer = [slicer]\n slicer = self._clean_codes_against_meta(name, slicer)\n rule_update = {'slicex': {'values': slicer}}\n self._meta['columns'][name]['rules'][axis].update(rule_update)\n return None",
"def slicer(self, condition):\n full_data = self._data.copy()\n series_data = full_data[full_data.columns[0]].copy()\n slicer, _ = get_logic_index(series_data, condition, full_data)\n return slicer",
"def __getitem__(self, slicer):\n\n if (\n not isinstance(slicer, tuple)\n or not hasattr(slicer, '__len__')\n or len(slicer) != 2\n ):\n raise FitGridError('Must slice on time and channels.')\n\n # now we can unpack\n time, channels = slicer\n\n def check_slicer_component(component):\n if isinstance(component, slice):\n return component\n elif isinstance(component, list):\n # deduplicate and warn if duplicates found\n deduped_component = tools.deduplicate_list(component)\n if deduped_component != component:\n msg = f'Slicer {component} contained duplicates, '\n msg += f'slicing instead on deduped: {deduped_component}.'\n warnings.warn(UserWarning(msg))\n return deduped_component\n else:\n # wrap in list to always get a DataFrame in return on slicing\n # otherwise we might get a scalar or a pandas Series,\n # which can't be used to create a FitGrid object\n return [component]\n\n time = check_slicer_component(time)\n channels = check_slicer_component(channels)\n subgrid = self._grid.loc[time, channels].copy()\n return self.__class__(subgrid, self.epoch_index, self.time)",
"def current_data_slice(self):\n return self.get_data_slice(self.i)",
"def get_label_data_slice(self, i):\n self.label_data = np.clip(self.label_data, 0, 1)\n if self.section == 0:\n return self.label_data[i]\n elif self.section == 1:\n return np.flip(self.label_data[:, i].transpose())\n elif self.section == 2:\n return np.flip(self.label_data[:, :, i].transpose(), axis=1)",
"def dataline(self, line):\n return super(scandata, self).data[:, line - 1]",
"def get_slice(self, start: int, end: int):\n columns = self.get_col_names()\n all_rows = self.get_rows()\n sliced_list = all_rows[start:end]\n sliced_arrable = Arrable().init_from_arrable(columns, sliced_list)\n \n return sliced_arrable",
"def partitioner(self):\n return self._partitioner",
"def get_other_labels_data_slice(self, i):\n if self.section == 0:\n return self.other_labels_data[i]\n elif self.section == 1:\n return np.flip(self.other_labels_data[:, i].transpose())\n elif self.section == 2:\n return np.flip(self.other_labels_data[:, :, i].transpose(), axis=1)",
"def strided_slice(self, x: \"PondTensor\", *args: Any, **kwargs: Any):\n\n node_key = (\"strided_slice\", x)\n\n x_sliced = nodes.get(node_key, None)\n\n if x_sliced is not None:\n return x_sliced\n\n if isinstance(x, PondPublicTensor):\n x_sliced = _strided_slice_public(self, x, args, kwargs)\n elif isinstance(x, PondPrivateTensor):\n x_sliced = _strided_slice_private(self, x, args, kwargs)\n elif isinstance(x, PondMaskedTensor):\n x_sliced = _strided_slice_masked(self, x, args, kwargs)\n nodes[(\"strided_slice\", x.unmasked)] = x_sliced.unmasked\n else:\n raise TypeError(\n (\"Don't know how to do a strided slice on \" \" {}\").format(type(x))\n )\n\n nodes[node_key] = x_sliced\n\n return x_sliced",
"def slice(self):\n return slice(self._start, self._stop, 1)",
"def sliced(self,*args):\n if len(args)==1 and type(args[0])==slice: s=args[0]\n else: s=slice(*args)\n ps = self.apply_func(lambda _, spec: spec[s], lambda _, cov: cov[s,s])\n ps.ells = self.ells[s]\n return ps",
"def __getslice__(*args):\n return _Field.vectormats___getslice__(*args)",
"def train_dataloader(self):\n return self._dataloader(split_type='train')",
"def slice(self, columns):\n pass",
"def _slice(self, X):\n XTranspose = np.array(X).transpose()\n slices = []\n nbFeatures, nbGroups, nbFeaturePerGroup = self._groupsInfo(X.shape[1])\n imgIncluded = self._convolExtractor.isImageIncluded()\n for i in xrange(nbGroups):\n Xtmp = XTranspose[i*nbFeaturePerGroup:(i+1)*nbFeaturePerGroup]\n slices.append(Xtmp.transpose())\n if imgIncluded and not self._compressImage:\n slices = slices[1:]\n self._imgSize = nbFeaturePerGroup\n return slices",
"def get_data(dset, i_slice=None, pos_slice=None, output_type=None):\n # For back-compatibility: Convert pos_slice and i_slice to\n # single-element lists if they are not lists (e.g. float\n # and int respectively).\n if pos_slice is not None and not isinstance(pos_slice, list):\n pos_slice = [pos_slice]\n if i_slice is not None and not isinstance(i_slice, list):\n i_slice = [i_slice]\n # Case of a constant dataset\n if isinstance(dset, h5py.Group):\n shape = dset.attrs['shape']\n # Restrict the shape if slicing is enabled\n if pos_slice is not None:\n shape = [ x for index, x in enumerate(shape) if\n index not in pos_slice ]\n # Create the corresponding dataset\n data = dset.attrs['value'] * np.ones(shape)\n\n # Case of a non-constant dataset\n elif isinstance(dset, h5py.Dataset):\n if pos_slice is None:\n data = dset[...]\n else:\n # Get largest element of pos_slice\n max_pos = max(pos_slice)\n # Create list of indices list_index of type\n # [:, :, :, ...] where Ellipsis starts at max_pos + 1\n list_index = [np.s_[:]] * (max_pos + 2)\n list_index[max_pos + 1] = np.s_[...]\n # Fill list_index with elements of i_slice\n for count, dir_index in enumerate(pos_slice):\n list_index[dir_index] = i_slice[count]\n # Convert list_index into a tuple\n tuple_index = tuple(list_index)\n # Slice dset according to tuple_index\n data = dset[tuple_index]\n\n # Convert to the right type\n if (output_type is not None) and (data.dtype != output_type):\n data = data.astype( output_type )\n # Scale by the conversion factor\n if np.issubdtype(data.dtype, np.floating) or \\\n np.issubdtype(data.dtype, np.complexfloating):\n if dset.attrs['unitSI'] != 1.0:\n data *= dset.attrs['unitSI']\n\n return(data)",
"def get_catalog_slice(self, slice_id) -> CatalogSlice:\n query_str = \"\"\"query getSavedQueryPyApi($id: ID!) {\n getSavedQuery(id: $id) {\n id\n name\n description\n filter\n createdAt\n updatedAt\n }\n }\n \"\"\"\n res = self.execute(query_str, {'id': slice_id})\n return Entity.CatalogSlice(self, res['getSavedQuery'])",
"def test_get_slice_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(30, 30))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n slice_indices = [(7, 22, 7, 22), # many row-column\n (6, 8, 6, 8), # single block row-column\n (6, 8, None, None), # single-block rows, all columns\n (None, None, 6, 8), # all rows, single-block columns\n (15, 16, 15, 16), # single element\n # (-10, -5, -10, -5), # out-of-bounds (not\n # implemented)\n # (-10, 5, -10, 5), # out-of-bounds (not implemented)\n (21, 40, 21, 40)] # out-of-bounds (correct)\n\n for top, bot, left, right in slice_indices:\n got = data[top:bot, left:right].collect()\n expected = ds_data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = data[1:, 1:]\n data = ds_data[1:, 1:]\n\n for top, bot, left, right in slice_indices:\n got = x[top:bot, left:right].collect()\n expected = data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The explicit columns mapping implemented by this feed to be used by the query parser. | def columns(self) -> typing.Mapping['series.Column', parser.Column]:
return {} | [
"def _get_column_mapping(cls) -> Dict[str, str]:\n pass",
"def column_reflection_fallback(self):\n sql = sa.select([sa.text(\"*\")]).select_from(self._table)\n col_names = self.engine.execute(sql).keys()\n col_dict = [{'name': col_name} for col_name in col_names]\n return col_dict",
"def get_columns(self) -> dict:\n\n return self.source.columns",
"def input_columns(self) -> Sequence['outputs.DataSetPhysicalTableMapRelationalTableInputColumn']:\n return pulumi.get(self, \"input_columns\")",
"def _colNames(self):\n self.mjdCol = 'expMJD'\n self.fieldIdCol = 'fieldID'\n self.raCol = 'fieldRA'\n self.decCol = 'fieldDec'\n self.propIdCol = 'propID'\n self.propConfCol = 'propConf'\n self.propNameCol = 'propName' #(propname == proptype)\n # For config parsing.\n self.versionCol = 'version'\n self.sessionDateCol = 'sessionDate'\n self.runCommentCol = 'runComment'",
"def type_hints(self):\n\n return self._dp_extractor.column_type_hints",
"def get_feature_columns(self):\n return self.feature_columns",
"def dof_mapping(self):\n pass",
"def columns(self):\n\n if hasattr(self.table, '_aliased_insp'):\n column_source = self.table._aliased_insp.class_\n # TODO: Relying on underscores is scary.\n else:\n column_source = self.table\n for attr_name in dir(column_source):\n # distinguish actual columns from other model attributes\n try:\n col = getattr(self.table, attr_name)\n if hasattr(col, 'key') and hasattr(col, 'prop') and hasattr(\n col, 'base_columns'):\n yield col\n except ArgumentError:\n pass",
"def getAllColumns (self):\n\n return self.columns",
"def columns(self):\n return list(self.values())",
"def get_dataset_columns(dataset):\n return {\n converter_str(col)\n for col in get_dataset_schema(dataset).names\n if not col.startswith(\"__\") and col != \"KLEE_TS\"\n }",
"def extract_column_names(self) -> Dict[str, Tuple[str, str]]:\n fields = []\n for field in self.properties.keys():\n if not is_airbyte_column(field):\n fields.append(field)\n result = {}\n field_names = set()\n for field in fields:\n field_name = self.name_transformer.normalize_column_name(field, in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True)\n if field_name_lookup in field_names:\n # TODO handle column name duplicates or collisions deterministically in this stream\n for i in range(1, 1000):\n field_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=True)\n if field_name_lookup not in field_names:\n break\n field_names.add(field_name_lookup)\n result[field] = (field_name, jinja_name)\n return result",
"def columns(self):\n return self.properties.get('columns',\n ColumnDefinitionCollection(self.context,\n ResourcePath(\"columns\", self.resource_path), self))",
"def get_column_names(self):\r\n return [column.key for column in self.table.columns]",
"def host_columns(self):\n columns = ('*',)\n limit = 1\n record = self.get_host(columns=columns, limit=limit)[0]\n return self.get_columns(record)",
"def _create_db_columns_def_(self):\r\n\r\n columns = {}\r\n first_dict = self.new_data[0]\r\n\r\n for key, value in first_dict.items():\r\n columns.update({key: None})\r\n\r\n for key, value in first_dict.items():\r\n if key == 'IpAddress':\r\n columns[key] = 'TEXT PRIMARY KEY'\r\n elif isinstance(value, str):\r\n columns[key] = \"TEXT\"\r\n elif isinstance(value, float):\r\n columns[key] = \"REAL\"\r\n else:\r\n columns[key] = \"TEXT\"\r\n\r\n return columns",
"def map_to_es(self):\n full_name = self.query_path\n return set_default(\n {\n c.names[full_name]: c.es_column\n for k, cs in self.lookup.items()\n # if startswith_field(k, full_name)\n for c in cs if c.jx_type not in STRUCT\n },\n {\n c.names[\".\"]: c.es_column\n for k, cs in self.lookup.items()\n # if startswith_field(k, full_name)\n for c in cs if c.jx_type not in STRUCT\n }\n )",
"def map(self, *args, **kwargs):\n from .datasets_core import DataSetMap\n for a in args:\n if isinstance(a, DSColumn):\n if a.dataset is not self:\n raise ValueError('%s is not a column of %s.' % (a.name, self.dataset_name))\n a = a.name\n if not isinstance(a, str):\n raise ValueError('Arguments of .map() should be column names, not %s.' % type(a))\n kwargs[a] = a\n return DataSetMap(self, kwargs, keep_all_columns=False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select a feed that can provide for (be used to construct) the given source. | def match(self, source: 'frame.Source') -> Provider:
for feed in self:
matcher = self.Matcher(feed.sources)
source.accept(matcher)
if matcher:
break
else:
raise error.Missing(f'None of the {len(self._feeds)} available feeds provide all of the required sources')
return feed # pylint: disable=undefined-loop-variable | [
"def select_sources(self, selection):\n\n # store selection\n self.selection = selection\n\n # make selection\n self.unit_vector = [self.unit_vector[i] for i in selection]\n self.distance = [self.distance[i] for i in selection]\n\n self.N = len(self.distance)\n\n self.coord = self.coord[selection]\n try:\n self.flux = self.flux[selection]\n self.flux_weight = self.flux_weight[selection]\n except:\n pass",
"async def async_select_source(self, source):\n self._source = source\n await send_command(self.hass, self._remote,\n self._ir_codes[CONF_SOURCES][source])\n await self.async_update_ha_state()",
"def choose_feeding(self,\n num_tokens_on_watering_hole: Natural,\n other_players=List[PlayerConfiguration]) -> OptValidFeedingChoice:\n self.validate_choose_feeding(num_tokens_on_watering_hole, other_players)\n\n other_players = [Player(*player_config) for player_config in other_players]\n best_species_key = self.best_species_key(other_players)\n\n if best_species_key == NoOrderedKey:\n return InvalidFeedingChoice\n else:\n best_species_key = cast(SimpleSpeciesOrderedKey, best_species_key)\n\n best_species = best_species_key.species\n best_species_index = self.index_of_own_species(best_species)\n best_species_feed_type = best_species_key.feed_type\n\n if best_species_feed_type is SpeciesFeedType.STORE_FAT:\n return StoreFatChoice(best_species_index, min(best_species.fat_tissue_need, num_tokens_on_watering_hole))\n elif best_species_feed_type is SpeciesFeedType.FEEDABLE_VEG:\n return FeedVegetarianChoice(best_species_index)\n elif best_species_feed_type is SpeciesFeedType.FEEDABLE_CARN:\n return self.choose_target(best_species_index, other_players)\n elif best_species_feed_type is SpeciesFeedType.FORGO_ATTACK:\n return ForgoChoice()\n else:\n return InvalidFeedingChoice",
"def get_feed(\n self, feed: FeedInput, default: Union[MissingType, _T] = MISSING\n ) -> Union[Feed, _T]:\n return zero_or_one(\n self.get_feeds(feed=feed),\n lambda: FeedNotFoundError(_feed_argument(feed)),\n default,\n )",
"def select_psf_sources(self,\n min_flux=None, min_amplitude=None, min_dist=None, max_ratio=None, \n edge_dist=None, verbose=True, aper_radius=None,\n units='deg', badpix_flags=['flags'], flag_max=0, psf_idx='psf'):\n if aper_radius is None:\n aper_radius = self.aper_radius\n elif self.aper_radius is None:\n self.aper_radius = aper_radius\n \n result = astropyp.phot.psf.select_psf_sources(self.img, \n self.catalog, aper_radius, min_flux, min_amplitude, min_dist, \n max_ratio, edge_dist, verbose, \n badpix_flags=badpix_flags, flag_max=flag_max)\n self.indices[psf_idx], self.flags = result\n return result",
"def from_config( # type: ignore[override]\n cls,\n source: Union[DatasetConfig, str],\n ) -> Union[\n \"Dataset\",\n \"EnsembleDataset\",\n Dict[str, \"Dataset\"],\n Dict[str, \"EnsembleDataset\"],\n ]:\n if isinstance(source, str):\n source = DatasetConfig.load(source)\n\n assert isinstance(source, DatasetConfig), (\n f\"Argument `source` of type ({type(source)}) is not a \"\n \"`DatasetConfig`\"\n )\n\n assert (\n \"graph_definition\" in source.dict().keys()\n ), \"`DatasetConfig` incompatible with current GraphNeT version.\"\n\n # Parse set of `selection``.\n if isinstance(source.selection, dict):\n return cls._construct_datasets_from_dict(source)\n elif (\n isinstance(source.selection, list)\n and len(source.selection)\n and isinstance(source.selection[0], str)\n ):\n return cls._construct_dataset_from_list_of_strings(source)\n\n cfg = source.dict()\n if cfg[\"graph_definition\"] is not None:\n cfg[\"graph_definition\"] = parse_graph_definition(cfg)\n return source._dataset_class(**cfg)",
"def fetcher_factory(conf):\n global PROMOTERS\n applicable = []\n if not PROMOTERS:\n PROMOTERS = load_promoters()\n for promoter in PROMOTERS:\n if promoter.is_applicable(conf):\n applicable.append((promoter.PRIORITY, promoter))\n if applicable:\n best_match = sorted(applicable, reverse=True)[0][1]\n return best_match(conf)\n else:\n raise ConfigurationError(\n 'No fetcher is applicable for \"{0}\"'.format(conf['name'])\n )",
"def ChooseScraper(self, url):",
"def select_source(args):\r\n if args.source is None:\r\n print('Choose a source path.')\r\n source = os.path.normpath(askdirectory())\r\n print(f'Source path: {source}')\r\n else:\r\n source = args.source\r\n if not os.path.exists(source):\r\n sys.exit('Error: Source path does not exist.')\r\n return source",
"def test_get_source_by_id(self):\n expected_source = Source(\n id_=REST_SOURCE[\"external_id\"],\n type_=EntityType(REST_SOURCE[\"entity_type\"], REST_SOURCE[\"entity_category\"]),\n station_model=REST_SOURCE[\"station_model\"],\n geometry=Point(*REST_SOURCE[\"default_footprint\"][\"coordinates\"][::-1]), # for some strange reason the points are inverted\n controlled_properties=None,\n tdmq_id=REST_SOURCE[\"tdmq_id\"]\n )\n\n client = Client(self.url)\n httpretty.register_uri(httpretty.GET, f'{client.sources_url}/{SENSORS[0].tdmq_id}',\n body=jsons.dumps(REST_SOURCE), match_querystring=False)\n\n res = client.get_sources(REST_SOURCE[\"tdmq_id\"])\n self.assertEqual(res.to_json(), expected_source.to_json())",
"def match(cls, source: Source) -> Any:\n raise NotImplementedError()",
"def createSourceFetcher(uri, fetchTarget):\n\n\tlowerUri = uri.lower()\n\tif lowerUri.startswith('bzr'):\n\t\treturn SourceFetcherForBazaar(uri, fetchTarget)\n\telif lowerUri.startswith('cvs'):\n\t\treturn SourceFetcherForCvs(uri, fetchTarget)\n\telif lowerUri.startswith('fossil'):\n\t\treturn SourceFetcherForFossil(uri, fetchTarget)\n\telif lowerUri.startswith('git'):\n\t\treturn SourceFetcherForGit(uri, fetchTarget)\n\telif lowerUri.startswith('hg'):\n\t\treturn SourceFetcherForMercurial(uri, fetchTarget)\n\telif lowerUri.startswith('http') or lowerUri.startswith('ftp'):\n\t\treturn SourceFetcherForDownload(uri, fetchTarget)\n\telif lowerUri.startswith('pkg:'):\n\t\treturn SourceFetcherForSourcePackage(uri, fetchTarget)\n\telif lowerUri.startswith('svn'):\n\t\treturn SourceFetcherForSubversion(uri, fetchTarget)\n\telif lowerUri.startswith('file://'):\n\t\treturn SourceFetcherForLocalFile(uri[7:], fetchTarget)\n\telif ':' not in lowerUri:\n\t\treturn SourceFetcherForLocalFile(uri, fetchTarget)\n\telse:\n\t\tsysExit('The protocol of SOURCE_URI %s is unsupported, sorry.' % uri)",
"def get_sources(self):\n\n self.sources = []\n cur = self.settings['conn'].cursor()\n cur.execute(\"SELECT id, name, fulltext, mediapath, memo, owner, date FROM source\")\n results = cur.fetchall()\n for r in results:\n guid = self.create_guid()\n suffix = \"txt\"\n if r[3] is not None:\n suffix = r[3].split('.')[-1]\n else:\n if '.' in r[1]:\n suffix = r[1].split('.')[-1]\n if suffix == 'transcribed':\n suffix = 'txt'\n filename = guid + '.' + suffix\n\n plaintext_filename = None\n if r[2] is not None:\n plaintext_filename = self.create_guid() + \".txt\"\n source = {'id': r[0], 'name': r[1], 'fulltext': r[2], 'mediapath': r[3],\n 'memo': r[4], 'owner': r[5], 'date': r[6].replace(' ', 'T'), 'guid': guid,\n 'filename': filename, 'plaintext_filename': plaintext_filename,\n 'external': None}\n if source['mediapath'] is not None:\n fileinfo = os.stat(self.settings['path'] + source['mediapath'])\n if fileinfo.st_size >= 2147483647:\n source['external'] = self.settings['directory']\n self.sources.append(source)",
"def test_source_dataset_factory_build(self):\n source_dataset = factories.SourceDatasetFactory.build()\n self.assertIsInstance(source_dataset, models.SourceDataset)",
"def open_rss_link(source: str, verbose: bool or None):\n\n logger = set_logger(verbose)\n\n if not source:\n raise ValueError\n\n content = feedparser.parse(source)\n logger.info(f\"Starting reading link {source}\")\n\n return content",
"def get_sources_by_type(self, source_type):\r\n\t\tif not source_type:\r\n\t\t\treturn self.sources\r\n\t\telse:\r\n\t\t\tmeth_name = \"get_%s_sources\" % source_type\r\n\t\t\treturn getattr(self, meth_name)()",
"def source(self, index=0):\n if not self._sources:\n self.get_data()\n try:\n sitename, url = self._sources[index]\n except TypeError:\n return self._sources[index]\n except IndexError:\n raise NotFoundError(\"No episode sources found.\")\n\n ext = get_extractor(sitename)(\n url, quality=self.quality, headers=self.headers)\n self._sources[index] = ext\n\n return ext",
"def select_next_entry(self):\n self.get_selected()\n current = self.selected_feed\n if not current:\n current = self.selected_category\n entry = self.ui.listFeedList.model().get_next(current)\n if entry:\n if isinstance(entry, Category):\n self.selected_category = entry\n self.selected_feed = None\n else:\n self.selected_feed = entry\n self.set_selected()\n current = self.selected_feed\n if not current:\n current = self.selected_category\n return not not current\n return False",
"def sources(digraph, target, label=None):\n assert digraph.sources('Mary') == {'John'}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Device firmware version. When unavailable, 'unknown' is returned | def firmware_version(self) -> str:
return "unknown" if self._fwversion is None else self._fwversion | [
"def firmware_version(self) -> str:\n self._logger.info(\"Retrieving current firmware version\")\n return self._device_info().get(\"firmware\")",
"def get_firmware_version(self):\n return self._word_or_none(self._send_command(COMMAND_GET_FIRMWARE_VERSION))",
"def firmware_update_version(self) -> str:\n self._logger.info(\"Retrieving firmware update version...\")\n return self._device_info().get(\"NewVer\")",
"def hardware(self) -> str:\n self._logger.info(\"Retrieving device hardware version...\")\n return self._device_info().get(\"hardware\")",
"def get_software_version(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 0F\"))\n temp = self.board_socket.recv(1024)\n return(temp[3:10])",
"def _get_hw_version(self) -> str:\n return self._quantsim_configs[ConfigDictKeys.DEFAULTS].get(ConfigDictKeys.HW_VERSION, 'default')",
"def get_version(self):\n url = self.url + '/api/sys/firmware/version/mgmt'\n resp = self._req(url)\n if len(resp):\n self.version = resp['firmwareRunning'][0]['packageVersion']\n logger.info('FXOS Software {} Version is {}'.format(self.url, self.version))",
"async def query_firmware(self):\n\n _version = await self.request.get(join_path(self._base_path, \"/fwversion\"))\n _fw = _version.get(\"firmware\")\n if _fw:\n _main = _fw.get(\"mainProcessor\")\n if _main:\n self._main_processor_version = self._make_version(_main)\n _radio = _fw.get(\"radio\")\n if _radio:\n self._radio_version = self._make_version(_radio)",
"def get_bios_version():\n cli_output = cli(\"show version\")\n if legacy:\n result = re.search(r'BIOS.*version\\s*(.*)\\n', cli_output[1])\n if result != None:\n return result.group(1)\n else:\n result = re.search(r'BIOS.*version\\s*(.*)\\n', cli_output)\n if result != None:\n return result.group(1)\n poap_log(\"Unable to get switch Bios version\")",
"async def get_fpga_version(self):\n return await self._execute_command('#GetFPGAVersion').content.decode()",
"def firmware_version(self) -> dict:\n self._clib.fxRequestFirmwareVersion(self.id)\n\n sleep(5)\n\n fw = self._clib.fxGetLastReceivedFirmwareVersion(self.id)\n\n fwDict = {\n \"mn\": decode_firmware(fw.mn),\n \"ex\": decode_firmware(fw.ex),\n \"re\": decode_firmware(fw.re),\n }\n\n if self._hasHabs:\n fwDict[\"habs\"] = decode_firmware(fw.habs)\n\n return fwDict",
"def get_firmware(self):\n mcl_get_firmware_version = self.madlib['MCL_GetFirmwareVersion']\n mcl_get_firmware_version.restype = c_int\n mcl_get_firmware_version(self.firmwareVersion_pointer, self.firmwareProfile_pointer, c_int(self.handler))\n print('version is: ' + str(self.firmwareVersion.value))\n print('profile is: ' + str(self.firmwareProfile.value))\n return [self.firmwareVersion.value, self.firmwareProfile.value]",
"def _get_software_version(self):\n return self.__software_version",
"def latest_widevine_version(eula=False):\n if eula or cdm_from_repo():\n url = config.WIDEVINE_VERSIONS_URL\n versions = http_get(url)\n return versions.split()[-1]\n\n from .arm import chromeos_config, select_best_chromeos_image\n devices = chromeos_config()\n arm_device = select_best_chromeos_image(devices)\n if arm_device is None:\n log(4, 'We could not find an ARM device in the Chrome OS recovery.json')\n ok_dialog(localize(30004), localize(30005))\n return ''\n return arm_device.get('version')",
"def recommended_firmware(self):\n ret = self._get_attr(\"recommendedFirmware\")\n return FirmwareType(ret)",
"def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver",
"def check_software_version(device_list):\n for software_version in device_list:\n print(\"Checking for software version \" + software_version)\n output_version = net_connect.send_command(\"show version\")\n init_version = 0\n int_version = output_version.find(software_version)\n if int_version > 0:\n print(\"Software version found on device: \" + software_version)\n return software_version\n else:\n print(\"Software not found on device: \" + software_version)",
"def get_boot_mode():\n kernel = ctypes.windll.kernel32\n firmware_type = ctypes.c_uint()\n\n # Get value from kernel32 API\n try:\n kernel.GetFirmwareType(ctypes.byref(firmware_type))\n except:\n # Just set to zero\n firmware_type = ctypes.c_uint(0)\n\n # Set return value\n type_str = 'Unknown'\n if firmware_type.value == 1:\n type_str = 'Legacy'\n elif firmware_type.value == 2:\n type_str = 'UEFI'\n\n return type_str",
"def ds9Version():\n try:\n v = xpa.get(None, getXpaAccessPoint(), \"about\", \"\").strip()\n return v.splitlines()[1].split()[1]\n except Exception, e:\n print >> sys.stderr, \"Error reading version: %s (%s)\" % (v, e)\n return \"0.0.0\"",
"def ftduino_direct_get_version(self):\n return self.comm('ftduino_direct_get_version')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter a dataframe keeping only the specified maximum number of attributes | def filter_df_on_activities(df, activity_key="concept:name", max_no_activities=25):
activity_values_dict = dict(df[activity_key].value_counts())
activity_values_ordered_list = []
for act in activity_values_dict:
activity_values_ordered_list.append([act, activity_values_dict[act]])
activity_values_ordered_list = sorted(activity_values_ordered_list)
# keep only a number of attributes <= max_no_activities
activity_values_ordered_list = activity_values_ordered_list[0:min(len(activity_values_ordered_list), max_no_activities)]
activity_to_keep = [x[0] for x in activity_values_ordered_list]
df = df[df[activity_key].isin(activity_to_keep)]
return df | [
"def filter(self, df):\n pass",
"def _apply_attr_filters(self, df):\n mask = pd.Series([True] * len(df))\n for degree_substr in self.attr_filters['final_major']:\n mask = mask & ~df['final_major'].str.contains(f'{degree_substr}')\n return df[mask]",
"def df_filter(self, df, features={}, attributes={}, mode='overlap'):\n # Build query.\n query = []; feature_ranges = []\n for f, r in features.items():\n feature_ranges.append(r)\n # Filter by features.\n if mode == 'overlap':\n # Determine whether two ranges overlap:\n # https://stackoverflow.com/questions/325933/determine-whether-two-date-ranges-overlap/325964#325964\n query.append(f'`{f} <`>={r[0]}')\n query.append(f'`{f} >`<={r[1]}')\n elif mode == 'contain':\n query.append(f'`{f} >`>={r[0]}')\n query.append(f'`{f} <`<={r[1]}')\n for attr, r in attributes.items():\n # Filter by attributes.\n query.append(f'{attr}>={r[0]} & {attr}<={r[1]}')\n # Filter dataframe.\n df = df.query(' & '.join(query))\n if features != {}:\n # If using features, compute overlap proportions,\n # and store this in a new column of the dataframe.\n # There's a lot of NumPy wizardry going on here!\n feature_ranges = np.array(feature_ranges)\n node_ranges = np.dstack((df[[f'{f} >' for f in features]].values,\n df[[f'{f} <' for f in features]].values))\n overlap = np.maximum(0, np.minimum(node_ranges[:,:,1], feature_ranges[:,1]) \n - np.maximum(node_ranges[:,:,0], feature_ranges[:,0]))\n df['overlap'] = np.prod(overlap / (node_ranges[:,:,1] - node_ranges[:,:,0]), axis=1) \n return df",
"def test_filter_by_property_strict_n_components():\n\n property_types = [\"Density\", \"EnthalpyOfVaporization\", \"EnthalpyOfMixing\"]\n substance_entries = [\n ((\"CC\",), (True, True, False)),\n ((\"CCC\",), (True, True, False)),\n ((\"CCCCC\",), (True, False, False)),\n ((\"CCCCCC\",), (True, True, False)),\n ((\"CC\", \"CCC\"), (True, False, True)),\n ((\"CC\", \"CCCCC\"), (True, False, True)),\n ((\"CCC\", \"CCC\"), (True, False, False)),\n ((\"CCC\", \"CCCC\"), (False, False, True)),\n ]\n\n data_frame = _build_data_frame(property_types, substance_entries)\n\n filtered_frame = FilterByPropertyTypes.apply(\n data_frame,\n FilterByPropertyTypesSchema(\n property_types=property_types,\n n_components={\n \"Density\": [1, 2],\n \"EnthalpyOfVaporization\": [1],\n \"EnthalpyOfMixing\": [2],\n },\n strict=True,\n ),\n )\n\n assert len(filtered_frame) == 6\n\n assert data_frame_to_substances(filtered_frame) == {\n (\"CC\",),\n (\"CCC\",),\n (\"CC\", \"CCC\"),\n }",
"def filter_uneven_rows(self, dframe: pd.DataFrame) -> pd.DataFrame:\n logger.info(\"Filtering out rows with uneven number of elements in columns.\")\n dframe[\"length_diff\"] = dframe.applymap(len).apply(np.diff, axis=1).apply(int)\n filt = dframe[\"length_diff\"] == 0\n return dframe.loc[filt, [*self.columns_info.keys()]].reset_index(drop=True)",
"def _filter(self, df, filters):\n df = df.loc[(df[list(filters)] == pd.Series(filters)).all(axis=1)]\n return df.reset_index(drop=True).copy()",
"def test_filter_rows_overfit():\n rows = query_csv.iter_csv_rows(_PATH, delim=' ')\n where = {'i': 2, 'f': 2.0, 's': 'b', 'x': 'hi'}\n filtered = query_csv.filter_rows(rows, where)\n assert list(filtered) == []",
"def filter_df_on_case_length(df, case_id_glue=\"case:concept:name\", min_trace_length=3, max_trace_length=50):\n df = df.groupby(case_id_glue).filter(lambda x: (len(x)>= min_trace_length and len(x)<=max_trace_length))\n return df",
"def test_filter_by_components():\n\n dummy_data_set = create_filterable_data_set()\n dummy_data_set.filter_by_components(number_of_components=1)\n\n assert dummy_data_set.number_of_properties == 1\n\n dummy_data_set = create_filterable_data_set()\n dummy_data_set.filter_by_components(number_of_components=2)\n\n assert dummy_data_set.number_of_properties == 1\n\n dummy_data_set = create_filterable_data_set()\n dummy_data_set.filter_by_components(number_of_components=3)\n\n assert dummy_data_set.number_of_properties == 1",
"def search(data, **attrs):\n return filter(lambda x: attrs.items() <= x.items(), data)",
"def filter_n_elements(self):\n\n filter_dict_neles = {\n 1: self.dynamic_dataset[self.dynamic_dataset['n_elements'] == 1],\n 2: self.dynamic_dataset[self.dynamic_dataset['n_elements'] == 2],\n 3: self.dynamic_dataset[self.dynamic_dataset['n_elements'] == 3],\n 23: self.dynamic_dataset[(self.dynamic_dataset['n_elements'] == 2) |\n (self.dynamic_dataset['n_elements'] == 3)],\n }\n\n self.dynamic_dataset = filter_dict_neles.get(self.num_element_filter, self.dynamic_dataset)",
"def chunk_dataframe(df, attribute):\n return [df_attribute for _, df_attribute in df.groupby(attribute)]",
"def restrict_df(df, restriction):\n\trestrict_keys = restriction.keys()\n\tfor k in restrict_keys:\n\t\tN_vals = len(restriction[k])\n\t\tdf_keep = [0]*N_vals\n\t\tfor i in range(N_vals):\n\t\t\tdf_keep[i] = df[df[k]==restriction[k][i]]\n\t\tdf = pd.concat(df_keep)\n\treturn df",
"def __filter_unrelevant_events(self, df):\n return df[((df.EVENT_NAME == 'article_viewed')\n | (df.EVENT_NAME == 'top_news_card_viewed')\n | (df.EVENT_NAME == 'my_news_card_viewed')) & df.ATTRIBUTES.apply(\n lambda x: isinstance(x, str))]",
"def select_rows(self, xa):\n if not isinstance(xa, XArray):\n raise ValueError('Argument must be an XArray')\n return XFrame(impl=self._impl.logical_filter(xa.impl()))",
"def dataWNfilter(df):\n\t\n\t#WIDE\n\tdf = wide_filter(df,'combinado')\n\n\t#NARROW\n\tdf = narrow_filter(df,\"mb_total_qt\")\n\tdf = narrow_filter(df,\"arpu_negocio_promedio\")\n\n\treturn df",
"def filter(self, rows=None, **colname_value_pairs):\n if rows is not None:\n if callable(rows):\n rows = rows(self)\n elif colname_value_pairs:\n rows = Vector.fast([True], bool).repeat(self.nrow)\n for colname, value in colname_value_pairs.items():\n rows = rows & (self[colname] == value)\n rows = self._parse_rows_from_boolean(rows)\n for colname, column in self.items():\n yield colname, np.take(column, rows)",
"def filter_df_on_ncases(df, case_id_glue=\"case:concept:name\", max_no_cases=1000):\n cases_values_dict = dict(df[case_id_glue].value_counts())\n cases_to_keep = []\n for case in cases_values_dict:\n cases_to_keep.append(case)\n cases_to_keep = cases_to_keep[0:min(len(cases_to_keep),max_no_cases)]\n df = df[df[case_id_glue].isin(cases_to_keep)]\n return df",
"def filter_all_types(df, all_types):\n\n for type_list in all_types:\n return filter_type(df, *type_list)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter a dataframe keeping only the specified maximum number of cases | def filter_df_on_ncases(df, case_id_glue="case:concept:name", max_no_cases=1000):
cases_values_dict = dict(df[case_id_glue].value_counts())
cases_to_keep = []
for case in cases_values_dict:
cases_to_keep.append(case)
cases_to_keep = cases_to_keep[0:min(len(cases_to_keep),max_no_cases)]
df = df[df[case_id_glue].isin(cases_to_keep)]
return df | [
"def filter_df_on_case_length(df, case_id_glue=\"case:concept:name\", min_trace_length=3, max_trace_length=50):\n df = df.groupby(case_id_glue).filter(lambda x: (len(x)>= min_trace_length and len(x)<=max_trace_length))\n return df",
"def filter_df_on_activities(df, activity_key=\"concept:name\", max_no_activities=25):\n activity_values_dict = dict(df[activity_key].value_counts())\n activity_values_ordered_list = []\n for act in activity_values_dict:\n activity_values_ordered_list.append([act, activity_values_dict[act]])\n activity_values_ordered_list = sorted(activity_values_ordered_list)\n # keep only a number of attributes <= max_no_activities\n activity_values_ordered_list = activity_values_ordered_list[0:min(len(activity_values_ordered_list), max_no_activities)]\n activity_to_keep = [x[0] for x in activity_values_ordered_list]\n df = df[df[activity_key].isin(activity_to_keep)]\n return df",
"def prune_rare_cats(df):\n\n new_df = df.copy()\n categories = []\n [categories.append(item) for item in list(df.columns) if 'category_' in item] \n \n [new_df.drop(columns=category, inplace=True) for category in categories if df[category].sum() < 5]\n \n return new_df",
"def filter_categories_below_threshold(data_df, display_threshold):\n\n return data_df[data_df.iloc[:, 0] > display_threshold]",
"def restrict_df(df, restriction):\n\trestrict_keys = restriction.keys()\n\tfor k in restrict_keys:\n\t\tN_vals = len(restriction[k])\n\t\tdf_keep = [0]*N_vals\n\t\tfor i in range(N_vals):\n\t\t\tdf_keep[i] = df[df[k]==restriction[k][i]]\n\t\tdf = pd.concat(df_keep)\n\treturn df",
"def replicate_filter(compiled, replicate_threshold):\n df = compiled.copy()\n replicates = df.groupby('Sequence').count()['Proteins']\n rep_sequences = replicates[replicates == replicate_threshold].reset_index()['Sequence']\n return df[df['Sequence'].isin(rep_sequences)]",
"def remove_outliers(df):\n final_df = df[(np.abs(stats.zscore(df.iloc[:, 1:len(df.columns)])) < 3).all(axis=1)]\n\n return final_df",
"def filter_out_queries_with_not_enough_sessions(df):\n queries_before = df.searchTerm.nunique()\n\n term_summary = df.groupby('searchTerm').agg({'searchSessionId': lambda x: x.nunique()})\n enough_sessions = term_summary[term_summary.searchSessionId > 1000]\n queries_after = len(enough_sessions.index)\n\n if queries_after < queries_before:\n print(f'Filtered out {queries_before - queries_after} queries with less than 1000 sessions')\n print(f'There are now {queries_after} queries in the dataset')\n\n return df[df.searchTerm.isin(enough_sessions.index)]",
"def without_many_nans(df, threshold = 0.1):\r\n result = df.count() / float(df.shape[0]) > threshold\r\n return result.index[result]",
"def filter_digits(df, variable, max_digit, no_code):\n\n if no_code is not None:\n return df[(df[variable].isin(range(0, max_digit + 1))) | (df[variable] == no_code)]\n else:\n return df[df[variable].isin(range(0, max_digit + 1))]",
"def remove_baskets(df, threshold):\n customers = df.groupby('CustomerID').agg({'BasketID': 'nunique'})\n customers = customers[customers >= threshold].dropna().index.values\n return df[df['CustomerID'].isin(customers)]",
"def filter_uneven_rows(self, dframe: pd.DataFrame) -> pd.DataFrame:\n logger.info(\"Filtering out rows with uneven number of elements in columns.\")\n dframe[\"length_diff\"] = dframe.applymap(len).apply(np.diff, axis=1).apply(int)\n filt = dframe[\"length_diff\"] == 0\n return dframe.loc[filt, [*self.columns_info.keys()]].reset_index(drop=True)",
"def mse_filter(self, exprs, num_mad_genes):\r\n print('Determining most variably expressed genes and subsetting')\r\n mad_genes = exprs.mad(axis=0).sort_values(ascending=False)\r\n top_mad_genes = mad_genes.iloc[0:num_mad_genes, ].index\r\n subset_df = exprs.loc[:, top_mad_genes]\r\n return subset_df",
"def df_collectLimit(df, limit, *cols, sortCol=None):\n if sortCol:\n df = df.sort(sortCol)\n\n if df.count() > limit:\n df = df.limit(limit)\n\n if cols:\n return df.select(*cols).collect()\n return df.collect()",
"def filter_genes(self, exprs, y, number_genes):\r\n print('Filtering top ' + str(number_genes) + ' genes.')\r\n filter = SelectKBest(score_func=f_classif, k=number_genes)\r\n rnaseq_filtered = filter.fit(exprs, y).transform(exprs)\r\n mask = filter.get_support()\r\n new_features = exprs.columns[mask]\r\n rnaseq_filtered_df = pd.DataFrame(rnaseq_filtered, columns=new_features, index=exprs.index)\r\n return rnaseq_filtered_df",
"def limit_data(data, max_limit):\n \n N = len(data)\n if N <= max_limit:\n return data\n else:\n chosen = np.random.choice(N, size=int(max_limit), replace=False)\n return data[chosen]",
"def limit_for_fit(df, target_col, cont_feat = [], OHE_feat = []):\n final_col = [target_col]+ cont_feat +OHE_feat\n\n return df[final_col], final_col",
"def reduce_dataset(df, num_users=None,\n min_samples=None, max_samples=None,\n min_obs=None, max_obs=None):\n\n if max_obs:\n df = df.groupby(level=[0, 1]).apply(lambda x: x[:max_obs]).reset_index(level=[2, 3], drop=True)\n\n num_obs = df.groupby(level=[0, 1]).size()\n\n if min_obs:\n num_obs = num_obs[num_obs >= min_obs]\n\n num_samples = num_obs.groupby(level=0).size()\n\n if min_samples:\n num_samples = num_samples[num_samples >= min_samples]\n\n if num_users and num_users < len(num_samples):\n users = np.random.permutation(num_samples.index.values)[:num_users]\n else:\n users = num_samples.index.values\n\n num_obs = num_obs.loc[users.tolist()]\n\n if max_samples:\n num_obs = num_obs.groupby(level=0).apply(\n lambda x: x.loc[np.random.permutation(np.sort(x.index.unique()))[:max_samples]]).reset_index(level=1,\n drop=True)\n\n df = df.loc[num_obs.index].sort_index()\n\n return df",
"def filter_non_exiting_customers(df: pd.DataFrame) -> pd.DataFrame:\n df.sort_values(by=\"timestamp\", inplace=True)\n df[\"last_appearance\"] = ~df.iloc[::-1][\"customer_no\"].duplicated()\n\n invalid_customers = df.loc[df[\"last_appearance\"] & ~df[\"exit\"], \"customer_no\"]\n\n df.drop(columns=\"last_appearance\", inplace=True)\n return df[~df[\"customer_no\"].isin(invalid_customers)].reset_index(drop=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter a dataframe keeping only the cases that have the specified number of events | def filter_df_on_case_length(df, case_id_glue="case:concept:name", min_trace_length=3, max_trace_length=50):
df = df.groupby(case_id_glue).filter(lambda x: (len(x)>= min_trace_length and len(x)<=max_trace_length))
return df | [
"def filter_df_on_ncases(df, case_id_glue=\"case:concept:name\", max_no_cases=1000):\n cases_values_dict = dict(df[case_id_glue].value_counts())\n cases_to_keep = []\n for case in cases_values_dict:\n cases_to_keep.append(case)\n cases_to_keep = cases_to_keep[0:min(len(cases_to_keep),max_no_cases)]\n df = df[df[case_id_glue].isin(cases_to_keep)]\n return df",
"def filter_df_on_activities(df, activity_key=\"concept:name\", max_no_activities=25):\n activity_values_dict = dict(df[activity_key].value_counts())\n activity_values_ordered_list = []\n for act in activity_values_dict:\n activity_values_ordered_list.append([act, activity_values_dict[act]])\n activity_values_ordered_list = sorted(activity_values_ordered_list)\n # keep only a number of attributes <= max_no_activities\n activity_values_ordered_list = activity_values_ordered_list[0:min(len(activity_values_ordered_list), max_no_activities)]\n activity_to_keep = [x[0] for x in activity_values_ordered_list]\n df = df[df[activity_key].isin(activity_to_keep)]\n return df",
"def filter_out_queries_with_not_enough_sessions(df):\n queries_before = df.searchTerm.nunique()\n\n term_summary = df.groupby('searchTerm').agg({'searchSessionId': lambda x: x.nunique()})\n enough_sessions = term_summary[term_summary.searchSessionId > 1000]\n queries_after = len(enough_sessions.index)\n\n if queries_after < queries_before:\n print(f'Filtered out {queries_before - queries_after} queries with less than 1000 sessions')\n print(f'There are now {queries_after} queries in the dataset')\n\n return df[df.searchTerm.isin(enough_sessions.index)]",
"def _extract_events(self, df: DataFrame) -> DataFrame:\n return df.filter(df.event == self.event_type)",
"def __filter_unrelevant_events(self, df):\n return df[((df.EVENT_NAME == 'article_viewed')\n | (df.EVENT_NAME == 'top_news_card_viewed')\n | (df.EVENT_NAME == 'my_news_card_viewed')) & df.ATTRIBUTES.apply(\n lambda x: isinstance(x, str))]",
"def replicate_filter(compiled, replicate_threshold):\n df = compiled.copy()\n replicates = df.groupby('Sequence').count()['Proteins']\n rep_sequences = replicates[replicates == replicate_threshold].reset_index()['Sequence']\n return df[df['Sequence'].isin(rep_sequences)]",
"def filter_uneven_rows(self, dframe: pd.DataFrame) -> pd.DataFrame:\n logger.info(\"Filtering out rows with uneven number of elements in columns.\")\n dframe[\"length_diff\"] = dframe.applymap(len).apply(np.diff, axis=1).apply(int)\n filt = dframe[\"length_diff\"] == 0\n return dframe.loc[filt, [*self.columns_info.keys()]].reset_index(drop=True)",
"def popular_query_events(signals, min_query_count):\n queries = signals[signals['type'] == 'query']\n popular_queries = queries.groupby('target').count() \\\n .rename(columns={'query_id': 'query_count'}) \\\n .sort_values('query_count', ascending=False)\n popular_queries = popular_queries[popular_queries['query_count'] > min_query_count].index.to_list()\n pop_query_events = signals[signals['type'] == 'query'][signals['target'].isin(popular_queries)]\n query_events = pop_query_events[['query_id', 'target']].rename(columns={'target': 'query'})\n return query_events",
"def filterTiming(events):\n filters = []\n filters.append( KeepEventTypes(['EcatTimeOverrun', 'RealtimeLoopOverrun']) )\n filters.append( IntervalMerge(2.0) )\n return runFilters(filters,events)",
"def filter_data(self):\n\t\tself.genes_lowcoverage = self.indexed.groupby(['Sample Name', 'Gene Symbol' , 'Accession', 'Gene Size']).apply(lambda x: x[x['% Coverage at 30x'] < 100])\n\t\tself.gene_summary()\n\t\tself.test_values()",
"def extract_revenue_events(df, revenue_event):\n df = df[df.partner_event == revenue_event]\n return df.drop(columns=['partner_event'])",
"def test_filter_by_property_strict_n_components():\n\n property_types = [\"Density\", \"EnthalpyOfVaporization\", \"EnthalpyOfMixing\"]\n substance_entries = [\n ((\"CC\",), (True, True, False)),\n ((\"CCC\",), (True, True, False)),\n ((\"CCCCC\",), (True, False, False)),\n ((\"CCCCCC\",), (True, True, False)),\n ((\"CC\", \"CCC\"), (True, False, True)),\n ((\"CC\", \"CCCCC\"), (True, False, True)),\n ((\"CCC\", \"CCC\"), (True, False, False)),\n ((\"CCC\", \"CCCC\"), (False, False, True)),\n ]\n\n data_frame = _build_data_frame(property_types, substance_entries)\n\n filtered_frame = FilterByPropertyTypes.apply(\n data_frame,\n FilterByPropertyTypesSchema(\n property_types=property_types,\n n_components={\n \"Density\": [1, 2],\n \"EnthalpyOfVaporization\": [1],\n \"EnthalpyOfMixing\": [2],\n },\n strict=True,\n ),\n )\n\n assert len(filtered_frame) == 6\n\n assert data_frame_to_substances(filtered_frame) == {\n (\"CC\",),\n (\"CCC\",),\n (\"CC\", \"CCC\"),\n }",
"def _filter(self, df, filters):\n df = df.loc[(df[list(filters)] == pd.Series(filters)).all(axis=1)]\n return df.reset_index(drop=True).copy()",
"def filter_by_run_number(joined_df, run_number):\r\n\r\n joined_df = joined_df[joined_df['Run No.'] == str(run_number)]\r\n return joined_df",
"def condense(cls, events: List[Timestamp]):\n pass",
"def subSetMoreThanN(self, n): \n subbiG_i=[]\n for i in np.arange(len(self.bigramMatrix)):\n if self.Nbigrams[i] > n: # filter low frequency events\n subbiG_i.append(i)\n return subbiG_i",
"def pick_vis_cov_events(events):\n assert len(events) > 10\n events = events[events[:, 2] == vis_onset_number]\n assert len(events) > 10\n return events",
"def mse_filter(self, exprs, num_mad_genes):\r\n print('Determining most variably expressed genes and subsetting')\r\n mad_genes = exprs.mad(axis=0).sort_values(ascending=False)\r\n top_mad_genes = mad_genes.iloc[0:num_mad_genes, ].index\r\n subset_df = exprs.loc[:, top_mad_genes]\r\n return subset_df",
"def extract_extreme_events(df, quantile=0.9, pad=True):\n extracted = df.apply(lambda row: row > row.quantile(quantile), axis=1)\n\n # pad the extracted extreme event sequence if so requested\n if pad:\n extracted.insert(0, 0, True)\n extracted.insert(len(extracted.columns), 999999999999, True)\n\n return extracted"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the average per-domain AUC and AUPRC for the test set | def compute_per_domain_auc(y_test, pred_probs, pred_idx, classifier):
y_test_copy = y_test.copy(deep=True)
y_test_copy["pred_probs"] = pred_probs
domain_auc_list = []
domain_auprc_list = []
domain_auprc_ratio_list = []
domain_name_list = []
domain_pos_num_list = []
domain_neg_num_list = []
idx = y_test.index
y_test_copy["domain_name"] = [x[:x.rfind("_")] for x in idx]
domains_list = y_test_copy["domain_name"].unique().tolist()
for domain_name in domains_list:
#Get only the domain positions
domain_df = y_test_copy[y_test_copy["domain_name"] == domain_name]
#Find the binding and non-binding positions of this domain
bind_list = domain_df[domain_df["label"] == 1].index
bind_idx = [int(x[len(domain_name)+1:]) for x in bind_list]
bind_num = len(bind_idx)
non_bind_list = domain_df[domain_df["label"] == 0].index
non_bind_idx = [int(x[len(domain_name)+1:]) for x in non_bind_list]
non_bind_num = len(non_bind_idx)
if (bind_num == 0 or non_bind_num == 0):
#No positions of one of the classes "binding/non-binding" - skipping"
continue
#Add number of positives and number of negatives
domain_pos_num_list.append(bind_num)
domain_neg_num_list.append(non_bind_num)
#Compute domain AUC
domain_auc = roc_auc_score(domain_df["label"], domain_df["pred_probs"])
domain_auc_list.append(domain_auc)
#Compute domain AUPRC
precision, recall, thresholds = precision_recall_curve(domain_df["label"], domain_df["pred_probs"])
domain_auprc = auc(recall, precision)
domain_auprc_list.append(domain_auprc)
#Add positives fraction to list
pos_frac_ratio = bind_num/float(domain_df.shape[0])
#Add ratio of AUPRC and positives fraction to list
domain_auprc_ratio_list.append(domain_auprc/float(pos_frac_ratio))
#Add domain name for AUC/AUPRC/Ratio tables
domain_name_list.append(domain_name)
#Compute the means for the lists
domain_auc_mean = np.mean(domain_auc_list)
domain_auprc_mean = np.mean(domain_auprc_list)
domain_auprc_ratio_mean = np.mean(domain_auprc_ratio_list)
return (domain_auc_mean, domain_auprc_mean, domain_auprc_ratio_mean) | [
"def calculate_AUROC(y_true, y_pred):\n return roc_auc_score(y_true, y_pred)",
"def compute_auprc(pred, label):\n #label = np.array(label)\n #pred = np.array(pred)\n precision, recall, thresholds = precision_recall_curve(label, pred)\n auprc = auc(recall, precision)\n return auprc",
"def compute_auc(false_positive_rates: list, true_positive_rates: list) -> float:\n return auc(false_positive_rates, true_positive_rates)",
"def auc(X, y, model):\n probs = model.predict_proba(X)[:,1] \n return roc_auc_score(y, probs)",
"def xgb_pr_auc(preds: np.ndarray, lgb_train: DMatrix) -> Tuple[str, float]:\n labels = lgb_train.get_label()\n precision, recall, _ = precision_recall_curve(labels, preds)\n result = auc(recall, precision)\n return \"pr_auc\", result",
"def auc(self) -> float:\n false_positive_rate, true_positive_rate, _ = self.roc_curve\n return np.trapz(true_positive_rate, false_positive_rate)",
"def compute_auroc(pred, label):\n #label = np.array(label)\n #pred = np.array(pred)\n fpr, tpr, thresholds = roc_curve(label, pred, pos_label =1)\n auroc = auc(fpr, tpr)\n return auroc",
"def compute_AUC(self, dataset=None, labels=None, binary_preds=False):\n if dataset is None:\n dataset = self.validation_set\n labels = self.validation_labels\n return sklearn.metrics.roc_auc_score(labels, self.predict(dataset, binary_preds=binary_preds))",
"def compute_AUC_on_all(model_class, x_train, x_test, y_test, n_classes=10, epochs=8, **params):\n auc_scores = []\n for k in range(n_classes):\n print(f\"Digit {k}, # Training examples: {x_train[k].shape[0]}\")\n model = model_class(**params)\n model.compile(optimizer=tf.keras.optimizers.Adam())\n model.fit(x_train[k], x_train[k], epochs=epochs, batch_size=128)\n predictions = model.predict(x_test)\n fpr, tpr, _ = compute_ROC(x_test, predictions, y_test, criterion=\"l2\", interest_class=k)\n auc = compute_AUC(fpr, tpr)\n auc_scores.append(auc)\n\n return np.array(auc_scores)",
"def score(y_true, y_pred):\n\n\treturn roc_auc_score(y_true, y_pred)",
"def compute_bpsn_auc(df, subgroup, label, model_name):\n subgroup_negative_examples = df[(df[subgroup]>0.5) & (df[label]<=0.5)]\n non_subgroup_positive_examples = df[(df[subgroup]<=0.5) & (df[label]>0.5)]\n examples = subgroup_negative_examples.append(non_subgroup_positive_examples)\n return compute_auc(examples[label]>0.5, examples[model_name])",
"def auc_score(known, unknown):\n y_true = np.array([0] * len(known) + [1] * len(unknown))\n y_score = np.concatenate([known, unknown])\n auc_score = roc_auc_score(y_true, y_score)\n return auc_score",
"def calc_accuracy(model_dict, test_dict):\n\n \"\"\" Calculate the result \"\"\"\n\n all_prob = []\n result_dict = {}\n test_label = []\n predict_label = []\n\n for t_name, t in test_dict.items():\n result = []\n index = []\n hype_dict = {}\n sum = len(t)\n counter = 0\n letter = t_name\n for p in t:\n test_label.append(t_name)\n high_score = -100000\n for m_name, m in model_dict.items():\n score = m.score([p])\n if score > high_score:\n high_score = score\n hypo = m_name\n result.append(hypo)\n predict_label.append(hypo)\n if hypo == letter:\n counter += 1\n all_letters = list(set(result))\n for l in all_letters:\n hype_dict[l] = result.count(l)\n\n sorted_hype_dict = sorted(hype_dict.iteritems(), key=operator.itemgetter(1))\n sorted_hype_dict.reverse()\n\n if sum != 0:\n prob = float(counter)/sum\n print str(letter) + \"(\"+ str(counter) + \"/\" + str(sum) + \")\" + \" ==> Accuracy: \" + str(prob),\n print sorted_hype_dict\n all_prob.append(prob)\n result_dict[letter] = np.array([counter, sum])\n\n \"\"\" Print the average accuracy\"\"\"\n\n all_prob = np.array(all_prob)\n print \"Average accuracy is: \" + str(all_prob.mean())\n print \"=================================\"\n\n return all_prob, result_dict, test_label, predict_label",
"def average_incremental_accuracy(self):\n all_preds, all_targets, _ = self._get_best_epochs(subset=\"test\")\n return statistics.mean(\n [accuracy(all_preds[t], all_targets[t]) for t in range(len(all_preds))]\n )",
"def compute_bnsp_auc(df, subgroup, label, model_name):\n subgroup_positive_examples = df[\n (df[subgroup] > 0.5) & (df[label] > 0.5)]\n non_subgroup_negative_examples = df[\n (df[subgroup] <= 0.5) & (df[label] <= 0.5)]\n examples = subgroup_positive_examples.append(\n non_subgroup_negative_examples)\n return compute_auc(examples[label] > 0.5, examples[model_name])",
"def accuracy(model, X_test, y_test):\n predictions = model.predict(X_test)\n return (np.array(predictions) == np.array(y_test)).mean()",
"def average_precision_score(y, y_pred):\n pass",
"def explicitness_per_factor(mus_train, y_train, mus_test, y_test):\n x_train = np.transpose(mus_train)\n x_test = np.transpose(mus_test)\n clf = LogisticRegression().fit(x_train, y_train)\n y_pred_train = clf.predict_proba(x_train)\n y_pred_test = clf.predict_proba(x_test)\n mlb = MultiLabelBinarizer()\n\n roc_train = roc_auc_score(mlb.fit_transform(np.expand_dims(y_train, 1)), y_pred_train)\n roc_test = roc_auc_score(mlb.fit_transform(np.expand_dims(y_test, 1)), y_pred_test)\n\n return roc_train, roc_test",
"def calcAvgPrec(self):\n avg = 0.0\n counter = 0\n self.recallCompInter = []\n self.precComplete = []\n for i in range (0, len(self.retrieved)):\n if self.retrieved[i] in self.relevant:\n counter += 1 \n avg += ((float(counter)/(i+1)))\n \n self.recallCompInter.append(float(counter)/(self.numberRelevant))\n self.precComplete.append(float(counter)/(i+1)) \n\n avg = avg/counter\n\n print(\"##############################################\") \n print(\"AvgPrecision:\")\n print(avg)\n print(\"##############################################\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The line name is composed of the column1~column(data_start_col) values and the current column name | def _generate_series_name(self, row, current_col_index):
name = " ".join([row[col] for col in range(1, self.data_start_col)])
if len(self.theader_list)-self.data_start_col >= 2:
# if there is many data columns, append current data column name
name = u"%s-%s" % (name, self.theader_list[current_col_index].decode("utf-8"))
return name | [
"def get_line_identifier(self):",
"def dataline(self, line):\n return super(scandata, self).data[:, line - 1]",
"def read_column_names(file_obj): #How does this one know to start at line 5? with next()? \n\tcolumn_names = [cn.strip() for cn in next(file_obj).split(',')[1:]] #start at 1 as we don't need STN\n\treturn column_names",
"def madeline_columns():\n columns = {\n \"family\": \"FamilyId\",\n \"sample\": \"IndividualId\",\n \"sex\": \"Gender\",\n \"father\": \"Father\",\n \"mother\": \"Mother\",\n \"deceased\": \"Deceased\",\n \"proband\": \"Proband\",\n \"status\": \"Affected\",\n }\n return columns",
"def add_line(self, table_name, line):\n pass",
"def _extract_x_name(line):\n if line.startswith(\"x axis\"):\n return line[line.index(\": \") + 2:].strip()\n return \"\"",
"def _get_column_name(self, column):\n return column",
"def parse_table_to_madx_line_string(self) -> str:\n self.add_drifts()\n defstr = _parse_table_to_madx_definitions(self.table)\n linestr = \"{}: LINE=({});\".format(\n self.name,\n \",\\n\\t\\t\".join(\n [\",\".join(c) for c in list(self.chunks(self.table.name.to_list(), 20))]\n ),\n )\n return defstr + \"\\n\\n\" + linestr",
"def renameByCoordinates(self, filename_path, line, col, newname):",
"def find_column_with_start_match(self, pat, attr_set):\n for attr in self.attr_names:\n if attr.find(pat) == 0:\n myvect = getattr(self, attr)\n col = bb_column(attr, myvect)\n setattr(self, attr_set, col)",
"def _colHeader(strIn):\n return \" & \".join(strIn) + \"\\\\\\\\\\n\"",
"def _GetColumn(data, token):\n last_newline = data.rfind('\\n', 0, token.lexpos)\n if last_newline < 0:\n last_newline = 0\n column = token.lexpos - last_newline\n return column",
"def get_column_name(self):\r\n columns = list(self.all_data.columns)\r\n # Note: Excludes Year, Month, Day\r\n columns.remove(self._year)\r\n columns.remove(self._month)\r\n columns.remove(self._day_of_week)\r\n index = 1\r\n for col in columns:\r\n print(f'{index}. {col}')\r\n index += 1\r\n \r\n col_number = int(input('Please select column number: '))\r\n while col_number not in [1, 2, 3, 4]:\r\n col_number = int(input('Please select column number: '))\r\n return columns[ col_number - 1]",
"def split_line(self):\n # coordinate of the # symbol or end of the line (-1) if not found\n hash_or_end = self.line.find(\"#\")\n temp = self.line[self.region_end:hash_or_end].strip(\" |\")\n self.coord_str = regex_paren.sub(\"\", temp)\n\n # don't want any meta_str if there is no metadata found\n if hash_or_end >= 0:\n self.meta_str = self.line[hash_or_end:]\n else:\n self.meta_str = \"\"",
"def get_function_name(self, line):\n return line.split()[0].split('.')[1] if line else \"\"",
"def start_comment():\n column = None if line[:comment_start].strip() else comment_start\n first_line = i\n return (column, first_line)",
"def getMarkerName(index):",
"def process_line(line, sqrub, prefix=None, schema=None):\n\n indent = sqrub.indent\n # test if end of line has end of block\n if re.search(r'\\);$', line):\n sqrub.indent = False\n # remove noise lines from parse\n if re.search(r'^--', line) or line == '' or line == ');':\n return line\n # remove \\' and replace with ''\n if re.search(r'\\'', line.upper()):\n line = line.replace('\\\\\\'', '\\'\\'')\n # CASE: INSERT INTO\n if re.search(r'^INSERT INTO', line.upper()):\n sqrub.indent = True\n return split_insert_line(line, prefix, schema)\n # CASE: VALUES or sub-line\n if re.search(r'VALUES\\s?\\((E?\\'|NULL|\\d+,)', line.upper()):\n return ' ' + line\n if re.search(r'\\s?\\((E?\\'|NULL|\\d+,)', line.upper()):\n return ' ' + line\n # special DDL line with no name\n for tok in DDL_OTHER_KEYWORDS:\n if re.search(r''.join(tok), line.lower()):\n return line\n # set up initial values of name and remain for existence test later\n name = None\n remain = None\n for tok in DDL_KEYWORDS:\n if tok in line.lower():\n if ' '.join((tok, 'if exists')) in line.lower():\n tok = ' '.join((tok, 'if exists'))\n name, remain = split_line_with_token(line, tok)\n name = standardize_name(name, prefix, schema)\n sqrub.indent = True\n return ''.join((tok.upper(), ' ', name, ' ', remain)).replace(' ;', ';')\n # no token at start of line - column declaration\n for tok in DDL_TYPES:\n if tok in line.lower():\n name, remain = split_line_with_column_name(line)\n name = standardize_name(name, prefix=None, schema=None)\n remain = remain.strip()\n if not name or not remain:\n return\n if indent:\n return ' '.join((INDENT, name, remain.upper()))\n else:\n return ' '.join((name, remain.upper()))",
"def incrementCol(self):\n self.currentColumn += 1\n print self.currentColumn\n self.currentNumber = self.set[ self.currentColumn - 1 ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run tasks asynchronously using asyncio and return results. If max_concurrent_tasks is set to 0, no limit is applied. | def run_asyncio_commands(tasks, max_concurrent_tasks=0):
all_results = []
if max_concurrent_tasks == 0:
chunks = [tasks]
else:
chunks = make_chunks(l=tasks, n=max_concurrent_tasks)
for tasks_in_chunk in chunks:
if platform.system() == 'Windows':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
commands = asyncio.gather(*tasks_in_chunk) # Unpack list using *
results = loop.run_until_complete(commands)
all_results += results
loop.close()
return all_results | [
"def run_asyncio_commands(tasks, max_concurrent_tasks=0):\r\n\r\n all_results = []\r\n\r\n if max_concurrent_tasks == 0:\r\n chunks = [tasks]\r\n else:\r\n chunks = make_chunks(l=tasks, n=max_concurrent_tasks)\r\n\r\n for tasks_in_chunk in chunks:\r\n loop = asyncio.get_event_loop()\r\n commands = asyncio.gather(*tasks_in_chunk)\r\n results = loop.run_until_complete(commands)\r\n all_results += results\r\n loop.close()\r\n return all_results",
"def run(*steps):\n if not steps:\n return\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError:\n loop = asyncio.new_event_loop()\n except AttributeError:\n # Remove once support for Python 3.6 is dropped\n loop = asyncio.get_event_loop()\n\n for step in steps:\n task = loop.create_task(step)\n loop.run_until_complete(asyncio.wait([task]))\n\n # Let's also cancel any remaining tasks:\n while True:\n # issue #445 - asyncio.Task.all_tasks() deprecated in 3.7\n if version_info.major == 3 and version_info.minor >= 7:\n try:\n tasklist = asyncio.all_tasks()\n except RuntimeError:\n # no running event loop\n break\n else:\n tasklist = asyncio.Task.all_tasks()\n pending_tasks = [p for p in tasklist if not p.done()]\n if pending_tasks:\n logging.info(\n \"async -> sync. cleaning up pending tasks: len: {}\"\n .format(len(pending_tasks)))\n for pending_task in pending_tasks:\n pending_task.cancel()\n try:\n loop.run_until_complete(pending_task)\n except asyncio.CancelledError:\n pass\n except Exception as e:\n logging.error(\n \"A pending task caused an exception: {}\"\n .format(str(e)))\n else:\n break\n\n return task.result()",
"def execute_async(self):\n with concurrent.futures.ThreadPoolExecutor(max_workers = 1) as executor:\n return executor.submit(self.execute)",
"async def _async_loop(self, urls):\n results = []\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl=False)\n ) as session:\n for url in urls:\n result = asyncio.ensure_future(self._get_async(url, session))\n results.append(result)\n responses = await asyncio.gather(*results)\n return responses",
"def run_concurrent_futures_threadpool():\n\n print('--- Concurrent futures threadpool ---')\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n return list(executor.map(load_url, URLS))",
"async def create_tasks(session: ClientSession, urls: List[str]) -> List[Task]:\n tasks = []\n for i, url in enumerate(urls):\n task = asyncio.create_task(fetch_and_save_url(session, url, i, len(urls)))\n tasks.append(task)\n return tasks",
"async def crawl_global_queue(self):\n tasks = []\n\n for i in range(self.concurrent_tasks):\n task = asyncio.create_task(self.process_all())\n tasks.append(task)\n\n await asyncio.gather(*tasks, return_exceptions=True)",
"def run_in_parallel(task_list, maximum_jobs):\n if maximum_jobs == 0:\n executor = concurrent.SynchronousExecutor()\n else:\n executor = concurrent.ProcessPoolExecutor(max_workers=maximum_jobs)\n\n result_list = []\n with executor:\n try:\n # Submit initial tasks.\n not_done = {executor.submit(_run_task, generator_task)\n for generator_task in task_list}\n while not_done:\n # Wait any task is completed.\n done, not_done = concurrent.wait(\n not_done, return_when=concurrent.FIRST_COMPLETED)\n\n for completed_future in done:\n if completed_future.exception():\n # An exception is raised in a task. Cancel remaining tasks and\n # re-raise the exception.\n for future in not_done:\n future.cancel()\n not_done = []\n raise completed_future.exception()\n\n # The task is completed successfully. Process the result.\n result, request_task_list = completed_future.result()\n if request_task_list:\n # If sub tasks are requested, submit them.\n assert not result\n not_done.update(\n executor.submit(_run_task, generator_task)\n for generator_task in request_task_list)\n continue\n\n if result:\n result_list.append(result)\n except:\n # An exception is raised. Terminate the running workers.\n if isinstance(executor, concurrent.ProcessPoolExecutor):\n executor.terminate()\n raise\n\n return result_list",
"def gather(results: Iterable[AsyncResult], task_timeout=10, return_exceptions=True):\n async def _gather():\n async def _get(result, timeout):\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(None, result.get, timeout)\n\n aws = [_get(r, task_timeout) for r in results]\n return await asyncio.gather(*aws, return_exceptions=return_exceptions)\n\n return asyncio.run(_gather())",
"async def _run_services(objects: List[BaseTask]):\n return await asyncio.gather(*[obj.async_run() for obj in objects])",
"def run_coro_sync(coro: Awaitable[_T]) -> _T:\n return asyncio.get_event_loop().run_until_complete(coro)",
"def print_tasks():\n while True:\n yield from asyncio.sleep(10)\n for task in asyncio.Task.all_tasks():\n if task.done():\n exception = task.exception()\n if exception is None:\n logger.info(\"Task DONE: %s = %s\", task, task.result())\n else:\n logger.error(\"Task FAILED: %s = %s\", task, exception)\n else:\n logger.debug(\"Tasks RUNNING: %s\", task)",
"async def test_runnable_async(self):\n # for pydocstyle\n class TestRun(Runnable):\n async def run(self):\n while True:\n await asyncio.sleep(1)\n\n run = TestRun()\n run.start()\n run.stop()\n await run.wait_completed()\n\n run = TestRun(threaded=True)\n run.start()\n run.stop()\n run.wait_completed(sync=True)\n\n run = RunAndExit()\n await run.start_and_wait_completed()",
"async def run_sync(func: Callable[..., T], *args: Any) -> T:\n loop = asyncio.get_event_loop()\n return await loop.run_in_executor(None, func, *args)",
"def processCasesAsyncF (cases, solver):\n\timport concurrent.futures\n\twith concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n\t\tfutures = [executor.submit(processCase, case, solver) for case in cases]\n\treturn [future.result() for future in concurrent.futures.as_completed(futures)]",
"def test_run_in_executor(self):\n # Container for task results modified by inner functions\n task_result = {\n 'count': 0,\n 'outer_completed': False,\n 'inner_completed': False,\n }\n\n def nested_task(num_loops):\n \"\"\"Simple task that loops and increments a counter before completing.\"\"\"\n for _ in range(num_loops):\n time.sleep(0.01)\n task_result['count'] += 1\n task_result['inner_completed'] = True\n\n def outer_task(num_loops):\n \"\"\"Outer task that launchas another task on an executor.\"\"\"\n util.run_in_executor(executor, nested_task, num_loops)\n task_result['outer_completed'] = True\n\n executor = concurrent.futures.ThreadPoolExecutor()\n\n num_loops = 10\n future = util.run_in_executor(executor, outer_task, num_loops)\n\n wait_count = 0\n while not task_result['inner_completed'] and wait_count < 100:\n time.sleep(0.01)\n wait_count += 1\n\n if version_info[0] <= 4:\n future_type = concurrent.futures.Future\n else:\n future_type = tornado.concurrent.Future\n\n assert isinstance(future, future_type)\n assert task_result['inner_completed'] is True\n assert task_result['count'] == num_loops\n assert task_result['outer_completed'] is True",
"def _brute_batch_get(session, urls):\n # Setup event loop for async calls.\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n # Execute calls and get responses as a list.\n responses = loop.run_until_complete(_get_async(session, urls))\n\n return responses",
"def async_func(self, *args, **kwargs):\n del args\n task = TaskRunner(run_function=func, obj=self, kwargs=kwargs)\n ret = task.start()\n return ret",
"def apply_async(fn, **kwargs):\n # split work load\n n = _pools # number of Pools\n chopped = {k: (chop(v[0],n),v[1]) if v[1] else v for k,v in kwargs.items()}\n arglist = []\n for i in range(n):\n dict_ = {}\n add = True\n for k,v in chopped.items():\n if v[1]:\n # do not create item in arglist of splitted argument has lengt 0\n if len(v[0][i]) == 0:\n add = False\n break\n dict_[k] = v[0][i]\n elif v[1] == False:\n dict_[k] = v[0]\n if add:\n arglist.append(dict_)\n\n # run in multiple Pools\n promises = []\n results = []\n p = Pool(n)\n for args in arglist:\n res = p.apply_async(fn, kwds=args)\n promises.append(res)\n for res in promises:\n result = res.get()\n if hasattr(result, '__iter__'):\n results.extend(result)\n else:\n results.append(result)\n\n return results"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Do basic widget setup. For Base, this is just changing the label text. | def setup(self, name=None, **kwargs):
if None not in (self.label, name):
self.label.setText(name) | [
"def set_label(self, label):",
"def create(self, parent):\n self.widget = QtGui.QLabel(parent)",
"def setup(self, parent):\n self.__init_widget(parent)\n self.init_grid()\n self.set_init()",
"def __init__(self, **kwargs):\n super(MyLabel, self).__init__(**kwargs)\n self.text = 'The counter is now: ' + str(self.counter)",
"def setup(self, parent):\n self.__init_widget(parent)\n self.__init_grid()",
"def __init__(self, parent = None, flags = 0):\n super(ClickLabel, self).__init__(parent, flags)",
"def initUI(self):\r\n self.setFlag(QGraphicsItem.ItemIsSelectable)\r\n self.setFlag(QGraphicsItem.ItemIsMovable)\r\n self.setAcceptHoverEvents(True)\r\n\r\n # init title\r\n self.initTitle()\r\n self.title = self.node.title\r\n\r\n self.initContent()",
"def __init__(self, **kwargs):\n super(VolumeLbl, self).__init__(**kwargs)\n self.text = rdc.get_volume()\n #Clock.schedule_interval(self.update, 2)",
"def update_label(self):\n if self.label is not None:\n # Don't resize because the label is what controls a dynamically sized button,\n # so save the resizing to when we re-make the label.\n # If the label is just totally removed then there isn't a sensible size to resize\n # to anyway.\n self.remove(self.label, no_resize=True)\n\n if self.definition.formatted_text is None:\n self.label = None\n return\n\n self.label = TextBox(TextBoxDefinition(text=self.definition.formatted_text))\n self.add(self.label)\n if not self.definition.is_dynamic_sized:\n # If dynamic sized, then the label helps control setting the size, so it makes no\n # sense to align it.\n self.label.align_anchor_with_other_anchor(self)",
"def setupQtWidget(self, **params):\n self.layouts[self.name].addWidget(self.texts[self.name])\n self.texts[self.name].setAcceptRichText(False)\n self.texts[self.name].setReadOnly(False)\n self.texts[self.name].setFontFamily(\"Courier\")\n self.texts[self.name].keyPressEventOrig = self.texts[self.name].keyPressEvent\n self.texts[self.name].keyPressEvent = self.keyPressEvent",
"def __init__(self, **kwargs):\n super(StationLbl, self).__init__(**kwargs)\n self.text = 'retrieving Info'\n self.update()\n Clock.schedule_interval(self.update, 2)",
"def initialize(self):\n super(WXField, self).initialize()\n shell = self.shell_obj\n self.set_validator(shell.validator)\n self.set_text(shell.validator.format(shell.value))\n self.set_placeholder_text(shell.placeholder_text)\n self.set_cursor_position(shell.cursor_position)\n self.set_max_length(shell.max_length)",
"def __init__(self, text):\n super(LabelTextMime, self).__init__()\n\n self.text = text",
"def __init__(self, master, label_text, unit, value, digits=4, **kwargs):\r\n\r\n row = get_row(master)\r\n self.options = self.options.copy()\r\n self.options.update(**kwargs)\r\n container = ttk.Frame(master)\r\n container.grid(column=0, row=row, sticky=\"ew\")\r\n if label_text is not None:\r\n label = ttk.Label(container, text=\"%s:\" % label_text, **self.options, padding=(5, 2))\r\n label.pack(side=\"left\")\r\n formatter = \"{0:.{1}g} {2}\"\r\n value = ttk.Label(container, text=formatter.format(value, digits, unit), **self.options)\r\n value.pack(side=\"left\")",
"def init_plotter(self, title:str, xLabel:str, yLabel:str):\n plt.title(title)\n self.fig.canvas.set_window_title(title)\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n plt.show()",
"def __init__(self, name, parent=None):\n\n super(BaseControl, self).__init__(name, parent=parent)",
"def __init__(self, parent, controller, title):\n super(Core, self).__init__(parent=parent, title=title)\n self.controller = controller\n self.statusbar = self.CreateStatusBar()\n self.panel = wx.Panel(self)\n self.panel.SetBackgroundColour('Pink')\n self._create_widgets()\n self._bind_widgets()\n self.show()\n self.check_menus()",
"def initialize(self):\n self.slider = Scale(self.root, orient=VERTICAL, from_=self.min, to=self.max, value=float(self.gain.get()), command=self._updateValue)\n self.slider.grid(row=0,column=self.id, padx=14)\n\n self.valueLbl = Label(self.root, anchor=\"w\", textvariable=self.value)\n self.valueLbl.grid(row=1,column=self.id, padx=14)\n\n self.freqLbl = Label(self.root,text=str(self.freq)+\" Hz\")\n self.freqLbl.grid(row=2,column=self.id, padx=14)",
"def _make_label(self):\n label_text = \"{}: \".format(self.options_file.name)\n label = Label(self, text=label_text, justify=LEFT)\n label.grid(row=self.row, column=0, sticky=EW)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Hide all widgets in group. | def hide(self):
for widget in self.widgets:
widget.hide()
if self.label is not None:
self.label.hide() | [
"def hide_all_but(self, widget=None):\n for i in reversed(range(1, self.layout.count())):\n item = self.layout.itemAt(i)\n\n if isinstance(item, QWidgetItem):\n item.widget().hide() \n # or\n # item.widget().setParent(None)\n if widget is not None:\n widget.show()",
"def Hiding_Themes(self):\n self.groupBox.hide()",
"def HideGrid(self):\n self.hide_grid = True",
"def hideAllPanes(self):\n\t\t\tall_panes = self._mgr.GetAllPanes()\t \n\t\t\tfor ii in xrange(len(all_panes)):\n\t\t\t\t\tif not all_panes[ii].IsToolbar():\n\t\t\t\t\t\t\tall_panes[ii].Hide()",
"def hide(self):\n self.is_visible = False",
"def hide_all(self):\r\n tools_dock = IDE.get_service('tools_dock')\r\n toolbar = IDE.get_service('toolbar')\r\n if (self.lateralPanel.isVisible() or tools_dock.isVisible() or\r\n toolbar.isVisible()):\r\n if self.lateralPanel:\r\n self.lateralPanel.hide()\r\n if tools_dock:\r\n tools_dock.hide()\r\n if toolbar:\r\n toolbar.hide()\r\n else:\r\n if self.lateralPanel:\r\n self.lateralPanel.show()\r\n if tools_dock:\r\n tools_dock.show()\r\n if toolbar:\r\n toolbar.show()",
"def hide_all_without_collection():\n # Retrieve objects linked to master collection\n objects = bpy.context.scene.collection.objects\n\n for obj in objects:\n obj.hide_render = True\n obj.hide_viewport = True",
"def hide_popups(self): \n for popup in self.popups:\n popup.set_active(False)\n popup.set_visible(False)",
"def _hideAllGuis(self):\n\t\tif self._hud != None:\n\t\t\tself._hud.hide()\n\t\tif self._mainmenu != None:\n\t\t\tself._mainmenu.hide()\n\t\tif self._pausemenu != None:\n\t\t\tself._pausemenu.hide()\n\t\tif self._loadingmenu != None:\n\t\t\tself._loadingmenu.hide()\n\t\tif self._settingsmenu != None:\n\t\t\tself._settingsmenu.hide()\n\t\tif self._aboutmenu != None:\n\t\t\tself._aboutmenu.hide()",
"def hide(objects, allObjects=bool, returnHidden=bool, invertComponents=bool, clearSelection=bool, testVisibility=bool):\n pass",
"def hide_item(self):\n self.tree_widget.currentItem().setHidden(True)",
"def telector_hide():\n\n global labels_ui\n if labels_ui is not None:\n labels_ui.destroy()\n labels_ui = None\n ctx.tags = []",
"def disable(self):\n self._set_enable_widgets(False)",
"def hidePluginToolBars(self):\n for toolBar in self._pluginToolBars:\n toolBar.hide()",
"def removeAllWidget(self):\r\n for widget in self.widgets:\r\n super().removeWidget(widget)\r\n\r\n self.widgets.clear()",
"def hideAddSignal(self):\n try:\n self.addSignalGroupBox.hide()\n except:\n print(traceback.format_exc())",
"def _hide_metadata_status_buttons(self):\n self.__log.call()\n\n self._retry_aggregation_button.grid_remove()\n self._edit_asis_button.grid_remove()",
"def clear(self):\n for widget in self.winfo_children():\n widget.destroy()",
"def hide(self):\n self.withdraw()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show all widgets in group. | def show(self):
for widget in self.widgets:
widget.show()
if self.label is not None:
self.label.show() | [
"def add_widgets(self):\n widgets = self.get_widgets()\n self._widgets = [] # stores widgets which may need to be shut down when done\n for group in widgets:\n # Check for group label\n if isinstance(group[0], str):\n grouplabel, v = group\n box = QGroupBox(grouplabel)\n box.setContentsMargins(0, 18, 0, 0) # LTRB\n # Apply the center-label directive only for single-icon groups\n if len(group[1]) == 1:\n box.setAlignment(Qt.AlignHCenter)\n else:\n box = QGroupBox()\n box.setContentsMargins(0, 0, 0, 0) # LTRB\n v = group\n # Add widgets to QGroupBox\n layout = QHBoxLayout()\n layout.setSpacing(0)\n layout.setContentsMargins(0, 0, 0, 0) # LTRB\n for i in v:\n try:\n try:\n i.setIconSize(self.max_icon_size) # without this, icons are tiny\n except AttributeError as e:\n # triggers with battery which uses a QLabel instead of a QToolButton-based widget\n pass\n layout.addWidget(i)\n self._widgets.append(i)\n except:\n raise Exception(\"All widgets must be a subclass of QWidget!\")\n\n layout.activate()\n box.setLayout(layout)\n self._main_widget.addWidget(box)\n self._main_widget.addSeparator()",
"def hide_all_but(self, widget=None):\n for i in reversed(range(1, self.layout.count())):\n item = self.layout.itemAt(i)\n\n if isinstance(item, QWidgetItem):\n item.widget().hide() \n # or\n # item.widget().setParent(None)\n if widget is not None:\n widget.show()",
"def widgets(self):\r\n l = []\r\n for i in range(self.count()):\r\n w = self.widget(i)\r\n if w:\r\n l.append(w)\r\n return l",
"def Show_Themes(self):\n self.groupBox.show()",
"def showGroupMenu( self ):\n group_active = self.isGroupingActive()\n group_by = self.groupBy()\n \n menu = XMenu(self)\n menu.setTitle('Grouping Options')\n menu.setShowTitle(True)\n menu.addAction('Edit Advanced Grouping')\n \n menu.addSeparator()\n \n action = menu.addAction('No Grouping')\n action.setCheckable(True)\n action.setChecked(not group_active)\n \n action = menu.addAction('Advanced')\n action.setCheckable(True)\n action.setChecked(group_by == self.GroupByAdvancedKey and group_active)\n if ( group_by == self.GroupByAdvancedKey ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n menu.addSeparator()\n \n # add dynamic options from the table schema\n tableType = self.tableType()\n if ( tableType ):\n columns = tableType.schema().columns()\n columns.sort(key = lambda x: x.displayName())\n for column in columns:\n action = menu.addAction(column.displayName())\n action.setCheckable(True)\n action.setChecked(group_by == column.displayName() and\n group_active)\n \n if ( column.displayName() == group_by ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n point = QPoint(0, self.uiGroupOptionsBTN.height())\n action = menu.exec_(self.uiGroupOptionsBTN.mapToGlobal(point))\n \n if ( not action ):\n return\n elif ( action.text() == 'Edit Advanced Grouping' ):\n print 'edit advanced grouping options'\n elif ( action.text() == 'No Grouping' ):\n self.setGroupingActive(False)\n \n elif ( action.text() == 'Advanced' ):\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(self.GroupByAdvancedKey)\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()\n \n else:\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(str(action.text()))\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()",
"def cmd_list_widgets(self):\r\n return self.widgetMap.keys()",
"def displayChildren(self,master):\n for c in master.children:\n c.draw()\n \n if c.children and c.expanded:\n c.displayChildren(c)",
"def return_widget(self):\n return self.groupbox",
"def on_show_all_containers_checkbox_clicked(self):\n show_all = False\n\n if self.__show_all_checkbox.isChecked():\n show_all = True\n\n self.setCursor(QCursor(Qt.BusyCursor))\n self.__table.setup_table(show_all)\n self.setCursor(QCursor(Qt.ArrowCursor))\n\n # Add status text\n ApplicationStatusBox.prepend_status_text('Containers list refreshed. \\n')",
"def widgets(self):\n\n widgets = {}\n for f in self.fields():\n widgets[f.getName()] = f.widget\n return widgets",
"def show_all(self, titem: qt.QtWidgets.QTreeWidgetItem, hide=False):\n for i in range(titem.childCount()):\n self.show_all(titem.child(i), hide=hide)\n titem.setHidden(hide)",
"def initialize_all_widgets(self):\n self.hidden = True\n \n self.directory_view_window = QtWidgets.QDockWidget(\"Directory view\", self.main_form)\n self.main_form.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.directory_view_window)\n self.directory_view_window.visibilityChanged.connect(self.directory_view_window_visibility_callback)\n\n self.current_working_directory = None\n\n self.filesystem = QtWidgets.QFileSystemModel()\n self.filesystem.setIconProvider(IconProviderWidget())\n\n self.directory_view_tree = QtWidgets.QTreeView()\n self.directory_view_tree.setAnimated(False)\n self.directory_view_tree.clicked.connect(self.directory_view_tree_item_clicked_callback)\n self.directory_view_tree.setStyleSheet(\"QTreeView { border: 1px solid lightgrey; }\")\n self.directory_view_window.setWidget(self.directory_view_tree)\n self.directory_view_window.hide()\n\n self.directory_view_tree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.directory_view_tree.customContextMenuRequested.connect(self.custom_menu_context)",
"def display(self):\n for box in self.boxes:\n box.display()\n for line in self.lines:\n line.display(self)",
"def set_visibility(group, status=True):\n for tree in group:\n tree.visible = True",
"def get_widgets(self):\n name = self.__class__.__name__\n if name.endswith('Widget'):\n name = name[:-6]\n yield name",
"def show_admin_widgets():\n widgets = g.db.get_widgets()\n for widget in widgets:\n if len(widget['body']) > 100:\n widget['body'] = widget['body'][:100] + \"...\"\n return render_template('admin/widgets.djhtml', widgets=widgets)",
"def allWidgets(self, object):\n\n if not object.isWidgetType():\n return []\n result = []\n if object.isVisible() and object.focusPolicy() != Qt.NoFocus and object.isEnabled():\n if object.inherits('QLineEdit'):\n if not object.isReadOnly():\n result += [object]\n else:\n result += [object]\n for child in object.children():\n result += self.allWidgets(child)\n return result",
"def draw(self, window):\n for widget in self.__widgets:\n widget.draw(self.__window)",
"def _show_widgets(self):\n self._inp_ptfrm_sze = self._Input(label_text='Platform Centre Length', parent=self._me, row=1, col=0)\n self._inp_ptfrm_len = self._Input(label_text='Platform Edge Length', parent=self._me, row=1, col=1)\n self._inp_lnkge_len = self._Input(label_text='Linkage Length', parent=self._me, row=1, col=2)\n self._inp_crank_len = self._Input(label_text='Crank Length', parent=self._me, row=1, col=3)\n self._inp_crank_ang = self._Input(label_text='Initial Crank Angle', parent=self._me, row=1, col=4,\n limit_low=-90, limit_high=90)\n self._inp_assly_ofs = self._Input(label_text='Assembly Offset', parent=self._me, row=1, col=5,\n limit_low=-90, limit_high=90)\n self._inp_assly_ang = self._Input(label_text='Assembly Angle', parent=self._me, row=1, col=6,\n limit_low=-90, limit_high=90)\n self._inp_plane_ofs = self._Input(label_text='Motor - Platform Offset', parent=self._me, row=2, col=0)\n self._design_update = Button(self._me, text='Update', command=lambda: self._update_design(), width=11,\n height=2, font=('Helvetica', '15'))\n self._design_update.grid(row=2, column=5)\n self._simulate_strt = Button(self._me, text='Simulate', command=lambda: self._start_sim(), width=11,\n height=2, font=('Helvetica', '15'))\n self._simulate_strt.grid(row=2, column=6)\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Swap active pv names and manage connections | def change_pvs(self, pvnames, name=None, **kwargs):
    self.preserve_connections()
    self.clear_connections()
    self.setup(pvnames=pvnames, name=name, **kwargs)
    self.create_connections() | [
"def __nameChanged(self,ippool_obj,old_name):\n self.unloadIPpoolByName(old_name)\n self.__keepObj(ippool_obj)",
"def update_pv(self, **kws):\n for k in ('readback', 'readset', 'setpoint'):\n v = kws.get(k, None)\n if v is not None:\n setattr(self, k, v)\n setattr(self, \"{}_pv\".format(k), get_pv(v, auto_monitor=self._am))",
"def test_two_hosts_swap_priorities(self):\n self.basic_flow()\n testflow.step(\n \"Swapping SPM priorities between host %s and %s\",\n self.high_spm_priority_host, self.low_spm_priority_host\n )\n self.high_spm_priority_host, self.low_spm_priority_host = (\n self.low_spm_priority_host, self.high_spm_priority_host\n )\n self.hosts = [self.high_spm_priority_host, self.low_spm_priority_host]\n self.basic_flow()",
"def change_connection(self, old_c, new_c):\n for i in range(len(self.cxns)):\n if self.cxns[i] == old_c:\n self.cxns[i] = new_c\n break\n #for c in self.cxns:\n # if c == old_c:\n # c = new_c\n # break",
"def switch_active_pokemon(self, new_active):\r\n # First check to see the number is valid (between 0 and \r\n # the length of the list). \r\n if new_active < len(self.pokemons) and new_active >= 0:\r\n # You can't switch to a pokemon that is knocked out\r\n if self.pokemons[new_active].is_knocked_out:\r\n print(f\"{self.pokemons[new_active].name} is knocked out. You can't \"\r\n \"make it your active pokemon.\")\r\n # You also can't switch to your current pokemon\r\n elif new_active == self.current_pokemon:\r\n print(f\"{self.pokemons[new_active].name} is already your active \"\r\n \"pokemon.\")\r\n else:\r\n # Switches the pokemon.\r\n self.current_pokemon = new_active\r\n print(f\"{self.pokemons[new_active].name} it's your turn!\")",
"def adjustPVs(vgUseList, hasMultipath):\n path = '/dev/%s'\n if hasMultipath:\n path = '/dev/mapper/%s'\n\n # run over commands and ajust PVs\n for command in vgUseList:\n\n # first assign all pvs to the vg\n # (those already assigned will fail but that's ok)\n for pv in command['pvs']:\n run(CMD_EXTEND_VG % (command['name'], path % pv))\n\n # then remove the deleted ones\n for pv in command['deletedPvs']:\n\n # command to reduve vg failed: abort\n if run(CMD_REDUCE_VG % (command['name'], path % pv)) != 0:\n llecho(ERROR_REMOVE_PV % command['name'])\n return False\n\n return True",
"def move_port_bindings_off_controller_1():\n cmd = (\"UPDATE ml2_port_bindings SET host='controller-0'\"\n \" WHERE host='controller-1';\")\n run_cmd_postgres(cmd)",
"def handle_upgrade_1_1_to_1_2(self):\n try:\n logging.info(\"handle_upgrade_1_1_to_1_2: Start\")\n self.conn.create_function('name_from_uuid', 1, vmdk_utils.get_vm_name_by_uuid)\n # Alter vms table to add a new column name vm_name to store vm name\n # update all the existing records with the vm_name.\n # If vm_name is not resolved, it is populated as None and handled appropriately later.\n # Finally update the db schema version\n script = \"\"\"ALTER TABLE vms ADD COLUMN vm_name TEXT;\n UPDATE vms SET vm_name=name_from_uuid(vm_id);\n UPDATE versions SET major_ver = {}, minor_ver = {};\n \"\"\"\n sql_script = script.format(DB_MAJOR_VER, DB_MINOR_VER)\n self.conn.executescript(sql_script)\n\n logging.info(\"handle_upgrade_1_1_to_1_2: update vms table Done\")\n\n # update the tenants table to set \"default_datastore\" to \"__VM_DS\" if \"default_datastore\" is \"\"\n self.conn.execute(\"UPDATE OR IGNORE tenants SET default_datastore_url = ? where default_datastore_url = \\\"\\\"\",\n (auth_data_const.VM_DS_URL,))\n logging.info(\"handle_upgrade_1_1_to_1_2: update default_datastore in tenants table\")\n\n cur = self.conn.execute(\"SELECT * FROM tenants\")\n result = cur.fetchall()\n\n self.conn.execute(\"\"\"INSERT OR IGNORE INTO privileges(tenant_id, datastore_url, allow_create, max_volume_size, usage_quota)\n SELECT tenants.id, tenants.default_datastore_url, 1, 0, 0 FROM tenants\n \"\"\")\n logging.info(\"handle_upgrade_1_1_to_1_2: Insert privilege to default_datastore in privileges table\")\n\n cur = self.conn.execute(\"SELECT * FROM tenants WHERE id = ?\",\n (auth_data_const.DEFAULT_TENANT_UUID,)\n )\n\n result = cur.fetchall()\n logging.debug(\"handle_upgrade_1_1_to_1_2: Check DEFAULT tenant exist\")\n if result:\n # _DEFAULT tenant exists\n # insert full access privilege to \"__ALL_DS\" for \"_DEFAULT\" tenant\n all_ds_privilege = (auth_data_const.DEFAULT_TENANT_UUID, auth_data_const.ALL_DS_URL, 1, 0, 0)\n self.conn.execute(\"INSERT INTO privileges(tenant_id, datastore_url, allow_create, max_volume_size, usage_quota) VALUES (?, ?, ?, ?, ?)\",\n all_ds_privilege)\n logging.info(\"handle_upgrade_1_1_to_1_2: Insert privilege to __ALL_DS for _DEFAULT tenant in privileges table\")\n # remove access privilege to \"DEFAULT_DS\"\n self.conn.execute(\"DELETE FROM privileges WHERE tenant_id = ? AND datastore_url = ?\",\n [auth_data_const.DEFAULT_TENANT_UUID, auth_data_const.DEFAULT_DS_URL])\n logging.info(\"handle_upgrade_1_1_to_1_2: Remove privilege to _DEFAULT_DS for _DEFAULT tenant in privileges table\")\n self.conn.commit()\n return None\n except sqlite3.Error as e:\n error_msg = \"Error when upgrading auth DB table({})\".format(str(e))\n logging.error(\"handle_upgrade_1_1_to_1_2. %s\", error_msg)\n raise DbUpgradeError(self.db_path, error_msg)",
"def set_pv(pv_name, value, **kwargs):\n # pylint: disable=unused-argument\n PVS[pv_name] = value",
"def _updateList(self):\r\n for i in self._control.get_children():\r\n self._control.delete(i)\r\n sorted_names = sorted(self._services.iterkeys())\r\n for name in sorted_names:\r\n info = self._services[name]\r\n self._control.insert(\"\" , 'end', text=name, \r\n values=(name[0:name.rfind(\"._http._tcp.local.\")], \r\n info.getServer()[0:info.getServer().rfind(\".local\")],\r\n str(socket.inet_ntoa(info.getAddress())),\r\n info.getPort()))",
"def set_hpn_server_swap(config, flag):\n # type: (dict, bool) -> None\n if 'ssh' not in config['pool_specification']:\n config['pool_specification']['ssh'] = {}\n config['pool_specification']['ssh']['hpn_server_swap'] = flag",
"def switch_sync_started(self):",
"def slotChangeHostname(self):\n\n for item in self.__topology.selectedItems():\n item.changeHostname()",
"def _failover(self):\n\n slap = slapos.slap.slap()\n slap.initializeConnection(self.server_url, self.key_file, self.cert_file)\n\n # partition that will take over.\n cp_winner = slap.registerComputerPartition(computer_guid=self.computer_guid,\n partition_id=self.partition_id)\n # XXX although we can already rename cp_winner, to change its software type we need to\n # get hold of the root cp as well\n\n cp_exporter_ref = self.namebase + '0' # this is ok. the boss is always number zero.\n\n # partition to be deactivated\n cp_broken = cp_winner.request(software_release=self.software_release,\n software_type='frozen',\n state='stopped',\n partition_reference=cp_exporter_ref)\n\n broken_new_ref = 'broken-{}'.format(time.strftime(\"%d-%b_%H:%M:%S\", time.gmtime()))\n\n log.debug(\"Renaming {}: {}\".format(cp_broken.getId(), broken_new_ref))\n\n cp_broken.rename(new_name=broken_new_ref)\n\n cp_broken.stopped()\n\n log.debug(\"Renaming {}: {}\".format(cp_winner.getId(), cp_exporter_ref))\n\n # update name (and later, software type) for the partition that will take over\n\n cp_winner.rename(new_name=cp_exporter_ref)\n cp_winner.bang(message='partitions have been renamed!')",
"def start(self):\n dpdk.init()\n super(OvsDpdkVhost, self).start()\n # old style OVS <= 2.5.0 multi-queue enable\n if S.getValue('OVS_OLD_STYLE_MQ') and \\\n int(S.getValue('VSWITCH_DPDK_MULTI_QUEUES')):\n tmp_br = OFBridge(timeout=-1)\n tmp_br.set_db_attribute(\n 'Open_vSwitch', '.', 'other_config:' +\n 'n-dpdk-rxqs', S.getValue('VSWITCH_DPDK_MULTI_QUEUES'))",
"def handle_vbs_up(self, vbs):",
"def swap(self):\n if self.away.abbrev == 'PRM':\n return\n tmp = self.away\n self.away = self.home\n self.home = tmp",
"def backup_connection_data(self):\n self.old_host_data['host'] = self.host\n self.old_host_data['listen_port'] = self.listen_port\n self.old_host_data['talk_port'] = self.talk_port",
"def makeHostDeployed(self, name):\n host = (name, )\n self.cursor.execute(\"UPDATE hosts SET status = 2 WHERE name=?\", host)\n self.database.commit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an object, return the pvnames based on self.attrs | def get_pvnames(self, obj):
    if obj is None:
        return None
    pvnames = []
    for attr in self.attrs:
        sig = self.nested_getattr(obj, attr)
        try:
            pvnames.append(sig.pvname)
        except AttributeError:
            pvnames.append(None)
    return pvnames | [
"def get_pv_list(self):\n return [name for name in self.pv_dict.iterkeys()]",
"def __get_pv_names(k8s_conf):\n out_names = list()\n core_client = k8s_core_client(k8s_conf)\n pv_list = core_client.list_persistent_volume()\n for pv in pv_list.items:\n out_names.append(pv.metadata.name)\n return out_names",
"def get_children_names(obj):\n\n if hasattr(obj, '__dict__'):\n names = list(obj.__dict__.keys())\n\n elif hasattr(obj, 'keys'):\n names = list(obj.keys())\n\n if hasattr(obj, '__extra_attr__'):\n names += obj.__extra_attr__\n\n if hasattr(obj, 'state_dict'):\n names += list(obj.state_dict().keys())\n\n return list(set(names))",
"def get_name(self):\r\n names = []\r\n for volume in self.volumes:\r\n names += volume.get_name()\r\n return names",
"def __get_pv_attrs(k8s_conf, pv_name):\n core_client = k8s_core_client(k8s_conf)\n pv_list = core_client.list_persistent_volume()\n logger.debug('pv_list - %s', pv_list)\n for pv in pv_list.items:\n logger.debug('pv - %s', pv)\n if pv.metadata.name == pv_name:\n return pv.spec.capacity.get('storage'), pv.spec.host_path.path\n return None, None",
"def primals_names(self):\n pyomo_variables = self.get_pyomo_variables()\n return [v.getname(fully_qualified=True) for v in pyomo_variables]",
"def names(self):\n return self.__propNames",
"def _get_attrs_items(obj):\n attrs = getattr(obj.__class__, \"__attrs_attrs__\")\n attr_names = [a.name for a in attrs]\n return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]",
"def _all_names(self):\n # TODO: move to geobase\n names = [n for n in self._obj.coords]\n if isinstance(self._obj, xr.Dataset):\n names = names + [n for n in self._obj.data_vars]\n return names",
"def _prm_dict_names(self):\n #names = []\n #for attr in self.__dict__:\n # print attr\n # if re.search(r'^[^_].*_prm$', attr):\n # names.append(attr)\n names = [attr for attr in self.__dict__ if \\\n re.search(r'^[^_].*_prm$', attr)]\n return names",
"def get_object_attrs(obj):\n return dir(obj)",
"def get_property_names(obj: Any) -> List[str]:\n property_names = []\n\n if isinstance(obj, dict):\n for (key, value) in obj.items():\n property_names.append(key)\n elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set):\n for index in range(len(obj)):\n property_names.append(str(index))\n else:\n property_names = PropertyReflector.get_property_names(obj)\n\n return property_names",
"def pvname(self):\n return self._read_pvname",
"def get_dependent_object_names(self):\n return []",
"def show_pets(self):\r\n print(\"The owner of these pets : \" + self.owner)\r\n for each in self.name:\r\n print(each)",
"def get_instance_names(self):\r\n return [r.name for r in self.get_instances()]",
"def get_variable_names(self, objType):\n if self.__ex_get_variable_param(objType).value == 0:\n return []\n return self.__ex_get_variable_names(objType)",
"def get_instance_name_list(self):\n\n\t\treturn [instance['name'] for instance in self.get_all_instances()]",
"def field_names(self):\r\n return [field[\"name\"] for field in self.fields]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Do a getattr more than one level deep, splitting on '.' | def nested_getattr(self, obj, attr):
    steps = attr.split('.')
    for step in steps:
        obj = getattr(obj, step)
    return obj | [
"def get_attribute(s, ob):\n spart = s.partition('.')\n\n f = ob\n for part in spart:\n if part == '.':\n continue\n \n f = f.__getattribute__(part)\n \n return f",
"def _dotted_itemgetter(field_name):\n\n if '.' not in field_name:\n return operator.itemgetter(field_name)\n head, tail = field_name.split('.', 1)\n return lambda d: _dotted_itemgetter(tail)(d[head])",
"def get_value_from_str_dotted_key(d, dotted_key):\n keys = dotted_key.split('.')\n temp = copy.deepcopy(d)\n try:\n for key in keys:\n temp = temp[key]\n return temp\n except KeyError:\n return None",
"def extract_object(instance: object, path: str):\n element = instance\n for step in path.split(\".\"):\n element = getattr(element, step)\n return element",
"def __getattr__(self, key):\n\n if key[-1] == '_':\n key = key[:-1]\n\n return type(self)(name='{0}.{1}'.format(self.name, key))",
"def test_dot_list_access():\n dl = dotify(['fred', 'alex', 'bill'])\n assert_equals('fred', dl[0])\n assert_equals('fred', dl._0)\n assert_equals('bill', dl[2])\n assert_equals('bill', dl._2)",
"def dotdictget(myjson, dotdict):\n if re_delim.match(dotdict):\n normalized_dotdict = dotdict\n else:\n normalized_dotdict = '.' + dotdict\n\n return _dotdictget(myjson, normalized_dotdict, [])",
"def resolve_dotted_name(name: str) -> typing.Any:\n if not isinstance(name, str):\n return name # already an object\n names = name.split(\".\")\n used = names.pop(0)\n found = __import__(used)\n for n in names:\n used += \".\" + n\n try:\n found = getattr(found, n)\n except AttributeError:\n __import__(used)\n found = getattr(found, n)\n\n return found",
"def get_recursed_field_value(obj, field=\"\"):\n fields = field.split(\"__\")\n while fields:\n field = fields.pop(0)\n if field:\n try:\n obj = getattr(obj, field)\n except AttributeError:\n return \"\"\n\n return obj",
"def get_val_in_dict_dotted(field: str, dicto: Dict[str, Any]) -> Any:\n try:\n if \".\" not in field: # simple field; ex: \"logical_name\", \"sha512\"\n return dicto[field] # possible KeyError/TypeError\n\n # compound field; ex: \"checksum.sha512\"\n parent, child = field.split(\".\", maxsplit=1) # ex: \"checksum\" & \"sha512\"\n\n # ex: is \"sha512\" in \"checksum\"'s dict?\n # possible KeyError/TypeError\n return get_val_in_dict_dotted(child, dicto[parent])\n\n except (KeyError, TypeError) as e:\n raise DottedKeyError() from e",
"def get_attr1(cmd):\n\tif '.' in cmd:\n\t\tmethod = frappe.get_attr(cmd)\n\telse:\n\t\tmethod = globals()[cmd]\n\tfrappe.log(\"method:\" + cmd)\n\treturn method",
"def module_name_split(obj):\n n = inspect.getmodule(obj).__name__\n return n.split(\".\")",
"def my_getattr(obj, query_list):\n\n if len(query_list) > 1:\n try:\n return my_getattr(getattr(obj, query_list[0]), query_list[1:])\n except:\n # DoesNotExist\n return '(none)'\n return getattr(obj, query_list[0])",
"def test_multilevel_attributes():\n prefix = Prefix(os.sep + \"usr\" + os.sep)\n\n assert prefix.share.man == os.sep + os.path.join(\"usr\", \"share\", \"man\")\n assert prefix.man.man8 == os.sep + os.path.join(\"usr\", \"man\", \"man8\")\n assert prefix.foo.bar.baz == os.sep + os.path.join(\"usr\", \"foo\", \"bar\", \"baz\")\n\n share = prefix.share\n\n assert isinstance(share, Prefix)\n assert share.man == os.sep + os.path.join(\"usr\", \"share\", \"man\")",
"def dotted_prefixes(string, proper_only=False):\n string_parts = string.split('.')\n for i in xrange(len(string_parts) - (1 if proper_only else 0)):\n yield '.'.join(string_parts[:i + 1])",
"def expand_dotdict(dotdict):\n dtype = type(dotdict)\n result = dtype()\n\n for key, value in dotdict.items():\n path = key.split('.')\n assert path, 'Invalid dot-notation path'\n\n node = result\n\n for part in path[:-1]:\n node = node.setdefault(part, dtype())\n assert isinstance(node, dtype), 'Incompatible paths to {}'.format(\n key,\n )\n\n node[path[-1]] = value\n\n return result",
"def __getattr__(self, attr):\n if attr.startswith('_'):\n return DispatchBaseClass.__getattr__(self, attr) \n \n try:\n extendedPropMap = self._prop_map_get_ex_\n except AttributeError:\n extendedPropMap = {}\n \n if attr in extendedPropMap:\n return extendedPropMap[attr](self)\n \n value = DispatchBaseClass.__getattr__(self, attr)\n if attr.endswith('s') and hasattr(self.api, attr):\n try:\n value = getattr(self.api, attr)(value)\n except:\n pass\n return value",
"def split_field_path(field_path):\n if not isinstance(field_path, str):\n raise InvalidTypeError(\"field_path\", field_path, str)\n search_parts = field_path.split(\"__\")\n field = search_parts[0]\n try:\n label = search_parts[1]\n except IndexError:\n label = None\n try:\n subfield = search_parts[2]\n except IndexError:\n subfield = None\n return field, label, subfield",
"def split_dotfile(dotfile):\n return os.path.dirname(dotfile), os.path.basename(dotfile)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a script. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def get_script(self, script_id, **kwargs):
    all_params = ['script_id']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_script" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'script_id' is set
    if ('script_id' not in params) or (params['script_id'] is None):
        raise ValueError("Missing the required parameter `script_id` when calling `get_script`")
    resource_path = '/api/v2/scripts/{scriptId}'.replace('{format}', 'json')
    path_params = {}
    if 'script_id' in params:
        path_params['scriptId'] = params['script_id']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['PureCloud OAuth']
    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Script',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response | [
"async def get_script(self) -> str:\n response = await self.communicator.send_command(\n Message.command(\"get_script\", \"\")\n )\n if response.response_status == ResponseStatus.ERROR:\n raise RuntimeError(\"Response status error while fetching script.\")\n\n return response.message_data",
"def get_script_content(session: Session, id: str):\n response = session.get(endpoints.scripts() + '/{}'.format(id), timeout=30)\n if response.ok:\n return response.text\n else:\n raise Exception(get_error_message(response))",
"def list_scripts() -> dict:\n endpoint_url = '/real-time-response/entities/scripts/v1'\n response = http_request('GET', endpoint_url)\n return response",
"def get_script(cluster_name: Optional[str] = None,\n database_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n script_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScriptResult:\n __args__ = dict()\n __args__['clusterName'] = cluster_name\n __args__['databaseName'] = database_name\n __args__['resourceGroupName'] = resource_group_name\n __args__['scriptName'] = script_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:kusto:getScript', __args__, opts=opts, typ=GetScriptResult).value\n\n return AwaitableGetScriptResult(\n continue_on_errors=pulumi.get(__ret__, 'continue_on_errors'),\n force_update_tag=pulumi.get(__ret__, 'force_update_tag'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n script_url=pulumi.get(__ret__, 'script_url'),\n system_data=pulumi.get(__ret__, 'system_data'),\n type=pulumi.get(__ret__, 'type'))",
"def load_script(self, command):\n try:\n s = script.Script(command, self)\n except script.ScriptError as v:\n return v.args[0]\n self.scripts.append(s)",
"def load_script(browser, url):\n if browser.current_url.startswith('file:'):\n url = 'https:' + url\n browser.execute_script(\"\"\"\n var script_tag = document.createElement(\"script\");\n script_tag.setAttribute(\"type\", \"text/javascript\");\n script_tag.setAttribute(\"src\", arguments[0]);\n document.getElementsByTagName(\"head\")[0].appendChild(script_tag);\n \"\"\", url)\n\n sleep(1)",
"def load_script(filename, script_type):\n try:\n return load_source(script_type, filename)\n except Exception,e:\n logging.error(str(script_type.capitalize()) + ' file (' + str(filename) + ') could not be loaded ' + str(e))\n return None",
"def get_scripts_published_script_id(self, script_id, **kwargs):\n\n all_params = ['script_id', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published_script_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_scripts_published_script_id`\")\n\n\n resource_path = '/api/v2/scripts/published/{scriptId}'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Script',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def download_script(_downloadUrl):\n session = requests.Session()\n poc_log(\"Downloading script at {}\".format(_downloadUrl))\n script = session.get(_downloadUrl, auth=(\"\", \"\"))\n session.close()\n poc_log(\"{0}\".format(script))\n with open('script.py', 'w') as rackhd_script:\n rackhd_script.write(script.text)",
"def ProcessScript(self, request):\n\n return self.__scripts[request.GetUrl()](request)",
"def script_url(self) -> Optional[str]:\n return pulumi.get(self, \"script_url\")",
"def get_scripts_published(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'expand', 'name', 'feature', 'flow_id', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/scripts/published'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'name' in params:\n query_params['name'] = params['name']\n if 'feature' in params:\n query_params['feature'] = params['feature']\n if 'flow_id' in params:\n query_params['flowId'] = params['flow_id']\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ScriptEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def handle_scripts_get():\n with Sessions.current() as session: # noqa: F841\n if ScriptRoot is None:\n files = []\n else:\n files = util.listAllFiles(ScriptRoot, \".py\")\n\n return json.dumps(sorted(files))",
"def get_script(script_id):\r\n script: ScriptModule = available_post_modules[int(script_id)]\r\n\r\n with open(script.file_path, \"r\") as f:\r\n script_string = f.read()\r\n\r\n if script.os_type == \"windows\":\r\n # append command and return\r\n return flask.jsonify({\r\n 'script': f\"{script_string}; {script.command}\"\r\n })\r\n elif script.os_type == \"linux\":\r\n return flask.jsonify({\r\n 'script': f\"{script_string}\"\r\n })",
"def list(self):\n return self.connection.get(self.service + \"/AllScripts\")",
"def download_script(_downloadUrl):\n poc_log(\"Downloading script at {}\".format(_downloadUrl))\n script = urllib2.urlopen(_downloadUrl).read()\n poc_log(\"{0}\".format(script))\n with open('script.py', 'w') as rackhd_script:\n rackhd_script.write(script)",
"def scripts(self):\n scripts_yml = os.path.join(os.path.dirname(inspect.getmodule(self).__file__), \"scripts.yml\")\n if not os.path.exists(scripts_yml):\n return {}\n with open(scripts_yml, \"r\") as scripts_yml_fp:\n scripts = yaml.safe_load(scripts_yml_fp)\n return scripts",
"def source_script(self):\n return self._data.get('source_script')",
"def get_initial_script():\n \n return Script.script_stack[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the published scripts. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def get_scripts_published(self, **kwargs):
    all_params = ['page_size', 'page_number', 'expand', 'name', 'feature', 'flow_id', 'script_data_version']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_scripts_published" % key
            )
        params[key] = val
    del params['kwargs']
    resource_path = '/api/v2/scripts/published'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    if 'page_size' in params:
        query_params['pageSize'] = params['page_size']
    if 'page_number' in params:
        query_params['pageNumber'] = params['page_number']
    if 'expand' in params:
        query_params['expand'] = params['expand']
    if 'name' in params:
        query_params['name'] = params['name']
    if 'feature' in params:
        query_params['feature'] = params['feature']
    if 'flow_id' in params:
        query_params['flowId'] = params['flow_id']
    if 'script_data_version' in params:
        query_params['scriptDataVersion'] = params['script_data_version']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['PureCloud OAuth']
    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ScriptEntityListing',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response | [
"def list_scripts() -> dict:\n endpoint_url = '/real-time-response/entities/scripts/v1'\n response = http_request('GET', endpoint_url)\n return response",
"def list(self):\n return self.connection.get(self.service + \"/AllScripts\")",
"def scripts(self):\n scripts_yml = os.path.join(os.path.dirname(inspect.getmodule(self).__file__), \"scripts.yml\")\n if not os.path.exists(scripts_yml):\n return {}\n with open(scripts_yml, \"r\") as scripts_yml_fp:\n scripts = yaml.safe_load(scripts_yml_fp)\n return scripts",
"def get_scripts_published_script_id(self, script_id, **kwargs):\n\n all_params = ['script_id', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published_script_id\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_scripts_published_script_id`\")\n\n\n resource_path = '/api/v2/scripts/published/{scriptId}'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Script',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def handle_scripts_get():\n with Sessions.current() as session: # noqa: F841\n if ScriptRoot is None:\n files = []\n else:\n files = util.listAllFiles(ScriptRoot, \".py\")\n\n return json.dumps(sorted(files))",
"def get_scripts(self):\n return []",
"def get_scripts_published_script_id_variables(self, script_id, **kwargs):\n\n all_params = ['script_id', 'input', 'output', 'type', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published_script_id_variables\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_scripts_published_script_id_variables`\")\n\n\n resource_path = '/api/v2/scripts/published/{scriptId}/variables'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n if 'input' in params:\n query_params['input'] = params['input']\n if 'output' in params:\n query_params['output'] = params['output']\n if 'type' in params:\n query_params['type'] = params['type']\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='object',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def all(self, scriptid=None):\r\n return ScriptDB.objects.get_all_scripts_on_obj(self.obj, key=scriptid)",
"def get_scripts_list(self, state):\n return scripts_in_path(self.script_dir, state + \".d\")",
"def load_scripts(self):\n\n reload_interval = self.cfg.get(\"reload_interval\", 60)\n cls = self.cfg.get(\"resource_class\", \"%s.%s\" % (\"plugin\", self.cls))\n\n resources = self.db_find(\"store\", {\"class\": \"%s.%s\" % (self.cls, \"script\")}, list())\n\n for resource in resources:\n if resource[\"name\"] in self.invalid_scripts:\n continue\n name = resource[\"name\"]\n body = resource[\"body\"]\n if body != self.scripts.get(name, \"\"):\n self.logger.debug(\"Loaded script '%s'\" % (name))\n self.logger.debug(\"Body: %s\" % (body))\n self.scripts[name] = body\n\n self.service.reactor.callLater(reload_interval, self.load_scripts)",
"def get_published_repos(self):\n\n if self.verbose:\n print('Listing repos at: %s' % self.publish_url)\n\n r = self.__do_get(self.publish_url)\n\n # Create a distinct list of publications\n if r.status_code == requests.codes.ok:\n publications = r.json()\n return sorted(set([x['Prefix'] for x in publications]))\n else:\n raise AptlyApiError(r.status_code,\n 'Aptly API Error - %s - HTTP Error: %s' % (self.publish_url, r.status_code))",
"def _get_script_paths_from_scripts_node(self) -> list:\r\n paths: list = []\r\n\r\n script_nodes = ElementHelper.get(self.root_node, 'Scripts')\r\n if script_nodes is None:\r\n return []\r\n\r\n for script_node in script_nodes:\r\n if not script_node.tag.endswith('Script'):\r\n continue\r\n\r\n psc_path: str = self.parse(script_node.text)\r\n\r\n if ':' in psc_path:\r\n psc_path = psc_path.replace(':', os.sep)\r\n\r\n paths.append(os.path.normpath(psc_path))\r\n\r\n return PathHelper.uniqify(paths)",
"def _import_scripts():\n logger = _utils.get_logger()\n\n # Import parent package\n parent_package = 'scripts'\n importlib.import_module(parent_package, __name__)\n\n # Import scripts from package\n path = os.path.join(_utils.get_directory(), parent_package)\n scripts = []\n for importer, name, is_package in pkgutil.iter_modules([path]):\n try:\n try:\n # Get meta data\n metadata = _scrape_metadata(path, name, is_package)\n logger.debug('Script \"{}\" metadata: {}', name, metadata)\n # Get dependencies from meta data\n dependencies = metadata.get('dependencies', ())\n # Make to tuple if string\n if isinstance(dependencies, str):\n dependencies = (dependencies,)\n except AttributeError:\n dependencies = ()\n\n try:\n # Install dependencies\n for dependency in dependencies:\n _utils.install_dependency(dependency)\n except TypeError as exc:\n raise ScriptError() from exc\n\n try:\n # Import script\n logger.debug('Importing script \"{}\"', name)\n module = importlib.import_module('.' + name, parent_package)\n main = getattr(module, 'main')\n # Make sure that main is a co-routine\n if not asyncio.iscoroutinefunction(main):\n raise ScriptError(\n 'Main function of script \"{}\" is not a co-routine'.format(name))\n scripts.append((name, main))\n except (ImportError, AttributeError) as exc:\n raise ImportScriptError(name) from exc\n except ScriptError as exc:\n # Note: We are not re-raising here because script errors should not\n # affect other scripts that run fine\n logger.exception(exc)\n\n # Return scripts list\n return scripts",
"def get_script(self, script_id, **kwargs):\n\n all_params = ['script_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_script\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_script`\")\n\n\n resource_path = '/api/v2/scripts/{scriptId}'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Script',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_script_content(session: Session, id: str):\n response = session.get(endpoints.scripts() + '/{}'.format(id), timeout=30)\n if response.ok:\n return response.text\n else:\n raise Exception(get_error_message(response))",
"def get_all_scripts(self, key=None):\r\n if key:\r\n script = []\r\n dbref = self.dbref(key)\r\n if dbref or dbref == 0:\r\n script = self.dbref_search(dbref)\r\n if not script:\r\n script = self.filter(db_key=key)\r\n return script\r\n return self.all()",
"def load_scripts(self):\n for filename in self.interface.get('Scripts', []):\n glob, loc = {}, {}\n execfile(filename, glob, loc)\n self.scripts.append(loc['process'])",
"async def get_script(self) -> str:\n response = await self.communicator.send_command(\n Message.command(\"get_script\", \"\")\n )\n if response.response_status == ResponseStatus.ERROR:\n raise RuntimeError(\"Response status error while fetching script.\")\n\n return response.message_data",
"def get_deployments(self, **kwargs):\n return self._request('get', path='/deployments', params=kwargs, value_only=True)['data']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the published script. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def get_scripts_published_script_id(self, script_id, **kwargs):
    all_params = ['script_id', 'script_data_version']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_scripts_published_script_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'script_id' is set
    if ('script_id' not in params) or (params['script_id'] is None):
        raise ValueError("Missing the required parameter `script_id` when calling `get_scripts_published_script_id`")
    resource_path = '/api/v2/scripts/published/{scriptId}'.replace('{format}', 'json')
    path_params = {}
    if 'script_id' in params:
        path_params['scriptId'] = params['script_id']
    query_params = {}
    if 'script_data_version' in params:
        query_params['scriptDataVersion'] = params['script_data_version']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['PureCloud OAuth']
    response = self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Script',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response | [
"def get_scripts_published(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'expand', 'name', 'feature', 'flow_id', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/scripts/published'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'name' in params:\n query_params['name'] = params['name']\n if 'feature' in params:\n query_params['feature'] = params['feature']\n if 'flow_id' in params:\n query_params['flowId'] = params['flow_id']\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ScriptEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"async def get_script(self) -> str:\n response = await self.communicator.send_command(\n Message.command(\"get_script\", \"\")\n )\n if response.response_status == ResponseStatus.ERROR:\n raise RuntimeError(\"Response status error while fetching script.\")\n\n return response.message_data",
"def get_script(self, script_id, **kwargs):\n\n all_params = ['script_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_script\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_script`\")\n\n\n resource_path = '/api/v2/scripts/{scriptId}'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Script',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_script_content(session: Session, id: str):\n response = session.get(endpoints.scripts() + '/{}'.format(id), timeout=30)\n if response.ok:\n return response.text\n else:\n raise Exception(get_error_message(response))",
"def get_scripts_published_script_id_variables(self, script_id, **kwargs):\n\n all_params = ['script_id', 'input', 'output', 'type', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published_script_id_variables\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'script_id' is set\n if ('script_id' not in params) or (params['script_id'] is None):\n raise ValueError(\"Missing the required parameter `script_id` when calling `get_scripts_published_script_id_variables`\")\n\n\n resource_path = '/api/v2/scripts/published/{scriptId}/variables'.replace('{format}', 'json')\n path_params = {}\n if 'script_id' in params:\n path_params['scriptId'] = params['script_id']\n\n query_params = {}\n if 'input' in params:\n query_params['input'] = params['input']\n if 'output' in params:\n query_params['output'] = params['output']\n if 'type' in params:\n query_params['type'] = params['type']\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='object',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def list_scripts() -> dict:\n endpoint_url = '/real-time-response/entities/scripts/v1'\n response = http_request('GET', endpoint_url)\n return response",
"def ProcessScript(self, request):\n\n return self.__scripts[request.GetUrl()](request)",
"def handle_scripts_get():\n with Sessions.current() as session: # noqa: F841\n if ScriptRoot is None:\n files = []\n else:\n files = util.listAllFiles(ScriptRoot, \".py\")\n\n return json.dumps(sorted(files))",
"def script_url(self) -> Optional[str]:\n return pulumi.get(self, \"script_url\")",
"def get_script(script_id):\r\n script: ScriptModule = available_post_modules[int(script_id)]\r\n\r\n with open(script.file_path, \"r\") as f:\r\n script_string = f.read()\r\n\r\n if script.os_type == \"windows\":\r\n # append command and return\r\n return flask.jsonify({\r\n 'script': f\"{script_string}; {script.command}\"\r\n })\r\n elif script.os_type == \"linux\":\r\n return flask.jsonify({\r\n 'script': f\"{script_string}\"\r\n })",
"def get_script(cluster_name: Optional[str] = None,\n database_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n script_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScriptResult:\n __args__ = dict()\n __args__['clusterName'] = cluster_name\n __args__['databaseName'] = database_name\n __args__['resourceGroupName'] = resource_group_name\n __args__['scriptName'] = script_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:kusto:getScript', __args__, opts=opts, typ=GetScriptResult).value\n\n return AwaitableGetScriptResult(\n continue_on_errors=pulumi.get(__ret__, 'continue_on_errors'),\n force_update_tag=pulumi.get(__ret__, 'force_update_tag'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n script_url=pulumi.get(__ret__, 'script_url'),\n system_data=pulumi.get(__ret__, 'system_data'),\n type=pulumi.get(__ret__, 'type'))",
"def scripts(self):\n scripts_yml = os.path.join(os.path.dirname(inspect.getmodule(self).__file__), \"scripts.yml\")\n if not os.path.exists(scripts_yml):\n return {}\n with open(scripts_yml, \"r\") as scripts_yml_fp:\n scripts = yaml.safe_load(scripts_yml_fp)\n return scripts",
"def load_script(self, command):\n try:\n s = script.Script(command, self)\n except script.ScriptError as v:\n return v.args[0]\n self.scripts.append(s)",
"def source_script(self):\n return self._data.get('source_script')",
"def list(self):\n return self.connection.get(self.service + \"/AllScripts\")",
"def grab_movie_script(self, title):\n query_url = query_base + title.replace(\" \", \"-\") + \".html\"\n resp = requests.get(query_url)\n if resp.ok:\n parsed_script = self.parse_html()\n else:\n print \"ERROR URL DOES NOT EXIST:\", query_url\n print \"PROGRAM BREAKING\"\n quit()\n return parsed_script",
"def script(self):\n if 'Suppress-Script' in self.data['record']:\n return Subtag(self.data['record']['Suppress-Script'], 'script')\n return None",
"def get(self) -> List[str]:\n build_scripts = self.get_for_platform()\n build_script = build_scripts.get(\n get_platform_version(), build_scripts[\"generic\"]\n )\n assert isinstance(build_script, list)\n return build_script",
"def get_script_provenance(path_to_archive):\n with ZipFile(os.path.normpath(path_to_archive), 'r') as archive:\n try:\n commit_details = get_metadata(path_to_archive=path_to_archive)\n workdir = mkdtemp(dir=os.path.dirname(path_to_archive))\n archive.extract('/'.join(('script', commit_details['script_name'])), workdir)\n if commit_details['script_name'].endswith('.json'):\n script_object = read_json(\n os.path.join(workdir, 'script',\n commit_details['script_name'].split('.')[0]))\n elif commit_details['script_name'].endswith('.py'):\n spec = util.spec_from_file_location(\n \"script_module\",\n os.path.join(workdir, 'script', commit_details['script_name']),\n )\n script_module = util.module_from_spec(spec)\n spec.loader.exec_module(script_module)\n script_object = script_module.SCRIPT\n rmtree(workdir)\n return script_object\n except Exception as e:\n print(e)\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the published variables This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def get_scripts_published_script_id_variables(self, script_id, **kwargs):
all_params = ['script_id', 'input', 'output', 'type', 'script_data_version']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scripts_published_script_id_variables" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'script_id' is set
if ('script_id' not in params) or (params['script_id'] is None):
raise ValueError("Missing the required parameter `script_id` when calling `get_scripts_published_script_id_variables`")
resource_path = '/api/v2/scripts/published/{scriptId}/variables'.replace('{format}', 'json')
path_params = {}
if 'script_id' in params:
path_params['scriptId'] = params['script_id']
query_params = {}
if 'input' in params:
query_params['input'] = params['input']
if 'output' in params:
query_params['output'] = params['output']
if 'type' in params:
query_params['type'] = params['type']
if 'script_data_version' in params:
query_params['scriptDataVersion'] = params['script_data_version']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
callback=params.get('callback'))
return response | [
"def list_variables(self, refresh=False):\n if self._variables is None or refresh:\n request = self.workspaces_service.variables().list(parent=self.path)\n\n response = request.execute()\n self._variables = [\n gtm_manager.variable.GTMVariable(\n variable=x, service=self.service, parent=self.path\n )\n for x in response.get(\"variable\") or []\n ]\n return self._variables",
"def __get_vars(self):\n\t\tself.__tvars = {}\n\t\tvar = self.dom.getElementsByTagName(\"vars\")[0]\n\t\tfor var_xml in var.getElementsByTagName(\"var\"):\n\t\t\tv_name = XmlHandler.get_label(\"name\", var_xml)\n\t\t\tv_value = XmlHandler.get_label(\"value\", var_xml)\n\t\t\tv_topo = XmlHandler.get_label(\"topology\", var_xml)\n\t\t\tif (v_topo is not None) and (v_topo.lower() == \"yes\"):\n\t\t\t\tself.__topovars[v_name] = v_value\n\t\t\telse: self.__compvars[v_name] = v_value",
"def show_vars(self):\n click.echo(settings.vars)",
"def defined_variables(self):\n return getattr(self, \"VARIABLES\", {})",
"def get_all_variables(self):\n with self.graph.as_default():\n return [_from_proto_fn(var_def) for var_def in self.info.variables]",
"def print_variables(self) -> None:\n\n file = self.files[0]\n file_path = os.path.join(self.folder, file)\n netcdf_file = Dataset(file_path)\n\n print(f'Variables in netcdf file {list(netcdf_file.variables)}')\n netcdf_file.close()",
"def statsvars(dcid):\n response = fetch_data(\n '/place/stats-var',\n {\n 'dcids': [dcid],\n },\n compress=False,\n post=False,\n has_payload=False\n )\n return response['places'][dcid]['statsVars']",
"def GypVariables(self):\n return self.variables",
"def variables(self, only_used=False):\n variables = []\n\n # Generate variable for each label\n for label in self.process.labels.values():\n var = self.determine_variable(label, shadow_use=True)\n if var:\n variables.append(self.determine_variable(label, shadow_use=True))\n\n if only_used:\n variables = [v for v in variables if v.use > 0]\n return variables",
"def _load_variables(self):\n for var in self.variables:\n try:\n if len(var['variable_status']) > 1 and var['variable_status'] == 'Retired':\n self._retire_variable_from_db(var['variable_name'])\n\n elif self._should_process_variable(var):\n var_entity = self._create_var_entity(var)\n self._create_non_variable_entities(var, var_entity) \n self._update_max_datetime(utils.date_str_to_datetime(var['release_date']))\n except:\n pass\n # force to set type = 'OTF' (instead of 'EDGE') if it was a variable based on derived edge, e.g. rcvr_diff_auth_amt_dk_160:\n graph.cypher.execute(\"match(otf:Var)<-[:DEPEND_ON]-(derived:Var)-[:DEPEND_ON]->(raw:Var) where raw.is_raw_edge is null and derived.type='EDGE' set derived.type='OTF'\")\n\n self.watermark.last_updated = self.max_datetime",
"def scoped_vars(self):\n return self._scoped_vars",
"def variables(self):\n #We give preference to test specification variable lists.\n if self.test is not None:\n return self.test.variables\n elif self.group is not None:\n return self.group.variables\n else:\n return []",
"def getPublishedPlugs(*args, **kwargs):\n \n pass",
"def fetch(project_name, config_name):\n # project = gcloud._helpers._determine_default_project()\n client = _create_client()\n\n variable_names = _list_variables(client, project_name, config_name)\n variables = _fetch_variable_values(client, variable_names)\n\n return variables",
"def get_variables(self) -> np.ndarray:\n return self._variables",
"def _get_properties(self, force=False):\r\n return self._portal.get_properties(force)",
"def stage_variables(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n return pulumi.get(self, \"stage_variables\")",
"def get_variable_groups(self, project, group_name=None, action_filter=None, top=None, continuation_token=None, query_order=None):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n query_parameters = {}\n if group_name is not None:\n query_parameters['groupName'] = self._serialize.query('group_name', group_name, 'str')\n if action_filter is not None:\n query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')\n if top is not None:\n query_parameters['$top'] = self._serialize.query('top', top, 'int')\n if continuation_token is not None:\n query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')\n if query_order is not None:\n query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')\n response = self._send(http_method='GET',\n location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',\n version='6.0-preview.2',\n route_values=route_values,\n query_parameters=query_parameters)\n return self._deserialize('[VariableGroup]', self._unwrap_collection(response))",
"def get_scripts_published(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'expand', 'name', 'feature', 'flow_id', 'script_data_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_scripts_published\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/scripts/published'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'name' in params:\n query_params['name'] = params['name']\n if 'feature' in params:\n query_params['feature'] = params['feature']\n if 'flow_id' in params:\n query_params['flowId'] = params['flow_id']\n if 'script_data_version' in params:\n query_params['scriptDataVersion'] = params['script_data_version']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ScriptEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the upload status of an imported script This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. | def get_scripts_upload_status(self, upload_id, **kwargs):
all_params = ['upload_id', 'long_poll']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scripts_upload_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'upload_id' is set
if ('upload_id' not in params) or (params['upload_id'] is None):
raise ValueError("Missing the required parameter `upload_id` when calling `get_scripts_upload_status`")
resource_path = '/api/v2/scripts/uploads/{uploadId}/status'.replace('{format}', 'json')
path_params = {}
if 'upload_id' in params:
path_params['uploadId'] = params['upload_id']
query_params = {}
if 'long_poll' in params:
query_params['longPoll'] = params['long_poll']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ImportScriptStatusResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response | [
"def get_upload_status(uploadId=None):\n pass",
"def import_status(self):\n result = self.__get_object('imports', None, None)\n if not 'status' in result:\n self.log.error(\"Unable to find 'status' key in result: %s\" % (result))\n return None \n elif not result['status'] in ['ready', 'queued', 'processing', 'succeeded', 'failed' ]:\n self.log.error(\"Unexpected status '%s' for import status. Check API and update library. Result = %s\" % (status, result))\n return None\n return result",
"def user_import_get_status(request, uimport_id):\n if not request.user.profile.is_superuser:\n raise Http403\n #invalidate('profiles_userimport')\n uimport = get_object_or_404(UserImport,\n pk=uimport_id)\n\n status_data = {'status': uimport.status,\n 'total_rows': str(uimport.total_rows),\n 'num_processed': str(uimport.num_processed)}\n\n if uimport.status == 'completed':\n summary_list = uimport.summary.split(',')\n status_data['num_insert'] = summary_list[0].split(':')[1]\n status_data['num_update'] = summary_list[1].split(':')[1]\n status_data['num_invalid'] = summary_list[2].split(':')[1]\n\n return HttpResponse(simplejson.dumps(status_data))",
"def check_tm_import_status(self, tmId: int, importId: str):\n\n return self.requester.request(\n method=\"get\", path=f\"{self.get_tms_path(tmId=tmId)}/imports/{importId}\"\n )",
"def _get_uploadState(self) -> \"adsk::core::UploadStates\" :\n return _core.DataFileFuture__get_uploadState(self)",
"def status(self):\n ret = self._get_attr(\"status\")\n return FileStatus(ret)",
"def get_status_file(self):\n return self.__status",
"def get_status(self, path, opt=None):\n\n url = self._paths_url(path, 'get-status')\n info = self._post(url, opt).json()\n return wire.FileInfo.from_json(info)",
"def get_status(self):\n request = self.workspaces_service.getStatus(path=self.path)\n return request.execute()",
"def get_status(self):\n result = None\n try:\n r = requests.get(self.url_status)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def get_status(self):\n result = self.endpoint.get(endpoint=self.name + \"/status\")\n if result.status_code != requests.codes.ok:\n raise Exception(\n \"Error retrieving indexer status. \"\n \"result: {result}, content: {content}\".format(\n result=result, content=result.content)\n )\n\n return json.loads(result.content)",
"def get_status(self):\n\n status_value = ExecutionStatusCode.NOT_LAUNCHED\n\n if self.play_uuid == '-': # Initialized\n status_value = ExecutionStatusCode.NOT_LAUNCHED\n elif self.play_uuid == '': # Error launching playbook\n status_value = ExecutionStatusCode.ERROR\n else:\n endpoint = \"%s/%s\" % (PLAYBOOK_EXEC_URL, self.play_uuid)\n response = self.rest_client.http_get(endpoint)\n\n if response:\n the_status = json.loads(response.text)[\"msg\"]\n if the_status == 'successful':\n status_value = ExecutionStatusCode.SUCCESS\n elif the_status == 'failed':\n status_value = ExecutionStatusCode.ERROR\n else:\n status_value = ExecutionStatusCode.ON_GOING\n else:\n status_value = ExecutionStatusCode.ERROR\n\n self.log.info(\"Requested playbook execution status is: %s\", status_value)\n return status_value",
"def status(self) -> 'outputs.UpdateRunStatusResponse':\n return pulumi.get(self, \"status\")",
"def get_status(self):\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result",
"def status(self, hdfs_path):\n self._logger.info('Fetching status for %s.', hdfs_path)\n return self._get_file_status(hdfs_path).json()['FileStatus']",
"def status(self, result, config=None):\r\n return result['status']",
"def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)",
"def get_status (self):\n return self.__status",
"def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invalidate the cache for this Model. If the key_instance or model_instance is passed, only that model will be deleted; otherwise the whole content should be dropped. | def invalidate(self, key_instance=None, model_instance=None):
raise NotImplementedError("invalidate should be implemented in any subclass!") | [
"def clear_cache(sender, instance, *args, **kwargs): # pylint: disable=unused-argument\n delete_instance(sender, instance)",
"def invalidate_model(model):\n model = non_proxy(model)\n conjs_keys = redis_client.keys('conj:%s:*' % get_model_name(model))\n if conjs_keys:\n cache_keys = redis_client.sunion(conjs_keys)\n redis_client.delete(*(list(cache_keys) + conjs_keys))",
"def flush_cached_instance(cls, instance):\r\n cls._flush_cached_by_key(instance._get_pk_val())",
"def delete(self):\n if self._store:\n self._store.delete(self.key)",
"def delete(self, *args, **kwargs):\r\n cache_key = self.make_key(args, kwargs)\r\n with self._cache_lock:\r\n try:\r\n del self._cache[cache_key]\r\n except KeyError:\r\n pass",
"def clear_settings_cache(instance, **kwargs):\n if CACHE_KEY in cache:\n cache.delete(CACHE_KEY)\n\n settings._backend.clear()",
"def clear(self) -> None:\n self._s3_cache.clear()\n self._model_id_semantic_version_manifest_key_cache.clear()",
"def evict(self):\n self._assert_valid_state()\n if self.valid:\n self.commit()\n for child in self.children:\n child.evict()\n self.valid = False\n self._delete_data()\n self._blob = None\n self._assert_valid_state()",
"def delete_cache(progress_key):\n django_cache.delete(progress_key)",
"def clear_cache(instance, attr_name='_cached'):\n for key in getattr(type(instance), attr_name, ()):\n instance.__dict__.pop(key, None)",
"def delete(self, key):\n cache_full_path = self._get_key_path(key)\n try:\n os.remove(cache_full_path)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise",
"def delete(self, key = None):\n memcache.delete('cache-'+key)\n result = self._read(key)\n if result:\n if 'AEU_Events' in __main__.__dict__:\n __main__.AEU_Events.fire_event('cacheDeleted')\n result.delete()",
"def invalidate(self, key):\r\n raise NotImplementedError",
"def delete(self, _key):\n\n try:\n document = self.cache_store[_key]\n document.previous.next_document = doc.next_document\n document.next_document.previous = document.previous\n del(self.cache_store[_key])\n\n except KeyError:\n raise KeyError(\"Document with _key %s is not available in cache\" % _key)",
"def do_destroy(self, *args):\n args = [e for e in args[0].split(' ')]\n if args[0] == '':\n print('** class name missing **')\n return\n if args[0] not in self.class_l:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print('** instance id missing **')\n return\n\n storage.reload()\n objs_dict = storage.all()\n\n if objs_dict is None:\n print(\"** no instance found **\")\n return\n key = \"{}.{}\".format(args[0], args[1])\n if key in objs_dict.keys():\n del objs_dict[key]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def clear_cache(self):\n cache.delete_many(self.get_cache_keys())",
"def remove_from_model_cache(app_label, model_name):\n\n # Delete cached model in M2M relationship\n try:\n model = app_cache.app_models[app_label][model_name.lower()]\n except KeyError:\n pass\n else:\n for f, __ in model._meta.get_m2m_with_model():\n try:\n del f.rel.to._meta._related_many_to_many_cache\n except AttributeError:\n pass\n\n # Delete from the central model cache\n try:\n del app_cache.app_models[app_label][model_name.lower()]\n except KeyError:\n pass",
"def _invalidate_caches(self):\n pass",
"def delete(self, k: str):\n\n if k in self.__cache_dict:\n if not self.__cache_dict[k][1]:\n if k in self.__change_set:\n # changed in cache\n self.__delete_set.add(k)\n del self.__cache_dict[k]\n self.__change_set.remove(k)\n else:\n # new in cache, not changed in cache\n del self.__cache_dict[k]\n else:\n self.__delete_set.add(k)\n del self.__cache_dict[k]\n else:\n raise KeyError(k)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a cache key given a key_instance, that can be a complete key, or only a part of it | def _get_cache_key(self, key_instance):
assert isinstance(key_instance, tuple), "The key_instance is wrong: %s" % key_instance
        # at: a key_instance is a tuple containing as a first element
        # the primary key of a PersistentObject, and as additional
        # elements the unique constraints for that model instance. It is
        # needed to index an object with all the parameters, because it
        # can be retrieved using different keys.
if not isinstance(key_instance, tuple):
# We assume someone tried to send the primary key directly
key_instance = (key_instance, None)
# try full shot
if self._cache.has_key(key_instance):
return key_instance
    # only part of a key was given; separate the
    # primary key and the unique constraints
pk, constraints = key_instance
for cache_key in self._cache:
# check with the pk first
if pk and pk == cache_key[0]:
return cache_key
# no pk found look in the constraint, are all unique
# so it should be enough to have one, it works also with
# unique together as a tuple
elif constraints:
for key_piece in constraints:
if key_piece:
if (key_piece not in cache_key[1]):
break
else:
return cache_key | [
"def get_cache_key(instance, extra=None):\n return '%s.%s.%s' % (instance.__class__.__name__, instance.short_url, extra) if extra else '%s.%s' % (instance.__class__.__name__, instance.short_url)",
"def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)",
"def _get_cache_key_from_model(self, model_instance):\n if model_instance:\n for cache_key, value in self._cache.items():\n # object identity should suffice, the goal of the cache is to\n # keep it unique\n if value == model_instance:\n return cache_key\n return None",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def get_specific_key(problem_id, version, key):\n return 'do some magic!'",
"def __cache_key__(*args, **kwargs):\n return args_to_key(base, args, kwargs, False)",
"def GetKeyByPath(self, key_path):\n return None",
"def cachekey(func, *args, **kwargs):\n args2 = arguments(func, *args, **kwargs)\n\n # ignoring `instance`\n instance_index = getattr(func, '_instance_index', False)\n if instance_index is not False:\n args2.pop(instance_index)\n\n return prefix(func) + str(args2)",
"def get_key_from_urlsafe(urlsafe):\n return ndb.Key(urlsafe=urlsafe)",
"def _get_cache_key(cls, args, kwargs):\r\n result = None\r\n # Quick hack for my composites work for now.\r\n if hasattr(cls._meta, 'pks'):\r\n pk = cls._meta.pks[0]\r\n else:\r\n pk = cls._meta.pk\r\n # get the index of the pk in the class fields. this should be calculated *once*, but isn't atm\r\n pk_position = cls._meta.fields.index(pk)\r\n if len(args) > pk_position:\r\n # if it's in the args, we can get it easily by index\r\n result = args[pk_position]\r\n elif pk.attname in kwargs:\r\n # retrieve the pk value. Note that we use attname instead of name, to handle the case where the pk is a\r\n # a ForeignKey.\r\n result = kwargs[pk.attname]\r\n elif pk.name != pk.attname and pk.name in kwargs:\r\n # ok we couldn't find the value, but maybe it's a FK and we can find the corresponding object instead\r\n result = kwargs[pk.name]\r\n\r\n if result is not None and isinstance(result, Model):\r\n # if the pk value happens to be a model instance (which can happen wich a FK), we'd rather use its own pk as the key\r\n result = result._get_pk_val()\r\n return result",
"def package_instance_key(package_name, instance_id):\n assert is_valid_instance_id(instance_id), instance_id\n return ndb.Key(PackageInstance, instance_id, parent=package_key(package_name))",
"def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)",
"def key(obj):\n try:\n return obj.key()\n except AttributeError:\n return obj",
"def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key",
"def _get_key( s3_path ):\n\n return S3Key(\n bucket = _get_bucket(),\n name = s3_path )",
"def make_cache_key(pattern, flags):\n return '{}_{}'.format(pattern, flags)",
"def get_apiauth_object_by_key(key):\n return API_Key.query.filter_by(key=key).first()",
"def _key_transform(key: CachePlayerKey) -> CacheKey:\n return key[0].name, key[1].name",
"def get_key(bucket, obj):\n\n key = bucket.get_key(obj)\n if not key or not key.exists():\n msg = _(\"Could not find key %(obj)s in bucket %(bucket)s\") % locals()\n logger.error(msg)\n raise exception.NotFound(msg)\n return key"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the cache key corresponding to the given model instance, if cached, otherwise return None | def _get_cache_key_from_model(self, model_instance):
if model_instance:
for cache_key, value in self._cache.items():
# object identity should suffice, the goal of the cache is to
# keep it unique
if value == model_instance:
return cache_key
return None | [
"def _get_cache_key(cls, args, kwargs):\r\n result = None\r\n # Quick hack for my composites work for now.\r\n if hasattr(cls._meta, 'pks'):\r\n pk = cls._meta.pks[0]\r\n else:\r\n pk = cls._meta.pk\r\n # get the index of the pk in the class fields. this should be calculated *once*, but isn't atm\r\n pk_position = cls._meta.fields.index(pk)\r\n if len(args) > pk_position:\r\n # if it's in the args, we can get it easily by index\r\n result = args[pk_position]\r\n elif pk.attname in kwargs:\r\n # retrieve the pk value. Note that we use attname instead of name, to handle the case where the pk is a\r\n # a ForeignKey.\r\n result = kwargs[pk.attname]\r\n elif pk.name != pk.attname and pk.name in kwargs:\r\n # ok we couldn't find the value, but maybe it's a FK and we can find the corresponding object instead\r\n result = kwargs[pk.name]\r\n\r\n if result is not None and isinstance(result, Model):\r\n # if the pk value happens to be a model instance (which can happen wich a FK), we'd rather use its own pk as the key\r\n result = result._get_pk_val()\r\n return result",
"def get_cache_key(self):\n\n return self.cache_key",
"def _get_cache_key(self, key_instance):\n assert isinstance(key_instance, tuple), \"The key_instance is wrong: %s\" % key_instance\n # at: a key_instance is a touple containing as a fisrt element\n # the primary key of a PersistentObject, and as additional\n # elements unique constraint for that model instance. It is\n # needed to index an object with all the parameters, cause it\n # can be retrieved using different keys. \n if not isinstance(key_instance, tuple):\n # We assume someone tried to send the primary key directly\n key_instance = (key_instance, None)\n \n # try full shot\n if self._cache.has_key(key_instance):\n return key_instance\n # is a part of a key separate the\n # elements keys and unique constraints\n pk, constraints = key_instance\n for cache_key in self._cache:\n # check with the pk first\n if pk and pk == cache_key[0]:\n return cache_key\n # no pk found look in the constraint, are all unique\n # so it should be enough to have one, it works also with\n # unique together as a tuple\n elif constraints:\n for key_piece in constraints:\n if key_piece:\n if (key_piece not in cache_key[1]):\n break\n else:\n return cache_key",
"def getCacheKey(self):\n\t\treturn self.cacheKey",
"def get_cache_key(instance, extra=None):\n return '%s.%s.%s' % (instance.__class__.__name__, instance.short_url, extra) if extra else '%s.%s' % (instance.__class__.__name__, instance.short_url)",
"def get_cache_key(self, request, view):\n ip_address = request.data.get('ip_address')\n return self.cache_format % {\n 'scope': self.scope,\n 'ident': ip_address or self.get_ident(request)\n }",
"def get_local_cache_key(self, obj, include_group_perms=True, permission_expiry=False):\n ctype = get_content_type(obj)\n return (ctype.id, force_str(obj.pk), include_group_perms, permission_expiry)",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def get_translation_cache_key(translated_model, master_id, language_code):\n # Always cache the entire object, as this already produces\n # a lot of queries. Don't go for caching individual fields.\n prefix = f\"{appsettings.PARLER_CACHE_PREFIX}.\" if appsettings.PARLER_CACHE_PREFIX else \"\"\n return f\"{prefix}parler.{translated_model._meta.app_label}.{translated_model.__name__}.{master_id}.{language_code}\"",
"def _get_cache_key(self, **kwargs):\n m = md5()\n for significant_kwarg in self.significant_kwargs:\n key, to_str = significant_kwarg\n m.update(to_str(kwargs[key]))\n\n if hasattr(self, 'cache_prefix'):\n cache_prefix = self.cache_prefix\n else:\n cache_prefix = '%s.%s' % (self.__module__, self.__name__)\n return '%s:%s' % (cache_prefix, m.hexdigest())",
"def key(obj):\n try:\n return obj.key()\n except AttributeError:\n return obj",
"def _get_cache_key(self, token_id):\n # NOTE(jamielennox): in the basic implementation there is no need for\n # a context so just pass None as it will only get passed back later.\n unused_context = None\n return self._CACHE_KEY_TEMPLATE % _hash_key(token_id), unused_context",
"def __cache_key__(*args, **kwargs):\n return args_to_key(base, args, kwargs, False)",
"def model_key(self):\n model = self.model()\n # pylint: disable=no-member\n return model.__name__ if inspect.isclass(model) else model",
"def get_cache_key(request, key_prefix=None, method=\"GET\", cache=None):\n if key_prefix is None:\n key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n cache_key = _generate_cache_header_key(key_prefix, request)\n if cache is None:\n cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]\n headerlist = cache.get(cache_key)\n if headerlist is not None:\n return _generate_cache_key(request, method, headerlist, key_prefix)\n else:\n return None",
"def get_cached_model(app_label, model_name, regenerate=False, local_hash=lambda i: i._hash):\n\n # If this model has already been generated, we'll find it here\n previous_model = models.get_model(app_label, model_name)\n\n # Before returning our locally cached model, check that it is still current\n if previous_model is not None and not regenerate:\n CACHE_KEY = HASH_CACHE_TEMPLATE % (app_label, model_name)\n if cache.get(CACHE_KEY) != local_hash(previous_model):\n logging.debug(\"Local and shared dynamic model hashes are different: %s (local) %s (shared)\" % (local_hash(previous_model), cache.get(CACHE_KEY)))\n regenerate = True\n \n # We can force regeneration by disregarding the previous model\n if regenerate:\n previous_model = None\n # Django keeps a cache of registered models, we need to make room for\n # our new one\n remove_from_model_cache(app_label, model_name)\n\n return previous_model",
"def cache_token_key_for_record(record):\n klass = record.__class__\n return \":\".join(map(str, [klass.__module__, klass.__name__, record.pk]))",
"def cache_key(self):\n return \" \".join([\n str(self.query._Query__kind),\n str(self.query._Query__ancestor),\n str(self.query._Query__filters),\n str(self.query._Query__orders),\n str(self.query._Query__app),\n str(self.query._Query__namespace)\n ]).replace(\" \", \"_\")",
"def _get_cache_key(r: WSGIRequest, c: BaseCache) -> str:\n r = _chop_querystring(r)\n r = _chop_cookies(r)\n return get_cache_key(r, None, r.method, c)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load MNIST data from `path`. | def load_mnist(path, kind="train"):
labels_path = os.path.join(path, "{:s}-labels-idx1-ubyte".format(kind))
images_path = os.path.join(path, "{:s}-images-idx3-ubyte".format(kind))
with open(labels_path, "rb") as lbpath:
_, _ = struct.unpack(">II", lbpath.read(8))
labels = np.fromfile(lbpath, dtype=np.uint8)
with open(images_path, "rb") as imgpath:
_, _, _, _ = struct.unpack(">IIII", imgpath.read(16))
images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
return images, labels | [
"def load_mnist(path, is_train=True):\n\n\n if is_train:\n prefix = 'train'\n else:\n prefix = 't10k'\n\n\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte.gz'.format(prefix))\n images_path = os.path.join(path,'{}-images-idx3-ubyte.gz'.format(prefix))\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 28,28,1)\n\n images = images/255.0\n labels = keras.utils.to_categorical(labels, 10)\n return images, labels",
"def import_images(datapath=\"../data/mnist/mnist.pkl\"):\n with open(datapath,'rb') as f:\n mnist = pickle.load(f)[\"training_images\"]\n print(\"Done Loading\")\n mnist = np.reshape(mnist,[-1,28,28])\n return mnist",
"def read_mnist(dataset=\"training\", path=\".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError, \"dataset must be 'testing' or 'training'\"\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = list(zip(images, labels))",
"def load_train(self):\n images, labels = self.load(os.path.join('mnist', 'train', 'images'),\n os.path.join('mnist', 'train', 'labels'))\n self.train_data = zip(images, labels)",
"def load_imdb(path):\n f = np.load(path, allow_pickle=True)\n x_train = f[\"x_train\"]\n labels_train = f[\"y_train\"]\n x_test = f[\"x_test\"]\n labels_test = f[\"y_test\"]\n x_unsup = f[\"x_unsup\"]\n\n # Convert labels to one-hot.\n labels = np.concatenate([labels_train, labels_test])\n labels = np_utils.to_categorical(labels, NB_CLASSES)\n y_train = np.array(labels[: len(x_train)])\n y_test = np.array(labels[len(x_train) :])\n\n return (x_train, y_train), (x_test, y_test), x_unsup",
"def load(path=None):\n \n if path is None:\n path = get_path('hwdetect/data/data_sets/1_pixel_labels/ariel_26-10_5959.pkl')\n with open(path, 'rb') as f:\n ret = pickle.load(f)\n return ret",
"def load_mnist():\r\n\r\n print('Loading train data...')\r\n train_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=True, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n train_input = []\r\n train_label = []\r\n \r\n cnt = 0\r\n for batch, label in tqdm(train_data):\r\n train_input.append(batch.squeeze().numpy().reshape(784,))\r\n train_label.append(label.numpy())\r\n cnt += 1\r\n if cnt == 1300: break\r\n\r\n print('Loading test data...')\r\n test_data = torch.utils.data.DataLoader(\r\n torchvision.datasets.MNIST('mnist/', \r\n train=False, \r\n download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor()\r\n ])),\r\n shuffle=True,)\r\n\r\n test_input = []\r\n test_label = []\r\n \r\n for batch, label in tqdm(test_data):\r\n test_input.append(batch.squeeze().numpy().reshape(784,))\r\n test_label.append(label.numpy())\r\n\r\n return np.array(train_input), np.array(train_label), np.array(test_input), np.array(test_label)",
"def import_mnist(preprocess=True):\n print(\"Downloading MNIST data...\", end='')\n from keras.datasets import mnist\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)\n if(preprocess):\n X_train = pre_process(X_train)\n X_test = pre_process(X_test)\n print(\"done.\")\n return X_train, y_train, X_test, y_test, X_train.shape[0]",
"def load_mnist_images():\n (mnist_x, mnist_y), (mnist_x_test, mnist_y_test) = mnist.load_data()\n\n mnist_x, mnist_x_test = np.reshape(mnist_x, (-1, 28, 28, 1)), np.reshape(mnist_x_test, (-1, 28, 28, 1))\n\n # Scale everything to 0-1\n mnist_x, mnist_x_test, = normalize_0_1([mnist_x, mnist_x_test])\n\n return (mnist_x, transform_to_one_hot(mnist_y, depth=10)), (mnist_x_test, transform_to_one_hot(mnist_y_test, depth=10))",
"def load_tiny_imagenet(path, dtype=np.float32):\n # First load wnids\n with open(os.path.join(path, 'wnids.txt'), 'r') as f:\n wnids = [x.strip() for x in f]\n\n # Map wnids to integer labels\n wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}\n\n # Use words.txt to get names for each class\n with open(os.path.join(path, 'words.txt'), 'r') as f:\n wnid_to_words = dict(line.split('\\t') for line in f)\n for wnid, words in wnid_to_words.iteritems():\n wnid_to_words[wnid] = [w.strip() for w in words.split(',')]\n class_names = [wnid_to_words[wnid] for wnid in wnids]\n\n # Next load training data.\n X_train = []\n y_train = []\n for i, wnid in enumerate(wnids):\n if (i + 1) % 20 == 0:\n print('loading training data for synset %d / %d' % (i + 1, len(wnids)))\n # To figure out the filenames we need to open the boxes file\n boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)\n with open(boxes_file, 'r') as f:\n filenames = [x.split('\\t')[0] for x in f]\n num_images = len(filenames)\n\n X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)\n y_train_block = wnid_to_label[wnid] * np.ones(num_images, dtype=np.int64)\n for j, img_file in enumerate(filenames):\n img_file = os.path.join(path, 'train', wnid, 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n ## grayscale file\n img.shape = (64, 64, 1)\n X_train_block[j] = img.transpose(2, 0, 1)\n X_train.append(X_train_block)\n y_train.append(y_train_block)\n\n # We need to concatenate all training data\n X_train = np.concatenate(X_train, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n\n # Next load validation data\n with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:\n img_files = []\n val_wnids = []\n for line in f:\n img_file, wnid = line.split('\\t')[:2]\n img_files.append(img_file)\n val_wnids.append(wnid)\n num_val = len(img_files)\n y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])\n X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'val', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_val[i] = img.transpose(2, 0, 1)\n\n # Next load test images\n # Students won't have test labels, so we need to iterate over files in the\n # images directory.\n img_files = os.listdir(os.path.join(path, 'test', 'images'))\n X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'test', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_test[i] = img.transpose(2, 0, 1)\n\n y_test = None\n y_test_file = os.path.join(path, 'test', 'test_annotations.txt')\n if os.path.isfile(y_test_file):\n with open(y_test_file, 'r') as f:\n img_file_to_wnid = {}\n for line in f:\n line = line.split('\\t')\n img_file_to_wnid[line[0]] = line[1]\n y_test = [wnid_to_label[img_file_to_wnid[img_file]] for img_file in img_files]\n y_test = np.array(y_test)\n\n return class_names, X_train, y_train, X_val, y_val, X_test, y_test",
"def load_files(folder='../MNIST_data/', source_url=None):\n\n\t\tif source_url:\n\t\t\treturn read_data_sets(folder, source_url=source_url, one_hot=False)\n\t\telse:\n\t\t\treturn read_data_sets(folder, one_hot=False)",
"def trained_classifier_load(self, path=\"../../../datasets/multinomial_nb_classifier\"):\n \n with open(path, 'rb') as f:\n \n self.classifier = pickle.load(f)",
"def get_mnist_data(url):\n filename = download_data(url)\n basename = os.path.splitext(filename)[0]\n with gzip.open(filename, 'rb') as s_file, \\\n open(basename, 'wb') as d_file:\n shutil.copyfileobj(s_file, d_file, 65536)\n\n with open(basename, 'rb') as fdata:\n magic = struct.unpack(\">I\", fdata.read(4))[0]\n # Image Data Set\n if magic == 2051:\n num, rows, cols = struct.unpack(\">III\", fdata.read(12))\n data = np.fromfile(fdata, dtype=np.uint8).reshape(num, rows, cols)\n # Label Data Set\n elif magic == 2049:\n num = struct.unpack(\">I\", fdata.read(4))[0]\n data = np.fromfile(fdata, dtype=np.uint8)\n else:\n raise Exception('URL return neither image or label dataset')\n return data",
"def read_mnist_data(image_filename, labels_filename):\n MNIST_DIR = './MNIST_data/'\n image_filepath = os.path.join(MNIST_DIR, image_filename)\n with gzip.open(image_filepath, 'rb') as images_stream:\n magic = read32_big_endian(images_stream)\n assert magic == 2051\n num_images = read32_big_endian(images_stream)\n num_rows = read32_big_endian(images_stream)\n num_columns = read32_big_endian(images_stream)\n images = numpy.frombuffer(images_stream.read(), dtype = numpy.uint8)\n images = numpy.reshape(images, [num_images, num_rows, num_columns])\n\n labels_filepath = os.path.join(MNIST_DIR, labels_filename)\n with gzip.open(labels_filepath, 'rb') as labels_stream:\n magic = read32_big_endian(labels_stream)\n assert magic == 2049\n num_items = read32_big_endian(labels_stream)\n labels = numpy.frombuffer(labels_stream.read(), dtype = numpy.uint8)\n\n assert num_images == num_items\n\n return DataSet(images, labels, num_images)",
"def load_mnist(n_samples=None, class_0='0', class_1='8'):\n # Load data from http://openml.org/d/554\n mnist = fetch_openml('mnist_784', version=1)\n\n # take only two classes for binary classification\n mask = np.logical_or(mnist.target == class_0, mnist.target == class_1)\n\n X, y = shuffle(mnist.data[mask], mnist.target[mask], random_state=42)\n if n_samples is not None:\n X, y = X[:n_samples], y[:n_samples]\n return X, y",
"def get_mnist():\n mndata = MNIST('./data/')\n train_x, train_y = mndata.load_training()\n test_x, test_y = mndata.load_testing()\n print(\"Loaded MNIST\")\n return train_x, train_y, test_x, test_y",
"def load_img(path: str) -> np.ndarray:\n return np.array(Image.open(path))",
"def load_data_npy(path, n_labels, one_hot=True):\r\n data = np.load(path)\r\n # 0th column for ids\r\n ids = data[:, 0]\r\n if not one_hot:\r\n # Convert to one hot\r\n y = one_hot_conversion(y_basic, n_labels)\r\n # Data columns\r\n x = data[:, 2:]\r\n else:\r\n # Label column\r\n y = np.array(data[:, 1:n_labels+1], dtype=int)\r\n # Data columns\r\n x = data[:, n_labels+1:]\r\n return x, y, ids"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
To craft the attack it is easiest if the prepended bytes are an integer multiple of the block_size; this function figures out how many bytes need to be padded to make that happen. Then the next block can be used in the attack; the index of this block is also returned | def get_prepend_padding_length_and_block_index(block_size, encryptor):
def block_getter(byte_str: bytes, block: int) -> bytes:
return byte_str[block * block_size:(1 + block) * block_size]
def get_num_of_identical_blocks(cipher1, cipher2):
for cipher_block_index in range(int(len(cipher2) / block_size)):
if not block_getter(cipher1, cipher_block_index) == block_getter(cipher2, cipher_block_index):
return cipher_block_index
prev_cipher = encryptor(b'\x00')
prev_highest_identical_block = get_num_of_identical_blocks(encryptor(b''), prev_cipher)
for prepend_padding_length in range(2, block_size * 3):
cur_cipher = encryptor(bytes([0])*prepend_padding_length)
cur_highest_identical_block = get_num_of_identical_blocks(prev_cipher, cur_cipher)
if cur_highest_identical_block > prev_highest_identical_block:
return prepend_padding_length, cur_highest_identical_block
prev_cipher = cur_cipher | [
"def find_byte(self, index, target_block, crafted_block):\n valid = []\n\n self.logger.info(\"\\tattacking byte [{}/{}]...\".format(index+1, len(target_block)))\n\n for c in range(256):\n crafted_block[index] = c\n\n self.logger.debug('\\ttrying byte {} .. in {} '.format(hex(c), binascii.hexlify(crafted_block)))\n\n if self.oracle.decrypt_block(bytes(crafted_block), target_block):\n valid.append(c)\n self.logger.info('\\t\\tpadding correct for {}!!!'.format(hex(c)))\n if index < len(target_block) - 1:\n break\n\n counter = 0\n while len(valid) > 1:\n # in case plain text block contains byte 0x02 at [-2] or 0x03 at both [-2] [-3],\n # need to check which of the bytes that yield correct padding is the one that yields 0x01\n # do so by trying to decrypt the same block with different preceeding byte\n # - only 0x01 will yield correct padding\n\n counter = counter +1\n new_valid = []\n crafted_block[index-1] = counter\n\n for byte in valid:\n crafted_block[index] = byte\n\n if self.oracle.decrypt_block(bytes(crafted_block), target_block):\n new_valid.append(byte)\n\n valid = new_valid\n\n return valid[0]",
"def detect_random_size(random_prefix, unknown_string, key):\n orig_size = len(encrypt_aes_ecb(random_prefix+unknown_string, key))\n num_cons_blocks = 17\n # The number 17 here is arbitrary. It is just unlikely that 17 identical blocks\n # would be consecutive if msg weren't block-aligned\n msg = b'A'*16*num_cons_blocks\n for prefix_size in range(16):\n prefix = b'C'*prefix_size\n ptext = random_prefix + prefix + msg + unknown_string\n #print(f'ptext[{prefix_size}]: {ptext}')\n ctext = encrypt_aes_ecb(ptext, key)\n num_blocks = len(ctext) // 16\n block_count = 0\n idx = 0\n cblocks = [ctext[16*i:16*(i+1)] for i in range(num_blocks)]\n prev = None\n for i, block in enumerate(cblocks):\n if block == prev:\n block_count += 1\n # If there were num_cons_blocks repeating blocks, that means that the random_prefix\n # concatenated with my prefix is block aligned. The length of the\n # random prefix is 16 * idx - prefix_size.\n if block_count == num_cons_blocks:\n #print(f'{idx} blocks with prefix {prefix_size}')\n random_size = 16 * idx - prefix_size\n #print(f'random_size: {random_size}')\n return random_size\n else:\n prev = block\n block_count = 1\n idx = i # The index of the first repeating block\n print('Failed to detect repeating block')",
"def _pad(self, string):\r\n return (string +\r\n (self.block_size - len(string) % self.block_size) *\r\n SecureCipher.str_to_bytes(chr(self.block_size - len(string) % self.block_size)))",
"def pad_size(data_size, blocksize):\r\n if data_size < blocksize:\r\n return blocksize - data_size\r\n if data_size % blocksize == 0:\r\n return 0\r\n return blocksize - data_size % blocksize",
"def bytealign(self) -> int:\n skipped = (8 - (self._pos % 8)) % 8\n self.pos += skipped\n return skipped",
"def find_next_valid_block(input_array, bytes_per_block, start_index):\n\n if len(input_array.shape) != 1:\n raise ValueError('input_array should be 1-d array.')\n\n first_valid_block_start = None\n\n for i in range(start_index + bytes_per_block, len(input_array)):\n\n if np.array_equal(input_array[i-10: i], oe.RECORD_MARKER):\n first_valid_block_start = i - bytes_per_block\n break\n else:\n print 'no valid block found after index:', start_index\n\n return first_valid_block_start",
"def _pad_message(text):\n block_size = AES.block_size\n padding_size = (block_size - len(text) % block_size) or block_size\n padding = chr(padding_size) * padding_size\n return text + padding",
"def __send_block(s_data, s_blocks, s_size, s_offset, noexpectphrase=True):\n j = s_offset\n for block in range(0, s_blocks):\n i = block * s_size + s_offset\n j = (block + 1) * s_size + s_offset\n if verbose:\n log.debug(\"{0}:{1}\".format(i, s_data[i:j]))\n if not noexpectphrase:\n self._uut_conn.send(s_data[i:j], expectphrase='.*', timeout=90, regex=True)\n else:\n self._uut_conn.send(s_data[i:j], expectphrase=None, timeout=120, idle_timeout=90, regex=True)\n time.sleep(0.10)\n return j",
"def getSizeOfBlock(self) -> int:\n ...",
"def _pad(self, a: bitarray) -> bitarray:\n pad_len = BLOCKSIZE - (len(a) % BLOCKSIZE) - 1\n padding = bitarray(\"1\" + \"0\" * pad_len)\n return a + padding",
"def decrypt_block(block, tester):\n random = bytearray(urandom(16))\n i = b'\\x00' * 16\n test = xor(random, i)\n\n while tester(test + block) is False:\n i = inc(i)\n test = xor(random, i)\n\n j = 1\n\n tweaked = tweak(test[:], j-1)\n\n while tester(tweaked + block) is True:\n j += 1\n tweaked = tweak(tweaked, j-1)\n\n l = 17 - j\n known = bytearray([b ^ l for b in test[-l:]])[::-1]\n\n while l != 16:\n random = bytearray(urandom(16 - l))\n i = b'\\x00' * (16 - l)\n pad = xor(bytearray([l + 1]) * l, known)\n\n head = xor(random, i)\n\n while tester(head + pad + block) is False:\n i = inc(i)\n head = xor(random, i)\n\n known = bytearray([head[-1] ^ (l+1)]) + known\n l += 1\n\n return known",
"def __padding(self, s):\n padding_length = self.block_size - len(s) % self.block_size\n return s + padding_length * chr(padding_length)",
"def pad_plaintext(text, block_size=64):\n padding_amount = block_size - (len(text) % block_size)\n return text + left_pad(decimal_to_binary(padding_amount / BYTE_LENGTH),\n padding_amount)",
"def update_block(self, hash, data, offset=0):\n return 0",
"def pad(st):\n block_size = 16\n padded_st = st + (block_size - len(st) % block_size) * chr(block_size -len(st) % block_size)\n return padded_st",
"def pkcs7pad(data, blocksize):\n blocks = len(data) // blocksize\n remainder = len(data) % blocksize\n padlen = blocksize - remainder\n padchar = bytes([padlen])\n data = data + (padchar * padlen)\n return data",
"def _block_len_in_section(block, section):\n len_start = max(block.start, section.start)\n len_end = min(block.end, section.end)\n\n return len_end - len_start + 1",
"def test103MoveOverlappingNoZeroPadding(self):\n header = self.region.header[4, 0]\n sectorlocation = header[0]\n nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)\n self.region.write_chunk(4, 0, nbt)\n self.region.file.seek((sectorlocation + 1) * 4096)\n unused = self.region.file.read(4096)\n zeroes = unused.count(b'\\x00')\n self.assertNotEqual(zeroes, 4096, \\\n \"Bytes should not be zeroed after moving an overlapping chunk\")",
"def pad_message(message):\n\n # determine the pad_length based on the key BLOCK_SIZE\n if len(message) < BLOCK_SIZE:\n pad_length = (BLOCK_SIZE - len(message))\n elif len(message) > BLOCK_SIZE:\n pad_length = (len(message) % BLOCK_SIZE)\n else:\n pad_length = BLOCK_SIZE\n\n if pad_length < 16:\n pad_length += BLOCK_SIZE\n\n # reduce by one for the delimiter that is added between message and padding\n pad_length -= 1\n padding = \"\"\n\n while len(padding) < pad_length:\n padding += random.choice(RANDOM_SAMPLE)\n\n return\"{}`{}\".format(padding, message)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get source id and type from sessions according to the group id | def get_src_id_and_type_from_sessions(self, gid):
session_storage = SessionStorage(settings.Session_Storage_File)
sessions = session_storage.read()
logger.debug('sessions: ' + str(len(sessions)))
for session in sessions:
logger.debug(json.dumps(session))
if session.get('gid') == gid:
return session.get('src_id'), session.get('type')
return False, False | [
"def _tunnel_source_id(source):\n return tuple(sorted(source.items()))",
"def test_data_source_postgre_sqls_id_get(self):\n pass",
"def get_study_sessions(dicom_dir_template, files_opt, heuristic, outdir,\n session, sids, grouping='studyUID'):\n study_sessions = {}\n if dicom_dir_template:\n dicom_dir_template = os.path.abspath(dicom_dir_template)\n assert not files_opt # see above TODO\n assert sids\n # expand the input template\n if '{subject}' not in dicom_dir_template:\n raise ValueError(\n \"dicom dir template must have {subject} as a placeholder for a \"\n \"subject id. Got %r\" % dicom_dir_template)\n for sid in sids:\n sdir = dicom_dir_template.format(subject=sid, session=session)\n # and see what matches\n files = sorted(glob(sdir))\n for session_, files_ in get_extracted_dicoms(files):\n if session_ is not None and session:\n lgr.warning(\n \"We had session specified (%s) but while analyzing \"\n \"files got a new value %r (using it instead)\"\n % (session, session_))\n # in this setup we do not care about tracking \"studies\" so\n # locator would be the same None\n study_sessions[\n StudySessionInfo(\n None,\n session_ if session_ is not None else session,\n sid,\n )] = files_\n else:\n # prep files\n assert files_opt\n assert not sids\n files = []\n for f in files_opt:\n if isdir(f):\n files += sorted(find_files(\n '.*', topdir=f, exclude_vcs=True, exclude=\"/\\.datalad/\"))\n else:\n files.append(f)\n\n # in this scenario we don't care about sessions obtained this way\n files_ = []\n for _, files_ex in get_extracted_dicoms(files):\n files_ += files_ex\n\n # sort all DICOMS using heuristic\n # TODO: this one is not groupping by StudyUID but may be we should!\n seqinfo_dict = group_dicoms_into_seqinfos(\n files_,\n file_filter=getattr(heuristic, 'filter_files', None),\n dcmfilter=getattr(heuristic, 'filter_dicom', None),\n grouping=grouping)\n\n if not getattr(heuristic, 'infotoids', None):\n raise NotImplementedError(\n \"For now, if no subj template is provided, requiring \"\n \"heuristic to have infotoids\")\n\n for studyUID, seqinfo in seqinfo_dict.items():\n # so we have a single study, we need to figure out its\n # locator, session, subject\n # TODO: Try except to ignore those we can't handle?\n # actually probably there should be a dedicated exception for\n # heuristics to throw if they detect that the study they are given\n # is not the one they would be willing to work on\n ids = heuristic.infotoids(seqinfo.keys(), outdir=outdir)\n # TODO: probably infotoids is doomed to do more and possibly\n # split into multiple sessions!!!! but then it should be provided\n # full seqinfo with files which it would place into multiple groups\n lgr.info(\"Study session for %s\" % str(ids))\n study_session_info = StudySessionInfo(\n ids.get('locator'),\n ids.get('session', session) or session,\n ids.get('subject', None))\n if study_session_info in study_sessions:\n #raise ValueError(\n lgr.warning(\n \"We already have a study session with the same value %s\"\n % repr(study_session_info))\n continue # skip for now\n study_sessions[study_session_info] = seqinfo\n\n return study_sessions",
"def get_groups(source, url=\"https://fritz.science\"):\n\n response = api('GET',\n f'{url}/api/sources/{source}'\n )\n if response.status_code == 200:\n groups = response.json()['data']['groups']\n else:\n print(f'HTTP code: {response.status_code}, {response.reason}')\n\n return groups",
"def get_streamflow_site_comids(group=None, groups=None):\n lookup = pd.read_csv(inflows_lookup_file)\n if groups is not None:\n lookup = lookup.loc[lookup.group.isin(groups)]\n if group is not None:\n lookup = lookup.loc[lookup.group == groups]\n lookup = dict(zip(lookup.site_no, lookup.comid))\n return lookup",
"def source_server_group(self) -> str:\n return pulumi.get(self, \"source_server_group\")",
"def identify_events_by_src(device_by_src, pkt_src, pkt_dst, device_categorization, event_identification):\n if device_categorization[pkt_src] == 'CAMERA':\n for k, v in sorted(device_by_src.iteritems()):\n if v > 100000:\n if check_motion_event(k, pkt_src, event_identification):\n # event_identification.append([k, DEVICE_NAME[pkt_src], DEVICE_NAME[pkt_dst], '1'])\n event_identification.append([k, DEVICE_NAME[pkt_src], '1'])\n elif device_categorization[pkt_src] == 'SENSOR':\n for k, v in sorted(device_by_src.iteritems()):\n if v > 10000:\n if check_motion_event(k, pkt_src, event_identification):\n # event_identification.append([k, DEVICE_NAME[pkt_src], DEVICE_NAME[pkt_dst], '1'])\n event_identification.append([k, DEVICE_NAME[pkt_src], '1'])\n\n return event_identification",
"def _create_consistencygroup_from_src(self, context, group, volumes,\n cgsnapshot=None, snapshots=None,\n source_cg=None, source_vols=None):\n if cgsnapshot and snapshots:\n return self._create_consistencygroup_from_cgsnapshot(\n context, group, volumes, cgsnapshot, snapshots)\n elif source_cg and source_vols:\n return self._create_consistencygroup_from_consistencygroup(\n context, group, volumes, source_cg, source_vols)\n\n msg = (_(\"Unknown consistency group source for %(group)s\") %\n {'group': group['id']})\n raise exception.VolumeDriverException(message=msg)",
"def test_get_groups_id(self):\n pass",
"def get_SessionOnSID(self,studentID):\n try:\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from Session where studentID=%s', (str(studentID),))\n sessions=list()\n for row in cursor:\n sessions.append(self.getSessionOnId(row[0]))\n return sessions\n except Exception as e:\n print('error while getting session on id ' + str(e))\n return None",
"def get_source_id(idx):\n global tgas\n if tgas is None:\n from .cfg import TGASFILE\n tgas = pd.read_hdf(TGASFILE, 'df')\n\n return tgas.iloc[idx].source_id",
"def fetch_session_by_id(id):\r\n pass",
"def get_ntype_id_from_src(self, ntype):\n if ntype is None:\n if len(self._srctypes_invmap) != 1:\n raise DGLError('SRC node type name must be specified if there are more than one '\n 'SRC node types.')\n return next(iter(self._srctypes_invmap.values()))\n ntid = self._srctypes_invmap.get(ntype, None)\n if ntid is None:\n raise DGLError('SRC node type \"{}\" does not exist.'.format(ntype))\n return ntid",
"def get_source_identifier():",
"def get_session_metadata(*rds):\n if None in rds or len(rds) != 3:\n raise ValueError, 'incomplete session specification: %s' % str(rds)\n rat, day, session = rds\n try:\n the_session = metadata[rat]['days'][day]['sessions'][session]\n except KeyError, e:\n raise e, 'session not found'\n return the_session",
"def get_subject_id(session, sessions_list):\n subject_id = [x['subject_id'] for x in sessions_list\n if x['session_label'] == session]\n return subject_id[0]",
"def test_data_source_postgre_sqls_id_team_get(self):\n pass",
"def pidGetSource(self) -> float:\n ...",
"def start_session(self):\n LOG.debug('starting a new session')\n qry = build_ins_query('session', self.SESSION_COLUMNS, ['session_id'])\n vals = {\n 'source_subsystem': self._src_subsys,\n 'source_subsystem_instance': self._conf.get('source_subsystem_instance'),\n 'destination_subsystem': self._dst_subsys,\n 'destination_subsystem_instance': self._conf.get('destination_subsystem_instance'),\n 'program_name': os.path.basename(sys.argv[0]),\n 'username': getpass.getuser(),\n 'pid': os.getpid(),\n 'host_name': socket.getfqdn()\n }\n dbc = self._dbh.cursor()\n dbc.execute(qry, vals)\n self._syslog_qry(dbc.query)\n session_id = dbc.fetchone()[0]\n LOG.debug('Feed session started. session_id: %s', session_id)\n dbc.close()\n self.commit()\n return session_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct an array of random samples, normally distributed about n with the given standard deviation. | def samples(n, dev=0.1, count=10000):
return np.random.normal(n, dev, count) | [
"def d_normal_distribution(mu, covMat, n):\n import numpy as np\n # Use multivariate normal distribution of numpy package\n return np.random.multivariate_normal(mean=mu, cov=covMat, size=n, check_valid='ignore').tolist()",
"def _randomSamples(self, n):\n # we want to return points in unit sphere, could do using spherical coords\n # but rejection method is easier and arguably faster :)\n points = np.array([])\n while points.shape[0] < n:\n remainingPoints = n - points.shape[0]\n p = (np.random.rand(remainingPoints,3) - 0.5)*2\n #p = p[np.linalg.norm(p, axis=1) <= SAMPLE_SPHERE_RADIUS]\n\n if points.size == 0:\n points = p \n else:\n points = np.concatenate((points, p))\n return points",
"def generate_normal_distribution(_mean, _sd, lower_bound, upper_bound):\n dist = np.random.normal(_mean, _sd, 1)[0]\n if dist < lower_bound:\n dist = lower_bound\n elif dist > upper_bound:\n dist = upper_bound\n return dist",
"def sample(self, size):\n return np.random.normal(self._mean, self._std, size)",
"def get_new_data(parameters, n=10):\n num_pixels, sqrt_cov, mean_trials = parameters\n data = np.empty([n, num_pixels])\n for i in range(n):\n random_mat = np.random.normal(0, 1, num_pixels)\n new_example = (np.dot(random_mat, sqrt_cov) + mean_trials)\n data[i, :] = new_example\n return data",
"def gaussian_distribution(n, ambiant_dim, intrinsic_dim):\n \n data = np.zeros((n, ambiant_dim))\n data[:,:intrinsic_dim] = np.random.normal(size=(n, intrinsic_dim))\n return data",
"def _sample_gaussian_noise(self, n):\n check_positive_integer(n)\n delta_t = 1.0 * self.t / n\n\n noise = self.rng.normal(scale=np.sqrt(delta_t), size=n)\n\n return noise",
"def sampleGaussian(x, y, std_x, std_y, n_samples):\n\n mean = [x, y]\n cov = [[std_x, 0], [0, std_y]] # diagonal covariance\n return np.random.multivariate_normal(mean, cov, n_samples)",
"def gauss_noise(self,array,stdev):\n # If the array has 2 dimensions, this will capture it\n # Otherwise, it will evaluate the length of 1D array\n noise = np.random.normal(0,(stdev/100)*np.amax(array),array.shape)\n return array+noise",
"def norm_points(N, mu_x, mu_y, sigma):\n return sigma * numpy.random.randn(N, 2) + [mu_x,mu_y]",
"def generateDataset(N, f, sigma):\n import numpy as np\n vf = np.vectorize(lambda x: f(x) + np.random.normal(0, sigma))\n x = np.linspace(0,1,N)\n return (x, vf(x))",
"def sample_uniform(self, N):\n np.random.seed()\n return np.random.dirichlet([1]*self.k, N)",
"def random_time_trace(N=300, mu=30, sigma=5):\n times = [random.gauss(mu, sigma) for i in range(N)]\n return times",
"def generate_randoms(n,lim):\n a=np.empty(n,dtype=np.uint32)\n increase=int(lim/n)\n last_value=1\n # Generate each new number as a random between the previous_value\n # and a relative increase to ensure a sorted order\n for i in range(n):\n a[i] = random.randint(0,increase) + last_value\n last_value = a[i]\n return a",
"def bootstrap_sterr(x, B=100):\r\n N = len(x)\r\n samples = np.zeros((B, N))\r\n mus = np.zeros((B,))\r\n for b in range(B):\r\n samples[b,:] = np.random.choice(x, N, replace=True)\r\n mus[b] = np.mean(samples[b,:])\r\n return np.std(mus)",
"def uniform_distribution(n, ambiant_dim, intrinsic_dim):\n data = np.zeros((n, ambiant_dim))\n data[:,:intrinsic_dim] = np.random.uniform(size=(n, intrinsic_dim))\n return data",
"def gen_noise_param(n):\n #for now just have stdev=1 for every node\n return np.ones(n)*0.1",
"def gen_data(n_sample=50,dist='uniform',random_var=0):\n \n if dist=='uniform':\n d=np.random.random(size=n_sample)\n if dist=='normal':\n d=np.random.normal(loc=1-random_var,scale=1+random_var,size=n_sample)\n if dist=='binomial':\n d=np.random.binomial(n=10,p=0.5+random_var/10,size=n_sample)\n if dist=='exponential':\n d=np.random.exponential(scale=0.5+random_var,size=n_sample)\n if dist=='poisson':\n d=np.random.poisson(lam=1.0+random_var,size=n_sample)\n if dist=='chisquare':\n d=np.random.chisquare(df=3+int(5*random_var),size=n_sample)\n if dist=='gamma':\n d=np.random.gamma(shape=1.0+random_var,scale=0.5+random_var,size=n_sample)\n if dist=='beta':\n d=np.random.beta(a=0.5+random_var,b=1.0+random_var,size=n_sample)\n if dist=='triangular':\n d=np.random.triangular(left=0.0,mode=np.min([0.5+random_var,1.0]),right=1.0,size=n_sample)\n if dist=='lognormal':\n d=np.random.lognormal(mean=1-random_var,sigma=1+random_var,size=n_sample)\n if dist=='laplace':\n d=np.random.laplace(scale=0.5+random_var,size=n_sample)\n \n # Normalize data\n d = d+np.abs(d.min())\n d = d/(d.max()-d.min())\n \n return d",
"def random_distribution(n_items):\r\n return np.random.dirichlet([1.0 for i in range(n_items)])",
"def error(self):\n return np.random.normal(scale=self.sd, size=1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gracefully stop working on things | def _gracefully_stop(self):
pass | [
"def at_stop(self):\r\n pass",
"def try_stop(self):\n try:\n self.stop()\n except:\n e = sys.exc_info()[0]\n self.get_logger().warning(e)",
"def container_forcestop(self):\n raise ex.excError",
"def halt(self):\n self.running = False\n sys.exit(0)",
"def cancel(self):\n logging.warning(f\"called cancel on hunt {self} but {self.type} does not support cancel\")",
"def cancel():\n\t\traise NotImplementedError()",
"def stop_poisoning(self):\n self.stop = True\n # self.stop_thread = threading.Thread(target=self.restore_network)",
"def __exit__(self, exc_type, exc_value, traceback):\n self.stop()",
"def test_stop_run(self):\n pass",
"async def _abort_exposure(self) -> None:\n pass",
"def callback_stopping(self, myrun):\n pass # pragma: no cover",
"def test_stop_runs(self):\n pass",
"def stop(self):\n self._state = Checkers.State.GAME_OVER # game is aborted\n self._eog = Checkers.EoG.ABORT\n self._winner = None\n self.tend = time.time()\n ts = time.localtime(self.tend)\n tstr = f\"{ts.tm_hour:02}:{ts.tm_min:02}:{ts.tm_sec:02}\"\n self.add_event_to_history(f\"ABORTED@{tstr}\")",
"def _abort_processing(self):\n self.notify_dispatcher_abort()\n self._listener.remove_peer(str(self.data_id).encode())",
"def stopping_condition_met(self, execution):\r\n return False",
"def cog_unload(self):\n self.guild_loop.cancel()",
"def request_stop(self):\n self._stop_requested = True",
"def _stopping(self):\n \n self.__state = runlevel.STATE_STOPPING",
"def kill(self):\n\n self.running = False\n\n try:\n # teardown robot\n self.strategy.teardown()\n except Exception:\n # method not implemented by strategy\n pass",
"def ctxAbort():\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset plans being solved so they are solved again. Use this only when the solver service is not running concurrently. | def _reset_solving_status(self):
plans = self.Plan.query.get_plan_by_col("status", self.Plan.SOLVING)
for the_plan in plans:
the_plan.status = self.Plan.TRANSLATED
# Use only in active-passive mode, so don't have to be atomic
the_plan.update() | [
"def change_plan(self, plan):\n\n # First, sort the new plan by event start times.\n plan.sort(key=lambda task: task['start'])\n\n # Queue will be modified, plan will stay as it is.\n queue = list(plan)\n\n # Establish a common time base.\n now = reactor.seconds()\n\n # Catch up with the current scheduling.\n self.schedule(now)\n\n # We need to make sure that the plan actually changed before\n # doing anything destructive, such as stopping current playback.\n cur = plan_window(self.plan, now, now + 60)\n new = plan_window(plan, now, now + 60)\n\n if cur != new:\n self.msg('Resetting schedule...')\n\n # Stop and get rid of all currently instantiated tasks.\n for task in list(self.tasks):\n self.discard_task(task)\n self.stop_task(task)\n\n # Cancel all pending events.\n for event in list(self.events):\n self.events.discard(event)\n event.cancel()\n\n else:\n self.msg('Adjusting schedule...')\n\n # Work through the new queue up to the same point so that\n # the handoff will go smoothly and tasks won't overlap.\n pop_queue_tasks(queue, now=now)\n\n # Install new plan and new queue.\n self.plan = plan\n self.queue = queue\n\n # Schedule some tasks.\n self.schedule(now)\n\n if not self.plan:\n # Reset when we have no plan at all.\n return self.no_plan()",
"def reset(self):\n ESTWithProjections.reset(self)\n\tself.bestPath = None\n\tself.bestPathCost = None",
"def reset(self):\n self.rrt.reset()\n\tself.bestPath = None\n\tself.bestPathCost = None\n self.lastPruneCost = None\n\tself.updateBestCost()",
"def unmark_as_solved(self):\n self.is_solved = False\n self.solver = None",
"def reset_solution(self):\n for k, v in self.items():\n if \"solution\" in v:\n v._data = v.data.drop_vars(\"solution\")",
"def reset_optim(self):\r\n\t\tself.optimizer.state = defaultdict(dict)",
"def reset_programs(self):\n self._buffered_loop = None",
"def recalc_queue(self):\n\n tasks_to_do = self.employee.execution_set.filter(exec_status__in=[self.ToDo, self.InProgress],\n subtask__add_to_schedule=True,\n task__exec_status__in=[self.ToDo, self.InProgress]\n )\n tasks_to_do_fixed = tasks_to_do.filter(fixed_date=True).values('planned_start', 'planned_finish')\n self.fixed_periods = self.__merge_fixed_periods__(tasks_to_do_fixed)\n self.tasks_to_do_not_fixed = tasks_to_do.filter(fixed_date=False) \\\n .order_by('exec_status', F('planned_start').asc(nulls_last=True))\n\n execution_model = apps.get_model('planner.Execution')\n last_task_finish = self.__get_last_task_finish__()\n\n tasks = []\n # plan queued tasks\n for task in self.tasks_to_do_not_fixed:\n queued_task = self.__queue_task__(task, last_task_finish)\n tasks.append(queued_task)\n last_task_finish = self.planned_finish_with_interruption(queued_task)\n\n # perform bulk_update\n execution_model.objects.bulk_update(tasks, ['planned_start', 'planned_finish', 'interruption'])",
"def planrevert(args):\n plan = args.plan\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n config.plan(plan, revert=True)\n return 0",
"def reset_finished_tasks(self):\n del self.tasks_done[:]\n del self.tasks_failed[:]\n del self.tasks_incomplete[:]",
"def reset(self):\n for beam in self._beams:\n beam.reset()\n self._free_beams = [beam for beam in self._beams]\n self._allocated_beams = []\n self._tilings = []\n self._dynamic_tilings = []",
"def reset_pls(self):\n for insurancefirm in self.insurancefirms:\n insurancefirm.reset_pl()\n\n for reininsurancefirm in self.reinsurancefirms:\n reininsurancefirm.reset_pl()\n\n for catbond in self.catbonds:\n catbond.reset_pl()",
"def reset(self, do_resets=None):\n pass",
"def solve(self):\n self.freezeInitialValues()\n solved = self.solve_puzzle(self.serialize())",
"def reset_goal(self):\n self.set_goal(np.zeros(6))",
"def reset(self):\n self.__activations = [0 for _ in range(self.__max_node)]\n self.__fitness = 0",
"def reset(self):\n self._state = self.q",
"def AdaptiveSolve(self):\n pass",
"def _update_previous_solution(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |