Dataset columns:
query       string   (lengths 9 to 9.05k)
document    string   (lengths 10 to 222k)
negatives   list     (lengths 19 to 20)
metadata    dict
Checks that the initial force/acceleration is sufficient to start rolling. It needs to be at least 2.5 times the rolling friction coefficient.
def init_acc(a):
    Roll_fric = 0.02  # [-] Rolling friction coefficient of airplane wheels
    if a[1] >= 2.5*Roll_fric:
        return True
    else:
        return False
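A minimal usage sketch for the document above; it assumes numpy is imported as np, that the init_acc function shown above is in scope, and the profile values are invented purely for illustration:

import numpy as np

# Hypothetical acceleration profile; init_acc only inspects the second entry, a[1].
profile = np.array([0.0, 0.08])   # assumed values, illustration only
# 0.08 >= 2.5*0.02 = 0.05, so the aircraft is considered able to start rolling
print(init_acc(profile))          # -> True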
[ "def __check_initial_conditions_feasibility(self):\n for chromosome in self.starting_gen:\n if len(chromosome) is not len(self.variables):\n raise Exception('Length of chromosome is not equal to ' +\n 'length of defined variables')", "def gravity(self) -> None:\n #will only exert it if the player is in free fall and below terminal velocity\n if self.rect.y != self.WIN.get_height() - (2*self.radius) - 1:\n if self.vector.y < 9:\n #if above the screen it falls faster\n if self.rect.y > self.WIN.get_height():\n self.changeYVector(0.11)\n else:\n self.changeYVector(0.08)", "def _check(self):\n if self.predictor.prediction_length < self.prediction_length:\n raise ValueError(\n f\"Please choose a forecasting horizon lower or equal to the one chosen when training: {self.predictor.prediction_length}\"\n )", "def low_recession_rate(self, error = 0.03):\n\n if (self.postprocessor.sim_total_rec - self.postprocessor.obs_total_rec\n < -error):\n print('Issue: The simulated daily low flow recession rate is ' +\n 'too low.\\n')\n return True\n\n return False", "def is_accelerating(self):\n m_acc = norm(self.acceleration)\n return m_acc > 0 and is_same_direction(self.velocity, self.acceleration)", "def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0", "def acceleration3_d_available(self):\n ret = self._get_attr(\"acceleration3DAvailable\")\n return ret", "def passed(self):\n if self.wobble.radius_mm * 2 < self.tolerance.value:\n return True\n else:\n return False", "def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return random.random() < cutoff", "def check_requirements(self, arm):\n for part in arm.parts:\n if isinstance(arm.parts[part], ForceSensor):\n return True\n return False", "def CheckMinLambda(self, freqfactor=2.5):\n lambdamin=self.vmin/self.fc/freqfactor\n dxArr=self.dx*np.diff( (0.5+0.5*(self.knots)) )\n dzArr=self.dz*np.diff( (0.5+0.5*(self.knots)) )\n dsmax=max(dxArr.max(), dzArr.max())\n # dsmax=max(self.dx, self.dz)\n # Need checking! 
(in manual: threshold value is around 4.5 points\n # per wavelength in elastic media and 5.5 in acoustic media), 4.5 grid points OR 4.5 element points\n if dsmax * 4.5 > lambdamin:\n raise ValueError('Grid spacing is too large: '+ str(dsmax)+' for '+str(lambdamin)+ ' m')\n else:\n print 'Grid spacing:', str(dsmax),'m for',lambdamin, 'm'\n return", "def force(self):\n drag = self.force_of_drag()\n return self.thrust(self.__time) - (drag['body'] + drag['fins'] + self.force_of_gravity())", "def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i] is not None:\n if self.params[i] != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)", "def get_termination(self):\n self.state = np.concatenate([self.data.qpos, self.data.qvel, self.data.act])\n if np.linalg.norm(self.state) < 1e-6:\n return 0.0", "def _checkValidity(self) -> None:\n\n fresnel_zone_dist = np.sqrt(self._probe_params.wavelength * self._det_params.obj_dist)\n fresnel_zone_npix = fresnel_zone_dist / self._det_params.pixel_pitch\n\n error_str = (f\"Step size ({self._scan_params.scan_step_npix} is too small. \"\n + f\"Ensure that the step size is at least larger than the Fresnel zone width \"\n + f\"({fresnel_zone_npix}) to ensure diversity in the diffraction patterns.\")\n assert self._scan_params.scan_step_npix > fresnel_zone_npix, error_str", "def req_real_time_price_check_end(self):\n for security in self.data:\n for ct in [self.data[security].bid_price,self.data[security].ask_price]: \n if ct < 0.0001:\n #print ct, 'not ready'\n return False\n return True", "def check(self):\r\n assert self.T.__class__ == int and self.T >= 2, 'blank sweeps period must be an integer >= 2'", "def check_scf_criteria(self):\n tols = ['toldfe', 'tolwfr', 'toldff', 'tolrff', 'tolvrs']\n nonzeros = 0\n for i in tols:\n if i in self.params.keys() and self.params[i].as_val() is not None:\n if self.params[i].as_val(t=float, dim=0) != 0.0:\n nonzeros += 1\n if nonzeros == 1:\n return True\n else:\n print(\"========================================\\n\")\n print(\" WARNING !!!\\n\")\n print(\"========================================\\n\")\n print(\"you must set one and only one of variables\\n\")\n print(\"below to differ from zero.\\n\")\n print(\"[toldfe, tolwfr, toldff, tolrff, tolvrs]\\n\")\n #print(nonzeros)\n sys.exit(1)", "def calculation_required(self, atoms=None, quantities=None):\n available_quantities = [\"energy\", \"forces\"]\n for quantity in quantities:\n if quantity not in available_quantities:\n print_warning(\"Quantity '{}' not available.\".format(quantity))\n return True\n\n if self.old_input is None:\n return True\n else:\n new_input = self.CP2K_INPUT._print_input(-1)\n return new_input != self.old_input" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
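The metadata above declares a (query, document, negatives) triplet objective. The sketch below shows, under the assumption that a row is available as a plain Python dict shaped like the example above (no particular loading library is implied), how it could be expanded into training triplets; the shortened strings are placeholders:

# Hypothetical row; long fields are truncated here for readability.
row = {
    "query": "Checks that the initial force/acceleration is sufficient to start rolling. ...",
    "document": "def init_acc(a): ...",
    "negatives": ["def __check_initial_conditions_feasibility(self): ...",
                  "def gravity(self) -> None: ..."],
    "metadata": {"objective": {"paired": [], "self": [],
                               "triplet": [["query", "document", "negatives"]]}},
}

# One (anchor, positive, negative) triplet per entry in the negatives list.
triplets = [(row["query"], row["document"], neg) for neg in row["negatives"]]
print(len(triplets))   # -> 2 in this toy row; 19 to 20 per row in the real data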
Function that can be used for engine sizing: it shows the required torque and power at different RPM for both the ring gear and the engine. It also indicates the performance of the existing engine.
def EGTS_tor_rpm_pow(torque, power, velocity, GR):
    # Efficiencies
    n_gear = 0.9875   # Gear efficiency (torque loss -> power loss)
    amount_gears = 2
    n_emotor = 0.95   # Electricmotor efficiency (electrical loss - power loss)

    torque_out = (1/n_gear)**amount_gears*np.array(torque)  # [Nm] Required engine output torque
    power_out = (1/n_gear)**amount_gears*np.array(power)    # [W] Corresponding engine output power
    power_in = (1/n_emotor)*power_out                        # [W] Required engine input power

    # Translate velocity to
    w_rad_air = 1.27/2                 # [m] wheel radius aircraft MLG wheels
    w = np.array(velocity)/w_rad_air   # [rad/s]
    RPM = w*60/(2*np.pi)

    # Existing engines baseline
    T_ENG_268 = np.array([[0, 2000, 3000, 4000, 4500], [500, 500, 490, 482, 479]])
    T_ENG_348 = np.array([[0, 1200, 2600, 3500, 4000], [900, 1000, 1000, 958.33, 941.66]])
    P_ENG_268 = np.array([[0, 2200, 2830+1/3, 3600, 4500], [0, 120, 150, 180, 200]])
    P_ENG_348 = np.array([[0, 3000, 3500, 4000], [0, 315, 350, 370]])

    ''''gs = gridspec.GridSpec(2, 2)  # Define figure layout
    fig = plt.figure("Engine Performance Plane")
    fig.suptitle("Engine Required Acceleration Performance")
    ax0 = fig.add_subplot(gs[0, 0])
    ax0.set_title("Ring Gear")
    ax0.set_xlabel("RPM")
    ax0.set_ylabel("Torque [Nm]")
    ax0.plot(RPM, np.array(torque), 'red')
    ax1 = fig.add_subplot(gs[0, 1])
    ax1.set_title("Engine")
    ax1.set_xlabel("RPM")
    ax1.set_ylabel("Torque [Nm]")
    ax1.plot(RPM*GR, np.array(torque_out)/GR, 'red')
    ax1.plot(T_ENG_268[0, :], T_ENG_268[1, :], 'gray', linestyle='--')
    ax = ax1.twinx()
    ax.set_ylabel("EMRAX 268 PEAK", color='gray')
    ax.tick_params(right=False, labelright=False)
    #ax1.plot(T_ENG_268[0, :], T_ENG_268[1, :]*2, 'gray')
    #ax1.plot(T_ENG_348[0, :], T_ENG_348[1, :], 'g')
    ax2 = fig.add_subplot(gs[1, 0])
    ax2.set_title("Ring Gear")
    ax2.set_xlabel("RPM")
    ax2.set_ylabel("Power [kW]")
    ax2.plot(RPM, np.array(power)/1000)
    ax3 = fig.add_subplot(gs[1, 1])
    ax3.set_title("Engine")
    ax3.set_xlabel("RPM")
    ax3.set_ylabel("Power [kW]")
    ax3.plot(P_ENG_268[0, :], P_ENG_268[1, :], 'gray', linestyle='--')
    ax = ax3.twinx()
    ax.set_ylabel("EMRAX 268 PEAK", color='gray')
    ax.tick_params(right=False, labelright=False)
    #ax3.plot(P_ENG_268[0, :], P_ENG_268[1, :]*2, 'gray')
    #ax3.plot(P_ENG_348[0, :], P_ENG_348[1, :], 'g')
    ax3.plot(RPM*GR, power_in/1000)
    fig.tight_layout()
    fig.subplots_adjust(top=0.88)
    fig.savefig('Power_ENG_Plane', bbox_inches='tight')
    #plt.show()'''
    return power_in, torque_out, RPM
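A short, hedged example of how the sizing function above might be called; the torque, power, velocity and gear-ratio values are invented placeholders, and numpy is assumed to be imported as np next to the function:

import numpy as np

# Hypothetical ring-gear requirement profile (illustrative values only)
torque   = [3000.0, 2800.0, 2500.0]   # [Nm] required ring gear torque
power    = [30e3, 60e3, 90e3]         # [W]  required ring gear power
velocity = [1.0, 2.0, 3.0]            # [m/s] ground speed
GR = 15                               # [-]  assumed gear ratio

power_in, torque_out, RPM = EGTS_tor_rpm_pow(torque, power, velocity, GR)
print(RPM)             # ring gear RPM at each ground speed
print(power_in / 1e3)  # [kW] required electrical input power at each point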
[ "def EGTS_only_perf(GR):\n #Power available\n P_APU = 62 # [kW] Available apu power\n P_sen = 0 # [kW]\n P_comp = 0 # [kW]\n P_av_e = (P_APU-P_sen-P_comp)*1000/2 # [W] APU power available per engine\n\n # Efficiencies powertrain\n n_circuit = 0.97\n n_gear = 0.9875 # Gear efficiency (torque loss -> power loss)\n amount_gears = 2\n n_emotor = 0.95 # Electricmotor efficiency (electrical loss - power loss)\n\n # Airplane characteristics\n w_rad_air = 1.27/2 # [m] wheel radius aircraft MLG wheels\n m_plane = 97400 # [kg] MRW\n weight_ratio = 0.952 # [-] Landing gear weight distribution ratio\n Roll_fric = 0.02 # [-] Rolling friction coefficient of airplane wheels\n\n # Engine output torque for available power at different RPM calculation\n P_av_e_out = n_circuit*n_emotor*P_av_e # [W] engine output power\n T_egts_w_em = np.array([500]) # [Nm] engine output torque\n\n v_slow = np.arange(0, 8.1, 0.1) # [kts] Velocity range\n v_slow = v_slow*0.514444 # to m/s\n w_slow = v_slow/w_rad_air # [rad/s] corresponding rotational speed wheels\n w_slow_eng = w_slow*GR # [rad/s] corresponding rotational speed engine\n for i in range(1, len(w_slow_eng)):\n # Enough power hence full torque\n if P_av_e_out/w_slow_eng[i] > 500:\n T_egts_w_em = np.append(T_egts_w_em, [500])\n # in sufficient power hence less torque\n elif P_av_e_out/w_slow_eng[i] < 500 and P_av_e_out/w_slow_eng[i] > 0:\n T_egts_w_em = np.append(T_egts_w_em, [P_av_e_out/w_slow_eng[i]])\n # not enough power\n else:\n T_egts_w_em = np.add(T_egts_w_em, [0])\n\n # Torque en power @ wheels = engine * gear efficiency\n T_egts_w_r = n_gear**amount_gears*GR*T_egts_w_em # [W] wheel power\n F_egts_w = T_egts_w_r/w_rad_air # [Nm] engine output torque\n\n # Resultant acceleration calculation\n # Determining friction for resultant acceleration calculation\n N_mlg = m_plane*weight_ratio*9.81 # [N] Total normal force on the MLG\n N_mlg_w = N_mlg/4 # [N] Normal force per MLG wheel\n N_nlg = m_plane*(1-weight_ratio)*9.81 # [N] Total normal force of car\n F_fric = Roll_fric*N_mlg + Roll_fric*N_nlg # [N] Total force req to move plane at acceleration\n\n # Resultant force\n F_acc = 2*F_egts_w-F_fric # [N]\n\n # Resultant acceleration\n a_acc_slow = F_acc/m_plane # [m/s2]\n # Cut-off insignificant accelerations\n v_slow = v_slow[np.where(a_acc_slow >= 0.005)]\n a_acc_slow = a_acc_slow[np.where(a_acc_slow >= 0.005)]\n\n # Determine time intervals for velocity intervals w corresponding acceleration profile\n time = np.array([0])\n for i in range(1, len(v_slow)):\n time = np.append(time, [v_slow[i]/a_acc_slow[i]])\n\n # Plot\n# gs = gridspec.GridSpec(2, 2) # Define figure layout\n# fig = plt.figure(\"EGTS Only Performance\")\n# fig.suptitle(\" EGTS Only Performance \\n Pushback\")\n#\n# # Pushback velocity\n# ax1 = fig.add_subplot(gs[0, 0])\n# ax1.set_title(\"Velocity\")\n# ax1.set_xlabel(\"Time [s]\")\n# ax1.set_ylabel(\"Velocity [m/s]\")\n# ax1.plot(time[0:31], v_slow[0:31], color='g')\n# ax1.set_yticks([0, 0.5, 1, 1.5])\n# ax = ax1.twinx()\n# ax.plot(time[0:31], v_slow[0:31], color='g')\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3'])\n# # Pushback Acceleration graphs\n# ax2 = fig.add_subplot(gs[0, 1])\n# ax2.set_title(\"Acceleration\")\n# ax2.set_xlabel(\"Time [s]\")\n# ax2.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax2.set_ylim(0, max(a_acc_slow)+0.2)\n# ax2.plot(time[0:31], a_acc_slow[0:31], color='r')\n#\n# # Slow taxi title\n# ax0 = fig.add_subplot(gs[1, :])\n# 
ax0.axis('off')\n# ax0.set_title(\"Slow Taxi\", pad=20)\n# # Slow taxi\n# ax3 = fig.add_subplot(gs[1, 0])\n# ax3.set_title(\"Velocity\")\n# ax3.set_xlabel(\"Time [s]\")\n# ax3.set_ylabel(\"Velocity [m/s]\")\n# ax3.plot(time, v_slow, color='g')\n# ax3.plot(time, [2.88 for i in time], color='gray', linestyle='--')\n# ax3.set_yticks([0, 0.5, 1, 1.5, 2, 2.5, 3])\n# ax = ax3.twinx()\n# ax.set_ylabel(\"Velocity [kts]\")\n# ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144, 4*0.5144, 5*0.5144, 6*0.5144]))\n# ax.set_yticklabels(['0', '1', '2', '3', '4', '5', '6'])\n# # Pushback Acceleration graphs\n# ax4 = fig.add_subplot(gs[1, 1])\n# ax4.set_title(\"Acceleration\")\n# ax4.set_xlabel(\"Time [s]\")\n# ax4.set_ylabel(\"Acceleration [$m/s^2$]\")\n# ax4.set_ylim(0, max(a_acc_slow)+0.2)\n# ax4.plot(time, a_acc_slow, color='r')\n\n # Plot & Save\n# fig.tight_layout()\n# fig.subplots_adjust(top=0.88)\n# fig.savefig('EGTS_Only_Perf', bbox_inches='tight')\n #plt.show()\n return a_acc_slow, F_acc, v_slow, time", "def set_engines(N=0):\n global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip,bmcs1, if_no_bip, allpdrs,allbmcs\n bmcs1 = [9] #BMC3\n #for HWMCC we want to set N = \n if N == 0:\n N = n_proc = os.sysconf(os.sysconf_names[\"SC_NPROCESSORS_ONLN\"])\n## N = 4 # this was for hwmcc15\n N = n_proc = 2*N\n## N = n_proc = 8 ### simulate 4 processors for HWMCC - turn this off a hwmcc.\n else:\n n_proc = N\n## print 'n_proc = %d'%n_proc\n #strategy is to use 2x number of processors \n if N <= 1:\n reachs = [24]\n pdrs = [7]\n## bmcs = [30]\n bmcs = [9]\n intrps = []\n sims = []\n slps = [18]\n elif N <= 2:\n reachs = [24]\n pdrs = [7]\n bmcs = [46,47]\n intrps = []\n sims = []\n slps = [18]\n elif N <= 4: #this will be the operative one for hwmcc'15\n reachs = [24] #reachy\n pdrs = [7,34] #prdm pdr_abstract\n if if_no_bip:\n allpdrs = pdrs = [7,19] #pdrm pdrmm\n bmcs = [46,47,2] #bmc3 bmc3 -S\n intrps = [23] #interp_m\n sims = [26] #Rarity_sim\n slps = [18] #sleep\n# 0.PDR, 1.INTERPOLATION, 2.BMC, 3.SIMULATION,\n# 4.REACHX, 5.PRE_SIMP, 6.simple, 7.PDRM, 8.REACHM, 9.BMC3\n# 10.Min_ret, 11.For_ret, 12.REACHP, 13.REACHN 14.PDRseed 15.prove_part_2,\n# 16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,\n# 21.run_parallel, 22.INTRP_bwd, 23.Interp_m 24.REACHY 25.REACHYc 26.Rarity Sim 27.simplify\n# 28.speculate, 29.quick_sec, 30.bmc3 -S, 31.BMC2 32.extract -a 33.extract 34.pdr_abstract\n# 35.par_scorr, 36.dsat, 37.iprove\n\n# BIPS = 0.PDR, 1.INTERPOLATION, 2.BMC, 14.PDRseed, 22.INTRP_bwd, 34.pdr_abstract\n# also reparam which uses ,reparam \n\n elif N <= 8: #used for HWMCC'15\n reachs = [24] #REACHY\n allpdrs = pdrs = [7,34,14] #PDRM pdr_abstract PDR_seed\n## intrps = [41,23,1] #Interp_m\n intrps = [23] #rkb\n allbmcs = bmcs = [46,47,9,2] #BMC3 bmc3 -S BMC \n if if_no_bip:\n allpdrs = pdrs = [7,19] #PDRM PDRMm\n intrps = allintrps = [41,23] #Interp_m\n bmcs = allbmcs = [46,47,9,38]\n sims = [26] #Rarity_Sim\n slps = [18] #sleep\n else:\n reachs = [24] #REACHY REACHX\n pdrs = allpdrs\n## pdrs = [7,34,14,19,0] #PDRM pdr_abstract PDR_seed PDRMm PDR\n## pdrs = allpdrs =[7,34,14]\n## intrps = [41,23,1] #Interp_m INTERPOLATION\n## intrps = [41,23] #rkb\n intrps = [23,1] #Interp_m INTERPOLATION\n intrps = [23] #rkb\n bmcs = allbmcs #allbmcs = [9,30,2,31,38,46,47]\n if if_no_bip:\n allpdrs = pdrs = [7,19] #PDRM PDRMm\n intrps = allintrps = [41,23] #Interp_m\n reachs = [24] #REACHY\n bmcs = [46,47,9,38] \n sims = [26] #Rarity_Sim\n slps = [18] #sleep\n print 'No. 
engines = %d,%d '%(N,n_proc)\n print 'pdrs = %s'%str(pdrs)\n print 'bmcs = %s'%str(bmcs)", "def energyMultiplier(self) -> float:\n return self._getMultiplier('energy')", "def specific_heat_capacity(self):\n to_ret = 1.3 * units.kilojoule / (units.kg * units.kelvin)\n return to_ret.to('J/kg/kelvin')", "def clutchEngage(self, engineRPM, clutchRPM):\n pass", "def estimate_action_space(self) -> spaces.Box:\n \n low_limit, high_limit = [], []\n \n for key in self.active_actions:\n if key in ['cooling_device', 'heating_device']:\n low_limit.append(0.0)\n high_limit.append(1.0)\n \n elif key == 'electrical_storage':\n limit = self.electrical_storage.nominal_power/self.electrical_storage.capacity\n low_limit.append(-limit)\n high_limit.append(limit)\n \n else:\n if key == 'cooling_storage':\n capacity = self.cooling_storage.capacity\n cooling_demand = self.energy_simulation.__getattr__(\n 'cooling_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n maximum_demand = cooling_demand.max()\n \n elif key == 'heating_storage':\n capacity = self.heating_storage.capacity\n heating_demand = self.energy_simulation.__getattr__(\n 'heating_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n maximum_demand = heating_demand.max()\n\n elif key == 'dhw_storage':\n capacity = self.dhw_storage.capacity\n dhw_demand = self.energy_simulation.__getattr__(\n 'dhw_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n maximum_demand = dhw_demand.max()\n\n else:\n raise Exception(f'Unknown action: {key}')\n\n maximum_demand_ratio = maximum_demand/capacity\n\n try:\n low_limit.append(max(-maximum_demand_ratio, -1.0))\n high_limit.append(min(maximum_demand_ratio, 1.0))\n except ZeroDivisionError:\n low_limit.append(-1.0)\n high_limit.append(1.0)\n \n return spaces.Box(low=np.array(low_limit, dtype='float32'), high=np.array(high_limit, dtype='float32'))", "def overhead():\n pass\n # Running OOMMF through oommfc.\n #system = oc.examples.macrospin()\n #td = oc.TimeDriver()\n #oommfc_start = time.time()\n #td.drive(system, t=1e-12, n=1, overwrite=True)\n #oommfc_stop = time.time()\n #oommfc_time = oommfc_stop - oommfc_start\n\n # Running OOMMF directly.\n #oommf_runner = get_oommf_runner()\n #mifpath = os.path.realpath(os.path.join('example-macrospin', 'drive-0',\n # 'example-macrospin.mif'))\n #oommf_start = time.time()\n #oommf_runner.call(mifpath)\n #oommf_stop = time.time()\n #oommf_time = oommf_stop - oommf_start\n #shutil.rmtree('example-macrospin')\n\n #return oommfc_time - oommf_time", "def main():\n power_system = PowerSystem(\"normal_with_pevs_case9a.raw\",\"normal_with_pevs_case9a.dyr\")\n print power_system._nbus\n print \"LOADS\"\n print len(power_system._loads)\n print \"PEVS\"\n print len(power_system._pevs)\n print \"GENERATORS\"\n print len(power_system._generators)\n print \"BRANCHES\"\n print len(power_system._branches)", "def quantity_size():", "def EnergyConsumption(self):\n req_reactants_sor_syn_kwargs = {'mol_LiOH_H2O': self.sor_syn.mol_LiOH_H2O,\n 'hc_LiOH': self.hC.hc_LiOH,\n 'mol_aluminium_hydroxide': self.sor_syn.mol_aluminium_hydroxide,\n 'hc_aluminium_hydroxide': self.hC.hc_aluminium_hydroxide_mol,\n 'mol_H2O': self.sor_syn.mol_H2O,\n 'hc_H2O': self.hC.hc_H2O,\n 'mol_HCl': self.sor_syn.mol_HCl,\n 'hc_HCl': self.hC.hc_HCl,\n 
'reaction_temperature': self.reactor.reaction_temp}\n\n q_reactants_sor_syn = Sor_Syn_Chemicals.QReactants(**req_reactants_sor_syn_kwargs)\n\n req_reactor_sor_syn_kwargs = {'reaction_temperature': self.reactor.reaction_temp,\n 'reaction_time_1': self.reactor.reaction_time_1,\n 'reaction_time_2': self.reactor.reaction_time_2,\n 'surface_area': self.reactor.surface_area,\n 'thermal_conductivity': self.reactor.thermal_conductivity,\n 'wall_thickness': self.reactor.wall_thickness,\n 'liq_density_1': self.density_1,\n 'liq_density_2': self.density_2}\n\n q_reactor_sor_syn = Sor_Syn_Reactor.QReactor(**req_reactor_sor_syn_kwargs)\n\n q_reaction_sor_syn = q_reactants_sor_syn + (q_reactor_sor_syn * 10**(-3))\n\n\n req_stir_energy_sor_syn_kwargs = {'impeller_power_number': self.impeller.impeller_power_number,\n 'impeller_diameter': self.impeller.impeller_diameter,\n 'agitator_rotational_speed': self.impeller.agitator_rotational_speed,\n 'density_1': self.density_1 * 10**3,\n 'density_2': self.density_2 * 10**3,\n 'stirring_time_1': self.reactor.reaction_time_1 * 3600,\n 'stirring_time_2': self.reactor.reaction_time_2 * 3600,\n 'efficiency': self.impeller.efficiency}\n\n stirring_energy_sor_syn = uC.kiloWattHours(Impeller.StirringEnergySorSyn(**req_stir_energy_sor_syn_kwargs))\n\n grinding_energy_sor_syn = QProcesses.grinding_energy(uC.tonnes(self.total_mass_mix_2_sor_syn))\n\n filtration_energy_sor_syn = QProcesses.filtration_energy(uC.tonnes(self.total_mass_mix_2_sor_syn))\n\n pumping_energy_sor_syn = uC.kiloWattHours(QProcesses.pumping_energy(uC.tonnes(self.total_mass_mix_2_sor_syn) +\n self.water.sor_syn_washing))\n\n req_stir_energy_column_washing_kwargs = {'impeller_power_number': self.impeller.impeller_power_number,\n 'impeller_diameter': self.impeller.impeller_diameter,\n 'agitator_rotational_speed': self.impeller.agitator_rotational_speed,\n 'density': self.density_NaCl_washing * 10 ** 3,\n 'stirring_time': self.washing.stirring_time * 3600,\n 'efficiency': self.impeller.efficiency}\n stirring_energy_column_washing = uC.kiloWattHours\\\n (QProcesses.stirring_energy(**req_stir_energy_column_washing_kwargs))\n\n # assuming the brine has the density of water\n\n pumping_energy_column_extraction = uC.kiloWattHours(QProcesses.pumping_energy\n (uC.tonnes(((self.plant.brine_flow_day * 10**6 / 24) *\n self.plant.plant_uptime) * self.brine.brine_density) +\n ((self.washing.H2O_washing +\n self.stripping.H2O_stripping) * 10**3) +\n uC.tonnes(self.washing.mass_NaCl)))\n\n pumping_energy_effluent = uC.kiloWattHours\\\n (QProcesses.pumping_energy(uC.tonnes(((self.plant.brine_flow_day * 10**6 / 24) *\n self.plant.plant_uptime * self.brine.brine_density) +\n (self.washing.H2O_washing + self.stripping.H2O_stripping) *\n 10**3 + self.washing.mass_NaCl - self.stripping.Li_sol_output *\n 10**3 * self.density_LiCl_sol_stripping)))\n\n filtration_energy_FO = QProcesses.filtration_energy(self.FO.Li_sol_output * 10**(-3))\n\n pumping_energy_FO = uC.kiloWattHours(QProcesses.pumping_energy(uC.tonnes(self.stripping.Li_sol_output *\n 10**3 * self.density_LiCl_sol_stripping)))\n\n req_reactants_LC_processing_kwargs = {'mol_LiCl': uC.solidMol\n ('LiCl', self.reactant_flow.LC_processing_reactants['LiCl']),\n 'hc_LiCl': self.hC.hc_LiCl,\n 'mol_Na2CO3': uC.solidMol\n ('Na2CO3', self.reactant_flow.LC_processing_reactants['Na2CO3']),\n 'hc_Na2CO3': self.hC.hc_Na2CO3,\n 'reaction_temperature': self.LC_processing.reaction_temp}\n q_reactants_LC_processing = LC_processing.QReactants(**req_reactants_LC_processing_kwargs)\n\n 
q_reactor_LC_processing_kwargs = {'reaction_temperature': self.LC_processing.reaction_temp,\n 'reaction_time': self.LC_processing.reaction_time,\n 'surface_area': self.LC_processing.surface_area,\n 'thermal_conductivity': self.LC_processing.thermal_conductivity,\n 'wall_thickness': self.LC_processing.wall_thickness,\n 'liq_density': self.density_LC_processing}\n\n q_reactor_LC_processing = QReactors.batchReactor(**q_reactor_LC_processing_kwargs)\n\n q_reaction_LC_processing = q_reactants_LC_processing + (q_reactor_LC_processing[0] * 10**(-3))\n\n req_stir_energy_LC_processing_kwargs = {'impeller_power_number': self.impeller.impeller_power_number,\n 'impeller_diameter': self.impeller.impeller_diameter,\n 'agitator_rotational_speed': self.impeller.agitator_rotational_speed,\n 'density': self.density_LC_processing * 10**3,\n 'stirring_time': self.LC_processing.reaction_time * 3600,\n 'efficiency': self.impeller.efficiency}\n\n stirring_energy_LC_processing = uC.kiloWattHours(QProcesses.stirring_energy\n (**req_stir_energy_LC_processing_kwargs))\n\n filtration_energy_LC_processing = QProcesses.filtration_energy\\\n (uC.tonnes(self.reactant_flow.LC_processing_reactants['LiCl'] +\n self.reactant_flow.LC_processing_reactants['Na2CO3']))\n\n pumping_energy_LC_processing = uC.kiloWattHours(QProcesses.pumping_energy\n (uC.tonnes(self.FO.Li_sol_output * 10**3 +\n self.density_LiCl_sol_FO +\n self.reactant_flow.LC_processing_reactants['Na2CO3'])))\n\n req_reactants_LC_carbonation_kwargs = {'mol_Li2CO3': uC.solidMol\n ('Li2CO3', self.reactant_flow.LC_purification_reactants['impure Li2CO3']),\n 'hc_Li2CO3': self.hC.hc_Li2CO3_carbonation,\n 'mol_CO2': uC.solidMol\n ('CO2', self.reactant_flow.LC_purification_reactants['CO2']),\n 'hc_CO2': self.hC.hc_CO2_carbonation,\n 'mol_H2O': uC.solidMol\n ('H2O', self.reactant_flow.LC_purification_reactants['H2O']),\n 'hc_H2O': self.hC.hc_H2O,\n 'reaction_temperature': self.LC_purification.carbonation_temp}\n\n q_reactants_LC_carbonation = LC_purification.QReactants(**req_reactants_LC_carbonation_kwargs)\n\n req_reactor_LC_carbonation_kwargs = {'reaction_temperature': self.LC_purification.carbonation_temp,\n 'reaction_time': self.LC_purification.carbonation_time,\n 'surface_area': self.LC_purification.surface_area,\n 'thermal_conductivity': self.LC_purification.thermal_conductivity,\n 'wall_thickness': self.LC_purification.wall_thickness,\n 'liq_density': self.density_LC_purification}\n\n q_reactor_LC_carbonation = QReactors.batchReactor(**req_reactor_LC_carbonation_kwargs)\n\n q_reaction_LC_carbonation = q_reactants_LC_carbonation + (q_reactor_LC_carbonation[0] * 10**(-3))\n\n req_stir_energy_carbonation_kwargs = {'impeller_power_number': self.impeller.impeller_power_number,\n 'impeller_diameter': self.impeller.impeller_diameter,\n 'agitator_rotational_speed': self.impeller.agitator_rotational_speed,\n 'density': self.density_LC_purification * 10**3,\n 'stirring_time': self.LC_purification.carbonation_time * 3600,\n 'efficiency': self.impeller.efficiency}\n\n stirring_energy_carbonation = uC.kiloWattHours(QProcesses.stirring_energy(**req_stir_energy_carbonation_kwargs))\n\n filtration_energy_carbonation = QProcesses.filtration_energy\\\n (uC.tonnes(self.reactant_flow.LC_purification_intermediate['LiHCO3']))\n\n pumping_energy_carbonation = uC.kiloWattHours(QProcesses.pumping_energy\n (uC.tonnes(self.reactant_flow.LC_purification_reactants\n ['impure Li2CO3']) +\n self.reactant_flow.LC_purification_reactants['H2O'] +\n 
self.reactant_flow.LC_purification_reactants['CO2']))\n\n pumping_energy_carbonation_processing = uC.kiloWattHours(QProcesses.pumping_energy(uC.tonnes\n (self.reactant_flow.LC_purification_intermediate\n ['LiHCO3'])))\n\n req_reactants_LC_precipitation_kwargs = {'mol_Li2CO3': uC.solidMol\n ('Li2CO3', self.reactant_flow.LC_purification_intermediate['LiHCO3']),\n 'hc_Li2CO3': self.hC.hc_Li2CO3_carbonation,\n 'mol_CO2': uC.solidMol\n ('CO2', self.reactant_flow.LC_purification_reactants['CO2']),\n 'hc_CO2': self.hC.hc_CO2_carbonation,\n 'mol_H2O': uC.solidMol\n ('H2O', self.reactant_flow.LC_purification_reactants['H2O']),\n 'hc_H2O': self.hC.hc_H2O,\n 'reaction_temperature': self.LC_purification.precipitation_temp}\n\n q_reactants_LC_precipitation = LC_purification.QReactants(**req_reactants_LC_precipitation_kwargs)\n\n req_reactor_LC_precipitation_kwargs = {'reaction_temperature': self.LC_purification.precipitation_temp,\n 'reaction_time': self.LC_purification.precipitation_time,\n 'surface_area': self.LC_purification.surface_area,\n 'thermal_conductivity': self.LC_purification.thermal_conductivity,\n 'wall_thickness': self.LC_purification.wall_thickness,\n 'liq_density': self.density_LC_purification}\n\n q_reactor_LC_precipitation = QReactors.batchReactor(**req_reactor_LC_precipitation_kwargs)\n\n q_reaction_LC_precipitation = q_reactants_LC_precipitation + (q_reactor_LC_precipitation[0] * 10**(-3))\n\n req_stir_energy_precipitation_kwargs = {'impeller_power_number': self.impeller.impeller_power_number,\n 'impeller_diameter': self.impeller.impeller_diameter,\n 'agitator_rotational_speed': self.impeller.agitator_rotational_speed,\n 'density': self.density_LC_purification * 10**3,\n 'stirring_time': self.LC_purification.precipitation_time * 3600,\n 'efficiency': self.impeller.efficiency}\n\n stirring_energy_precipitation = uC.kiloWattHours(QProcesses.stirring_energy\n (**req_stir_energy_precipitation_kwargs))\n\n filtration_energy_precipitation = QProcesses.filtration_energy\\\n (uC.tonnes(self.reactant_flow.LC_purification_intermediate['LiHCO3']))\n\n req_drying_energy_LC_processing_kwargs = {'heat_capacity_solution': self.hC_LC_purification,\n 'mass_solution': self.total_mass_drying_LC_purification * 10**(-3),\n 'boiling_temperature': self.Tb_LC_purification,\n 'starting_temperature': self.LC_purification.washing_temperature,\n 'evaporation_enthalpy': self.Hvap_LC_purification,\n 'mass_vapour': (self.LC_purification.water_content_filtration *\n self.reactant_flow.LC_purification_product\n ['pure Li2CO3']) * 10**(-3)}\n\n drying_energy_LC_purification = uC.kiloWattHours(QProcesses.drying_energy\n (**req_drying_energy_LC_processing_kwargs))\n\n pumping_energy_precipitation_filtration = uC.kiloWattHours(QProcesses.pumping_energy\n (uC.tonnes(self.reactant_flow.LC_purification_product\n ['pure Li2CO3']) +\n self.reactant_flow.LC_purification_by_products\n ['H2O']))\n\n pumping_energy_LC_purification_wash = uC.kiloWattHours(QProcesses.pumping_energy\n (uC.tonnes(self.water.LC_purification_washing)))\n\n req_belt_conveyor_kwargs = {'belt_speed': self.BC.belt_speed, 'belt_length': self.BC.belt_length,\n 'gradient': self.BC.gradient, 'conveyor_output': self.BC.output,\n 'drive_train_efficiency': self.BC.efficiency}\n belt_conveyor_energy_average = QMachines.beltConveyor_requirement(**req_belt_conveyor_kwargs) * \\\n self.BC.hours_operation\n\n energy_df = pd.DataFrame(data={\"Reaction energy\": [q_reaction_sor_syn + q_reaction_LC_processing +\n q_reaction_LC_carbonation + q_reaction_LC_precipitation 
+\n stirring_energy_sor_syn + stirring_energy_column_washing +\n stirring_energy_LC_processing + stirring_energy_carbonation +\n stirring_energy_precipitation],\n \"Processing energy\": [filtration_energy_sor_syn + filtration_energy_FO +\n filtration_energy_LC_processing +\n filtration_energy_carbonation +\n filtration_energy_precipitation + grinding_energy_sor_syn +\n drying_energy_LC_purification],\n \"Transportation energy\": [pumping_energy_sor_syn +\n pumping_energy_column_extraction +\n pumping_energy_effluent + pumping_energy_FO +\n pumping_energy_LC_processing +\n pumping_energy_carbonation_processing +\n pumping_energy_carbonation +\n pumping_energy_carbonation_processing +\n pumping_energy_precipitation_filtration +\n pumping_energy_LC_purification_wash +\n belt_conveyor_energy_average]},\n index=['Geothermal_LDH'])\n energy_df['sum'] = energy_df.sum(axis=1)\n\n return energy_df", "def get_size(self):\n units = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\")\n for i, unit in enumerate(units):\n high = 10**(i*3)\n if self.size < high*1000:\n return f\"{round(self.size/high, 3)} {unit}\"", "def run(self):\n\n self.initialize()\n self.monitor = Monitor('Time operating', sim=self)\n\n if self.maint_staff_num:\n self.mtechs = Resource(capacity=self.maint_staff_num, sim=self, name='maintenance techs', qType=PriorityQ, monitored=True)\n\n if self.inspect_staff_num:\n self.inspectors = Resource(capacity=self.inspect_staff_num, sim=self, name='inspectors', qType=PriorityQ, monitored=True)\n\n if self.inspection_tools_qty:\n self.inspect_tools = Resource(capacity=self.inspection_tools_qty, sim=self, name='inspection tools', qType=PriorityQ, monitored=True)\n\n if self.maintenance_tools_qty:\n self.maint_tools = Resource(capacity=self.maintenance_tools_qty, sim=self, name='maintenance tools', qType=PriorityQ, monitored=True)\n\n # this variable is for when the machines are spread out over the service time and not serviced all at one time, can have two values 0 or 1\n spread_inspection = 1\n # !!! WARNING hardcoded here, average inspection time, should be calculated from averaging the insp_t_general variable of all assets\n # OR, this can be simply taken as an inspection job period, i.e. 
each job takes 5 h, with machines evenly distributed over sessions\n inspection_duration_avg = 5\n inspect_per_session = 1\n if self.inspect_intervals:\n inspect_per_session = int(round((len(self.assets_data)/(self.inspect_intervals/inspection_duration_avg))))\n if inspect_per_session < 1: inspect_per_session = 1\n\n asset_count = 0\n for asset in self.assets_data:\n inspect_delay = spread_inspection * int(asset_count/inspect_per_session) * inspection_duration_avg\n asset_count += 1\n # create and activate the assets and their operators\n self.operators.append(Operator(name=asset['operator']['name'], sim=self, belief=asset['operator']['dist']))\n self.activate(self.operators[-1], self.operators[-1].decision_moment())\n\n self.assets.append(Asset(name=asset['name'], sim=self, output_rate=asset['output_rate'], cost_rate=asset['cost_rate'],\n optime_scheduled=self.assets_schedule[asset['name']], inspection_proc=(asset['insp_t_gen'], asset['insp_cost_gen']),\n maintenance_proc=(asset['maint_t_gen'], asset['maint_cost_gen']), operator=self.operators[-1], cms=asset['cms']))\n self.activate(self.assets[-1], self.assets[-1].operating())\n\n self.operators[-1].asset = self.assets[-1]\n\n # create and assign simulation resources\n if self.maint_intervals:\n self.services.append(Service(mode = 'maintenance', asset=self.assets[-1], sim=self,intervals=self.maint_intervals,\n duration=asset['maint_t_gen'], cost=asset['maint_cost_gen']))\n self.activate(self.services[-1], self.services[-1].service_routine())\n\n if self.inspect_intervals:\n self.services.append(Service(mode = 'inspection', asset=self.assets[-1], sim=self, intervals=self.inspect_intervals,\n duration=asset['insp_t_gen'], cost=asset['insp_cost_gen']))\n self.activate(self.services[-1], self.services[-1].service_routine(), delay=inspect_delay)\n\n # create and activate the event process. 
Should DRY\n if 'failures' in asset['events']:\n for mode, data in asset['events']['failures'].iteritems():\n self.events.append(Event(event_type='failure', mode = mode, asset=self.assets[-1], sim=self,\n randfunc=data[0], duration=data[1], cost=data[2]))\n self.activate(self.events[-1], self.events[-1].halt_routine())\n self.assets[-1].events.append(self.events[-1])\n self.assets[-1].distributions['failure'][mode] = data\n if 'faults' in asset['events']:\n for mode, data in asset['events']['faults'].iteritems():\n self.events.append(Event(event_type='fault', mode = mode, asset=self.assets[-1], sim=self,\n randfunc=data[0], duration=data[5], cost=data[6]))\n self.activate(self.events[-1], self.events[-1].fault_routine())\n self.assets[-1].events.append(self.events[-1])\n self.assets[-1].distributions['fault'][mode] = data\n if 'incidents' in asset['events']:\n for mode, data in asset['events']['incidents'].iteritems():\n self.events.append(Event(event_type='incident', mode = mode, asset=self.assets[-1], sim=self,\n randfunc=data[0], duration=data[1], cost=data[2]))\n self.activate(self.events[-1], self.events[-1].halt_routine())\n self.assets[-1].events.append(self.events[-1])\n self.assets[-1].distributions['incident'][mode] = data\n\n self.simulate(until=self.max_time)\n\n # Output results\n if PRINTOUT:\n print \"-------------------------------------\"\n print \"Results of simulation %s:\" % (self.name)\n print \".....................................\"\n print \"num of assets: \", len(self.assets)\n for asset in self.assets:\n self.total_output += asset.output\n self.total_cost += asset.cost\n self.total_lost_output += asset.total_event_time * asset.output_rate\n self.events_occurred[asset.name] = asset.events_occured\n self.total_event_time += asset.total_event_time\n self.time_operating += asset.time_operating\n self.failures_num += asset.failures_num\n self.faults_num += asset.faults_num\n self.incidents_env_num += asset.incidents_env_num\n self.incidents_saf_num += asset.incidents_saf_num\n self.faults_detected_num += asset.faults_detected_num\n\n if PRINTOUT:\n print \"Process of asset %s:\" % asset.name\n print \"Total event time: \", asset.total_event_time\n print \"Uptime: \", asset.time_operating\n print \"Events:\", [event_name for event_name in asset.events_occured]\n print \"total revenue: %.2f\" % asset.output\n print \"total lost revenue: %.2f\" % (asset.total_event_time*asset.output_rate)\n print \"total cost: %.2f\" % asset.cost\n print \"..........................................\"", "def prepE2EStretchVsSizeAndUnsched(resultDir = ''):\n\n f = open(os.environ['HOME'] + \"/Research/RpcTransportDesign\"+\n \"/OMNeT++Simulation/analysis/PlotScripts/stretchVsUnsched.txt\", 'w')\n tw_h = 40\n tw_l = 15\n f.write('LoadFactor'.center(tw_l) + 'WorkLoad'.center(tw_h) +\n 'MsgSizeRange'.center(tw_l) + 'SizeCntPercent'.center(tw_l) +\n 'BytesPercent'.center(tw_l) + 'UnschedBytes'.center(tw_l) +\n 'MeanStretch'.center(tw_l) + 'MedianStretch'.center(tw_l) +\n '99PercentStretch'.center(tw_l) + '\\n')\n for filename in glob(os.path.join(resultDir, '*.sca')):\n\n sp = ScalarParser(filename)\n parsedStats = AttrDict()\n parsedStats.hosts = sp.hosts\n parsedStats.tors = sp.tors\n parsedStats.aggrs = sp.aggrs\n parsedStats.cores = sp.cores\n parsedStats.generalInfo = sp.generalInfo\n\tparsedStats.globalListener = sp.globalListener \n # print(sp.hosts)\n # exit()\n\n xmlConfigFile = os.environ['HOME'] + '/Research/RpcTransportDesign/OMNeT++Simulation/homatransport/src/dcntopo/config.xml'\n 
xmlParsedDic = AttrDict()\n xmlParsedDic = parseXmlFile(xmlConfigFile,parsedStats.generalInfo)\n msgBytesOnWireDigest = AttrDict()\n msgBytesOnWire(parsedStats, xmlParsedDic, msgBytesOnWireDigest)\n e2eStretchAndDelayDigest = AttrDict()\n e2eStretchAndDelay(parsedStats, xmlParsedDic, msgBytesOnWireDigest, e2eStretchAndDelayDigest)\n loadFactor = float(parsedStats.generalInfo.rlf) * len(xmlParsedDic.senderIds) / len(xmlParsedDic.receiverIds)\n workLoad = parsedStats.generalInfo.workloadType\n avgStretch = 0.0\n\n try:\n UnschedBytes = int(parsedStats.generalInfo.defaultReqBytes) + int(parsedStats.generalInfo.defaultUnschedBytes)\n except Exception as e:\n print('No Unsched bytes in file: %s' % (filename))\n UnschedBytes = 'NA'\n\n for elem in e2eStretchAndDelayDigest.stretch:\n sizeUpBound = elem.sizeUpBound\n sizeProbability = elem.cntPercent\n bytesPercent = elem.bytesPercent\n meanStretch = float(elem.mean)\n medianStretch = float(elem.median)\n tailStretch = float(elem.ninety9Percentile)\n avgStretch += meanStretch * float(elem.cntPercent) / 100\n f.write('{0}'.format(loadFactor).center(tw_l) + '{0}'.format(workLoad).center(tw_h) +\n '{0}'.format(sizeUpBound).center(tw_l) + '{0:.5f}'.format(sizeProbability).center(tw_l) +\n '{0:.5f}'.format(bytesPercent).center(tw_l) + '{0}'.format(UnschedBytes).center(tw_l) +\n '{0}'.format(meanStretch).center(tw_l) + '{0}'.format('NA').center(tw_l) +\n '{0}'.format('NA').center(tw_l) + '\\n')\n f.write('{0}'.format(loadFactor).center(tw_l) + '{0}'.format(workLoad).center(tw_h) +\n '{0}'.format(sizeUpBound).center(tw_l) + '{0:.5f}'.format(sizeProbability).center(tw_l) +\n '{0:.5f}'.format(bytesPercent).center(tw_l) + '{0}'.format(UnschedBytes).center(tw_l) +\n '{0}'.format('NA').center(tw_l) + '{0}'.format(medianStretch).center(tw_l) +\n '{0}'.format('NA').center(tw_l) + '\\n')\n f.write('{0}'.format(loadFactor).center(tw_l) + '{0}'.format(workLoad).center(tw_h) +\n '{0}'.format(sizeUpBound).center(tw_l) + '{0:.5f}'.format(sizeProbability).center(tw_l) +\n '{0:.5f}'.format(bytesPercent).center(tw_l) + '{0}'.format(UnschedBytes).center(tw_l) +\n '{0}'.format('NA').center(tw_l) + '{0}'.format('NA').center(tw_l) +\n '{0}'.format(tailStretch).center(tw_l) + '\\n')\n #f.write('{0}'.format(loadFactor).center(tw_l) + '{0}'.format(workLoad).center(tw_h) +\n # '{0}'.format('overallsizes').center(tw_l) + '{0}'.format(1.00).center(tw_l) +\n # '{0}'.format(100).center(tw_l) + '{0}'.format(unschedbytes).center(tw_l) +\n # '{0}'.format(avgStretch).center(tw_l) + '{0}'.format('NA').center(tw_l) +\n # '{0}'.format('NA').center(tw_l) + '\\n')\n f.close()", "def efficiency(self):\n return 0.9", "def calculate_metrics(self):\n self.build_report_boxes(self.season.country.currency)\n exchange = self.season.exchange_rate\n\n self.farmer_price = self.farmer_box.farmer_price.as_local(exchange)\n self.farmer_share = self.farmer_box.farmer_share\n self.production_cost = self.expenses_box.production_cost.as_local(exchange)\n self.cherry_to_green_ratio = self.production_box.cherry_to_green_ratio", "def tradingPerformance(self):", "def capacity_vs_maxprod(year = 2019,redo_stats = False,show_title = True,eps_fig = False):\n # year = 2019\n # redo_stats = False\n\n if redo_stats:\n stats = get_entsoe_production_stats(startyear=year,endyear=year,areas=all_areas,limit=50)\n else:\n stats = pd.read_excel(Path(data_path)/f'gen_stats.xlsx',index_col=0,header=[0,1])\n cap = get_entsoe_capacity(areas=all_areas,year=year)\n\n #%%\n large_areas = ['GB','PL','DE','NL']\n # show_title = True\n 
# eps_fig = False\n fig_path = Path(data_path) / 'Figures'\n fig_path.mkdir(exist_ok=True,parents=True)\n \"\"\"\n Compare ENTSO-E capacity values with maximum production stats\n \n Print latex tables and figures with capacity and generator info\n \"\"\"\n fig_size = (16/cm_per_inch,8/cm_per_inch)\n areas = all_areas\n # summarize thermal production\n thermal_data = pd.DataFrame(index=areas,columns=['pmax','capacity','diff'])\n for area in areas:\n thermal_data.at[area,'capacity'] = cap.at[area,'Thermal']\n thermal_data.at[area,'pmax'] = stats.at['max',(area,'Thermal')]\n thermal_data.at[area,'diff'] = thermal_data.at[area,'capacity'] - thermal_data.at[area,'pmax']\n thermal_data = thermal_data.fillna(0)\n\n # summarize hydro production\n hydro_data = pd.DataFrame(index=areas,columns=['pmax','capacity','diff'])\n for area in thermal_data.index:\n hydro_data.at[area,'capacity'] = cap.at[area,'Hydro']\n hydro_data.at[area,'pmax'] = stats.at['max',(area,'Hydro')]\n hydro_data.loc[area,'diff'] = hydro_data.at[area,'capacity'] - hydro_data.at[area,'pmax']\n hydro_data = hydro_data.fillna(0)\n\n f = plt.figure()\n ax = f.add_subplot(1,1,1)\n areas1 = [a for a in areas if a not in large_areas]\n areas2 = [a for a in areas if a in large_areas]\n\n for i,plot_areas in enumerate([areas1,areas2]):\n ax.cla()\n thermal_data.loc[plot_areas,['pmax','capacity']].plot.bar(ax=ax)\n plt.grid()\n if show_title:\n plt.title('Thermal capacity')\n plt.ylabel('MW')\n plt.gcf().set_size_inches(fig_size)\n plt.tight_layout()\n plt.savefig(fig_path/f'thermal_capacity_{i}.png')\n if eps_fig:\n plt.savefig(fig_path/f'thermal_capacity_{i}.eps')\n\n\n ax.cla()\n hydro_data.loc[plot_areas,['pmax','capacity']].plot.bar(ax=ax)\n plt.grid()\n if show_title:\n plt.title('Hydro capacity')\n plt.ylabel('MW')\n plt.gcf().set_size_inches(fig_size)\n plt.tight_layout()\n plt.savefig(fig_path/f'hydro_capacity_{i}.png')\n if eps_fig:\n plt.savefig(fig_path/f'hydro_capacity_{i}.eps')", "def find_core_size(self, # type: ResTech\n grid, # type: RoutingGrid\n params, # type: Dict[str, Any]\n wres, # type: int\n hres, # type: int\n wblk, # type: int\n hblk, # type: int\n ext_dir, # type: str\n max_blk_ext, # type: int\n ):\n # type: (...) -> Tuple[int, int, Dict[str, Any]]\n nxblk = wres // wblk\n nyblk = hres // hblk\n\n ans = None\n x_only = (ext_dir == 'x')\n if x_only or (ext_dir == 'y'):\n # only extend X or Y direction\n if x_only:\n bin_iter = BinaryIterator(nxblk, nxblk + max_blk_ext + 1)\n else:\n bin_iter = BinaryIterator(nyblk, nyblk + max_blk_ext + 1)\n while bin_iter.has_next():\n ncur = bin_iter.get_next()\n if x_only:\n wcur, hcur = ncur * wblk, hres\n else:\n wcur, hcur = wres, ncur * hblk\n tmp = self.get_core_info(grid, wcur, hcur, **params)\n if tmp is None:\n bin_iter.up()\n else:\n ans = tmp\n bin_iter.save()\n bin_iter.down()\n\n if ans is None:\n raise ValueError('failed to find DRC clean core with maximum %d '\n 'additional block pitches.' 
% max_blk_ext)\n if x_only:\n nxblk = bin_iter.get_last_save()\n else:\n nyblk = bin_iter.get_last_save()\n return nxblk, nyblk, ans\n else:\n # extend in both direction\n opt_area = (nxblk + max_blk_ext + 1) * (nyblk + max_blk_ext + 1)\n # linear search in height, binary search in width\n # in this way, for same area, use height as tie breaker\n nxopt, nyopt = nxblk, nyblk\n for nycur in range(nyblk, nyblk + max_blk_ext + 1):\n # check if we should terminate linear search\n if nycur * nxblk >= opt_area:\n break\n bin_iter = BinaryIterator(nxblk, nxblk + max_blk_ext + 1)\n hcur = nycur * hblk\n while bin_iter.has_next():\n nxcur = bin_iter.get_next()\n if nxcur * nycur >= opt_area:\n # this point can't beat current optimum\n bin_iter.down()\n else:\n tmp = self.get_core_info(grid, nxcur * wblk, hcur, **params)\n if tmp is None:\n bin_iter.up()\n else:\n # found new optimum\n ans, nxopt, nyopt = tmp, nxcur, nycur\n opt_area = nxcur * nycur\n bin_iter.down()\n\n if ans is None:\n raise ValueError('failed to find DRC clean core with maximum %d '\n 'additional block pitches.' % max_blk_ext)\n return nxopt, nyopt, ans", "def efficiency_IS(self):\r\n return ((self.inlet.h - self.outlet.h) /\r\n (self.inlet.h - self.outlet_IS.h))\r\n\r\n # def part_load(self, capacity):\r\n \"\"\"\r\n Return a new Turbine class object based upon the part load perfomance\r\n of an existing turbine (self), operating at part load capacity.\r\n\r\n Parameters\r\n ----------\r\n self : Turbine\r\n Predetermined Turbine class object.\r\n capacity : float\r\n The ratio betweent the operating mass flowrate and maximum design\r\n flowrate. e.g. a turbine operating at half load would be at 0.5\r\n capacity.\r\n\r\n Returns\r\n -------\r\n plt : Turbine\r\n Returns part-load turbine (plt) as a new Turbine class object.\r\n \"\"\"\r\n # m_act = self.m_max * capacity\r\n\r\n # Part load turbine may have to be new, sepperate module function\r\n # plt = __init__(P_1=self.inlet.P, T_1=self.inlet.T, P_2=self.outlet.P,\r\n # m_act=m_act, m_max=self.m_max)\r\n # return plt\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that calculates the (peak) power required to power each car wheel based upon the acceleration and speed.
def car_power(a, v, ratio, pow_wheel=4):
    # cte
    w_rad_car_1 = 0.537     # [m] wheel radius front tires external truck
    w_rad_car_2 = 0.537     # [m] wheel radius rear tires external truck 0.496
    m_plane = 97400         # [kg] MRW
    m_car = 22000           # [kg] Weight of external vehicle
    m_tot = m_plane + m_car # [kg] Total mass of the system
    weight_ratio = 0.952    # [-] Weight distribution ratio
    Roll_fric = 0.02        # [-] Rolling friction coefficient for MLG gears
    Roll_fric_car = 0.0065  # [-] Rolling friction coefficient for car wheels
    n_hydrostat = 1         # [-] Efficiency hydrostatic motor

    # Necessary force and Torque calculations
    N_mlg = m_plane*weight_ratio*9.81                # [N] Total normal force on the MLG
    N_nlg = (m_car + m_plane*(1-weight_ratio))*9.81  # [N] Total normal force on the car
    N_nlg_w = N_nlg/4                                # [N] Normal force per MLG wheel
    F_tot = m_tot*a + Roll_fric*N_mlg + Roll_fric_car*N_nlg  # [N] Total force req to move plane at acceleration
    F_nlg = ratio*F_tot                              # [N] Force needed from internal
    F_nlg_w = F_nlg/pow_wheel                        # [N] Force needed from internal per wheel
    T_nlg_w_1 = F_nlg_w*w_rad_car_1                  # [Nm] Torque per front wheel
    T_nlg_w_2 = F_nlg_w*w_rad_car_2                  # [Nm] Torque per rear wheel

    # Rotational speed of wheels
    w_1 = v/w_rad_car_1  # [rad/s] rotational speed wheel
    w_2 = v/w_rad_car_2  # [rad/s] rotational speed wheel

    # Check if static friction is not exceeded
    if not stat_traction(T_nlg_w_1, N_nlg_w, w_rad_car_1):
        print("LOG: Acceleration: {a} \tTorque: {t} \tWheel Radius: {r}".format(a=a, t=T_nlg_w_1, r=w_rad_car_1))
        raise ValueError("Exceeds Static friction")
    elif not stat_traction(T_nlg_w_2, N_nlg_w, w_rad_car_2):
        print("LOG: Acceleration: {a} \tTorque: {t} \tWheel Radius: {r}".format(a=a, t=T_nlg_w_2, r=w_rad_car_2))
        raise ValueError("Exceeds Static friction")
    else:
        print("\tStatic friction check rear wheels: [{t}]. Static friction checked front wheels: [{t}]."
              .format(t=True), end="\r")
    return 1/n_hydrostat*T_nlg_w_1*w_1, 1/n_hydrostat*T_nlg_w_2*w_2
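A hedged call sketch for the function above; the operating point is invented, and the stat_traction helper referenced inside car_power is assumed to be importable from the same module:

# Hypothetical operating point (illustrative values only)
a = 0.4       # [m/s2] target acceleration
v = 2.0       # [m/s]  ground speed
ratio = 0.3   # [-]    share of the total force delivered by the towing vehicle

P_front, P_rear = car_power(a, v, ratio, pow_wheel=4)  # [W] peak power per front/rear wheel
print(P_front / 1e3, P_rear / 1e3)                     # [kW]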
[ "def calcPower(speed, resistance_level):\r\n satoridata = [\r\n {\r\n 'level': 1,\r\n 'slope': 3.73,\r\n 'intercept': -28.67\r\n },\r\n {\r\n 'level': 2,\r\n 'slope': 5.33,\r\n 'intercept': -36.67\r\n },\r\n {\r\n 'level': 3,\r\n 'slope': 6.87,\r\n 'intercept': -43.33\r\n },\r\n {\r\n 'level': 4,\r\n 'slope': 8.27,\r\n 'intercept': -47.33\r\n },\r\n {\r\n 'level': 5,\r\n 'slope': 10.07,\r\n 'intercept': -66.33\r\n },\r\n {\r\n 'level': 6,\r\n 'slope': 11.4,\r\n 'intercept': -67.00\r\n },\r\n {\r\n 'level': 7,\r\n 'slope': 13.13,\r\n 'intercept': -83.67\r\n },\r\n {\r\n 'level': 8,\r\n 'slope': 14.4,\r\n 'intercept': -82.00\r\n },\r\n {\r\n 'level': 9,\r\n 'slope': 15.93,\r\n 'intercept': -89.67\r\n },\r\n {\r\n 'level': 10,\r\n 'slope': 17.73,\r\n 'intercept': -114.67\r\n }\r\n ]\r\n\r\n power = satoridata[resistance_level-1]['slope'] * speed + satoridata[resistance_level-1]['intercept']\r\n print(resistance_level, power)\r\n return max((0, round(power)))", "def _get_power_at_freq(self) -> float:\n\t\toriginal_span = self.span()\n\t\toriginal_rbw = self.rbw()\n\t\tneeds_reset = False\n\t\tif not (original_span == 0.25e6 and original_rbw == 1e3):\n\t\t\tneeds_reset = True\n\t\t\tself.span(0.25e6)\n\t\t\tself.rbw(1e3)\n\t\tif not self._parameters_synced:\n\t\t\t# call configure to update both\n\t\t\t# the parameters on the device and the\n\t\t\t# setpoints and units\n\t\t\tself.configure()\n\t\tdata = self._get_sweep_data()\n\t\tmax_power = np.max(data)\n\t\tif needs_reset:\n\t\t\tself.span(original_span)\n\t\t\tself.rbw(original_rbw)\n\t\t\tself.configure()\n\t\tsleep(2*self.sleep_time.get())\n\t\treturn max_power", "def power(self):\n\t\tself._pca.duty(self._pwma, 4095)\n\t\tself._pca.duty(self._pwmb, 4095)", "def airspeedMultiplier(s, obj):\n\n speed = WUps2kts(obj.V.norm())\n return 2.25 / (1 + exp(-0.024 * (speed - 212)))", "def power_output(windspeeds):\n # Set the cut off wind speeds\n minWS, maxWS = 3, 24.5\n\n # If wind speed is inside the cut off levels\n if windspeeds > minWS and windspeeds < maxWS:\n ws = np.array([windspeeds])\n return round(model.predict([ws/Speed_F])[0][0]*Power_F, 3)\n else:\n #print(\"Error\")\n return 0", "def GetEncoderSpeed(self):\n try:\n i2cRecv = self.RawRead(COMMAND_GET_ENC_SPEED, I2C_MAX_LEN)\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed reading motor encoder move speed limit!')\n return\n\n power = float(i2cRecv[1]) / float(PWM_MAX)\n return power", "def _accel_limit_multiplier(CS, lead):\n accel_by_speed = OrderedDict([\n # (speed m/s, decel)\n (0., 0.95), # 0 kmh\n (10., 0.95), # 35 kmh\n (20., 0.925), # 72 kmh\n (30., 0.875)]) # 107 kmh\n if CS.teslaModel in [\"SP\",\"SPD\"]:\n accel_by_speed = OrderedDict([\n # (speed m/s, decel)\n (0., 0.95), # 0 kmh\n (10., 0.95), # 35 kmh\n (20., 0.925), # 72 kmh\n (30., 0.875)]) # 107 kmh\n accel_mult = _interp_map(CS.v_ego, accel_by_speed)\n if _is_present(lead):\n safe_dist_m = _safe_distance_m(CS.v_ego,CS)\n accel_multipliers = OrderedDict([\n # (distance in m, acceleration fraction)\n (0.6 * safe_dist_m, 0.15),\n (1.0 * safe_dist_m, 0.2),\n (3.0 * safe_dist_m, 0.4)])\n vrel_multipliers = OrderedDict([\n # vrel m/s, accel mult\n (0. 
, 1.),\n (10., 1.5)])\n\n return min(accel_mult * _interp_map(lead.vRel, vrel_multipliers) * _interp_map(lead.dRel, accel_multipliers),1.0)\n else:\n return min(accel_mult * 0.4, 1.0)", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def track_max_power(initial_V, t_track):\n\n # Initialise empty lists for storing data\n times = []\n voltages = []\n currents = []\n powers = []\n current_densities = []\n efficiencies = []\n\n # Start timing\n t_start = time.time()\n t = time.time()\n\n # Set the learning rate\n a = 0.1\n\n # Turn on the Keithley output at zero volts and measure for 4s in the dark\n keithley2400.write(':SOUR:VOLT ' + '0')\n keithley2400.write('OUTP ON')\n while t - t_start < 3:\n times.append(t - t_start)\n data = keithley2400.query(':MEAS:CURR?') # Measure the current\n data = data.split(',')\n data = [float(item) for item in data]\n current_density = data[1] * 1000 / area\n power = data[0] * data[1]\n current = data[1]\n voltage = data[0]\n efficiency = np.absolute(power * 1000 * 100 / (100 * suns * area))\n current_densities.append(current_density)\n powers.append(power)\n currents.append(current)\n voltages.append(voltage)\n efficiencies.append(efficiency)\n t = time.time()\n\n # Open the shutter of the solar simulator and take a few measurements\n # around the seed voltage to initialise the tracking algorithm.\n keithley2400.write(':SOUR2:TTL 0') # open the shutter\n initial_V = initial_V - 0.02\n for i in range(2):\n times.append(t - t_start)\n keithley2400.write(':SOUR:VOLT ' + str(initial_V))\n data = keithley2400.query(':MEAS:CURR?') # Measure the current\n data = data.split(',')\n data = [float(item) for item in data]\n current_density = data[1] * 1000 / area\n power = data[0] * data[1]\n current = data[1]\n voltage = data[0]\n efficiency = np.absolute(power * 1000 * 100 / (100 * suns * area))\n current_densities.append(current_density)\n powers.append(power)\n currents.append(current)\n voltages.append(voltage)\n efficiencies.append(efficiency)\n initial_V += 0.02\n t = time.time()\n\n # Start tracking the maximum point using method of steepest descent\n i = len(voltages) - 1\n while t - t_start < t_track + 3:\n if voltages[i] != voltages[i - 1]:\n dP_dV = (powers[i] - powers[i - 1]) / (\n voltages[i] - voltages[i - 1])\n else:\n dP_dV = np.sign((1 - (-1)) * np.random.random_sample() + (\n -1)) * 0.002\n initial_V = voltages[i] - a * dP_dV\n times.append(t - t_start)\n keithley2400.write(':SOUR:VOLT ' + str(initial_V))\n data = keithley2400.query(':MEAS:CURR?') # Measure the current\n data = data.split(',')\n data = [float(item) for item in data]\n current_density = data[1] * 1000 / area\n power = data[0] * data[1]\n current = data[1]\n voltage = data[0]\n efficiency = np.absolute(power * 1000 * 100 / (100 * suns * area))\n current_densities.append(current_density)\n powers.append(power)\n currents.append(current)\n voltages.append(voltage)\n efficiencies.append(efficiency)\n t = time.time()\n i += 1\n\n return times, voltages, currents, current_densities, powers, efficiencies", "def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')", "def get_solar_generator_power(self):\n return self._get_content_of_own_consumption()[5]", "def calc_jump_power(block_count, total_mass):\n ideal = math.ceil(total_mass * 0.5)\n a = 50.0 - 100.0 * block_count * total_mass\n return (-0.24 * total_mass)*a*a + 4600.0*a + 230000.0 + 1200.0 * ideal", "def calc_acceleration_from_power(self, velocity, power):\n # TODO: make formular readable - 
comment!\n acceleration = (power / (velocity * self.specs['mass']) * 1000 - (\n self.specs['cW'] * self.specs['frontal_area'] * RHO * velocity**2 /\n (2 * self.specs['mass']) + FR * G))\n return acceleration", "def accelerate(speed=0.0, coeff=5, factor=10):\n if speed < 10.0:\n divisor = 10.0\n else:\n divisor = speed\n newSpeed = speed + factor * coeff / divisor\n if newSpeed < 0.0:\n newSpeed = 0.0\n return newSpeed", "def bass_power(self, filtered=True):\n return self.get_freq_power(1, filtered)", "def get_power(self):\r\n return self._power", "def measure_power(self, wavelength):\n self.wavelength = wavelength\n return self.power", "def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))", "def get_power(self):\n return self.power_total" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the EGTS-only performance based upon the available APU power. It also plots this performance.
def EGTS_only_perf(GR):
    #Power available
    P_APU = 62   # [kW] Available apu power
    P_sen = 0    # [kW]
    P_comp = 0   # [kW]
    P_av_e = (P_APU-P_sen-P_comp)*1000/2  # [W] APU power available per engine

    # Efficiencies powertrain
    n_circuit = 0.97
    n_gear = 0.9875   # Gear efficiency (torque loss -> power loss)
    amount_gears = 2
    n_emotor = 0.95   # Electricmotor efficiency (electrical loss - power loss)

    # Airplane characteristics
    w_rad_air = 1.27/2    # [m] wheel radius aircraft MLG wheels
    m_plane = 97400       # [kg] MRW
    weight_ratio = 0.952  # [-] Landing gear weight distribution ratio
    Roll_fric = 0.02      # [-] Rolling friction coefficient of airplane wheels

    # Engine output torque for available power at different RPM calculation
    P_av_e_out = n_circuit*n_emotor*P_av_e  # [W] engine output power
    T_egts_w_em = np.array([500])           # [Nm] engine output torque

    v_slow = np.arange(0, 8.1, 0.1)  # [kts] Velocity range
    v_slow = v_slow*0.514444         # to m/s
    w_slow = v_slow/w_rad_air        # [rad/s] corresponding rotational speed wheels
    w_slow_eng = w_slow*GR           # [rad/s] corresponding rotational speed engine
    for i in range(1, len(w_slow_eng)):
        # Enough power hence full torque
        if P_av_e_out/w_slow_eng[i] > 500:
            T_egts_w_em = np.append(T_egts_w_em, [500])
        # in sufficient power hence less torque
        elif P_av_e_out/w_slow_eng[i] < 500 and P_av_e_out/w_slow_eng[i] > 0:
            T_egts_w_em = np.append(T_egts_w_em, [P_av_e_out/w_slow_eng[i]])
        # not enough power
        else:
            T_egts_w_em = np.add(T_egts_w_em, [0])

    # Torque en power @ wheels = engine * gear efficiency
    T_egts_w_r = n_gear**amount_gears*GR*T_egts_w_em  # [W] wheel power
    F_egts_w = T_egts_w_r/w_rad_air                   # [Nm] engine output torque

    # Resultant acceleration calculation
    # Determining friction for resultant acceleration calculation
    N_mlg = m_plane*weight_ratio*9.81      # [N] Total normal force on the MLG
    N_mlg_w = N_mlg/4                      # [N] Normal force per MLG wheel
    N_nlg = m_plane*(1-weight_ratio)*9.81  # [N] Total normal force of car
    F_fric = Roll_fric*N_mlg + Roll_fric*N_nlg  # [N] Total force req to move plane at acceleration

    # Resultant force
    F_acc = 2*F_egts_w-F_fric  # [N]

    # Resultant acceleration
    a_acc_slow = F_acc/m_plane  # [m/s2]
    # Cut-off insignificant accelerations
    v_slow = v_slow[np.where(a_acc_slow >= 0.005)]
    a_acc_slow = a_acc_slow[np.where(a_acc_slow >= 0.005)]

    # Determine time intervals for velocity intervals w corresponding acceleration profile
    time = np.array([0])
    for i in range(1, len(v_slow)):
        time = np.append(time, [v_slow[i]/a_acc_slow[i]])

    # Plot
#    gs = gridspec.GridSpec(2, 2)  # Define figure layout
#    fig = plt.figure("EGTS Only Performance")
#    fig.suptitle(" EGTS Only Performance \n Pushback")
#
#    # Pushback velocity
#    ax1 = fig.add_subplot(gs[0, 0])
#    ax1.set_title("Velocity")
#    ax1.set_xlabel("Time [s]")
#    ax1.set_ylabel("Velocity [m/s]")
#    ax1.plot(time[0:31], v_slow[0:31], color='g')
#    ax1.set_yticks([0, 0.5, 1, 1.5])
#    ax = ax1.twinx()
#    ax.plot(time[0:31], v_slow[0:31], color='g')
#    ax.set_ylabel("Velocity [kts]")
#    ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144]))
#    ax.set_yticklabels(['0', '1', '2', '3'])
#    # Pushback Acceleration graphs
#    ax2 = fig.add_subplot(gs[0, 1])
#    ax2.set_title("Acceleration")
#    ax2.set_xlabel("Time [s]")
#    ax2.set_ylabel("Acceleration [$m/s^2$]")
#    ax2.set_ylim(0, max(a_acc_slow)+0.2)
#    ax2.plot(time[0:31], a_acc_slow[0:31], color='r')
#
#    # Slow taxi title
#    ax0 = fig.add_subplot(gs[1, :])
#    ax0.axis('off')
#    ax0.set_title("Slow Taxi", pad=20)
#    # Slow taxi
#    ax3 = fig.add_subplot(gs[1, 0])
#    ax3.set_title("Velocity")
#    ax3.set_xlabel("Time [s]")
#    ax3.set_ylabel("Velocity [m/s]")
#    ax3.plot(time, v_slow, color='g')
#    ax3.plot(time, [2.88 for i in time], color='gray', linestyle='--')
#    ax3.set_yticks([0, 0.5, 1, 1.5, 2, 2.5, 3])
#    ax = ax3.twinx()
#    ax.set_ylabel("Velocity [kts]")
#    ax.set_yticks(np.array([0, 0.5144, 2*0.5144, 3*0.5144, 4*0.5144, 5*0.5144, 6*0.5144]))
#    ax.set_yticklabels(['0', '1', '2', '3', '4', '5', '6'])
#    # Pushback Acceleration graphs
#    ax4 = fig.add_subplot(gs[1, 1])
#    ax4.set_title("Acceleration")
#    ax4.set_xlabel("Time [s]")
#    ax4.set_ylabel("Acceleration [$m/s^2$]")
#    ax4.set_ylim(0, max(a_acc_slow)+0.2)
#    ax4.plot(time, a_acc_slow, color='r')

    # Plot & Save
#    fig.tight_layout()
#    fig.subplots_adjust(top=0.88)
#    fig.savefig('EGTS_Only_Perf', bbox_inches='tight')
    #plt.show()
    return a_acc_slow, F_acc, v_slow, time
[ "def elia_activatedreserves_graphs(dfpricevol, price_vol, fromdate, todate, producttypes):\r\n \r\n if (producttypes == \"all\"):\r\n producttypes = [\"Bids+\", \"Bids-\", \"ICH\", \"IGCC-\", \"IGCC+\", \"R2+\", \"R2-\",\"R3 flex\",\"R3 std\", \"R3+\",\"R3-\",\"SR\",\"NRV\"]\r\n \r\n volumeslist = [s + \" in MW\" for s in producttypes]\r\n pricelist = [s + \" in euro/MWh\" for s in producttypes]\r\n dfmask=dfpricevol.loc[(dfpricevol.index >= fromdate) & (dfpricevol.index <= todate)]\r\n dfmask=dfmask.replace(0, np.nan)\r\n \r\n if (price_vol == \"both\"):\r\n fig, (ax,ax1) = plt.subplots(2,1,sharex=True)\r\n dfmask[pricelist].plot(ax=ax, grid=True,style=\".-\")\r\n dfmask[volumeslist].plot(ax=ax1,grid=True, legend = True,style=\".-\")\r\n ax1.set_xlabel(\"delivery period\")\r\n ax.set_ylabel(\"activated average energy price in euro/MWh\")\r\n ax1.set_ylabel(\"activated volumes in MW\")\r\n \r\n if (price_vol == \"volume\"):\r\n fig, ax1 = plt.subplots(1,1)\r\n dfmask[volumeslist].plot(ax=ax1,grid=True, legend = True,style=\".-\")\r\n ax1.set_xlabel(\"delivery period\")\r\n ax1.set_ylabel(\"activated volumes in MW\")\r\n \r\n if (price_vol == \"price\"):\r\n fig, ax = plt.subplots(1,1)\r\n dfmask[pricelist].plot(ax=ax, grid=True,style=\".-\")\r\n ax.set_xlabel(\"delivery period\")\r\n ax.set_ylabel(\"activated average energy price in euro/MWh\")", "def get_performance(self, testfreqtx=434e6, samplingrate=2.4e6):\n print('Performance test started!')\n freqtx = [testfreqtx]\n self.set_sdr_centerfreq(np.mean(freqtx))\n self.set_sdr_samplingrate(samplingrate)\n measurements = 100\n SIZE = [4, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256]\n VAR = []\n MEAN = []\n UPDATE = []\n total_time = 0\n for i in SIZE:\n cnt = 0\n powerstack = []\n timestack = []\n elapsed_time = 0\n while cnt <= measurements:\n cnt += 1\n start_calctime = t.time()\n # use matplotlib to estimate the PSD and save the max power\n self.set_samplesize(i)\n freqmax, pxx_max = self.get_rss_peaks_at_freqtx(freqtx)\n powerstack.append(pxx_max)\n t.sleep(0.005)\n calctime = t.time() - start_calctime\n timestack.append(calctime)\n elapsed_time = elapsed_time + calctime\n calctime = np.mean(timestack)\n VAR.append(np.var(powerstack))\n MEAN.append(np.mean(powerstack))\n UPDATE.append(calctime)\n total_time += elapsed_time\n print (str(measurements) + ' measurements for batch-size ' + str(self.set_samplesize()) +\n ' * 1024 finished after ' + str(elapsed_time) + 's. 
=> ' + str(measurements/elapsed_time) + 'Hz')\n print('')\n print ('Finished.')\n print ('Total time [sec]: ')\n print (total_time)\n plt.figure()\n plt.grid()\n plt.plot(SIZE, VAR, 'ro')\n plt.xlabel('Sample Size (*1024)')\n plt.ylabel('Variance (dB)')\n plt.figure()\n plt.grid()\n plt.errorbar(SIZE, MEAN, yerr=VAR,\n fmt='o', ecolor='g')\n plt.plot(SIZE, MEAN, 'x')\n plt.xlabel('Sample Size (*1024)')\n plt.ylabel('Mean Value (dB)')\n plt.figure()\n plt.grid()\n plt.plot(SIZE, UPDATE, 'g^')\n plt.xlabel('Sample Size (*1024)')\n plt.ylabel('Update rate (sec)')\n plt.show()\n return SIZE, VAR, MEAN, UPDATE", "def plot_particle_energy_gain():\n # beta_e = 0.005\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta00025-guide0-200-100-nppc200')\n tenergy1 = pic_info.tenergy\n kene_e1 = pic_info.kene_e\n kene_i1 = pic_info.kene_i\n\n # beta_e = 0.02\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta001-guide0-200-100-nppc400')\n tenergy2 = pic_info.tenergy\n kene_e2 = pic_info.kene_e\n kene_i2 = pic_info.kene_i\n\n # beta_e = 0.06\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta003-guide0-200-100-nppc200')\n tenergy3 = pic_info.tenergy\n kene_e3 = pic_info.kene_e\n kene_i3 = pic_info.kene_i\n\n # beta_e = 0.2\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta01-guide0-200-100-nppc200')\n tenergy4 = pic_info.tenergy\n kene_e4 = pic_info.kene_e\n kene_i4 = pic_info.kene_i\n\n # Estimate the energy gain for beta_e = 0.0072 using beta_e = 0.005\n kene_e12 = kene_e1[0] + (kene_e1 - kene_e1[0]) * 0.005 / 0.0072\n kene_i12 = kene_i1[0] + (kene_i1 - kene_i1[0]) * 0.005 / 0.0072\n\n print('The ratio of electron energy gain to its initial energy: ')\n print(' beta_e = 0.0072, 0.02, 0.06, 0.2: %f %f %f %f',\n ((kene_e12[-1]-kene_e12[0])/kene_e12[0],\n (kene_e2[-1]-kene_e2[0])/kene_e2[0],\n (kene_e3[-1]-kene_e3[0])/kene_e3[0],\n (kene_e4[-1]-kene_e4[0])/kene_e4[0]))\n # Electrons\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_e12 - kene_e12[0]) / kene_e12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_e2 - kene_e2[0]) / kene_e2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_e3 - kene_e3[0]) / kene_e3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_e4 - kene_e4[0]) / kene_e4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n #ax.set_ylim([0, 1.05])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_e/K_e(0)$', fontdict=font, fontsize=20)\n plt.tick_params(labelsize=16)\n\n ax.text(\n 680, 8.8, r'$\\beta_e=0.007$', color='blue', rotation=5, fontsize=16)\n ax.text(680, 5, r'$\\beta_e=0.02$', color='red', rotation=4, fontsize=16)\n ax.text(\n 680, 2.1, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(\n 680, -1.5, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n # Ions\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_i12 - kene_i12[0]) / kene_i12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_i2 - kene_i2[0]) / kene_i2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_i3 - kene_i3[0]) / kene_i3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_i4 - kene_i4[0]) / kene_i4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n ax.set_ylim([-5, 30])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_i/K_i(0)$', 
fontdict=font, fontsize=20)\n plt.tick_params(labelsize=16)\n\n ax.text(680, 22, r'$\\beta_e=0.007$', color='blue', rotation=0, fontsize=16)\n ax.text(680, 9, r'$\\beta_e=0.02$', color='red', rotation=0, fontsize=16)\n ax.text(680, 3, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(680, -4, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n plt.show()", "def _calculate_overall_performance(self):\n return sum(self._episodic_performances) / len(self._episodic_performances)", "def plt_ExhaustSpeeds(results, CT_selection, load_levels_pu, amb_T_RH, pump_ctrl, save_as=None, **kwargs):\r\n def_kwargs = {\r\n 'xlabel': 'Load [%]',\r\n 'ylabel': '[m/s]',\r\n 'title': 'Exhaust Air Speed vs. Load',\r\n 'legend_kw': {'loc': 'lower right', 'title': 'CT size and fan diameter'},\r\n }\r\n kwargs.update({key: val for key, val in def_kwargs.items() if key not in kwargs})\r\n kwargs.update({key: val for key, val in common_def_kwargs.items() if key not in kwargs})\r\n\r\n nCT = CT_selection.shape[0]\r\n CT_color_seq = ('#5499C7', '#52BE80', '#F39C12', '#E74C3C', '#8E44AD', '#839192', '#2E4053')\r\n Tamb, RHamb = amb_T_RH\r\n\r\n # ----------------------------------------------------- PLOT\r\n plt.figure(figsize=kwargs['figsize'])\r\n\r\n for CTidx in range(nCT):\r\n plt.plot(load_levels_pu * 100, results[Tamb, RHamb, pump_ctrl, 'exhaust speed'][:, CTidx].magnitude,\r\n label='{} kW, {} m'.format(CT_selection['Capacity [kW]'].iat[CTidx],\r\n CT_selection['Fan diameter [m]'].iat[CTidx]),\r\n color=CT_color_seq[CTidx], )\r\n\r\n ax = plt.gca()\r\n ax = basic_plot_polishing(ax, **kwargs)\r\n plt.text(0.86, 0.42, 'Ambient Conditions', fontdict={'fontweight': 0}, horizontalalignment='center',\r\n transform=ax.transAxes)\r\n plt.text(0.86, 0.37, '{}°C, {} RH'.format(Tamb, RHamb), horizontalalignment='center', transform=ax.transAxes)\r\n\r\n if save_as:\r\n plt.savefig(path.join(PathPlots, save_as), dpi=kwargs.get('dpi'))\r\n\r\n plt.show()\r\n return", "def analyzeBeam():\n\n profileEPI, normEPI, folder = beamProfile('Select EPI profiles')\n profileTIRF, normTIRF, folder = beamProfile('Select TIRF profiles', folder)\n TIRFactor = normTIRF / normEPI\n\n # Measurements in EPI\n EPIFrameFactor = frame(profileEPI).mean() / profileEPI.mean()\n EPIstd = 100 * frame(profileEPI).std() / frame(profileEPI).mean()\n EPIarea = profileTIRF.mask.size - profileEPI.mask.sum()\n\n # Measurements in TIRF\n TIRFrameFactor = frame(profileTIRF).mean() / profileTIRF.mean()\n TIRstd = 100 * frame(profileTIRF).std() / frame(profileTIRF).mean()\n TIRarea = profileTIRF.mask.size - profileTIRF.mask.sum()\n\n # Profile images saving\n im = Image.fromarray(profileEPI)\n im.save(os.path.join(folder, 'profileEPI.tiff'))\n im = Image.fromarray(profileTIRF)\n im.save(os.path.join(folder, 'profileTIRF.tiff'))\n\n # EPI profile\n plt.subplot(2, 2, 1)\n plt.imshow(profileEPI, interpolation='None', cmap=cm.cubehelix)\n plt.title('EPI profile')\n plt.colorbar()\n plt.text(800, 100,\n 'EPI frame factor={}'.format(np.round(EPIFrameFactor, 2)))\n plt.text(800, 150,\n 'EPI % standard dev={}'.format(np.round(EPIstd, 2)))\n plt.text(800, 200, 'EPI mask area={}'.format(EPIarea) + ' px^2')\n\n # TIRF profile\n plt.subplot(2, 2, 3)\n plt.imshow(profileTIRF, interpolation='None', cmap=cm.cubehelix)\n plt.title('TIRF profile')\n plt.colorbar()\n plt.text(800, 100,\n 'TIRF frame factor={}'.format(np.round(TIRFrameFactor, 2)))\n plt.text(800, 150,\n 'TIRF % standard dev={}'.format(np.round(TIRstd, 2)))\n plt.text(800, 200, 'TIRF 
mask area={}'.format(TIRarea) + ' px^2')\n plt.text(800, 300,\n 'TIRF intensity factor={}'.format(np.round(TIRFactor, 2)))\n\n plt.show()", "def elia_cap_graphs(dfcap, price_vol, aggregated, fromdate, todate, reserve_type, reserve_type2=\"\", reserve_type3=\"\"):\r\n \r\n dfmask=dfcap.loc[(dfcap[\"reserve type\"] == reserve_type) | (dfcap[\"reserve type\"] == reserve_type2) | (dfcap[\"reserve type\"] == reserve_type3)]\r\n dfmask=dfmask.loc[(dfmask.index >= fromdate) & (dfmask.index <= todate)]\r\n dfmask=dfmask.replace(0, np.nan)\r\n \r\n if(aggregated == False):\r\n if ((price_vol == \"both\") & (reserve_type != \"R1\")):\r\n fig, (ax,ax1) = plt.subplots(2,1,sharex=True)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\",\"duration\"])[\"total contracted volume in MW\"].plot(figsize=(16,6),ax=ax,kind=\"line\", style='.-',grid=True)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\",\"duration\"])[\"average price in euro/MW/h\"].plot(figsize=(16,6),legend=True, ax=ax1,kind=\"line\", style='.-',grid=True)\r\n ax1.set_xlabel(\"start of delivery period\")\r\n ax1.set_ylabel(\"average price in euro/MW/h\")\r\n ax.set_ylabel(\"contracted volumes in MW\")\r\n fig.suptitle(\"capacity average prices and contracted volumes of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n\r\n if ((price_vol == \"both\") & (reserve_type == \"R1\")):\r\n fig, (ax,ax1) = plt.subplots(2,1,sharex=True)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\"])[\"total contracted volume in MW\"].plot(figsize=(16,6),ax=ax,kind=\"line\", style='.-',grid=True)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\"])[\"average price in euro/MW/h\"].plot(figsize=(16,6),legend=True, ax=ax1,kind=\"line\", style='.-',grid=True)\r\n ax1.set_xlabel(\"start of delivery period\")\r\n ax1.set_ylabel(\"average price in euro/MW/h\")\r\n ax.set_ylabel(\"contracted volumes in MW\")\r\n fig.suptitle(\"capacity average prices and contracted volumes of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n\r\n if price_vol == \"price\":\r\n fig, ax1 = plt.subplots(1,1)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\",\"duration\"])[\"average price in euro/MW/h\"].plot(figsize=(16,6),legend=True, ax=ax1,kind=\"line\", style='.-',grid=True)\r\n ax1.set_xlabel(\"start of delivery period\")\r\n ax1.set_ylabel(\"average price in euro/MW/h\")\r\n fig.suptitle(\"capacity average prices of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n\r\n if price_vol == \"volume\": \r\n fig, ax = plt.subplots(1,1)\r\n dfmask.groupby([\"reserve type\",\"service type\",\"country\",\"duration\"])[\"total contracted volume in MW\"].plot(figsize=(16,6),ax=ax,kind=\"line\", style='.-',grid=True,legend=True)\r\n ax.set_xlabel(\"start of delivery period\")\r\n ax.set_ylabel(\"contracted volumes in MW\")\r\n fig.suptitle(\"contracted volumes of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n \r\n if(aggregated == True):\r\n if price_vol == \"both\":\r\n dfvol=dfmask.groupby([\"reserve type\",\"duration\",\"delivery period\"])[\"total contracted volume in MW\"].sum()\r\n dfvol=dfvol.to_frame()\r\n dfvol=dfvol.reset_index()\r\n dfvol=dfvol.set_index([\"delivery period\"])\r\n dfvol=dfvol.sort_index(ascending=False);\r\n dfprice=dfmask.groupby([\"reserve type\",\"duration\",\"delivery period\"])[\"average price in euro/MW/h\"].mean()\r\n 
dfprice=dfprice.to_frame()\r\n dfprice=dfprice.reset_index()\r\n dfprice=dfprice.set_index([\"delivery period\"])\r\n dfprice=dfprice.sort_index(ascending=False);\r\n fig, (ax,ax1) = plt.subplots(2,1,sharex=True)\r\n dfvol.groupby([\"reserve type\",\"duration\"])[\"total contracted volume in MW\"].plot(figsize=(16,6),ax=ax,kind=\"line\", style='.-',grid=True)\r\n dfprice.groupby([\"reserve type\",\"duration\"])[\"average price in euro/MW/h\"].plot(figsize=(16,6),legend=True, ax=ax1,kind=\"line\", style='.-',grid=True)\r\n ax1.set_xlabel(\"start of delivery period\")\r\n ax1.set_ylabel(\"average price in euro/MW/h\")\r\n ax.set_ylabel(\"contracted volumes in MW\")\r\n fig.suptitle(\"capacity average prices and contracted volumes, aggregated of all service types of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n \r\n if price_vol == \"price\":\r\n dfprice=dfmask.groupby([\"reserve type\",\"duration\",\"delivery period\"])[\"average price in euro/MW/h\"].mean()\r\n dfprice=dfprice.to_frame()\r\n dfprice=dfprice.reset_index()\r\n dfprice=dfprice.set_index([\"delivery period\"])\r\n dfprice=dfprice.sort_index(ascending=False);\r\n fig, ax1 = plt.subplots(1,1)\r\n dfprice.groupby([\"reserve type\",\"duration\"])[\"average price in euro/MW/h\"].plot(figsize=(16,6),legend=True, ax=ax1,kind=\"line\", style='.-',grid=True)\r\n ax1.set_xlabel(\"start of delivery period\")\r\n ax1.set_ylabel(\"average price in euro/MW/h\")\r\n fig.suptitle(\"capacity average prices, aggregated of all service types of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)\r\n \r\n if price_vol == \"volume\":\r\n dfvol=dfmask.groupby([\"reserve type\",\"duration\",\"delivery period\"])[\"total contracted volume in MW\"].sum()\r\n dfvol=dfvol.to_frame()\r\n dfvol=dfvol.reset_index()\r\n dfvol=dfvol.set_index([\"delivery period\"])\r\n dfvol=dfvol.sort_index(ascending=False);\r\n fig, ax= plt.subplots(1,1)\r\n dfvol.groupby([\"reserve type\",\"duration\"])[\"total contracted volume in MW\"].plot(figsize=(16,6),ax=ax,kind=\"line\", style='.-',grid=True,legend=True)\r\n ax.set_xlabel(\"start of delivery period\")\r\n ax.set_ylabel(\"contracted volumes in MW\")\r\n fig.suptitle(\"contracted volumes, aggregated of all service types of reserve type(s) \" + reserve_type + \" \" + reserve_type2 + \" \" + reserve_type3)", "def capacity_vs_maxprod(year = 2019,redo_stats = False,show_title = True,eps_fig = False):\n # year = 2019\n # redo_stats = False\n\n if redo_stats:\n stats = get_entsoe_production_stats(startyear=year,endyear=year,areas=all_areas,limit=50)\n else:\n stats = pd.read_excel(Path(data_path)/f'gen_stats.xlsx',index_col=0,header=[0,1])\n cap = get_entsoe_capacity(areas=all_areas,year=year)\n\n #%%\n large_areas = ['GB','PL','DE','NL']\n # show_title = True\n # eps_fig = False\n fig_path = Path(data_path) / 'Figures'\n fig_path.mkdir(exist_ok=True,parents=True)\n \"\"\"\n Compare ENTSO-E capacity values with maximum production stats\n \n Print latex tables and figures with capacity and generator info\n \"\"\"\n fig_size = (16/cm_per_inch,8/cm_per_inch)\n areas = all_areas\n # summarize thermal production\n thermal_data = pd.DataFrame(index=areas,columns=['pmax','capacity','diff'])\n for area in areas:\n thermal_data.at[area,'capacity'] = cap.at[area,'Thermal']\n thermal_data.at[area,'pmax'] = stats.at['max',(area,'Thermal')]\n thermal_data.at[area,'diff'] = thermal_data.at[area,'capacity'] - thermal_data.at[area,'pmax']\n thermal_data = 
thermal_data.fillna(0)\n\n # summarize hydro production\n hydro_data = pd.DataFrame(index=areas,columns=['pmax','capacity','diff'])\n for area in thermal_data.index:\n hydro_data.at[area,'capacity'] = cap.at[area,'Hydro']\n hydro_data.at[area,'pmax'] = stats.at['max',(area,'Hydro')]\n hydro_data.loc[area,'diff'] = hydro_data.at[area,'capacity'] - hydro_data.at[area,'pmax']\n hydro_data = hydro_data.fillna(0)\n\n f = plt.figure()\n ax = f.add_subplot(1,1,1)\n areas1 = [a for a in areas if a not in large_areas]\n areas2 = [a for a in areas if a in large_areas]\n\n for i,plot_areas in enumerate([areas1,areas2]):\n ax.cla()\n thermal_data.loc[plot_areas,['pmax','capacity']].plot.bar(ax=ax)\n plt.grid()\n if show_title:\n plt.title('Thermal capacity')\n plt.ylabel('MW')\n plt.gcf().set_size_inches(fig_size)\n plt.tight_layout()\n plt.savefig(fig_path/f'thermal_capacity_{i}.png')\n if eps_fig:\n plt.savefig(fig_path/f'thermal_capacity_{i}.eps')\n\n\n ax.cla()\n hydro_data.loc[plot_areas,['pmax','capacity']].plot.bar(ax=ax)\n plt.grid()\n if show_title:\n plt.title('Hydro capacity')\n plt.ylabel('MW')\n plt.gcf().set_size_inches(fig_size)\n plt.tight_layout()\n plt.savefig(fig_path/f'hydro_capacity_{i}.png')\n if eps_fig:\n plt.savefig(fig_path/f'hydro_capacity_{i}.eps')", "def metric(self):\n topology = self.topology\n\n #metrics\n MP0 = np.ones (topology.P0)\n MP1 = np.zeros(topology.P1)\n MP2 = np.zeros(topology.P2)\n MD0 = np.ones (topology.D0)\n MD1 = np.zeros(topology.D1)\n MD2 = np.zeros(topology.D2)\n\n #precomputations\n EVP = util.gather(topology.EVi, self.primal)\n FEVP = util.gather(topology.FEi, EVP) #[faces, e3, v2, c3]\n FEM = util.normalize(FEVP.sum(axis=2))\n FEV = util.gather(topology.FEi, topology.EVi)\n\n #calculate areas; devectorization over e makes things a little more elegant, by avoiding superfluous stacking\n for e in range(3):\n areas = triangle_area_from_corners(FEVP[:,e,0,:], FEVP[:,e,1,:], self.dual)\n MP2 += areas #add contribution to primal face\n util.scatter( #add contributions divided over left and right dual face\n FEV[:,e,:], #get both verts of each edge\n np.repeat(areas/2, 2), #half of domain area for both verts\n MD2)\n\n #calc edge lengths\n MP1 += edge_length(EVP[:,0,:], EVP[:,1,:])\n for e in range(3):\n util.scatter(\n topology.FEi[:,e],\n edge_length(FEM[:,e,:], self.dual),\n MD1)\n\n #hodge operators\n self.D2P0 = MD2 / MP0\n self.P0D2 = MP0 / MD2\n\n self.D1P1 = MD1 / MP1\n self.P1D1 = MP1 / MD1\n\n self.D0P2 = MD0 / MP2\n self.P2D0 = MP2 / MD0", "def plt_AmbientAirPerformance_airflow(results, Tin, RH_values, pu_load, pump_ctrl, plot_setpoint=True,\r\n save_as=None, **kwargs):\r\n def_kwargs = {\r\n 'title': 'Air Mass Flow at {} Load'.format({1: 'Full', 0: 'No'}.get(pu_load, '{:0.1f}%'.format(pu_load * 100))),\r\n 'ylabel': '[kg/s]',\r\n 'xlabel': 'Temp (dry bulb) [°C]',\r\n 'setpoint_line': {'ls': '--', 'lw': 1, 'color': 'k'},\r\n }\r\n kwargs.update({key: val for key, val in def_kwargs.items() if key not in kwargs})\r\n kwargs.update({key: val for key, val in common_def_kwargs.items() if key not in kwargs})\r\n\r\n RH_color_seq = ('#2E86C1', '#16A085', '#D35400')\r\n\r\n # ----------------------------------------------------- PLOT\r\n plt.figure(figsize=kwargs['figsize'])\r\n\r\n for idx, RH in enumerate(RH_values):\r\n plt.plot(Tin, results[pump_ctrl, RH, 'air flow'].magnitude,\r\n label='{:0.2f} RH'.format(RH), color=RH_color_seq[idx])\r\n\r\n ax = plt.gca()\r\n ax = basic_plot_polishing(ax, **kwargs)\r\n\r\n if plot_setpoint:\r\n setpoint = 
kwargs['airflow_sp']\r\n ax.axhline(setpoint, **kwargs['setpoint_line'])\r\n\r\n # Text label\r\n y_lb, y_ub = ax.get_ylim()\r\n text_y = setpoint + 0.03 * (y_ub - y_lb)\r\n if text_y > y_ub * 0.95: text_y = setpoint - 0.03 * (y_ub - y_lb)\r\n\r\n plt.text(Tin.min(), text_y, 'nominal')\r\n\r\n if save_as:\r\n plt.savefig(path.join(PathPlots, save_as), dpi=kwargs.get('dpi'))\r\n\r\n plt.show()\r\n return", "def plot_ecc_vector_func_time(time, aei_functime):\n\n avg_ecc = []\n eccentricities = [x.e for x in aei_functime]\n for i in range(len(eccentricities)):\n total = np.sum(eccentricities[i])\n avg_ecc.append(total/float(len(eccentricities[i])) )\n\n\n ecc_vec = []\n\n for i in range(len(aei_functime)):\n sum_massweighted_eccvector_x = 0.\n sum_massweighted_eccvector_y = 0.\n sum_mass = 0.\n for j in range(len(aei_functime[i].mass)):\n sum_mass += aei_functime[i].mass[j] \n sum_massweighted_eccvector_x += aei_functime[i].mass[j] * aei_functime[i].e[j] * np.cos(np.deg2rad(aei_functime[i].pomega[j]))\n sum_massweighted_eccvector_y += aei_functime[i].mass[j] * aei_functime[i].e[j] * np.sin(np.deg2rad(aei_functime[i].pomega[j]))\n\n vector_magnitude = np.sqrt(sum_massweighted_eccvector_x * sum_massweighted_eccvector_x + \n sum_massweighted_eccvector_y * sum_massweighted_eccvector_y)\n ecc_vec.append(vector_magnitude/sum_mass)\n \n fig = pp.figure()\n ax = fig.add_subplot(111)\n ax2 = ax.twinx()\n\n line1 = ax.plot(time, ecc_vec, label = \"Mag ecc vec\")\n line2 = ax2.plot(time, avg_ecc, lw=.35, label = \"avg ecc\", color='red')\n ax.set_xlabel(\"Time (years)\")\n ax.set_ylabel(\"Ecc vector magnitude\")\n ax.set_xscale('log')\n\n ax2.set_ylim((0,2.2*np.max(avg_ecc)))\n ax2.set_ylabel(\"Average eccentricity\")\n ax2.set_xlim(right=3e5)\n\n lines_for_legend = line1 + line2\n fig.legend(lines_for_legend, [l.get_label() for l in lines_for_legend], loc='upper left')\n\n return fig", "def tradingPerformance(self):", "def visualize(self):\n plt.figure()\n plt.plot(self.stats_time, self.num_cores)\n plt.ylabel('total num cores used')\n savefig('{fp}/cores_used.jpeg'.format(fp=self.final_path))\n plt.close()\n del self.stats_time\n del self.num_cores\n\n print \"25th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 25) / 47657184) * 100\n print \"50th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 50) / 47657184) * 100\n print \"75th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 75) / 47657184) * 100\n print \"90th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 90) / 47657184) * 100\n print \"95th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 95) / 47657184) * 100\n print \"99th Percentile of Delayed VMs: \", float(np.percentile(self.cdf, 99) / 47657184) * 100\n\n print \"Number of Delayed VMs: \", self.cdf[-1]-self.cdf[0]\n print \"Number of max delayed VMs: \", self.values[-1]\n print \"Max Delay Time of a VM: \", self.keys[-1]\n\n plt.figure()\n plt.plot(self.keys, self.cdf)\n plt.ylabel('CDF (in number of vms delayed)')\n plt.xlabel('Delay (in seconds)')\n savefig('{fp}/cdf_vms.png'.format(fp=self.final_path), dpi=1000)\n plt.close()\n\n plt.figure()\n plt.plot(self.keys, (self.cdf / float(self.cdf[-1])) * 100)\n plt.ylabel('CDF (in percentage)')\n plt.xlabel('Delay (in seconds)')\n savefig('{fp}/cdf_vms_percentage.png'.format(fp=self.final_path), dpi=1000)\n plt.close()\n\n # plt.bar not working with large inputs.\n \"\"\"plt.figure()\n plt.bar(keys,values,width=1.0, color='g')\n plt.ylabel('Number of Vms')\n plt.xlabel('Delay in 
seconds')\n savefig('{fp}/histogram_vms.png'.format(fp=self.final_path),dpi=1000)\n plt.close()\"\"\"\n\n #####\n \"\"\"\n plt.figure()\n plt.plot(self.data_obj.stats_time, self.data_obj.amount_ram)\n plt.ylabel('total amount of ram used')\n savefig('{fp}/ram_used.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n \n plt.plot(self.data_obj.creation_stats_time, self.data_obj.cores_creation_lst)\n plt.ylabel('Cores Requested')\n savefig('{fp}/cores_requested.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n plt.plot(self.data_obj.creation_stats_time, self.data_obj.ram_creation_lst)\n plt.ylabel('RAM Requested')\n savefig('{fp}/ram_requested.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n plt.plot(self.data_obj.deletion_stats_time, self.data_obj.cores_deletion_lst)\n plt.ylabel('Cores Deleted')\n savefig('{fp}/cores_deletion.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n plt.plot(self.data_obj.deletion_stats_time, self.data_obj.ram_deletion_lst)\n plt.ylabel('RAM Deleted')\n savefig('{fp}/ram_deletion.jpeg'.format(fp=self.final_path))\n plt.close()\n\n del self.data_obj.creation_stats_time[0]\n del self.data_obj.cores_creation_lst[0]\n del self.data_obj.ram_creation_lst[0]\n plt.figure()\n plt.plot(self.data_obj.creation_stats_time, self.data_obj.cores_creation_lst)\n plt.ylabel('Cores Requested')\n savefig('{fp}/cores_requested_new.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n plt.plot(self.data_obj.creation_stats_time, self.data_obj.ram_creation_lst)\n plt.ylabel('RAM Requested')\n savefig('{fp}/ram_requested_new.jpeg'.format(fp=self.final_path))\n plt.close()\n\n del self.data_obj.deletion_stats_time[-1]\n del self.data_obj.cores_deletion_lst[-1]\n del self.data_obj.ram_deletion_lst[-1]\n plt.figure()\n plt.plot(self.data_obj.deletion_stats_time, self.data_obj.cores_deletion_lst)\n plt.ylabel('Cores Deleted')\n savefig('{fp}/cores_deletion_new.jpeg'.format(fp=self.final_path))\n plt.close()\n plt.figure()\n plt.plot(self.data_obj.deletion_stats_time, self.data_obj.ram_deletion_lst)\n plt.ylabel('RAM Deleted')\n savefig('{fp}/ram_deletion_new.jpeg'.format(fp=self.final_path))\n plt.close()\n \"\"\"\n ######", "def main():\n print(\"CPU temp: \", str(get_cpu_temp()))\n print(\"GPU temp: \", str(get_gpu_temp()))", "def analyze_scaling_single():\n #graph_fname = 'graphs/9797_9797_graph.gpickle'\n #graph_fname = 'graphs/2000_2000.gpickle'\n #graph_fname = 'graphs/500_500.gpickle'\n #graph_fname = 'graphs/mapleb_80_graph.gpickle'\n graph_fname = 'graphs/BronxA_001_binary_graph.gpickle'\n #graph_fname = 'graphs/BronxA_004_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/BronxA_009_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/BronxA_015_a_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/BronxA_030_a_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/BronxB_005_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/BronxB_015_binary_corrected_graph.gpickle'\n #graph_fname = 'graphs/net_final_ds14.dot'\n #graph_fname = 'grid'\n efa = EdgeFlowAnalyzer(graph_fname, random_edge=True)\n \n efa.scaling_single_edge(save=False, plot=True, \n cond_normalize=True)\n \n norm = efa.DeltaF_wt/efa.conds\n norm /= norm[efa.j]\n\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n plot_DeltaF_in_leaf(np.abs(norm), efa.G, fixed_width=True, ax=ax1)\n ax1.set_title('$\\Delta F_e/(K_e \\Delta F_{e_0})$ (hierarchical)')\n\n plot_DeltaF_in_leaf(np.abs(efa.DeltaF_wt), efa.G, \n fixed_width=True, 
ax=ax2)\n ax2.set_title('$\\Delta F_e/\\Delta F_{e_0}$ (hierarchical)')\n\n plot_DeltaF_in_leaf(np.abs(efa.DeltaF_uw), efa.G, \n fixed_width=True, ax=ax3)\n ax3.set_title('$\\Delta F_e/\\Delta F_{e_0}$ (homogeneous)')\n\n plt.show()", "def plot_signal_efficiency(softmaxes, labels, energies, index_dict=None, event=None,\n average_efficiencies=[0.2, 0.5, 0.8], energy_interval=25,\n min_energy=100, max_energy=1000, num_bins=100, show_plot=False,\n save_path=None):\n \n # Assertions to check for valid inputs\n assert softmaxes.any() != None\n assert labels.any() != None\n assert energies.any() != None\n assert index_dict != None\n assert event != None\n assert len(average_efficiencies) >= 3\n assert num_bins >= 100\n \n # Calculate the threshold here according to the desired average efficiencies\n _, _, threshold_0, _, _, tpr_1, threshold_1, _ = plot_ROC_curve_one_vs_one(softmaxes, labels, energies,\n {\"gamma\":0,\"e\":1}, \"gamma\",\n \"e\", 0, 1000, show_plot=False)\n \n threshold_index_dict = {}\n\n for tpr_value in average_efficiencies:\n index_list = []\n for i in range(len(tpr_1)):\n if(math.fabs(tpr_1[i]-tpr_value) < 0.001):\n index_list.append(i)\n index = index_list[math.ceil(len(index_list)/2)]\n threshold_index_dict[tpr_value] = index\n\n thresholds = []\n for key in threshold_index_dict.keys():\n thresholds.append(round(threshold_1[threshold_index_dict[key]], 2))\n \n # Get the energy intervals to plot the signal efficiency against ( replace with max(energies) ) \n energy_lb = [min_energy+(energy_interval*i) for i in range(math.ceil((max_energy-min_energy)/energy_interval))]\n energy_ub = [energy_low+energy_interval for energy_low in energy_lb]\n \n # Local color dict\n local_color_dict = {}\n local_color_dict[thresholds[0]] = \"green\"\n local_color_dict[thresholds[1]] = \"blue\"\n local_color_dict[thresholds[2]] = \"red\"\n \n # Epsilon to ensure the plots are OK for low efficiency thresholds\n epsilon = 0.0001\n \n # Plot the signal efficiency vs energy\n fig = plt.figure(figsize=(32,18), facecolor=\"w\")\n \n for threshold, efficiency in zip(thresholds, average_efficiencies):\n \n # Values to be plotted at the end\n signal_efficiency = []\n energy_values = []\n \n # Value for the previous non-zero events\n prev_non_zero_efficiency = 0.0\n \n # Iterate over the energy intervals computing the efficiency\n for energy_lower, energy_upper in zip(energy_lb, energy_ub):\n values, bins, _ = plot_classifier_response(softmaxes, labels, energies,\n {event:index_dict[event]},\n {event:index_dict[event]},\n energy_lower, energy_upper,\n num_bins=num_bins, show_plot=False)\n \n total_true_events = np.sum(values)\n num_true_events_selected = np.sum(values[bins[:len(bins)-1] > threshold-epsilon])\n\n curr_interval_efficiency = num_true_events_selected/total_true_events if total_true_events > 0 else 0\n\n if(curr_interval_efficiency == 0):\n curr_interval_efficiency = prev_non_zero_efficiency\n else:\n prev_non_zero_efficiency = curr_interval_efficiency\n\n # Add two times once for the lower energy bound and once for the upper energy bound\n signal_efficiency.append(curr_interval_efficiency)\n signal_efficiency.append(curr_interval_efficiency)\n\n # Add the lower and upper energy bounds\n energy_values.append(energy_lower)\n energy_values.append(energy_upper)\n\n label_to_use = r\"Average signal efficiency = {0}, Threshold = {1:0.3f}\".format(efficiency, threshold)\n\n plt.plot(energy_values, signal_efficiency, color=local_color_dict[threshold], linewidth=2.0,\n marker=\".\", markersize=6.0, 
markerfacecolor=local_color_dict[threshold], label=label_to_use)\n\n if(event is not \"e\"):\n title = r\"Signal Efficiency vs Energy for $\\{0}$ events.\".format(event)\n else:\n title = r\"Signal Efficiency vs Energy for ${0}$ events.\".format(event)\n \n plt.title(title, fontsize=20)\n plt.grid(True)\n \n plt.xlim([min_energy, max_energy])\n plt.ylim([0, 1.05])\n plt.tick_params(axis=\"both\", labelsize=20)\n \n plt.xlabel(\"Event Visible Energy (MeV)\", fontsize=20)\n plt.ylabel(\"Signal Efficiency\", fontsize=20)\n \n plt.legend(prop={\"size\":20}, bbox_to_anchor=(1.04,1), loc=\"upper left\")\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the current figure\n plt.close() # Close the opened window", "def plot_energy_evolution(pic_info):\n tenergy = pic_info.tenergy\n ene_electric = pic_info.ene_electric\n ene_magnetic = pic_info.ene_magnetic\n # ene_magnetic = pic_info.ene_bx\n kene_e = pic_info.kene_e\n kene_i = pic_info.kene_i\n ene_bx = pic_info.ene_bx\n ene_by = pic_info.ene_by\n ene_bz = pic_info.ene_bz\n\n enorm = ene_magnetic[0]\n\n fig = plt.figure(figsize=[7, 5])\n xs, ys = 0.13, 0.13\n w1, h1 = 0.8, 0.8\n ax = fig.add_axes([xs, ys, w1, h1])\n ax.set_color_cycle(colors)\n p1, = ax.plot(\n tenergy, ene_magnetic / enorm, linewidth=2, label=r'$\\varepsilon_{b}$')\n p2, = ax.plot(tenergy, kene_i / enorm, linewidth=2, label=r'$K_i$')\n p3, = ax.plot(tenergy, kene_e / enorm, linewidth=2, label=r'$K_e$')\n p4, = ax.plot(\n tenergy,\n 10 * ene_electric / enorm,\n linewidth=2,\n label=r'$10\\varepsilon_{e}$')\n # ax.set_xlim([0, np.max(tenergy)])\n ax.set_xlim([0, np.max(tenergy)])\n emax = max(\n np.max(ene_magnetic / enorm),\n np.max(kene_e / enorm), np.max(kene_i / enorm))\n ax.set_ylim([0, max(emax, 1.05)])\n\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=24)\n ax.set_ylabel(r'Energy/$\\varepsilon_{b0}$', fontdict=font, fontsize=24)\n leg = ax.legend(\n loc=1,\n prop={'size': 20},\n ncol=2,\n shadow=False,\n fancybox=False,\n frameon=False)\n for color, text in zip(colors, leg.get_texts()):\n text.set_color(color)\n\n # ax.text(0.5, 0.8, r'$\\varepsilon_{b}$',\n # color='blue', fontsize=24,\n # bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n # horizontalalignment='center', verticalalignment='center',\n # transform = ax.transAxes)\n # ax.text(0.7, 0.8, r'$\\varepsilon_e$', color='m', fontsize=24,\n # bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n # horizontalalignment='center', verticalalignment='center',\n # transform = ax.transAxes)\n # ax.text(0.5, 0.5, r'$K_e$', color='red', fontsize=24,\n # bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n # horizontalalignment='center', verticalalignment='center',\n # transform = ax.transAxes)\n # ax.text(0.7, 0.5, r'$K_i$', color='green', fontsize=24,\n # bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n # horizontalalignment='center', verticalalignment='center',\n # transform = ax.transAxes)\n\n plt.tick_params(labelsize=20)\n #plt.savefig('pic_ene.eps')\n\n print('The dissipated magnetic energy: %5.3f' %\n (1.0 - ene_magnetic[-1] / enorm))\n print('Energy gain to the initial magnetic energy: %5.3f, %5.3f' % (\n (kene_e[-1] - kene_e[0]) / enorm, (kene_i[-1] - kene_i[0]) / enorm))\n print(\n 'Initial kene_e and kene_i to the initial magnetic energy: %5.3f, %5.3f'\n % (kene_e[0] / enorm, kene_i[0] / enorm))\n print(\n 'Final kene_e and kene_i to the initial 
magnetic energy: %5.3f, %5.3f'\n % (kene_e[-1] / enorm, kene_i[-1] / enorm))\n init_ene = pic_info.ene_electric[0] + pic_info.ene_magnetic[0] + \\\n kene_e[0] + kene_i[0]\n final_ene = pic_info.ene_electric[-1] + pic_info.ene_magnetic[-1] + \\\n kene_e[-1] + kene_i[-1]\n print('Energy conservation: %5.3f' % (final_ene / init_ene))\n plt.show()", "def simulate_dc_low_efficiency(self,efficiency_map,device_realtime_output,kw_power_input_total):\r\n \"\"\"\r\n typical value for dc1In is 127~128\r\n typical value for dc1Out is 112\r\n \"\"\"\r\n #demon_value=1\r\n dc1_demo_option = self.getDemoOption('dc_demo')\r\n #delta_dc1=0\r\n dc1_device_id=6\r\n try:\r\n dc1_efficiency=efficiency_map['data']['dc1']\r\n if dc1_efficiency and dc1_demo_option == 1:\r\n dc1Out=device_realtime_output[dc1_device_id]\r\n dc1In=dc1Out/dc1_efficiency*100\r\n print 'Simulate DC1 efficiency here...'\r\n dc1In_simulate = 250.0\r\n delta_dc1 = dc1In_simulate - dc1In\r\n dc1In = dc1In_simulate\r\n kw_power_input_total = kw_power_input_total + delta_dc1\r\n dc1_efficiency = self.calculatePercentage(dc1Out, dc1In)\r\n else:\r\n pass \r\n \r\n \"\"\"Simulate DC1 efficiency end\"\"\"\r\n efficiency_map['data']['dc1']=dc1_efficiency\r\n except:\r\n pass\r\n return efficiency_map,kw_power_input_total", "def analyticTest():\n\t#Hp = 1 => p = rho * gSun * Hp => mmm / temp = Hp * R / gSun\n\t#pf outside, pi inside\n\tcalcFw = False\n\t#calcFw = True\n\tzz = z*1e6 #m\n\tzi = zz[len(z)-1]\n\tzf = zz[0]\n\tprint(\"zf - zi = %e m\" % (zf - zi))\n\tHp = 1e10\n\t#Hp = 1\n\tfor i, rhof in enumerate([1e-10, 1e-5, 1e-2,1, 1e2,1e3, 1e7, 1e10]):\n\t\tpfE = rhof * gSun * Hp\n\t\tpf = np.log(pfE)\n\t\tfunc = np.zeros(len(z)) - 1 / Hp\n\t\tif calcFw:\n\t\t\tpi = pf + 1/Hp * (zf - zi) \n\t\t\tnumPres = integrateFuncFw(zz, pi, func)\n\t\telse:\n\t\t\tnumPres = integrateFunc(zz, pf, func)\n\t\tnumRho = numPres - np.log(gSun * Hp)\n\t\tanPres = pf + (zf-zz) / Hp\n\t\tprint(\"log pres at top: %e, log pres at the bottom: %e, d ln p/dz = %e\" % (numPres[0], numPres[z.shape[0]-1], (numPres[0] - numPres[z.shape[0]-1]) / (zf - zi))) \n\t\tprint(\"log an pres at top: %e, log an pres at the bottom: %e\" % (anPres[0], anPres[z.shape[0]-1])) \n\t\tanRho = np.log(rhof) + (zf-zz) / Hp\n\t\tf, (ax1, ax2) = plt.subplots(2, sharex=True)\n\t\tax1.set_title('Hp=%e, rhoF = %e kg/m3, pF = %e Pa' % (Hp, rhof, pfE), y=1.08)\n\t\tax1.set_xlabel('z(Mm)')\n\t\tax1.set_ylabel('ln p/p(zi)')\n\t\tax1.plot(z, numPres - numPres[z.shape[0]-1], 'r-', label=\"num\")\n\t\tax1.plot(z, anPres - anPres[z.shape[0]-1] , 'g-', label=\"an\")\n\t\tax1.grid(True)\n\t\tax1.legend()\n\t\tax2.set_ylabel('ln rho/rho(zi)')\n\t\tax2.plot(z, numRho - numRho[z.shape[0]-1], 'r-', label='num')\n\t\tax2.plot(z, anRho - anRho[z.shape[0]-1], 'g-', label=\"an\")\n\t\tax2.legend()\n\t\tax2.grid(True)\n\t\tplt.draw()\t\n\t\t#plt.show()\n\t\tpngfile = \"analytic%s\" % ((\"%e\" % Hp).replace(\"+\",\"\"))\n\t\tif calcFw:\n\t\t\tpngfile+= 'Fw'\t\n\t\tplt.savefig('%s%d.png' % (pngfile, i))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
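The metadata row above maps a triplet objective onto the (query, document, negatives) columns. As a minimal, hypothetical sketch of how one such row could be consumed — the encoder callable, the row dict and the 0.2 margin are placeholders, not part of this dataset — a triplet loss scores the query against its document and against each negative:

import torch
import torch.nn.functional as F

def row_to_triplet_loss(encoder, row, margin=0.2):
    # `encoder` stands in for any text-embedding model that maps a string
    # to a 1-D tensor; `row` mirrors the fields shown in this dump.
    q = encoder(row["query"])                      # anchor
    d = encoder(row["document"])                   # positive
    negs = torch.stack([encoder(n) for n in row["negatives"]])

    pos_sim = F.cosine_similarity(q, d, dim=-1)    # scalar
    neg_sim = F.cosine_similarity(q.unsqueeze(0), negs, dim=-1)  # one score per negative

    # Hinge each (query, document, negative) triplet on the margin, matching
    # the "triplet": [["query", "document", "negatives"]] objective declared above.
    return F.relu(margin - pos_sim + neg_sim).mean()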
View the connections tables as a widget.
def view_election_connections_table(self, request):

    @request.after
    def add_last_modified(response):
        add_last_modified_header(response, self.last_modified)

    return {
        'model': self,
        'layout': ElectionLayout(self, request),
        'connections': get_connection_results_api(self, object_session(self)),
        'type': 'election-table',
        'scope': 'connections'
    }
[ "def viewdatabase(self):\r\n try:\r\n if self.vertical_widget.isHidden():\r\n self.vertical_widget.setVisible(True)\r\n else:\r\n self.vertical_widget.setVisible(False)\r\n except Exception as e:\r\n print(e)", "def show(self):\n for t in self.get_tables_names():\n print t\n for c in self.get_table_column_names(t):\n print \" |_ {0}\".format(c)", "def display(self):\n\n with self.Session.begin() as session:\n inspector = inspect(self.engine)\n schemas = inspector.get_schema_names()\n main = [{table_name: inspector.get_columns(table_name, schema=schema) for table_name in inspector.get_table_names(schema=schema)} for schema in schemas]\n for i in main[0]:\n print(i)\n display(pd.read_sql_table(i, session.bind))\n print(\"\\n\\n\")", "def showTables(self):\n self.tablesList.clear()\n self.dbMan.putNameDatabase(\n self.databasesList.currentItem().text())\n self.tablesList.addItems(self.dbMan.getListNamesTables())", "def view(self, tables):\r\n if not tables:\r\n return View(self, 'No tables to show.')\r\n self.widgets.color.guess_or_remember(('histogram text', tables), ['name'])\r\n self.widgets.text.guess_or_remember(('histogram colors', tables), ['name'])\r\n self.widgets.shift.guess_or_remember(('histogram shift', tables), '0.2')\r\n self.widgets.sort_inside.guess_or_remember(('histogram sort inside', tables), ['similarity'])\r\n self.widgets.sort_outside.guess_or_remember(('histogram sort outside', tables), ['sort'])\r\n self.widgets.trim.guess_or_remember(('histogram trim', tables), ['no'])\r\n self.widgets.trim_thresh.guess_or_remember(('histogram trim thresh', tables), '0')\r\n\r\n sort_inside_options = [('unsort', 'Keep original order'), ('similarity', 'Put similar curves together')]\r\n sort_inside_options += [(x, 'Sort by %s' % x) for x in tables[0].tags.keys()]\r\n \r\n # Create the control panel view. This will enable users to choose the dimensions. \r\n control_panel_view = stack_lines(\r\n self.widgets.dims.view('Dimension', self.widgets.apply, options_from_table(tables[0])),\r\n self.widgets.text.view('Text by', self.widgets.apply, tables[0].tags.keys()),\r\n self.widgets.color.view('Color by', self.widgets.apply, tables[0].tags.keys()),\r\n self.widgets.shift.view('Shift for multiple curves', self.widgets.apply),\r\n self.widgets.sort_inside.view('Curve sorting', self.widgets.apply, \r\n sort_inside_options,\r\n multiple=False),\r\n self.widgets.sort_outside.view('Plot sorting', self.widgets.apply, \r\n [('sort', 'Put plots with many differences first'), ('unsort', 'Keep original order')],\r\n multiple=False),\r\n self.widgets.trim.view('Trim plots', self.widgets.apply,\r\n [('yes', 'Convert values lower than threshold to 0'), ('no', 'Don\\'t trim')], multiple=False),\r\n self.widgets.trim_thresh.view('Trim threshold', self.widgets.apply),\r\n self.widgets.apply.view())\r\n main_views = []\r\n shift = self.widgets.shift.value_as_float()\r\n plots_for_legend = OrderedDict()\r\n colorer = axes.Colorer()\r\n # Check that the user has already chosen dimensions. 
Otherwise, ask him \r\n # to do so.\r\n if self.widgets.dims.values.choices:\r\n timer = MultiTimer(len(self.widgets.dims.values.choices))\r\n for i, dim in enumerate(self.widgets.dims.values.choices):\r\n try:\r\n # Go over every dimension and create the histogram:\r\n # First create a new figure:\r\n fig = self.create_and_adjust_figure(tables)\r\n ax = fig.add_subplot(111)\r\n \r\n # Draw the histogram for every input\r\n plots = []\r\n sorted_tables = tables\r\n sort_method = self.widgets.sort_inside.values.choices[0]\r\n if sort_method == 'unsort':\r\n sorted_tables = tables\r\n elif sort_method == 'similarity':\r\n thresh = None\r\n if self.widgets.trim.get_choices()[0] == 'yes':\r\n thresh = self.widgets.trim_thresh.value_as_float()\r\n # get distances table:\r\n distances = datatable.ks_distances(tables, dim, thresh)\r\n # sort by distance\r\n sorted_tables = greedy_distance_sort(distances, tables)\r\n else:\r\n # we need to sort by tags:\r\n tag_for_sort = self.widgets.sort_inside.values.choices[0]\r\n sorted_tables = sorted(tables, key=lambda table: table.tags[tag_for_sort])\r\n for i, table in enumerate(sorted_tables):\r\n color_tags = self.widgets.color.values.choices\r\n color_key = tuple([table.tags[c] for c in color_tags])\r\n min_x = None\r\n if self.widgets.trim.get_choices()[0] =='yes':\r\n min_x = self.widgets.trim_thresh.value_as_float()\r\n plot = axes.kde1d(ax, table, dim,\r\n color=colorer.get_color(color_key),\r\n min_x=min_x,\r\n shift=shift*i)\r\n plots_for_legend[color_key] = plot\r\n # Add ticks with table names:\r\n if self.widgets.shift.value_as_float() > 0:\r\n ax.set_yticks(np.arange(0, len(tables)*shift, shift))\r\n ax.set_yticklabels([t.get_tags(self.widgets.text.values.choices) for t in sorted_tables], size='xx-small')\r\n # set axes y range:\r\n ax.set_ylim(bottom = -0.1, top=0.8+shift*(len(sorted_tables)-1))\r\n # Make sure we don't create the same widget twice. We create a new widget\r\n # for every dimension asked. 
\r\n widget_key = self._normalize_id(dim)\r\n if not widget_key in self.widgets:\r\n self._add_widget(widget_key, Figure)\r\n figure_widget = self.widgets[widget_key]\r\n \r\n if len(tables) > 1:\r\n from scipy.stats import ks_2samp\r\n ks, p_ks = ks_2samp(tables[0].get_cols(dim)[0], tables[1].get_cols(dim)[0])\r\n ks_view = View(self, 'ks: %.3f, p_ks: %.10f' % (ks, p_ks))\r\n final_view = stack_lines(ks_view, figure_widget.view(fig))\r\n else:\r\n ks, p_ks = 0, 0\r\n final_view = figure_widget.view(fig)\r\n # Add the new widget's view\r\n main_views.append((ks, p_ks, final_view))\r\n except Exception as e:\r\n logging.exception('Exception when drawing histogram')\r\n main_views.append((0, 0, View(self, str(e))))\r\n \r\n timer.complete_task(dim)\r\n \r\n # sort by the ks test:\r\n main_views = sorted(main_views, key=itemgetter(0), reverse=True)\r\n main_views = [v[2] for v in main_views]\r\n \r\n \r\n \r\n \r\n # create legend:\r\n legened_titles = plots_for_legend.keys()\r\n print len(legened_titles)\r\n max_title_len = max([len(str(t)) for t in legened_titles] + [0])\r\n print max_title_len\r\n WIDTH_PER_LETTER = 7\r\n EXTRA_WIDTH = 60\r\n HEIGHT_PER_LINE = 30\r\n EXTRA_HEIGHT = 50\r\n MIN_X = 300\r\n MIN_Y = 100\r\n legend_x = max(MIN_X, EXTRA_WIDTH + WIDTH_PER_LETTER * max_title_len)\r\n legend_y = max(MIN_Y, EXTRA_HEIGHT + HEIGHT_PER_LINE * len(legened_titles))\r\n fig = axes.new_figure(legend_x, legend_y)\r\n ax = fig.add_subplot(111)\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_yaxis().set_visible(False)\r\n ax.legend(plots_for_legend.values(),\r\n plots_for_legend.keys(),\r\n loc='center',\r\n mode='expand',\r\n frameon=False,\r\n prop={'size' : 'xx-small'})\r\n main_views = [self.widgets.legend_figure.view(fig)] + main_views\r\n main_view = view.stack_left(*main_views)\r\n \r\n \r\n else:\r\n main_view = View(None, 'Please select dimensions') \r\n # combine the control panel and the main view togeteher:\r\n return self.widgets.layout.view(main_view, control_panel_view)", "def show_tables(cls):\n for tbl in cls.engine.table_names():\n print(tbl)", "def list_connections(self):\n path = self.build_url(\"/connections\")\n return self.request('get', path)", "def createTableWidget(self):\n self.tableWidget = QtGui.QTableWidget(0, 6)\n self.tableWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n # select one row at a time\n self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n # no editing values\n self.tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\n\n self.tableWidget.setHorizontalHeaderLabels((self.tr(\"ID\"), self.tr(\"Name\"), self.tr(\"Priority\"), self.tr(\"Deadline\"),\n self.tr(\"Completed\"), self.tr(\"Created\")))\n #self.tableWidget.horizontalHeader().setResizeMode(0, QHeaderView.Stretch)\n self.tableWidget.verticalHeader().hide()\n self.tableWidget.setShowGrid(True)\n #self.tableWidget.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)\n #self.addActions(self.tableWidget, (self.newAction, self.aboutAction))\n self.tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.tableWidget.customContextMenuRequested.connect(self.openContextMenu)\n self.tableWidget.cellDoubleClicked.connect(self.rowDblClick)\n self.refreshTable()", "def selected_tables(self):\n return '<br>'.join([str(x) for x in self.tables.all()])", "def createMainInterface(self):\n self.root.title(\"Database Client\")\n for widget in self.root.winfo_children():\n widget.destroy()\n Label(self.root, text='Database Client', font='Helvetica 
28 bold').grid(\n row=0, column=0, sticky=\"nsew\", pady=10)\n\n if len(self.databases) == 0:\n Label(self.root, text='No available tables', font='Helvetica 14 bold').grid(\n row=1, column=0, pady=10)\n Button(self.root, text='Add Table', font='Helvetica 14',\n command=self.addTable).grid(row=2, column=0)\n\n for i in range(len(self.databases)):\n items = list(self.databases[i].keys())\n items.pop(len(items) - 1)\n items.pop(len(items) - 1)\n cols = tuple(items)\n\n listBox = ttk.Treeview(\n self.root, columns=cols, show='headings', selectmode='browse')\n for col in cols:\n listBox.heading(col, text=col)\n listBox.grid(row=i + 1, column=0)\n\n for (key, values) in self.databases[i].items():\n if \"tableID\" in key or \"tableName\" in key:\n continue\n self.dataToInsert.append(values)\n self.dataToInsert = list(zip(*self.dataToInsert))\n\n for data in self.dataToInsert:\n listBox.insert(\"\", \"end\", values=(data))\n self.dataToInsert = []\n rowButtonsFrame = Frame(self.root)\n tableName = Label(\n rowButtonsFrame, text=self.databases[i][\"tableName\"], font='Helvetica 14 bold')\n addRowButton = Button(rowButtonsFrame, text='Add Row', font='Helvetica 14',\n command=lambda index=i: self.addTableRow(self.databases[index]))\n removeRowButton = Button(rowButtonsFrame, text='Remove Row', font='Helvetica 14',\n command=lambda index=i: self.deleteTableRow(self.databases[index]))\n rowButtonsFrame.grid(row=i + 1, column=len(self.databases[i]) - 1)\n tableName.grid(row=0, column=0)\n addRowButton.grid(row=1, column=0)\n removeRowButton.grid(row=2, column=0)\n\n tableButtonsFrame = Frame(self.root)\n addTableButton = Button(tableButtonsFrame, text='Add Table', font='Helvetica 14',\n command=self.addTable)\n removeTableButton = Button(tableButtonsFrame, text='Remove Table', font='Helvetica 14',\n command=self.deleteTable)\n tableButtonsFrame.grid(row=len(self.databases) + 1, column=0, columnspan=max(\n [len(item) for item in self.databases]) - 1)\n addTableButton.grid(row=0, column=0, pady=10)\n removeTableButton.grid(row=0, column=1, pady=10)\n self.root.bind(\"<Escape>\", lambda event: self.root.destroy())", "def _table_viewer(table, rows_per_page=25, fields=None):\n\n # TODO(gram): rework this to use datalab.utils.commands.chart_html\n\n if not table.exists():\n raise Exception('Table %s does not exist' % str(table))\n\n if not table.is_listable():\n return \"Done\"\n\n _HTML_TEMPLATE = u\"\"\"\n <div class=\"bqtv\" id=\"{div_id}\">{static_table}</div>\n <br />{meta_data}<br />\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'],\n function(charts, dom, events) {{\n charts.render('gcharts', dom, events, '{chart_style}', [], {data},\n {{\n pageSize: {rows_per_page},\n cssClassNames: {{\n tableRow: 'gchart-table-row',\n headerRow: 'gchart-table-headerrow',\n oddTableRow: 'gchart-table-oddrow',\n selectedTableRow: 'gchart-table-selectedrow',\n hoverTableRow: 'gchart-table-hoverrow',\n tableCell: 'gchart-table-cell',\n headerCell: 
'gchart-table-headercell',\n rowNumberCell: 'gchart-table-rownumcell'\n }}\n }},\n {{source_index: {source_index}, fields: '{fields}', legacy: 'true'}},\n 0,\n {total_rows});\n }}\n );\n </script>\n \"\"\"\n\n if fields is None:\n fields = datalab.utils.commands.get_field_list(fields, table.schema)\n div_id = datalab.utils.commands.Html.next_id()\n meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''\n meta_name = str(table) if table.job is None else ('job: %s' % table.job.id)\n if table.job:\n if table.job.cache_hit:\n meta_cost = 'cached'\n else:\n bytes = datalab.bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)\n meta_cost = '%s processed' % bytes\n meta_time = 'time: %.1fs' % table.job.total_time\n else:\n meta_cost = ''\n meta_time = ''\n\n data, total_count = datalab.utils.commands.get_data(table, fields, first_row=0,\n count=rows_per_page)\n\n if total_count < 0:\n # The table doesn't have a length metadata property but may still be small if we fetched less\n # rows than we asked for.\n fetched_count = len(data['rows'])\n if fetched_count < rows_per_page:\n total_count = fetched_count\n\n chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'\n meta_entries = [meta_count, meta_time, meta_cost, meta_name]\n meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))\n\n return _HTML_TEMPLATE.format(div_id=div_id,\n static_table=datalab.utils.commands.HtmlBuilder\n .render_chart_data(data),\n meta_data=meta_data,\n chart_style=chart,\n source_index=datalab.utils.commands\n .get_data_source_index(str(table)),\n fields=','.join(fields),\n total_rows=total_count,\n rows_per_page=rows_per_page,\n data=json.dumps(data, cls=datalab.utils.JSONEncoder))", "def host_facts_display(self, facts):\n table_view = TableViewWidget(\n ascii = self.config.get('ascii')\n )\n table_view.table_display(self._cfg_table_overview, facts)", "def showTable(self, table):\n layoutManager = slicer.app.layoutManager()\n currentLayout = layoutManager.layout\n layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)\n layoutManager.setLayout(layoutWithTable)\n appLogic = slicer.app.applicationLogic()\n appLogic.GetSelectionNode().SetActiveTableID(table.GetID())\n appLogic.PropagateTableSelection()", "def create_connections(self):\n \n self.connect(self.cancel_button, SIGNAL('clicked()'), self.close_dialog) \n self.connect(self.import_button, SIGNAL('clicked()'), self.import_alembic_dialog)\n self.combo_box.currentIndexChanged.connect(self.on_comboBox_changed)\n self.sequence_list_wdg.currentItemChanged.connect(self.on_sequenceList_changed)\n self.shots_list_wdg.currentItemChanged.connect(self.on_shotList_changed)", "def establish_widget_connections(self, widget):\n warnings.warn(\"'PyDMApplication.establish_widget_connections' is deprecated, \"\n \"this function is now found on `utilities.establish_widget_connections`.\")\n connection.establish_widget_connections(widget)", "def DumpConnections(self):\n print \"Connections:\"\n for k in self._connections.keys():\n print \" %s --> %s\" % (`k`, `self._connections[k]`)\n print \"--\"", "def show_connections(self, **kwargs):\n status, data = self.run_gerrit_command('show-connections', **kwargs)\n\n return status, data", "def __createNodeListWidget(self):\n frame = QtGui.QFrame()\n layout = QtGui.QVBoxLayout()\n frame.setLayout(layout)\n\n # Add a label\n label = QtGui.QLabel(\"Nodes:\")\n layout.addWidget(label)\n\n # Add the list of known nodes\n 
self.__nodeListWidget = QtGui.QListWidget()\n layout.addWidget(self.__nodeListWidget)\n\n # Display nodes in alphabetical order\n sortedNodes = sorted(self.__graph.getNodes())\n for node in sortedNodes:\n self.__nodeListWidget.addItem(node)\n\n # Update the graph with the currently selected widget\n self.__nodeListWidget.currentItemChanged.connect(self.__onNodeClicked)\n\n self.__layout.addWidget(frame)", "def listTables(self, instance):\n raise NotImplementedException()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View the connections as SVG.
def view_election_connections_svg(self, request):

    layout = ElectionLayout(self, request, 'connections')
    return {
        'path': layout.svg_path,
        'name': layout.svg_name
    }
[ "def visualize(self):\n return nx.draw_networkx(self.graph_Hz)", "def to_svg(self, separate=False, include_junctions=False):\n serialize_as_svg(\n self.output, separate=separate, include_junctions=include_junctions\n )", "def draw(self):\n nx.drawing.draw_networkx(self.graph, **self.visual_config[\"kwargs\"])", "def show(self):\n networkx.draw(self.graph)\n matplotlib.pyplot.draw()\n matplotlib.pyplot.show()", "def _repr_svg_(self):\n f = io.BytesIO()\n self.writeSVGfile(f, page_bboxenlarge=self.ipython_bboxenlarge)\n return f.getvalue().decode(\"utf-8\")", "def to_plot(self, path: str) -> None:\n plt.subplot()\n nx.draw(self.graph, with_labels=True, font_weight=\"bold\")\n plt.savefig(path)", "def draw_architecture_graph(self):\n nx.draw(self.graph, pos=nx.circular_layout(self.graph), with_labels=False)\n plt.show()", "def draw(self, file_format):\n import pydot\n\n graph = pydot.Dot(graph_type='graph', dpi=\"52\")\n for index, atom in enumerate(self.atoms):\n atom_type = '{0!s} {1!s} '.format(index+1, atom.label if atom.label != '' else '')\n atom_type += ','.join([at.label for at in atom.atomtype])\n atom_type = '\"' + atom_type + '\"'\n graph.add_node(pydot.Node(name=str(index + 1), label=atom_type, fontname=\"Helvetica\", fontsize=\"16\"))\n for atom1 in self.atoms:\n for atom2, bond in atom1.bonds.items():\n index1 = self.atoms.index(atom1)\n index2 = self.atoms.index(atom2)\n if index1 < index2:\n bond_type = ','.join([order for order in bond.get_order_str()])\n bond_type = '\"' + bond_type + '\"'\n graph.add_edge(pydot.Edge(src=str(index1 + 1), dst=str(index2 + 1),\n label=bond_type, fontname=\"Helvetica\", fontsize=\"16\"))\n\n img = graph.create(prog='neato', format=file_format)\n return img", "def _svg_dot_graph(cfg, state):\n cfg = xact.cfg.denormalize(copy.deepcopy(cfg))\n dot = graphviz.Digraph(\n 'Process flow network',\n format = 'svg',\n graph_attr = {\n 'bgcolor': 'transparent',\n 'pad': state['graph_pad'],\n 'splines': 'ortho',\n 'rankdir': 'LR',\n 'nodesep': state['graph_nodesep'],\n 'ranksep': state['graph_ranksep'],\n 'fontname': state['font_name'],\n 'fontsize': state['font_size'],\n 'fontcolor': state['font_color']\n },\n node_attr = {\n 'shape': 'Mrecord',\n 'style': 'rounded',\n 'fixedsize': 'true',\n 'width': state['node_width'],\n 'height': state['node_height'],\n 'color': state['node_color'],\n 'labelloc': 'b',\n 'imagescale': 'true',\n 'fontname': state['font_name'],\n 'fontsize': state['font_size'],\n 'fontcolor': state['font_color']\n },\n edge_attr = {\n 'color': state['edge_color'],\n })\n\n for (id_host, cfg_host) in cfg['host'].items():\n\n with dot.subgraph(\n name = 'cluster_' + id_host,\n graph_attr = { 'style': 'rounded',\n 'pad': '10',\n 'color': state['node_color'],\n 'label': id_host}) as host:\n\n for (id_proc, cfg_proc) in cfg['process'].items():\n\n if cfg_proc['host'] != id_host:\n continue\n\n with host.subgraph(\n name = 'cluster_' + id_proc,\n graph_attr = { 'style': 'rounded',\n 'pad': '10',\n 'color': state['node_color'],\n 'label': id_proc}) as process:\n\n for (id_node, cfg_node) in cfg['node'].items():\n if cfg_node['process'] != id_proc:\n continue\n\n process.node(id_node, label = id_node)\n\n for cfg_edge in cfg['edge']:\n is_intra_process = cfg_edge['ipc_type'] == 'intra_process'\n is_on_this_process = cfg_edge['list_id_process'][0] == id_proc\n if is_intra_process and is_on_this_process:\n process.edge(cfg_edge['id_node_src'], cfg_edge['id_node_dst'])\n\n for cfg_edge in cfg['edge']:\n is_inter_process = cfg_edge['ipc_type'] == 
'inter_process'\n is_on_this_host = cfg_edge['list_id_host'][0] == id_host\n if is_inter_process and is_on_this_host:\n host.edge(cfg_edge['id_node_src'], cfg_edge['id_node_dst'])\n\n for cfg_edge in cfg['edge']:\n is_inter_host = cfg_edge['ipc_type'] == 'inter_host'\n if is_inter_host:\n dot.edge(cfg_edge['id_node_src'], cfg_edge['id_node_dst'])\n\n str_all = dot.pipe().decode('utf-8')\n str_svg = str_all[str_all.find('<svg'):str_all.rfind('</svg>')+6]\n return str_svg", "def asSVGPath(self):\n segs = self.asSegments()\n pathParts = [\"M %f %f\" % (segs[0][0].x, segs[0][0].y)]\n\n operators = \"xxLQC\"\n for s in segs:\n op = operators[len(s)] + \" \"\n for pt in s[1:]:\n op = op + \"%f %f \" % (pt.x, pt.y)\n pathParts.append(op)\n if self.closed:\n pathParts.append(\"Z\")\n\n return \" \".join(pathParts)", "def plot_network(self, data, file_path):\n plt.clf()\n plt.title('Network nodes and edges')\n plt.scatter(data[:, 0], data[:, 1], c='b')\n node_pos = {}\n for u in self.network.nodes():\n vector = self.network.node[u]['vector']\n node_pos[u] = (vector[0], vector[1])\n nx.draw(self.network, pos=node_pos, node_color='r')\n plt.draw()\n plt.savefig(file_path)", "def visualize(self):\n plt.show()", "def view(self) -> Any:\n\n import graphviz\n\n scales = []\n dot = graphviz.Digraph(\"architecture\", graph_attr={\"splines\": \"true\", \"overlap\": \"true\"})\n dot.engine = \"neato\"\n\n for i, node in enumerate(self.node_names):\n scales.append(self.graph[node][\"scale\"])\n dot.node(node, label=self.graph[node][\"op\"], pos=f\"{i*1.5 + 2},-{math.log2(2*scales[-1])}!\")\n\n for scale in sorted(list(set(scales))):\n dot.node(\n f\"scale-{scale}\",\n label=f\"scale={2*scale}, ch={self.channels_per_scale[scale]}\",\n pos=f\"-1,-{math.log2(2*scale)}!\",\n )\n\n for edge in self.edge_dict:\n in_node, out_node = edge.split(\"-\")\n dot.edge(in_node, out_node)\n\n # Adds post upsample\n dot.node(\"upsample\", label=f\"Upsample + {self.post_upsample_layers} x Conv 3x3\", pos=f\"{i*1.5 + 2},0!\")\n dot.edge(\"output\", \"upsample\")\n\n # Shows the graph\n return dot", "def writeSvg(self,f, sol:list = []):\n\t\tf.write('<?xml version=\"1.1\" encoding=\"UTF-8\"?>\\n')\n\t\tlength = self.l()\n\t\theight = self.h()\n\n\t\t### Cases\n\t\tf.write('<svg width=\"{}\" height=\"{}\" viewBox=\"-1 -1 {} {}\">\\n'.format(100*(length+2),100*(height+2),(length+2),(height+2)))\n\t\tf.write('\\t<rect x=\"-1\" y=\"-1\" width=\"100%\" height=\"100%\" fill=\"white\"/>\\n')\n\n\t\tf.write('\\t<g>\\n')\n\t\tfor case in sol:\n\t\t\tif case[0] == \"-\":\n\t\t\t\tcase = case[1:]\n\t\t\t\tcolor = \"#b3b3b0\"\n\t\t\telse: \n\t\t\t\tcolor = \"#038\"\n\t\t\t(x,y) = (int(i) for i in case.split(\",\"))\n\t\t\tf.write('\\t\\t<rect x=\"{}\" y=\"{}\" width=\"1\" height=\"1\" fill=\"{}\"/>\\n'.format(x,height - y -1,color))\n\t\tf.write('\\t</g>\\n')\n\n\t\t### Murs\n\t\tf.write('\\t<path stroke=\"black\" stroke-linecap=\"round\" stroke-width=\"0.1\" d=\"M 0 0 L 0 {}\" />\\n'.format(height))\n\t\tf.write('\\t<g>\\n')\n\t\tfor x,y in ((i,j) for i in range(length) for j in range(height)):\n\t\t\tif self.getBarrier(x,y,SUD):\n\t\t\t\tf.write('\\t\\t<path stroke=\"black\" stroke-linecap=\"round\" stroke-width=\"0.1\" d=\"M {} {} L {} {}\" />\\n'.format(x,height-y,x+1,height-y))\n\t\t\tif self.getBarrier(x,y,EST):\n\t\t\t\tf.write('\\t\\t<path stroke=\"black\" stroke-linecap=\"round\" stroke-width=\"0.1\" d=\"M {} {} L {} {}\" />\\n'.format(x+1,height-y,x+1,height-(y+1)))\n\t\tf.write('\\t</g>\\n')\n\n\t\t### 
Valeurs\n\t\tf.write('\\t<g>\\n')\n\t\tfor i in range(len(self.__values['v'])):\n\t\t\tval = self.__values['v'][i]\n\t\t\tif val != -1:\n\t\t\t\tf.write('\\t\\t<text x=\"{}\" y=\"{}\" font-size=\"1\">{}</text>\\n'.format(i+0.25,height+1,val))\n\t\tf.write('\\t</g>\\n')\n\t\tf.write('\\t<g>\\n')\n\t\tfor j in range(len(self.__values['h'])):\n\t\t\tval = self.__values['h'][j]\n\t\t\tif val != -1:\n\t\t\t\tf.write('\\t\\t<text x=\"{}\" y=\"{}\" font-size=\"1\">{}</text>\\n'.format(length+0.25,j+0.9,val))\n\t\tf.write('\\t</g>\\n')\n\t\tf.write('</svg>\\n')", "def graph_genotype(self, fig_num=100, edge_labels=False):\n\n\t\tgraph = self.gen_networkx_graph()\n\t\t\n\t\t# create dictionary that holds positions of input/output nodes\n\t\tpos = self.gen_positions_for_networkx(graph)\n\n\t\t# create matplotlib figure used for graph\n\t\tplt.figure(fig_num)\n\t\t\n\t\t# add all nodes into graph with colors\n\t\tfor node in self.nodes:\n\t\t\tcolor = NODE_TO_COLOR[node.getActKey()]\n\t\t\tnx.draw_networkx_nodes(graph, pos, \n\t\t\t\t\t\t\t\t\tnodelist=[node.getNodeNum()],\n\t\t\t\t\t\t\t\t\tnode_color=color,\n\t\t\t\t\t\t\t\t\tnode_size=400, alpha=0.8)\n\t\t# add all connections into graph with colors\n\t\tfor con in self.connections:\n\t\t\tif(con.getStatus()):\n\t\t\t\tcolor = 'b' if con.getWeight() < 0 else 'r'\n\t\t\t\tedge_tuple = (con.getNodeIn().getNodeNum(), \n\t\t\t \t\t\t\tcon.getNodeOut().getNodeNum())\n\t\t\t\tnx.draw_networkx_edges(graph, pos,\n\t\t\t \t\t\t\t\t\tedgelist = [edge_tuple],\n\t\t\t \t\t\t\t\t\twidth=3, alpha=0.5, \n\t\t\t \t\t\t\t\t\tedge_color=color, arrows=True)\n\t\t\n\t\t# add innovation number labels for connections\n\t\tlabels = nx.get_edge_attributes(graph, 'i')\n\t\tif(edge_labels):\n\t\t\tnx.draw_networkx_edge_labels(graph, pos, edge_labels=labels, font_size=8, label_pos=.8)\n\n\t\t# create graph with title/legend and display\n\t\tplt.title(\"CPPN Genotype Visualization\")\n\t\tplt.legend(handles=PATCH_LIST, loc='upper right')\n\t\tplt.show()", "def draw(nodes, out_file=None):\n graph = nx.Graph()\n for node in nodes.values():\n graph.add_node(node.name, time=node.id)\n for neighbor_name in node.neighbors:\n graph.add_edge(node.id, neighbor_name)\n\n # pos = nx.spring_layout(G, scale=20)\n # nx.spring_layout(G, k=0.05, iterations=20)\n options = {\n 'node_size': 10,\n 'font_size': 12,\n 'with_labels': True,\n 'pos': graphviz_layout(graph)\n }\n nx.draw(graph, **options)\n if out_file is None:\n plt.plot()\n plt.show()\n else:\n plt.savefig(out_file)\n LOG.info('The topology figure is saved to %s', out_file)", "def test_case_2(self):\n graphic = Graphic(etree.parse(\"arrow.svg\").getroot())\n f = open(\"out/arrow_%s.svg\"%sys._getframe().f_code.co_name,\"w\")\n f.write(graphic.get_xml())\n f.close()", "def create_svg_icon(symbolizers):\n svg_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/svg_1_0.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n icon_paths = []\n for symbolizer in symbolizers:\n if 'PolygonSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n styles.extend(process_fill_styling(symbolizer))\n fill_found = False\n for style in styles:\n if 'fill=' in style:\n fill_found = True\n if not fill_found:\n print('no fill found, adding it as empty style')\n styles.append('fill=\"none\"')\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n 
input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': polygon_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n\n elif 'LineSymbolizer' in symbolizer.original_tagname_:\n styles = []\n styles.extend(process_stroke_styling(symbolizer))\n # TODO: Add support for geometry Handling\n line_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/line.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': line_points,\n 'styles': ' '.join(styles)\n }\n content = line_template.render(**template_params)\n icon_paths.append(content)\n elif 'PointSymbolizer' in symbolizer.original_tagname_:\n # TODO: Check how to handle a Point\n if symbolizer.Graphic:\n if symbolizer.Graphic.Mark:\n styles = []\n for mark in symbolizer.Graphic.Mark:\n styles.extend(process_fill_styling(mark))\n if mark.WellKnownName == 'square':\n polygon_template = Template(\n filename=AssetResolver('pyconizer').resolve(\n 'lib/api/svg/templates/polygon.xml').abspath(),\n input_encoding='utf-8',\n output_encoding='utf-8'\n )\n template_params = {\n 'points': square_points,\n 'styles': ' '.join(styles)\n }\n content = polygon_template.render(**template_params)\n icon_paths.append(content)\n elif symbolizer.Geometry:\n # TODO: implement geometry symbolizer\n print('point symbolizer does not support geometry for now')\n # else:\n # styles = [\n # 'stroke=\"black\"',\n # 'stroke-width=\"1\"',\n # 'fill=\"red\"'\n # ]\n # polygon_template = Template(\n # filename=AssetResolver('pyconizer').resolve(\n # 'lib/api/svg/templates/circle.xml').abspath(),\n # input_encoding='utf-8',\n # output_encoding='utf-8'\n # )\n # template_params = {\n # 'x': '2',\n # 'y': '2',\n # 'radius': '1',\n # 'styles': ' '.join(styles)\n # }\n # content = polygon_template.render(**template_params)\n # class_svg_paths.append(content)\n\n # only add a svg path if it would have content\n if len(icon_paths) > 0:\n svg_content = svg_template.render(**{\n 'geometry_tag': '\\n'.join(icon_paths)\n })\n return svg_content", "def visualize(list, infected):\n g = nx.read_weighted_edgelist(list, delimiter=',')\n plt.figure(figsize=(10,10))\n\n rawInfected = set()\n\n for user in infected:\n rawInfected.add(user.id)\n\n pos=nx.spring_layout(g)\n nx.draw(g, pos, node_color = 'b',node_size= 25, with_labels = True)\n nx.draw_networkx_nodes(g, pos, nodelist = rawInfected, node_size = 50, node_color = 'g')\n\n\n # saves specific filenames\n path = ntpath.basename(list)\n fn = path.split('.')\n\n output = \"{}/{}_infected.png\".format(ntpath.dirname(list),fn[0])\n plt.savefig(output)\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a template database entry
def make_template(self): missing = TEMPLATE_REQUIRED.difference(self.data) if missing: return ("<h3>Template must have %s filled in.</h3>" % ', '.join(missing)) # Write a database entry xline = """insert into template (description, comment, calories, fat, protein, carbs, size) values (?, ?, ?, ?, ?, ?, ?)""" xparms = tuple(self.data.get(x, '') for x in """description comment calories fat protein carbs size""".split()) self.cursor.execute(xline, xparms) print(dict(command=xline, args=xparms), file=self.log_file) return "<h3>Template created at %s</h3>" % ( datetime.now().time().strftime("%I:%M:%S %p"))
[ "def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )", "def create(self, template):\n raise NotImplementedError('Create Template not implemented')", "def insert(cls, env, record):\n with env.db_transaction as db:\n\n cursor = db.cursor()\n sqlString = \"\"\"INSERT INTO ticket_template_store\n (tt_time,tt_user,tt_name,tt_field,tt_value)\n VALUES (%s,%s,%s,%s,%s)\"\"\"\n cursor.execute(sqlString, record)", "def import_template(template, template_name, description, template_type='bootstrap'):\n try:\n t = Template.query.filter(Template.name == template_name).first()\n\n if t is None:\n print('Adding new record to db')\n unescaped_template = unescape(template)\n t = Template(name=template_name, description=description, template=unescaped_template, type=template_type)\n db_session.add(t)\n db_session.commit()\n\n else:\n print('template exists in db')\n\n return True\n except SQLAlchemyError as sqe:\n print('Could not import file')\n print(str(sqe))\n return False", "def load_prep_template(fp, study, data_type):\n prep_template = load_prep_template_from_cmd(fp, study, data_type)\n click.echo(\"Prep template successfully added to the database with id %s\"\n % prep_template.id)", "def new_template():\n form = NewTemplateForm()\n keywords = ProvisioningKeyword.query.all()\n\n if form.validate_on_submit():\n template = ProvisioningTemplate(\n name=form.name.data,\n template=form.template.data,\n options=form.compress_options(),\n enabled=request.form.get(\"enabled\", False)\n )\n db.session.add(template)\n db.session.commit()\n flash('Template {} successfully created'.format(template.name),\n 'form-success')\n return redirect(url_for('provisioning.templates'))\n\n return render_template('provisioning/new_template.html', \n form=form,\n keywords=keywords\n )", "def _update_template(self, content):\r\n t, created = Template.objects.get_or_create(resource=self.resource)\r\n t.content = content\r\n t.save()", "def _create_template():\n if os.path.exists(DOMAIN_TEMPLATE_FILE):\n return\n\n with open(DOMAIN_TEMPLATE_FILE, 'w') as template:\n template.write(DOMAIN_TEMPLATE)", "def create_template(template_name, created_by, created_on):\n\n template = Template(template_name=template_name,\n created_by=created_by,\n created_on=created_on)\n\n db.session.add(template)\n\n db.session.commit()\n\n return template", "def load_sample_template(fp, study):\n sample_temp = load_sample_template_from_cmd(fp, study)\n click.echo(\"Sample template successfully added to the database with id %s\"\n % sample_temp.id)", "def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_simple(self):\n pass", "def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps", "def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/test_answers_%(template)s.ini 
%(dir)s/bobtemplates/simplesconsultoria/%(template)s'\n % options)", "def on_cdb_create_doc_from_template_now(cls, ctx):\n def _uniquote(s):\n if isinstance(s, unicode):\n v = s.encode('utf-8')\n else:\n v = s\n return urllib.quote(v)\n\n if misc.CDBApplicationInfo().rootIsa(misc.kAppl_HTTPServer):\n url = \"/cs-documents/template_creation\"\n if ctx.relationship_name:\n # We have to provide information about the relationship and the\n # parent\n rs = relships.Relship.ByKeys(ctx.relationship_name)\n cdef = entities.CDBClassDef(rs.referer)\n o = support._RestKeyObj(cdef, ctx.parent)\n key = support.rest_key(o)\n url += u\"?classname=%s&rs_name=%s&keys=%s\" % \\\n (_uniquote(rs.referer),\n _uniquote(rs.rolename),\n _uniquote(key))\n\n ctx.url(url)\n\n # Get the project\n if not ctx.catalog_selection:\n kwargs = {}\n # If we are in a decomposition, evaluate the predefined attributes\n if \"decompositionclsid\" in ctx.sys_args.get_attribute_names():\n decomposition = ctx.sys_args[\"decompositionclsid\"]\n if decomposition:\n # get predefined attrs, e.g. from decompositions\n cdef = entities.CDBClassDef(decomposition)\n predef_args = cdef.getPredefinedOpArgs(\"CDB_Search\", True)\n for arg in predef_args:\n # This one is for the catalog configuration\n # to behave as if the attributes were in the\n # dialog\n kwargs[arg.name] = arg.value\n # This one is for _set_template_catalog_query_args\n kwargs[arg.name + \"_initalqueryarg\"] = arg.value\n\n ctx.start_selection(catalog_name=\"cdb_doc_template\", **kwargs)\n else:\n znumber = ctx.catalog_selection[0][\"z_nummer\"]\n zidx = ctx.catalog_selection[0][\"z_index\"]\n template = Document.ByKeys(znumber, zidx)\n predef = [(\"erzeug_system\", template[\"erzeug_system\"])]\n ueargs = [(\"runeditaftercreate\", \"1\")]\n # Zerlegungsattribute vorbelegen\n if \"decompositionclsid\" in ctx.sys_args.get_attribute_names():\n decomposition = ctx.sys_args[\"decompositionclsid\"]\n if decomposition:\n # get predefined attrs, e.g. from decompositions\n cdef = entities.CDBClassDef(decomposition)\n predef_args = cdef.getPredefinedOpArgs(\"CDB_Create\", True)\n for arg in predef_args:\n predef.append((arg.name, arg.value))\n\n ctx.set_followUpOperation(opname=\"CDB_Create\",\n keep_rship_context=True,\n opargs=ueargs,\n predefined=predef,\n tmpl_object=template)", "def set_templatefile(self):\n\n self.par_template = filedialog.askopenfilename()\n self.entry_template.delete(0, END)\n self.entry_template.insert(0, self.par_template)\n LOGGER.debug('Template: %s', self.par_template)", "def createContent(self, entry):\n uri = \"/content/\" + self.username + \"/\"\n return self.Post(entry, uri= uri)", "def test_api_v3_entity_templates_post(self):\n pass", "def do_env_template_create(mc, args):\n env_template = mc.env_templates.create(\n {\"name\": args.name, \"is_public\": args.is_public})\n _print_env_template_list([env_template])", "def createTeam():\n conn = sqlite3.connect(conf['path'])\n cur = conn.cursor()\n # Add dp as string later\n # name varchar(32) not null,\\\n # desc varchar(250) unique not null,\\\n cur.execute('''Create Table If Not Exists Team(\\\n pid integer Primary Key,\\\n umail varchar(32),\\\n utc timestamp not null\\\n );''')\n conn.commit()\n conn.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Knock down the wall between cells self and other.
def knock_down_wall(self, other, wall): self.walls[wall] = False other.walls[Cell.wall_pairs[wall]] = False
[ "def break_wall(self, other):\n if self.i == other.i:\n if self.j == other.j - 1:\n self.walls['right'] = False\n other.walls['left'] = False\n elif self.j == other.j + 1:\n self.walls['left'] = False\n other.walls['right'] = False\n else:\n raise ValueError('Can break a wall only between two neighboring cells')\n elif self.j == other.j:\n if self.i == other.i - 1:\n self.walls['bottom'] = False\n other.walls['top'] = False\n elif self.i == other.i + 1:\n self.walls['top'] = False\n other.walls['bottom'] = False\n else:\n raise ValueError('Can break a wall only between two neighboring cells')\n else:\n raise ValueError('Can break a wall only between two neighboring cells')", "def _change_wall(self,):\n \n pass", "def jump(self):\n if self.grounded:\n self.vy -= 30\n elif self.doublejump == 1:\n self.vy -= 30\n self.doublejump = 0", "def walls(self):", "def __follow_wall(self):\r\n\r\n if self.__controller.laser_minimum < 0.5 and not self.__gap_detected:\r\n if not self.__gap_detected:\r\n self.__following_wall = True\r\n self.__controller.stop()\r\n\r\n self.__move_to_most_open_space()\r\n else: \r\n self.__turning_left = False\r\n self.__turning_right = False\r\n if self.__following_wall:\r\n if self.__controller.laser_data[0] >= 1.2:\r\n self.__gap_detected = True\r\n self.__following_wall = False\r\n\r\n if self.__gap_detected:\r\n if self.__controller.laser_minimum < 0.6:\r\n self.__controller.stop()\r\n self.__gap_detected = False\r\n else:\r\n self.__controller.drift_right()\r\n else:\r\n self.__controller.forwards()", "def setWall(self, x1_y1, x2_y2):\n x1, y1 = x1_y1\n x2, y2 = x2_y2\n if x1 > x2: # make sure x1 < x2\n (x1,y1,x2,y2) = (x2,y2,x1,y1)\n if x2 - x1 == 0:\n x1 -= 0.001\n dx = (x2 - x1)\n dy = (y2 - y1)\n m = dy / dx # slope\n b = y1 - x1 * m\n x = x1\n (lx,ly) = (x1,x2)\n step = dx / math.sqrt(dx * dx + dy * dy)\n while x < x2:\n y = x * m + b\n blockx = math.floor(x + 0.5)\n blocky = math.floor(y + 0.5)\n self.occupied.add( (blockx, blocky) )\n if x != x1 and lx != blockx and ly != blocky:\n self.occupied.add( (blockx-1, blocky) )\n (lx, ly) = (blockx, blocky)\n x +=step\n # Remove these walls from dirt\n self.dirt = self.dirt - self.occupied\n self.dirtStarting = self.dirtStarting - self.occupied", "def kick_ball(self, side):", "def jump(self):\n if abs(self.vy) < .25 and not self.jump1:\n r = random.random()\n if r < .025:\n self.vy = -10\n self.jump1 = True", "def move(self):\n self.x1 -= self.VEL\n self.x2 -= self.VEL\n\n # if floor moves out of the window\n if self.x1 + self.WIDTH < 0:\n self.x1 = self.x2 + self.WIDTH\n\n if self.x2 + self.WIDTH < 0:\n self.x2 = self.x1 + self.WIDTH", "def jump(self):\n self.vel = -10.5\n self.tick_count = 0 # keep track of when we last jump\n self.height = self.y # from which position bird started jumping", "def connect(cell1, cell2):\n if cell1.pos.x == cell2.pos.x:\n if cell1.pos.y == cell2.pos.y + 1:\n cell1.down = cell2\n cell2.up = cell1\n elif cell1.pos.y == cell2.pos.y - 1:\n cell1.up = cell2\n cell2.down = cell1\n if cell1.pos.y == cell2.pos.y:\n if cell1.pos.x == cell2.pos.x + 1:\n cell1.left = cell2\n cell2.right = cell1\n elif cell1.pos.x == cell2.pos.x - 1:\n cell1.right = cell2\n cell2.left = cell1", "def cut_the_edge():\n while beepers_present():\n move()\n turn_around()\n if front_is_clear():\n move()\n pick_beeper()\n move()", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # if there is then it's good to jump\n self.rect.y += 2\n platform_hit_list = 
pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def double_left_click_obvious_cells(self):\n for cell in self.list_active_cells():\n if self.neighboring_flags(cell.row, cell.column) == self.neighboring_bombs(cell.row, cell.column):\n cell.double_left_click()\n self.remove_active_cell(cell)\n self.updated = True", "def make_cross_wall(self):\n if self.orient == \"e\":\n self.cross_wall = self.coord1.wall_south\n if self.orient == \"s\":\n self.cross_wall = self.coord1.wall_east", "def __go_to_waypoint(self):\r\n\r\n if self.__controller.blue_square_found:\r\n left_blue_pixel_count = np.count_nonzero(self.__controller.blue_mask_left==255)\r\n right_blue_pixel_count = np.count_nonzero(self.__controller.blue_mask_right==255)\r\n\r\n if left_blue_pixel_count >= right_blue_pixel_count:\r\n self.__controller.drift_left()\r\n else:\r\n self.__controller.drift_right()\r\n else:\r\n self.__moving_to_waypoint = False\r\n\r\n if self.__controller.laser_minimum < 0.5:\r\n self.__controller.stop()\r\n self.__move_to_most_open_space()\r\n\r\n self.__moving_to_waypoint = False", "def bounceAgainst(self, other):\n if self.invincibility_frames > 0: return\n x_direction_sign = 1\n y_direction_sign = 1\n if(self.rect.left < other.rect.left):\n x_direction_sign = -1\n if(self.rect.top < other.rect.top):\n y_direction_sign = -1\n new_xvel = 4 * x_direction_sign\n new_yvel = y_direction_sign\n self.xvel = new_xvel\n self.yvel = new_yvel \n self.movement_state = BOUNCING_MOVEMENT_STATE\n self.bounce_count = 15", "def stay_put(self):\n self.go_to(self.pos.x,self.pos.y, self.pos.theta)", "def setWall(self, row, col):\n if self.grid[row][col] != 2 and self.grid[row][col] != 3:\n self.grid[row][col] = 1\n #print(\"Wall set at (\", row, \", \", col, \")\")", "def turn_around():\n\tturn_right()\n\tturn_right()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the accuracy of a proposed partition with reference to a known true partition. Pr(A|B) is the probability of inferring relationship of type A between two individuals given that the true relationship is type B. This function estimates the total proportion of pairwise relationships inferred correctly, as well as Pr(FS|FS), and Pr(HS|HS). For half sibling arrays, Pr(FS|HS) = 1 - Pr(HS|HS) and Pr(HS|FS) = 1 - Pr(FS|FS). ARGUMENTS
def partition_accuracy(true, proposed, rtype='all'): if rtype not in ['all', 'fs', 'hs']: print "rtype must be one of 'all', 'fs' or 'hs'." #return None real_FS = relation_matrix(true) part_FS = relation_matrix(proposed) r_given_r = real_FS == part_FS if rtype is 'all': correct = (r_given_r * np.triu(np.ones(real_FS.shape), 1)).sum() total = np.triu(np.ones(real_FS.shape), 1).sum() accuracy = correct/total if rtype is 'fs': # Pr(FS|FS) depends on the correct relationships, conditioned on being a true FS relationship. fs_given_fs = r_given_r * real_FS correct = (fs_given_fs * np.triu(real_FS,1)).sum() total = np.triu(real_FS,1).sum() accuracy = correct/total if rtype is 'hs': real_HS = 1- real_FS # real halfsibling relationships hs_given_hs = r_given_r * real_HS correct = (hs_given_hs * np.triu(real_HS, 1)).sum() total = np.triu(real_HS,1).sum() accuracy = correct/total return accuracy
[ "def testAnalyaticalPartitionIsCorrect(self):\n # Here we enumerate a set of positive rational numbers n/d alongside\n # numerically approximated values of Z(n / d) up to 10 digits of precision,\n # stored as (n, d, Z(n/d)). This was generated with an external mathematica\n # script.\n ground_truth_rational_partitions = (\n (1, 7, 4.080330073), (1, 6, 4.038544331), (1, 5, 3.984791180),\n (1, 4, 3.912448576), (1, 3, 3.808203509), (2, 5, 3.735479786),\n (3, 7, 3.706553276), (1, 2, 3.638993131), (3, 5, 3.553489270),\n (2, 3, 3.501024540), (3, 4, 3.439385624), (4, 5, 3.404121259),\n (1, 1, 3.272306973), (6, 5, 3.149249092), (5, 4, 3.119044506),\n (4, 3, 3.068687433), (7, 5, 3.028084866), (3, 2, 2.965924889),\n (8, 5, 2.901059987), (5, 3, 2.855391798), (7, 4, 2.794052016),\n (7, 3, 2.260434598), (5, 2, 2.218882601), (8, 3, 2.190349858),\n (3, 1, 2.153202857), (4, 1, 2.101960916), (7, 2, 2.121140098),\n (5, 1, 2.080000512), (9, 2, 2.089161164), (6, 1, 2.067751267),\n (7, 1, 2.059929623), (8, 1, 2.054500222), (10, 3, 2.129863884),\n (11, 3, 2.113763384), (13, 3, 2.092928254), (14, 3, 2.085788350),\n (16, 3, 2.075212740), (11, 2, 2.073116001), (17, 3, 2.071185791),\n (13, 2, 2.063452243), (15, 2, 2.056990258)) # pyformat: disable\n for numer, denom, z_true in ground_truth_rational_partitions:\n z = distribution.analytical_base_partition_function(numer, denom)\n self.assertAllClose(z, z_true, atol=1e-9, rtol=1e-9)", "def average_precision(rels):\n\n nonzero_indices = np.asarray(rels).nonzero()[0] \n if has_no_relevant_items(nonzero_indices):\n # AP equals 0.0 if there is no relevant items\n return 0.0\n\n return np.sum([precision_k(rels, k+1) for k, r in enumerate(rels) if r != 0.0]) / float(len(nonzero_indices))", "def percentage_pt(y_pred, y_true):\n y_pred_soft = y_pred.exp() / (y_pred.exp().sum(-1)).unsqueeze(-1)\n\n perc = (y_pred_soft.max(dim=1)[1] == y_true.max(dim=1)[1]).sum()\n return perc", "def score(predicted: pd.Series, actual: pd.Series) -> float:\n return sum(predicted == actual) / len(predicted)", "def score(prop, cluster):\r\n return len([other for other in cluster if other[1] == prop[1]]) / (1.0 * len(cluster))", "def pivot_accuracy(predict_geno, truth_geno):\r\n n_pos = len(predict_geno)\r\n p1 = predict_geno.POP1.astype(str).to_numpy()\r\n p2 = predict_geno.POP2.astype(str).to_numpy()\r\n t1 = truth_geno.POP1.astype(str).to_numpy()\r\n t2 = truth_geno.POP2.astype(str).to_numpy()\r\n pivots1 = find_pivots(np.equal(p1, t1))\r\n pivots2 = find_pivots(np.equal(p2, t2))\r\n # TODO some way to scale the number of correct pivots?\r\n # TODO incorrect pivots vs number of pivots??\r\n # Should it be compared to truth or predicted? Percent wrong OR\r\n # number wrong vs expected number?\r\n # Should it matter how close it is to the actual point? 
Nah that's what accuracy is for?\r\n total1 = find_pivots(np.equal(p1, p1[0])) + 1 * (p1[0] != t1[0])\r\n total2 = find_pivots(np.equal(p2, p2[0])) + 1 * (p2[0] != t2[0])\r\n # Get total percent of correct switches in the predicted genome\r\n if total1 + total2 == 0:\r\n percent_correct = 1\r\n else:\r\n percent_correct = 1 - (pivots1 + pivots2) / (total1 + total2)\r\n # Get skew\r\n expected1 = find_pivots(np.equal(t1, t1[0]))\r\n expected2 = find_pivots(np.equal(t2, t2[0]))\r\n if expected1 + expected2 == 0:\r\n skew = (total1 + total2)/2\r\n else:\r\n skew = (total1 + total2) / (expected1 + expected2)\r\n return percent_correct, skew", "def compute_expected_outcome(\n primary_index, protected_class_prediction, outcome_prob, protected_class_status, outcome\n):\n return ((protected_class_prediction[primary_index] + outcome[primary_index]) == 2).mean() / \\\n protected_class_prediction[primary_index].mean()", "def final_results(actual, predicted):\n n = len(actual)\n diff = np.sum((actual - predicted) ** 2)\n rmse = np.sqrt(diff / n)\n\n spearman = 1 - ((6 * diff) / (n ** 3 - n))\n\n PRECISION_K = int(n / 5)\n top_actual = set(heapq.nlargest(PRECISION_K, range(n), actual.__getitem__))\n top_predicted = set(heapq.nlargest(PRECISION_K, range(n), predicted.__getitem__))\n precision = len(top_actual.intersection(top_predicted)) / PRECISION_K\n\n return rmse, precision, spearman", "def calc_precision(confusion_matrix):\n predicted = confusion_matrix.sum(1)\n correct = confusion_matrix.diagonal()\n return correct / predicted", "def calculate_precision(self):\n test_classes = [f[0] for f in self.test_set]\n correct_counts = {c: 0 for c in test_classes}\n total_counts = {c: 0 for c in test_classes}\n\n for feature_dict in self.test_set:\n actual_class = feature_dict[0]\n predicted_class = self.predict(feature_dict[1])\n\n if actual_class == predicted_class:\n correct_counts[actual_class] += 1\n total_counts[actual_class] += 1\n else:\n total_counts[predicted_class] += 1\n\n print(\"=== Precision Statistics ===\")\n for c in correct_counts:\n try:\n if not total_counts[c] == 0:\n self.precision[c] = (correct_counts[c] * 1.0) / (total_counts[c] * 1.0)\n print(\"%s class precision:\" % (c.upper()), self.precision[c])\n else:\n print(\"%s class precision:\" % (c.upper()), \"N/A\")\n except KeyError:\n continue # predicted class may be not int test_classes", "def uas(gold_tree, predicted_tree):\n # Exercise 5.5\n assert gold_tree.n == predicted_tree.n and gold_tree.is_tree() and predicted_tree.is_tree()\n deplist1 = set(gold_tree.edges())\n correct = 0\n for dep2 in predicted_tree.edges():\n if dep2 in deplist1:\n correct += 1\n return correct / len(deplist1)", "def partition_score(game, player):\n # First, see if we've reached an end-game situation\n # +inf means this game state is a win for the current player\n # -inf means this game state is a loss for the current player\n util = game.utility(player)\n \n # If we're at an endgame, then that's the heuristic score for this node\n if util != 0:\n return util\n \n # Next, check for a partition on the board.\n # Partitions are only possible if we have a certain number of moves that have occurred.\n if ( game.move_count >= 2 * game.height ) or ( game.move_count >= 2 * game.width ):\n \n # Grab the set of blank spaces and each player's position\n blank_spaces = game.get_blank_spaces()\n player_location = game.get_player_location(player)\n opponent_location = game.get_player_location(game.get_opponent(player))\n \n # Find all partitions on the game board as 
lines where each is a list of the form: list<(int, int)>\n partition_lines = find_partitions(game.width, game.height, blank_spaces)\n player_contig = -1\n opponent_contig = -1\n for line in partition_lines:\n \n # Check to see if players are on either side of this partition line\n partitioned = False\n if line[0][0] == line[1][0]:\n # ROW-line : Row indexes match across line\n # See if player row locations differ and are separated by this line\n if player_location[0] != opponent_location[0] and \\\n ( ( player_location[0] > line[0][0] and opponent_location[0] < line[0][0] ) or \\\n ( player_location[0] < line[0][0] and opponent_location[0] > line[0][0] ) ):\n \n # Players are on either side of this partition!\n # Count contiguous squares for each player if it hasn't already been done.\n partitioned = True\n if player_contig == -1:\n player_contig = count_contig(player_location, blank_spaces)\n if opponent_contig == -1:\n opponent_contig = count_contig(opponent_location, blank_spaces)\n elif line[0][1] == line[1][1]:\n # COLUMN-line : Column indexes match across line\n # See if player row locations differ and are separated by this line\n if player_location[1] != opponent_location[1] and \\\n ( ( player_location[1] > line[0][1] and opponent_location[1] < line[0][1] ) or \\\n ( player_location[1] < line[0][1] and opponent_location[1] > line[0][1] ) ):\n \n # Players are on either side of this partition!\n # Count contiguous squares for each player if it hasn't already been done.\n partitioned = True\n if player_contig == -1:\n player_contig = count_contig(player_location, blank_spaces)\n if opponent_contig == -1:\n opponent_contig = count_contig(opponent_location, blank_spaces)\n \n # If this line counts as a partition, we should be able to determine a winner\n if partitioned == True:\n # If the contiguous space for the current player is greater than the opponent,\n # then the current player should win\n if player_contig > opponent_contig:\n return float(\"inf\")\n else:\n # Else if there's less contiguous space or a tie in space, the current player\n # should most likely lose\n return float(\"-inf\")\n \n\n # Otherwise, the heuristic is the difference in available moves between\n # the current player and the opposition\n return float(len(game.get_legal_moves(player)) - 2.0 * len(game.get_legal_moves(game.get_opponent(player))))", "def create_even_policy(board: GameState) -> np.ndarray:\n child_valid_flags = board.get_valid_moves()\n valid_count = child_valid_flags.sum()\n if valid_count:\n child_predictions = child_valid_flags / valid_count\n else:\n child_predictions = (child_valid_flags + 1) / child_valid_flags.size\n return child_predictions", "def analyze(kind,truth,preds):\n \n truth = truth.flatten()\n preds = preds.flatten()\n \n if kind == 'abs':\n loss = np.absolute(np.subtract(truth, preds))\n output = np.divide(np.float(np.sum(loss)), np.size(truth))\n elif kind == 'acc':\n correct = np.equal(truth, preds)\n output = np.divide(np.float(np.sum(correct)), np.size(truth))\n \n return output", "def specificity(confusion):\n tot = np.sum(confusion)\n tp = np.diagonal(confusion)\n act = np.sum(confusion, axis=1)\n prd = np.sum(confusion, axis=0)\n fp = prd - tp\n an = tot - act\n tn = an - fp\n return tn / an", "def specificity(confusion):\n classes = confusion.shape[0]\n\n total = np.array([np.sum(confusion)] * classes)\n FN = np.sum(confusion, axis=0)\n FP = np.sum(confusion, axis=1)\n TP = confusion.diagonal()\n\n TN = total - FN - FP + TP\n FP = FN - TP\n\n return TN / (TN + FP)", 
"def _calc_matching_prob(self):\n if not self.professional:\n return 1", "def a_partition(par):\n if par.m_q < 0:\n raise NotImplementedError(\"Q<0 not implemented.\")\n \n _parts = [_partition_gs, _partition_mq, _partition_left]\n for c_pairs in _parts:\n pairs = c_pairs(par)\n if is_valid(pairs, par) and not is_singular(pairs, par): \n return pairs\n\n # never get here\n raise RuntimeError(\"Failed to generate a_partition for %s\" % par)", "def test_tree_probability_calculation(self):\n height = 3\n width = 3\n tree_prob = ImageLoader.calculate_tree_probabilities_snake_shape(width, height)\n assert (tree_prob[(0, 0), (0, 1)] == 0.75), \"side edge probability does not equal to 0.75\"\n assert (tree_prob[(0, 1), (0, 0)] == 0.75), \"side edge probability does not equal to 0.75\"\n assert (tree_prob[(1, 1), (1, 0)] == 0.5), \"center edge probability does not equal to 0.5\"\n\n side_edge_count = 0\n center_edge_count = 0\n for keys in tree_prob:\n if tree_prob[keys] == 0.75:\n side_edge_count += 1\n else:\n center_edge_count += 1\n\n assert (side_edge_count == 16), \"number of side edges not correct: %d\" % (side_edge_count)\n assert (center_edge_count == 8), \"number of center edges not correct\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if omdf.policy is installed in portal_quickinstaller.
def test_product_installed(self): installer = getToolByName(self.portal, 'portal_quickinstaller') self.assertTrue(installer.isProductInstalled('reptheory.policy'))
[ "def _has_access_policy(self, sysmeta_pyxb):\n return bool(getattr(sysmeta_pyxb, \"accessPolicy\", False))", "def test_dependencies_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('ContentWellPortlets'))", "def test_plone_app_dexterity_installed(self):\n qi = self.portal.portal_quickinstaller\n self.assertTrue(qi.isProductInstalled('plone.app.dexterity'))", "def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('rapido.plone'))", "def HasWOS(self):\n return self.__has('WOS')", "def nw_control_policy_exists(handle, name, cdp=None, mac_register_mode=None, uplink_fail_action=None,\r\n forge=None, lldp_transmit=None, lldp_receive=None, descr=None, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/nwctrl-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n if ((cdp and mo.cdp != cdp) and\r\n (mac_register_mode and mo.mac_register_mode != mac_register_mode) and\r\n (uplink_fail_action and mo.uplink_fail_action != uplink_fail_action) and\r\n (forge and mo.forge != forge) and\r\n (lldp_transmit and mo.lldp_transmit != lldp_transmit) and\r\n (lldp_receive and mo.lldp_receive != lldp_receive) and\r\n (descr and mo.descr != descr)):\r\n return False\r\n return True\r\n return False", "def verify_use_incredibuild_win(ctx, option_name, value):\t\n\tif not _is_user_option_true(value):\n\t\treturn (True,\"\",\"\")\t\n\t(res, warning, error) = _verify_incredibuild_licence('Make && Build Tools Extension Package', 'Windows')\t\n\treturn (res, warning, error)", "def test_product_installed(self):\n self.assertTrue(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )", "def _do_admission_control_check():\n return not sstbf.is_sstbf_configured()", "def _is_installed(self):\n return self._system.exists(os.path.join(self.get_install_path(), \"bin/root\"))", "def is_installed(self):\n return False", "def test_admin_policy() -> None:\n # Make sure it's valid\n POLICY_SCHEMA(system_policies.ADMIN_POLICY)\n\n perms = PolicyPermissions(system_policies.ADMIN_POLICY, None)\n assert perms.check_entity(\"light.kitchen\", \"read\")\n assert perms.check_entity(\"light.kitchen\", \"control\")\n assert perms.check_entity(\"light.kitchen\", \"edit\")", "def test_product_is_uninstalled(self):\n qi = self.portal.portal_quickinstaller\n self.assertFalse(qi.isProductInstalled(PROJECTNAME))", "def is_installed(tool):\n r, o = _is_installed(tool)\n if r != 0:\n return 0\n return 1", "def is_installed() -> bool:\n if platform.system() in (\"Linux\", \"Darwin\"):\n return shutil.which(CMD) is not None\n return Path(DEFAULT_WIN_OPENSCAD_PATH).exists()", "def appNeedsSetup(self, app):\n return app.getLink('setup') and app['configured'] == '0'", "def check_gzweb(ctx):\n return os.path.exists(ctx.get_product_file('bin', 'gzweb'))", "def IsInstalled (namespace: str) -> bool:\n\n\treturn _allMods.get(namespace, None) is not None", "def check_availability(self, proxy):\n if self.db.select_proxy(proxy).fetchone():\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that all product dependencies are installed.
def test_dependencies_installed(self): installer = getToolByName(self.portal, 'portal_quickinstaller') self.assertTrue(installer.isProductInstalled('ContentWellPortlets'))
[ "def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('collective.sassy'))", "def test_get_installs(self):\n pass", "def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled('rapido.plone'))", "def test_product_installed(self):\n self.assertTrue(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )", "def test_post_installs(self):\n pass", "def test_module_dependencies_test_mode(self):\n options = self.desiInstall.get_options(['--test', 'desutil', '1.9.5'])\n self.desiInstall.baseproduct = 'desiutil'\n self.desiInstall.working_dir = join(self.data_dir, 'desiutil')\n self.assertTrue(self.desiInstall.options.test)\n deps = self.desiInstall.module_dependencies()\n self.assertListEqual(self.desiInstall.deps, [])\n self.assertLog(-1, 'Test Mode. Skipping loading of dependencies.')", "def test_product_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('reptheory.policy'))", "def test_get_all_installed():\n path_dirs = test_env.PathDirs()\n invalid_dirs = test_env.PathDirs(vis_dir=\"/tmp/doesntexist\")\n get_status.get_all_installed(path_dirs)\n get_status.get_all_installed(invalid_dirs)", "def assert_requirements_exist(self):\n\n for test in self._tests:\n test.assert_requirements_exist()", "def test_requirement_in_ha_core():\n request = requests.get(\n \"https://raw.githubusercontent.com/home-assistant/home-assistant/dev/setup.py\"\n )\n res = request.text.split(\"REQUIRES = [\")[-1].split(\"]\")[0]\n requirements = {}\n for line in res.split(\"\\n\"):\n if \"=\" in line and not \"#\" in line:\n line = line.split('\"')[1]\n package = line.split(\">\")[0].split(\"=\")[0]\n version = line.split(\"=\")[-1]\n requirements[package] = version\n\n with open(MANIFEST_FILE, \"r\") as manifest_file:\n for line in json.loads(manifest_file.read())[\"requirements\"]:\n package = line.split(\">\")[0].split(\"=\")[0]\n assert package not in requirements", "def test_packages(host):\n\n assert host.package('curl').is_installed", "def test_packages(host):\n\n assert host.package('yarn').is_installed", "def check_installed_packages():\n viki_config = VikiConfig()\n missing_packages = get_missing_packages(viki_config)\n\n if len(missing_packages) > 0:\n print \"[WARNING] - There are missing packages for full VIKI support:\"\n print \"\\n\".join(map((lambda x: x['name']), missing_packages))\n return False\n else:\n print \"[OK] - All ROS package dependencies are met!\"\n print \"Note: only second level dependencies of already installed packages have been checked\"\n return True", "def testPackageInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_INSTALLED)\n\n self.assertTrue(setup_common.PackageInstalled(\"fake_package\"))", "def test_product_installed(self):\n self.assertTrue(self.installer.isProductInstalled(\n 'plonetheme.spot'))", "def test_install_module(self):\n pass", "def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )", "def testRecipeModulesAllPresent(self):\n for recipe in self._recipes_manager.GetRecipes():\n declared_modules = set()\n wanted_modules = set()\n for module in recipe.contents['modules']:\n module_name = module['name']\n runtime_name = module.get('runtime_name', module_name)\n declared_modules.add(runtime_name)\n for wanted in module['wants']:\n wanted_modules.add(wanted)\n\n for wanted_module in 
wanted_modules:\n self.assertIn(wanted_module, declared_modules,\n msg='recipe: {0:s}'.format(recipe.contents['name']))", "def _check_dependencies():\n logger.info('Checking program dependencies ...')\n\n if not which('ruby'):\n logger.warn('Ruby not found')\n logger.info('Running apt-get update ...')\n run('apt-get update')\n logger.info('Installing ruby ...')\n run('apt-get install git-core ruby ruby-dev libopenssl-ruby build-essential wget ssl-cert curl rubygems -y')\n\n # Check if `gem` is available\n if not which('gem'):\n logger.warn('Gem not found')\n logger.info('Installing rubygems ...')\n run('gem install rubygems-update && update_rubygems')\n\n # Check if chef is available\n if not which('chef-solo'):\n logger.warn('chef-solo not found')\n logger.info('Installing Chef ...')\n run('gem install chef --no-ri --no-rdoc')\n\n logger.info('All dependencies is met')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the errormessage of this UnexpectedError.
def errormessage(self): return self._errormessage
[ "def get_api_error_message(self):\n if self.has_error(self.last_json_result[\"EOXRecord\"][0]):\n msg = \"%s (%s)\" % (self.get_error_description(self.last_json_result[\"EOXRecord\"][0]),\n self.last_json_result[\"EOXRecord\"][0]['EOXError']['ErrorID'])\n return msg\n\n return \"no error\"", "def http_error_message(self) -> str:\n return pulumi.get(self, \"http_error_message\")", "def errorMessage(self):\n\n if self.lastError:\n return self.lastError[\"M\"]\n else:\n return \"\"", "def get_errmsg(self):\n return self._errmsg", "def get_error_message(response) -> str:\n if not ErrorHelper.is_error(response):\n return None\n \n message = f\"{response.message} {response.message_detail}\"\n return message", "def error_string(self):\n if 1 <= self._error_reason <= 3:\n reason_string = self._error_reason_strings[self._error_reason-1]\n return reason_string.format(self.error_data)\n else:\n return \"Reason {} Data {}\".format(\n self.error_reason, hexlify(self.error_data))", "def run_error_message(self) -> str:\n return pulumi.get(self, \"run_error_message\")", "def strerror(self):\n return self._parse_error", "def extract_error_message(error):\n try:\n return error.description or error.__class__.__name__\n except AttributeError:\n try:\n return str(error.message) or error.__class__.__name__\n except AttributeError:\n return str(error) or error.__class__.__name__", "def description(self) -> str:\n return self._error_description", "def error_message(exception):\n if sys.version_info[0] < 3:\n return exception.message\n elif len(exception.args):\n return exception.args[0]\n raise Exception(\"Error message was not possible to retrieve due to version compatibility\")", "def get_error_message(result):\r\n\t\tif result is None:\r\n\t\t\treturn 'Invalid result (connection error)'\r\n\t\telif result.has_key('error') and result['error'] > 0:\r\n\t\t\tif result.has_key('message'):\r\n\t\t\t\treturn result['message']\r\n\t\t\telse:\r\n\t\t\t\treturn BtSyncApi.get_error_text(result['error'])\r\n\t\telif result.has_key('result') and result['result'] > 0:\r\n\t\t\tif result.has_key('message'):\r\n\t\t\t\treturn result['message']\r\n\t\t\telse:\r\n\t\t\t\treturn BtSyncApi.get_error_text(result['result'])\r\n\t\telse:\r\n\t\t\treturn 'No error'", "def debug_message(self):\n if not self.exception:\n return None\n return getattr(self.exception, 'debug_message', None)", "def get_error_file_name(self):\n return self.error_file_name", "def error(self) -> str:\n error_file = ErrorFile()\n return f'-e \"{error_file.path}\"'", "def get_error(self):\n\n return (self.error_msg, self.error_state)", "def __get_error(self):\n return self.__frame_error", "def get_custom_error(self):\n\n return self.custom_error", "def get_traceback_message():\n exc_parts = [str(l) for l in sys.exc_info()]\n err_type_parts = str(exc_parts[0]).strip().split('.')\n err_type = err_type_parts[-1].strip(\"'>\")\n tb_data = traceback.format_exc()\n tb_line = tb_data.splitlines()[-3]\n line_num = tb_line.split(',')[1]\n st_data = traceback.extract_stack()\n err_file = os.path.basename(st_data[-1][0])\n msg = 'Error! The {0} program encountered an unrecoverable {1}, {2}, at {3} of {4}!'.\\\n format(cfg_data.prog_name,\n err_type, exc_parts[1], line_num.strip(), err_file)\n return msg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Outputs a time series DataFrame with t, t-1, t-2, t-3, Age, Age-1, Age-2, Age-3 for each player-season. Since it's grouped into player-seasons, players can be seen multiple times.
def combine_playerseasons(): # import name, year, minutes, my_stat dfs = pd.read_excel('NBA_proj.xlsx', sheet_name=[0,1,2,3,4,5,6], usecols='A:B,E,V') # filter for player-seasons with > 500 minutes for k in dfs.keys(): dfs[k] = dfs[k][dfs[k]['Minutes'] > 500] # concat everything df = pd.concat([dfs[0], dfs[1], dfs[2], dfs[3], dfs[4], dfs[5], dfs[6]]).reset_index().drop('index', axis=1) df = df.rename({'my_stat': 't'}, axis=1) # add lags df['t-1'] = df.apply(lambda row: df.t[(df.Name == row.Name) & (df.Year == row.Year - 1)].values, axis=1) df['t-2'] = df.apply(lambda row: df.t[(df.Name == row.Name) & (df.Year == row.Year - 2)].values, axis=1) df['t-3'] = df.apply(lambda row: df.t[(df.Name == row.Name) & (df.Year == row.Year - 3)].values, axis=1) # convert np arrays to values and NaNs for i,row in df.iterrows(): if row['t-1'].size != 0: df.loc[i,'t-1'] = row['t-1'].item(0) else: df.loc[i,'t-1'] = np.nan if row['t-2'].size != 0: df.loc[i,'t-2'] = row['t-2'].item(0) else: df.loc[i,'t-2'] = np.nan if row['t-3'].size != 0: df.loc[i,'t-3'] = row['t-3'].item(0) else: df.loc[i,'t-3'] = np.nan # add age, age-1, age-2, age-3 age_df = get_player_ages() df['Age'] = np.zeros(len(df.Name)) missing_ages = [] j = 1 for i,row in df.iterrows(): try: # 'Age' is from 19-20 season and corresponds to 't' df.loc[i,'Age'] = age_df.loc[row.Name,'Current_Age'] - (2020 - row.Year) except KeyError: if row.Year == 2019: missing_ages.append(row.Name) print("{}. Error getting Age for {}".format(j, row.Name)) j += 1 df.loc[i,'Age'] = np.nan print("MISSING AGES: ") for i,name in enumerate(missing_ages): print("{}. {}".format(i,name)) df['Age-1'] = df.Age - 1 df['Age-2'] = df.Age - 2 df['Age-3'] = df.Age - 3 # transform age based on age curve formula; use delta to current year from last year #age_curve_df = pd.read_csv('age_curve.csv').set_index('ages') df['tAge'] = (-40.9 + 3.78*df.Age - 0.11*df.Age**2 + 0.001*df.Age**3) \ - (-40.9 + 3.78*(df.Age-1) - 0.11*(df.Age-1)**2 + 0.001*(df.Age-1)**3) df['tAge-1'] = (-40.9 + 3.78*(df.Age-1) - 0.11*(df.Age-1)**2 + 0.001*(df.Age-1)**3) \ - (-40.9 + 3.78*(df.Age-2) - 0.11*(df.Age-2)**2 + 0.001*(df.Age-2)**3) df['tAge-2'] = (-40.9 + 3.78*(df.Age-2) - 0.11*(df.Age-2)**2 + 0.001*(df.Age-2)**3) \ - (-40.9 + 3.78*(df.Age-3) - 0.11*(df.Age-3)**2 + 0.001*(df.Age-3)**3) df['tAge-3'] = (-40.9 + 3.78*(df.Age-3) - 0.11*(df.Age-3)**2 + 0.001*(df.Age-3)**3) \ - (-40.9 + 3.78*(df.Age-4) - 0.11*(df.Age-4)**2 + 0.001*(df.Age-4)**3) print(df.head(10)) print(df.tail(10)) print(df.shape) df.to_csv('player-seasons.csv') return df
[ "def player_season_stats(player_name, player_id):\n\n try:\n player_gamelog = playergamelog.PlayerGameLog(player_id=str(player_id), season='2020',\n season_type_all_star='Regular Season')\n except:\n raise Exception(f'Failed to get data on player {player_name}')\n sleep(0.25)\n temp = required_stats.copy()\n temp.extend(['GAME_DATE', 'Player_ID'])\n data = player_gamelog.get_data_frames()[0][temp]\n\n return data # return data as df which will be added to another larger df as a dictionary", "def query_team_data(self, seasons: List[int], params: Tuple[Any, ...]) -> pd.DataFrame:\n df = pd.read_sql(\"\"\"\n SELECT f.id, f.date, f.season, f.league, f.homeTeamID, f.awayTeamID,\n t1.name AS home, t2.name AS away, f.home_goals, f.away_goals, f.winner,\n ts.rating, ts.goals, ts.errors, ts.red_cards, ts.shots, f.oddsDC_1X, f.oddsDC_X2\n FROM TeamStats ts\n JOIN Fixtures f ON f.id = ts.fixtureID \n JOIN Teams t1 ON f.homeTeamID = t1.id\n JOIN Teams t2 ON f.awayTeamID = t2.id\n WHERE ts.teamID = ? AND (f.homeTeamID = ? OR f.awayTeamID = ?) AND\n f.season IN ({})\n ORDER BY f.date, f.id\n \"\"\".format(\",\".join(\"?\" * len(seasons))),\n self.conn, params=params)\n\n return df", "def process_season_matches(season_matches_df):\n\n def expand_raw_fields(row):\n row_data = dict()\n row_data['awayTeamName'] = row.awayTeam['name']\n row_data['homeTeamName'] = row.homeTeam['name']\n row_data['matchId'] = row.id\n row_data['matchDateTime'] = row.utcDate\n row_data['homeScore'] = row.score['fullTime']['homeTeam']\n row_data['awayScore'] = row.score['fullTime']['awayTeam']\n row_data['matchDay'] = row.matchday\n row_data['season'] = row.season\n row_data['competition'] = row.competitionName\n\n return row_data\n\n def create_table_records(row):\n home_row_data = dict()\n home_row_data['teamName'] = row.homeTeamName\n home_row_data['homeOrAway'] = 'home'\n home_row_data['goalsFor'] = row.homeScore\n home_row_data['goalsAgainst'] = row.awayScore\n home_row_data['matchDay'] = row.matchDay\n home_row_data['matchId'] = row.matchId\n home_row_data['goalDiff'] = row.homeScore - row.awayScore\n home_row_data['played'] = 1\n home_row_data['season'] = row.season\n home_row_data['competition'] = row.competitionName\n\n if home_row_data['goalDiff'] > 0:\n points = 3\n home_row_data['gamesWon'] = 1\n elif home_row_data['goalDiff'] == 0:\n points = 1\n home_row_data['gamesDrawn'] = 1\n else:\n points = 0\n home_row_data['gamesLost'] = 1\n\n home_row_data['points'] = points\n\n # repeat for away team\n away_row_data = dict()\n away_row_data['teamName'] = row.awayTeamName\n away_row_data['homeOrAway'] = 'away'\n away_row_data['goalsFor'] = row.awayScore\n away_row_data['goalsAgainst'] = row.homeScore\n away_row_data['matchDay'] = row.matchDay\n away_row_data['matchId'] = row.matchId\n away_row_data['goalDiff'] = row.awayScore - row.homeScore\n away_row_data['played'] = 1\n away_row_data['season'] = row.season\n away_row_data['competition'] = row.competitionName\n\n if away_row_data['goalDiff'] > 0:\n points = 3\n away_row_data['gamesWon'] = 1\n elif away_row_data['goalDiff'] == 0:\n points = 1\n away_row_data['gamesDrawn'] = 1\n else:\n points = 0\n away_row_data['gamesLost'] = 1\n\n away_row_data['points'] = points\n\n return [home_row_data, away_row_data]\n\n expanded_df_dict = season_matches_df.apply(expand_raw_fields, axis=1)\n expanded_df = pd.DataFrame.from_records(expanded_df_dict)\n expanded_df['matchDateTime'] = pd.to_datetime(expanded_df.matchDateTime)\n\n table_df_deep_list = 
expanded_df.apply(create_table_records, axis=1)\n table_df_flat_list = [l for sublist in table_df_deep_list for l in sublist]\n table_df = pd.DataFrame.from_records(table_df_flat_list)\n\n grouped_table_df = table_df.groupby(['matchDay', 'teamName']).max().groupby('teamName').cumsum()\n\n return expanded_df, table_df, grouped_table_df", "def get_team_df(df):\n team_df = None\n # get all team names as list\n teams = df['team'].drop_duplicates().to_list()\n # print(teams)\n\n # create temp df to sort by only that team\n for team in teams:\n temp_team_df = df[(df['team'] == team)]\n dates = temp_team_df['date'].drop_duplicates().to_list()\n\n # for each unique date, create another temp df\n for date in dates:\n # sum up all stats on date, store into team_df\n date_df = temp_team_df[(temp_team_df['date'] == date)]\n # print(date_df.iloc[0])\n d = {key: [date_df[key].sum()] for key in constants.ScatterFilters.team_y_keys}\n temp_series = date_df.iloc[0]\n d['opponent'] = temp_series['opponent']\n d['outcome'] = temp_series['outcome']\n d['location'] = temp_series['location']\n # print(d)\n temp_df = pd.DataFrame(d, index=[team])\n temp_df['date'] = [date]\n # temp_player = date_df.iteritems()[0]\n\n if team_df is None:\n team_df = temp_df\n else:\n team_df = pd.concat([temp_df, team_df])\n\n # print(team_df.shape)\n # print(team_df.head(10))\n return team_df", "def query_fixtures_data(self, seasons: List[int]) -> pd.DataFrame:\n df = pd.read_sql(\"\"\"\n SELECT f.id, f.date, f.season, f.league, \n t1.name AS home, t2.name AS away, f.home_goals, f.away_goals, \n f.oddsDC_1X AS home_odds_wd, f.oddsDC_X2 AS away_odds_wd,\n ts1.rating AS home_rating, ts2.rating AS away_rating,\n ts1.errors AS home_errors, ts2.errors AS away_errors, \n ts1.red_cards AS home_red_cards, ts2.red_cards AS away_red_cards,\n ts1.shots AS home_shots, ts2.shots AS away_shots\n FROM Fixtures f\n JOIN Teams t1 ON f.homeTeamID = t1.id\n JOIN Teams t2 ON f.awayTeamID = t2.id\n JOIN TeamStats ts1 ON f.homeStatsID = ts1.id\n JOIN TeamStats ts2 ON f.awayStatsID = ts2.id\n WHERE f.season IN ({})\n ORDER BY f.date, f.id\n \"\"\".format(\",\".join(\"?\" * len(seasons))),\n self.conn, params=seasons)\n\n return df", "def test_player_season_stats_by_player(self):\n pass", "def get_player_regular_season_stats(self) -> list:\n # TODO: finish building method to return list of dicts\n player_stats = []\n player_ids = self._get_player_ids()\n raw_player_data = self._get_raw_data()", "def test_player_season_stats_by_team(self):\n pass", "def spew_season(player_name, row):\n print(f'{player_name}')", "def get_pts_game():\n nba_stats_url = \"https://stats.nba.com/stats/leagueLeaders?LeagueID=00&PerMode=PerGame&Scope=S&Season=2017-18&SeasonType=Regular+Season&StatCategory=PTS\"\n pts_game_dict = get(nba_stats_url).json()\n players = [pts_game_dict[\"resultSet\"][\"rowSet\"][i][2] for i in range(len(pts_game_dict[\"resultSet\"][\"rowSet\"]))]\n pts_game = [pts_game_dict[\"resultSet\"][\"rowSet\"][i][22] for i in range(len(pts_game_dict[\"resultSet\"][\"rowSet\"]))]\n df = pd.DataFrame()\n df[\"player\"] = players\n df[\"pts_game\"] = pts_game\n return df", "def test_player_season_stats(self):\n pass", "def get_all_data():\n character_df = get_character_df().T\n season_df = get_season_df()\n df = character_df.join(season_df['season'], how='inner')\n df['episode'] = df.index\n df = df.set_index(['season', 'episode'], drop=True)\n\n total_counts = df.sum(axis=0).sort_values(ascending=False)\n total_counts = 
total_counts[~total_counts.index.isin(LINES_TO_SKIP)]\n df = df.loc[:, total_counts.index]\n df = df.fillna(0).astype(np.int)\n return df", "def get_player_stats_for_season(self, player_key):\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/league/\" + self.get_league_key() + \"/players;player_keys=\" +\n str(player_key) + \"/stats\", [\"league\", \"players\", \"0\", \"player\"], Player)", "def simulate_seasons(df, n, hash: str, **kwargs):\n index_cols = [\"player\", \"team\", \"pos\"]\n\n ppg_cache = kwargs.get(\"cache\", f\"simulation_cache_ppg_{hash}.csv\")\n games_cache = kwargs.get(\"cache\", f\"simulation_cache_games_{hash}.csv\")\n if path.isfile(ppg_cache) and path.isfile(games_cache):\n games_df = pd.read_csv(games_cache, index_col=index_cols)\n ppg_df = pd.read_csv(ppg_cache, index_col=index_cols)\n if len(games_df.columns) == len(ppg_df.columns) >= n:\n logging.info(\"Loading simulations from cache\")\n return ppg_df, games_df\n else:\n logging.info(\"Simulation cache does not have sufficient iterations.\")\n # seed based on randomness\n np.random.seed()\n\n # the \"n\" in the binomial drawing\n max_games = df[\"g\"].to_numpy(dtype=int)\n # the \"p\" in the binomial drawing\n frac_games = np.vectorize(lambda pos: pos_games_available[pos] / games_in_season)(\n df.index.get_level_values(\"pos\")\n )\n # compute the alpha and beta parameters for the beta-binomial distribution\n # Assume that alpha and beta are on the order of 1, which is close to the value\n # observed for most positions (1-2)\n alphas = frac_games * 1.5\n # we need an epsilon so that beta > 0\n betas = (1 - frac_games) * 1.5 + 1e-8\n\n logging.info(\"Simulating %s seasons...\", n)\n sim_tags = [str(i_sim) for i_sim in range(n)]\n sim_games = pd.DataFrame(index=df.index, columns=sim_tags, dtype=int)\n sim_ppg = pd.DataFrame(index=df.index, columns=sim_tags, dtype=float)\n # TODO: instead of looping, can we use the size parameter in scipy?\n # This would generate a 2D array, which we'd have to put into columns\n # There are difficulties broadcasting over alpha/beta and size at the same time\n for n_sim in sim_tags:\n # the fraction is drawn from a beta distribution\n ps = st.beta.rvs(alphas, betas)\n games = st.binom.rvs(max_games, ps)\n sim_games[n_sim] = games\n x_fields = [\"exp_proj_low\", \"exp_proj\", \"exp_proj_high\"]\n # TODO: Reconsider this confidence interval. It's motivated by equally\n # distributing ~5 experts over the quantile, but this is a pretty arbitrary\n # choice.\n ps = [0.2, 0.5, 0.8]\n for (idx, xlow, xmid, xhi) in df[x_fields].itertuples(name=None):\n # Some of the data are incomplete on the edges. 
Add a little buffer to\n # make the distributions nice.\n xlow = min(xlow, xmid - 10.)\n # xlow = min(xlow, max(xmid - 10., 0))\n xhi = max(xhi, xmid + 10.)\n assert xlow <= xmid <= xhi\n\n points_dist = MetaLogistic(cdf_xs=[xlow, xmid, xhi], cdf_ps=ps)\n points = points_dist.rvs(size=n)\n # NOTE: This is terrible performance, do something better\n sim_ppg.loc[idx, sim_tags] = points\n # the imported projections assume that players will not miss time, so\n # divide by the max possible games.\n sim_ppg[sim_tags] /= np.expand_dims(max_games, axis=-1)\n sim_games.to_csv(games_cache)\n sim_ppg.to_csv(ppg_cache)\n return sim_ppg, sim_games", "def get_player_stats(self):\n\n\n return pd.concat([player.get_stats() for player in self.players],axis=0,sort=True).reset_index(drop=True)", "def read_games(self):\n\n urlmask = 'http://www.football-data.co.uk/mmz4281/{}/{}.csv'\n filemask = 'MatchHistory_{}_{}.csv'\n col_rename = {\n 'Div': 'league',\n 'Date': 'date',\n 'HomeTeam': 'home_team',\n 'AwayTeam': 'away_team',\n }\n\n df_list = []\n current_season_ends = str(date.today().year)[-2:]\n for lkey, skey in itertools.product(self._selected_leagues.values(),\n self.seasons):\n filepath = Path(datadir(), filemask.format(lkey, skey))\n url = urlmask.format(skey, lkey)\n current_season = skey[-2:] >= current_season_ends\n if current_season or (not filepath.exists()):\n self._download_and_save(url, filepath)\n\n df_list.append(\n pd.read_csv(str(filepath),\n parse_dates=['Date'],\n infer_datetime_format=True,\n dayfirst=True,\n encoding='UTF-8',\n )\n .assign(season=skey)\n )\n\n df = (\n pd.concat(df_list)\n .rename(columns=col_rename)\n .pipe(self._translate_league)\n .replace({'home_team': TEAMNAME_REPLACEMENTS,\n 'away_team': TEAMNAME_REPLACEMENTS})\n .dropna(subset=['home_team', 'away_team'])\n )\n\n df['game_id'] = df.apply(self._make_game_id, axis=1)\n df.set_index(['league', 'season', 'game_id'], inplace=True)\n df.sort_index(inplace=True)\n return df", "def _add_team_stats(df_player_stats: pd.DataFrame, df_team_stats: pd.DataFrame) -> pd.DataFrame:\n logging.info('Enriching player stats with team stats...')\n df_team_stats = df_team_stats.rename(columns={\n column: \"team_\" + column for column in df_team_stats if column not in ('team', 'week', 'year')\n })\n return df_player_stats.merge(df_team_stats, how='left', on=['team', 'week', 'year'])", "def test_team_season_stats(self):\n pass", "def get_streaks(df):\n df = df.sort_values(by=[\"season\", \"team\", \"spieltag\"])\n df[\"5gamesnogoal\"] = (\n (df[\"goals_for\"] == 0)\n & (df[\"goals_for\"].shift(1) == 0)\n & (df[\"goals_for\"].shift(2) == 0)\n & (df[\"goals_for\"].shift(3) == 0)\n & (df[\"goals_for\"].shift(4) == 0)\n )\n df.loc[df[\"season\"].shift(4) != df[\"season\"], \"5gamesnogoal\"] = False\n print(\n \"Streaks of five goalless games in a row: \\n\".format(\n df[[\"team\", \"season\", \"spieltag\"]][df[\"5gamesnogoal\"]]\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a message about what I'm learning.
def display_message():
    msg = "I'm learning to store code in functions."
    print(msg)
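A one-line usage sketch (assuming the function above is defined in the current module):

display_message()  # prints: I'm learning to store code in functions.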
[ "def print_greeting():\n print(\n \"\\nHi there! \\nI can help you figure out the notes of a scale or mode of your choice!\"\n )", "def greet_user(self):\n print(\"Greetings \" + self.f_name.title() + \" \" + self.l_name.title() + \" we hope you enjoy your stay with us!\")", "def greet_user(self):\n print(\"\\nHello, \" + self.full_name.title() + \"!\")", "def display_help_about():\n showinfo(\"Help about.\", \"Password checker version 1.1\")", "def greeting(self):\n #############################################################################\n # TODO: Write a short greeting message #\n #############################################################################\n\n greeting_message = \"Hi my name's Celeste! If you're ready to find your next favorite movie, tell me about some movie you've seen.\"\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return greeting_message", "def learn():\n\treturn render_template(\"learn.html\")", "def printInfo(self):\r\n\r\n about = \"Student name is {0}, {1}, and {2} is taking {3}.\".format(\r\n self.lastName, self.firstName, self.pronoun, len(self._courseList))\r\n\r\n print(about)", "def display_intro():\n print(\"\\n\")\n print(\"*\"*120)\n print(\"\"\"\n Welcome to Number Guessing Game! We will generate a number (1-10) for you to guess until you\n get the correct answer. After each guess we will tell you if the random number is LOWER, HIGHER \n or CORRECT. Good luck and have fun!\n \"\"\")\n print(\"*\"*120)\n print(\"\\n\")", "def tell_story(self):\n name = self.name_entry.get()\n verb = self.verb_entry.get()\n noun = self.noun_entry.get()\n\n places = \"\"\n if self.castle.get():\n places += \"castle, \"\n if self.mountain_temple.get():\n places += \"mountain temple, \"\n if self.beach_cottage.get():\n places += \"beach cottage. \"\n\n # create the story\n story = \"There was a princess called \"\n story += name\n story += \" She lived in a \"\n story += places\n story += \"where she \"\n story += verb\n story += \" and keep \"\n story += noun\n\n # show the story\n self.tell_story_txt.delete(0.0, END)\n self.tell_story_txt.insert(0.0, story)", "def lab3():\n\tsayHello()\n\tsayHelloHelp(\"jsl\")", "def welcome():\r\n print(''' _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/''')", "def show_about_ninja(self):\r\n about = about_ninja.AboutNinja(self)\r\n about.show()", "def tutorial():\n\tprint \"King's Decision puts you in the position of a king forced to make quick choices.\"\n\tprint \"You will be presented with a number of random situations and given a number of \"\n\tprint \"choices to choose from. You will have 15 seconds to make a snap decision; if you \"\n\tprint \"fail to come to a decision, you will automatically choose to behead the person presenting \"\n\tprint \"the case, much to the chagrin of your court and subjects. If you do this twice, the people \"\n\tprint \"will revolt and kill you.\"\n\tprint \"\\n\"\n\tprint \"The goal is to come to prudent, informed, and honorable decisions. Bad decisions will\"\n\tprint \"bring consequences, such as growing unrest among the people. 
If you are able to make\"\n\tprint \"good decisions five times in a row, you will win the title of 'the Great', and win the game.\"\n\tprint \"Best of luck to you, the king!\"\n\ttime.sleep(5)\n\traw_input(\"Press any key to begin the game.\")\n\tgame_start()", "def show_help_topic(self):\n \n pass", "def display_question(self):\n print(self.question)\n print(\"\")", "def display_message(text):\n\n clear_shell()\n print figlet.renderText(text)\n sleep(.75)\n clear_shell()", "def tell_story(self):\n # get values from the GUI\n person = self.person_ent.get()\n noun = self.noun_ent.get()\n verb = self.verb_ent.get()\n adjectives = \"\"\n if self.is_itchy.get():\n adjectives += \"нетерпеливое, \"\n if self.is_joyous.get():\n adjectives += \"радостное, \"\n if self.is_electric.get():\n adjectives += \"пронизывающее, \"\n body_part = self.body_part.get()\n\n # create the story\n story = \"Изветсный исследователь \"\n story += person\n story += \" уже отчаялся завершить дело своей жизни - поиск затерянного города \"\n story += noun.title()\n story += \" пока в один день \"\n story += noun\n story += \" не нашел \"\n story += person + \". \"\n story += \"Мощное \"\n story += adjectives\n story += \"ни с чем не сравнимое чувство. \"\n story += \"После стольких лет поиска цель наконец была достигнута \"\n story += person\n story += ' ощутил как на его ' + body_part + \" скатилась слеза. \"\n story += \" Затем \"\n story += noun\n story += \" перешли в атаку \"\n story += person + \". \"\n story += \" Мораль истории? Если задумали\"\n story += verb\n story += \" будьте осторожны.\"\n\n # display the story\n self.story_txt.delete(0.0, END)\n self.story_txt.insert(0.0, story)", "def tell_user_command_was_not_understood(self):\n self.speak(\"Sorry, I don't understand what you want.\", True)", "def help_chuck(self):\n print_say(\"Tell a joke about Chuck Norris\", self)", "def alert():\n showinfo(\"A propos\", \"Jeu crée par Sanjeevan et Enrick\\n\\nProjet M1106 - Année 2019/2020\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple containing start/end strings of offering (UTC).
def getTimeIntervalOffering(self):
    codec16 = QtCore.QTextCodec.codecForName("UTF-16")
    start = unicode(codec16.fromUnicode(self.lblStartTime.text()), 'UTF-16')
    end = unicode(codec16.fromUnicode(self.lblEndTime.text()), 'UTF-16')
    #print "SOS:234", type(start), start, end
    return (start, end)
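A minimal standalone sketch of the same QString-to-unicode round trip, assuming Python 2 with PyQt4 (API v1, where QtCore.QString exists); the sample timestamp is hypothetical, only the codec pattern comes from the method above:

from PyQt4 import QtCore

codec16 = QtCore.QTextCodec.codecForName("UTF-16")
qstring = QtCore.QString(u"2010-01-01T00:00:00Z")  # hypothetical label text
# fromUnicode() yields a UTF-16 QByteArray; unicode() decodes it back into a
# plain Python unicode string, which is what the method above returns.
text = unicode(codec16.fromUnicode(qstring), 'UTF-16')
print repr(text)  # should show u'2010-01-01T00:00:00Z'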
[ "def get_str_dates(self):\r\n start_str = self.start_datetime.strftime(\"%B %d, %Y\")\r\n end_str = self.end_datetime.strftime(\"%B %d, %Y\")\r\n return start_str + \" - \" + end_str", "def get_start_end_info(info):\n starttime = None\n stoptime = None\n startdate = None\n stopdate = None\n for line in info[0].decode(\"utf-8\").splitlines():\n if line.find(\"Start_Time\") != -1:\n starttime = line.split('=')[1]\n if line.find(\"End_Time\") != -1:\n stoptime = line.split('=')[1]\n if line.find(\"Start_Date\") != -1:\n startdate = line.split('=')[1]\n if line.find(\"End_Date\") != -1:\n stopdate = line.split('=')[1]\n return starttime, startdate, stoptime, stopdate", "def get_start_and_end(self):\n\n output = {\"start\": None, 'end': None}\n ev = CalendarEvent.objects.filter(id=self.id)\n ev = ev.annotate(start_time=Min('eventshift__start_time'))\n e = ev.annotate(end_time=Max('eventshift__end_time'))\n output['start'] = e[0].start_time\n output['end'] = e[0].end_time\n return output", "def getTimeInfo(self):\n\n txt = [\n f'SPC DAY {self.currentDay} {self.outlookType.upper()} OUTLOOK',\n f'ISSUED: {self.issued}',\n f'VALID: {self.start} - {self.end}']\n return '\\n'.join( txt )", "def get_timestring(self):\n date = DateFormat(self.date).format(\"jS F Y\")\n start = DateFormat(self.start_time).format(\"P\")\n end = DateFormat(self.end_time).format(\"P\")\n\n return f\"{date}, {start} to {end}\"", "def _prepare_scheduling_start_and_end(\n start: Any, end: Any, timezone: str\n) -> Tuple[pendulum.datetime, Optional[pendulum.datetime]]:\n timezone = timezone or \"UTC\"\n\n if start is not None:\n start = pendulum.instance(start).in_tz(timezone)\n\n if end is not None:\n end = pendulum.instance(end).in_tz(timezone)\n\n return start, end", "def get_str(self):\r\n start_str = self.start_time.strftime(\"%I:%M %p\")\r\n end_str = self.end_time.strftime(\"%I:%M %p\")\r\n return self.WEEKDAY_TO_STR[self.weekday] + \" \" + start_str + \" - \" + end_str", "def get_date_string(self):\n start = self.start_date.strftime(\"%m/%d/%Y\")\n end = self.end_date.strftime(\"%m/%d/%Y\")\n return (start + \" - \" + end)", "def get_coverage_time(self) -> tuple[str, str] | None:\n attr_start = self.get_attr('time_coverage_start')\n if attr_start is None:\n return None\n\n attr_end = self.get_attr('time_coverage_end')\n if attr_end is None:\n return None\n\n # pylint: disable=no-member\n return (attr_start.decode('ascii'),\n attr_end.decode('ascii'))", "def get_start_end_info_from_xml(self, raw_xml):\n\n xml_root = ElementTree.fromstring(raw_xml)\n\n time_start_list = xml_root.findall('.//Attribute[@Name=\"time_coverage_start\"]')\n if len(time_start_list) > 0:\n if len(time_start_list) > 1:\n print(\"Encountered more than 1 time_coverage_start tag. Using 1st value.\")\n start = self.get_time_coverage_xml(time_start_list[0])\n else:\n time_start_list = xml_root.findall('.//Attribute[@Name=\"Scene Start time\"]')\n if len(time_start_list) > 1:\n print(\"Encountered more than 1 Scene Start time tag. Using 1st value.\")\n start_str = self.get_time_coverage_xml(time_start_list[0])\n start = self.get_goci_time(start_str)\n\n time_end_list = xml_root.findall('.//Attribute[@Name=\"time_coverage_end\"]')\n if len(time_end_list) > 0:\n if len(time_end_list) > 1:\n print(\"Encountered more than 1 time_coverage_end tag. 
Using 1st value.\")\n stop = self.get_time_coverage_xml(time_end_list[0])\n else:\n time_end_list = xml_root.findall('.//Attribute[@Name=\"Scene end time\"]')\n if len(time_end_list) > 1:\n print(\"Encountered more than 1 Scene end time tag. Using 1st value.\")\n stop_str = self.get_time_coverage_xml(time_end_list[0])\n stop = self.get_goci_time(stop_str)\n return start, stop", "def __parse_start_end_dates(self):\r\n year_string_format = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2}')\r\n\r\n self.start_date = \"&startDT=\"+self.date_range[0]\r\n self.end_date = \"&endDT=\"+self.date_range[-1]", "def get_start_end(*rds):\n meta = get_session_metadata(*rds)\n return meta['start'], meta['end']", "def getDateList(start, end=None):\n\n start_date_time = datetime.strptime(start, \"%Y%m%d\")\n if end is None:\n oneday = timedelta(days=1)\n end_date_time = start_date_time + oneday\n end = end_date_time.strftime(\"%Y%m%d\")\n return start, end\n else:\n end_date_time = datetime.strptime(end, \"%Y%m%d\")\n delta = (end_date_time - start_date_time).days\n return [(start_date_time + timedelta(days=ii)).strftime(\"%Y%m%d\") for ii in xrange(0, delta + 1)][:-1]", "def calculateStartTimeAndEndTimes():\n\n utcOffset = get_utc_offset()\n workingStart = config[\"workingStart\"]\n workingFinish = config[\"workingFinish\"]\n utc_now = utc.localize(datetime.utcnow())\n workingStart = workingStart.split(\":\")\n workingFinish = workingFinish.split(\":\")\n workStart = utc_now.replace(\n hour=int(workingStart[0]), minute=int(workingStart[1]), second=0, microsecond=0,\n )\n workFinish = utc_now.replace(\n hour=int(workingFinish[0]),\n minute=int(workingFinish[1]),\n second=0,\n microsecond=0,\n )\n utcDelta = timedelta(minutes=utcOffset)\n workStart = workStart - utcDelta\n workFinish = workFinish - utcDelta\n\n sunriseDateTime, sunsetDateTime = getSunriseSunsetDateTimes()\n\n return max(workStart, sunriseDateTime), min(workFinish, sunsetDateTime)", "def get_changing_times2(recfile):\n times = recfile[0][1]\n startings = [t[0] for t in times]\n endings = [t[1] for t in times]\n return startings, endings", "def get_dates(self):\n data = self.cleaned_data\n if data['this_year'] and not data['marking_period']:\n start = SchoolYear.objects.get(active_year=True).start_date\n end = SchoolYear.objects.get(active_year=True).end_date\n elif not data['this_year'] and not data['all_years']:\n start = data['date_begin']\n end = data['date_end']\n elif data['marking_period']:\n start = data['marking_period'].all().order_by('start_date')[0].start_date\n end = data['marking_period'].all().order_by('-end_date')[0].end_date\n else: # all of time\n start = date(1980, 1, 1)\n end = date(2980, 1, 1)\n return (start, end)", "def parse_start_end(start, end):\n if start == \"Leave empty for start\" and end == \"Leave empty for end\":\n return slice(0, None), 0\n elif start == \"Leave empty for start\" and end != \"Leave empty for end\":\n return slice(0, int(end)), 0\n elif start != \"Leave empty for start\" and end == \"Leave empty for end\":\n return slice(int(start), None), int(start)\n else: # start != \"Leave empty for start\" and end != \"Leave empty for end\":\n return slice(int(start), int(end)), int(start)", "def get_calendar_names() -> list[str]:\n ...", "def get_peak_so_data(self):\n return (self.price_zone_code, self.hour_start, self.hour_end)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the selection in a PyQt4 list box, based on a list of text items.
def set_listbox(box, text_list):
    if not isinstance(text_list, list):
        text_list = [text_list, ]
    result = False  # nothing matched/selected yet
    for text in text_list:
        items = box.findItems(text, QtCore.Qt.MatchFixedString)
        if items:
            item = items[0]
            box.setItemSelected(item, True)
            result = True
    return result
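A minimal usage sketch, assuming PyQt4; the QApplication scaffolding and the item strings are hypothetical, only set_listbox() itself comes from the snippet above:

import sys
from PyQt4 import QtGui, QtCore

app = QtGui.QApplication(sys.argv)
box = QtGui.QListWidget()
box.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
box.addItems(["temperature", "rainfall", "wind_speed"])

# Select two items by their display text; returns True if any text matched.
matched = set_listbox(box, ["rainfall", "wind_speed"])
print matched  # True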
[ "def indexFromItem(self, QListWidgetItem): # real signature unknown; restored from __doc__\n pass", "def listItemSelected(self, index):\n self.selectedTitle = self.listBox.getSelectedItem()\n if self.selectedTitle == \"\":\n self.outputArea.setText(\"\")\n else:\n self.outputArea.setText(str(self.database[self.selectedTitle]))", "def set_irga_li7500a(self):\n idx = self.view.selectedIndexes()[0]\n selected_item = idx.model().itemFromIndex(idx)\n parent = selected_item.parent()\n parent.child(selected_item.row(), 1).setText(\"Li-7500A\")", "def set_current(self):\n\n if self.content.size() <= 0:\n return # Non item in listbox\n if self.index < 0:\n self.index = 0\n self.content.selection_clear(0, tk.END)\n self.content.selection_set(self.index)\n self.current.delete(\"1.0\", tk.END)\n self.current.insert(tk.END, self.content.get(self.index))\n self.current.focus()", "def set_items(self, str_items):\n # wx does not emit events on SetItems or SetSelection, so we\n # do not need any feedback guards here.\n self.widget.SetItems(str_items)\n self.widget.SetSelection(self.shell_obj.index)", "def lista_listwidget(self):\n\n for item in self.lista:\n self.listwidget.addItem(item)", "def editChildList(self, textList):\n if len(textList) == len(self.childList):\n # assume rename if length is same\n for child, text in zip(self.childList, textList):\n if child.title() != text:\n child.setTitle(text, True)\n globalref.updateViewItem(child)\n else: # find new child positions if length differs\n oldLen = len(self.childList)\n nodeFormat = self.nodeFormat()\n newType = globalref.docRef.treeFormats.get(nodeFormat.childType,\n None)\n if not newType:\n newType = oldLen and self.childList[0].nodeFormat() or \\\n nodeFormat\n globalref.docRef.undoStore.addChildListUndo(self)\n newChildList = []\n for text in textList:\n try:\n newChildList.append(self.childList.pop(\\\n [child.title() for child in self.childList].index(text)))\n except ValueError:\n newItem = TreeItem(self, newType.name)\n newItem.setInitDefaultData()\n newItem.setTitle(text)\n newItem.setUniqueID(True)\n newChildList.append(newItem)\n if oldLen == 0 and newChildList:\n self.open = True\n for child in self.childList:\n child.parent = None\n self.childList = newChildList\n globalref.updateLeftView()\n globalref.docRef.modified = True", "def set_items(self, items):\n widget = self.widget\n sel = widget.GetCurrentSelection()\n widget.SetItems(items)\n widget.SetSelection(sel)", "def set_index(self, index: int) -> None:\n self.combo.setCurrentIndex(index)", "def setSelectedIndex(menu,index):\n\tassertMenu(menu)\n\tassert type(index) is int\n\tassert index >= 0 and index < len(menu[\"buttonList\"]),\"Index out of range. 
Tried is : %r and it have to be in [0,%r]\" % (index,len(menu[\"buttonList\"])-1)\n\tmenu[\"currentIndex\"] = index\n\tmenu[\"lastIndex\"] = index", "def __onListEdit(self, ev):\n ev.data.label = ev.label", "def textScrollList(string, allowMultiSelection=bool, height=int, allItems=bool, defineTemplate=\"string\", docTag=\"string\", numberOfItems=bool, numberOfPopupMenus=bool, useTemplate=\"string\", append=\"string\", highlightColor=float, deselectIndexedItem=int, deselectAll=bool, selectItem=\"string\", doubleClickCommand=\"string\", numberOfRows=int, dragCallback=\"string\", deleteKeyCommand=\"string\", parent=\"string\", annotation=\"string\", enable=bool, deselectItem=\"string\", preventOverride=bool, lineFont=int, popupMenuArray=bool, uniqueTag=\"string\", selectUniqueTagItem=\"string\", appendPosition=int, font=\"string\", exists=bool, removeItem=\"string\", enableBackground=bool, showIndexedItem=int, visibleChangeCommand=\"string\", visible=bool, selectIndexedItem=int, fullPathName=bool, dropCallback=\"string\", numberOfSelectedItems=bool, selectCommand=\"string\", noBackground=bool, removeAll=bool, backgroundColor=float, allowAutomaticSelection=bool, manage=bool, removeIndexedItem=int, width=int, isObscured=bool):\n pass", "def move_index(self, index):\n\n if not self.ff_list is self.parent.focus_get():\n self.ff_list.focus()\n\n self.ff_list.activate(index)\n self.ff_list.selection_clear(0, END)\n self.ff_list.selection_set(index)\n self.ff_list.see(index)\n\n self.update_image(0)", "def set(self, index: 'int const', item: 'SoType') -> \"void\":\n return _coin.SoTypeList_set(self, index, item)", "def set_item(self, index, new_item):\n row = index.row() if hasattr(index, \"row\") else index\n self.collection[row] = new_item\n self.dataChanged.emit(self.index(\n row, 0), self.index(row, self.rowCount() - 1))", "def add_element_in_q_list_widget(list_widget, element):\n item = QListWidgetItem() # Create instance of list item for QListWidget\n item.setText(element) # Set text for QListWidgetItem instance\n item.setCheckState(Qt.Checked)\n list_widget.addItem(item) # Adding QListWidgetItem instance into the QListWidget", "def move_index(self):\n\n index = bpy.context.scene.list_index\n list_length = len (bpy.context.scene.my_list) - 1\n # (index starts at 0)\n new_index = index + (-1 if self.direction == 'UP' else 1)\n bpy.context.scene.list_index = max (0 , min (new_index , list_length))", "def insertItems(self, row: int, *args: str) -> None:\n\n for arg in args:\n self.listbox.insertItem(row, arg)", "def selector(self):\n try:\n self.selection = int(self._listbox.curselection()[0])\n self.flag_chk = self._AnimalData.is_selected(self.selection)\n if self.flag_chk is False:\n self._AnimalData.select(self.selection)\n self.ani_string = self._AnimalData.to_tabbed_string(self.selection)\n self._listbox.delete(self.selection, None)\n self._listbox.add_it(self.selection,self.ani_string)\n self._listbox.itemconfig(self.selection,fg=COLOURS[self.selection % len(COLOURS)])\n self._canvas.redraw()\n except IndexError:\n messagebox.showerror(\"Selection Error\",\"No Index selected: Please select an index.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove all offering details when no SOS is selected.
def removeOfferings(self):
    self.clearOfferingRelatedItems()
    self.lbxOfferings.clear()
[ "def clearOfferingRelatedItems(self):\n self.lblDescription.setText('-')\n self.lblTL_X.setText('-')\n self.lblTL_Y.setText('-')\n self.lblBR_X.setText('-')\n self.lblBR_Y.setText('-')\n self.lblSRS.setText('-')\n self.lblEndTime = QtGui.QLabel('-')\n self.lblStartTime = QtGui.QLabel('-')\n #self.temporal_widget.resetTime() # STC widget\n self.cbProcedure.clear()\n self.cbRequest.clear()\n self.cbResponseFormat.clear()\n self.cbResponseMode.clear()\n self.cbResultModel.clear()\n self.lbObservedProperty.clear()\n self.cbFOI.clear()\n #self.cbTime.clear()\n #self.cbSpatial.clear()", "def clearEntry(self):\n \n ans = askokcancel(\"Verify delete\", \"Really remove item?\") #popup window\n if ans:\n self.productList = shelve.open(shelvename)\n self.getSelection = self.listBox.curselection() #get index of selection\n self.selectedEntry = self.listBox.get(self.getSelection) #get tuple from selection\n (self.productNum, self.descrip, self.colors, self.cost, self.price, \n self.quan) = self.selectedEntry #unpack tuple\n self.entry = self.selectedEntry[0]\n del self.productList[self.entry]\n self.productList.close()\n showinfo(title = \"Product removed\",\n message = \"The product has been removed from inventory.\")\n self.getInven()", "def clear_pricing_data():\r\n invalidate_pricing_cache()", "def empty( self ):\n LectionInSystem.objects.filter( system=self ).delete()", "def clear_all(self):\n self.food_type_dropdown.set(\"\")\n self.food_names_dropdown.set(\"\")\n self.servings_dropdown.set(\"\")", "def get(self):\n # self.hts.multiEvents[\"STOCK\"].exit()\n self.hts.remove_real()\n self.hts.clear_market_state(\"{0:05}\".format(1))\n print(\"REMOVE ALL REAL\")", "def clear_provider_terms(self):\n pass", "def clear_license_terms(self):\n pass", "def clear_provider_terms(self):\n raise errors.Unimplemented()", "def remove_service(self):\n self.driver.find_element_by_xpath(customization_button).click()\n ''' TODO: Need to be changed by other type of wait'''\n time.sleep(3)\n self.driver.find_element_by_xpath(remove_button).click()\n ''' TODO: Need to be changed by other type of wait'''\n time.sleep(3)\n self.driver.find_element_by_xpath(finalization_button).click()", "def clear_license_terms(self):\n raise errors.Unimplemented()", "def clear_leases(self):\n cmd = 'systemctl stop udhcpd.service && ' \\\n 'rm -f /var/lib/misc/udhcpd.leases && ' \\\n 'systemctl start udhcpd.service'\n self.exec_command(f\"sudo bash -c '{cmd}'\")", "def purgeScenes(self) -> None:\r\n\r\n\r\n print(\"\\n\\nPurging scenes!\\n\\n\")\r\n\r\n self.scenes = []\r\n self.requests.append({\"type\": \"GetSceneList\"})", "def clear_provider_id_terms(self):\n pass", "def remove(self):\n\t\tc = Common()\n\t\tc.banner()\n\t\tc.client_hosts()\n\n\t\toperatingSystem = run(\"/bin/cat /etc/issue | /usr/bin/awk '{print $1}'\")\n\n\t\tif(operatingSystem=='Debian'):\n\t\t\trun('aptitude -y purge puppet')\n\t\t\trun('find /var/lib/puppet -type f -print0 | xargs -0r rm')\n\t\telse:\n\t\t\tprint '--->\\tOS not supported'\n\t\t\tsys.exit(0)\n\n\t\ttry:\n\t\t\tsubprocess.call(['/usr/sbin/puppetca', '--clean', '%s.%s' % (c.client_name(),self.domain)])\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e\n\t\t\tpass\n\n\t\tsleep(3)\n\t\texit(0)", "def on_inventory_cleared(self):\n for inv in self.get_inventories():\n inv.clear()", "def ClearProducts(cls):\n\t\tcls.__products = []", "def do_remove(self, arg):\n for investigator in pool.investigators:\n if arg == str(investigator):\n if investigator in selected:\n selected.remove(investigator)\n 
print('%s has been deselected.' % arg)\n print()\n self._print_selected()\n else:\n print('%s was not selected.' % arg)\n return\n\n print('Unknown investigator: select an investigator to remove (double TAB to autocomplete).')", "def clear_provider_id_terms(self):\n raise errors.Unimplemented()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset all displayed values related to offering and request.
def clearOfferingRelatedItems(self):
    self.lblDescription.setText('-')
    self.lblTL_X.setText('-')
    self.lblTL_Y.setText('-')
    self.lblBR_X.setText('-')
    self.lblBR_Y.setText('-')
    self.lblSRS.setText('-')
    self.lblEndTime = QtGui.QLabel('-')
    self.lblStartTime = QtGui.QLabel('-')
    #self.temporal_widget.resetTime() # STC widget
    self.cbProcedure.clear()
    self.cbRequest.clear()
    self.cbResponseFormat.clear()
    self.cbResponseMode.clear()
    self.cbResultModel.clear()
    self.lbObservedProperty.clear()
    self.cbFOI.clear()
    #self.cbTime.clear()
    #self.cbSpatial.clear()
[ "def clear_all(self):\n self.food_type_dropdown.set(\"\")\n self.food_names_dropdown.set(\"\")\n self.servings_dropdown.set(\"\")", "def reset(self):\n self.name.set('')\n self.number.set('')", "def Reset(self):\r\n self.grid = self.EmptyGrid()\r\n self.count = self.EmptyGrid()", "def __reset__(self):\n self._values = {}\n self._errors = {}\n self._raw_values = {}\n (f.__reset__() for f in self.__subcontainers__)", "def clear_form(self):\n self.item_id = self.NO_ITEM\n self.item = pd.Series()\n \n self.item_id_edit.setText(self.NO_ITEM)\n self.quantityEdit.setText(\"\")\n \n self.manufacturerEdit.setText(self.NO_MANUFACTURER)\n self.categoryEdit.setText(self.NO_CATEGORY)\n self.descriptionEdit.setText(self.NO_DESCRIPTION)\n \n self.describe_label.setText(\"\")", "def reset(self):\n self.reward_list = []\n self.action_list = []", "def reset_filter(self):\n self.data_filtered = self.data.copy()\n self.summary_ix = []\n self.summary = []\n self.filter_counts = {}\n self.removed = []\n self.kept = []", "def reset(self):\n self.clear()\n dict.update(self, self.defaults)", "def reset(self):\n self.setMinValue(1)\n self.setMaxValue(None)\n self.clearGuesses()\n self.setGameInProgress(True)", "def reset_all(self):\n # sets self.attacks and self.potions to the starting default values of their respective dictionaries for a given entity\n self.attacks = copy.deepcopy(self.starting_attacks)\n self.potions = copy.deepcopy(self.starting_potions)", "def reset_params(self):\n pass", "def resetWidgetValues(self):\n self.contrast_spinbox.setValue(1.0)\n self.brightness_spinbox.setValue(0)\n self.filter_2D_cb.setChecked(False)\n self.canny_cb.setChecked(False)", "def reset(self):\n self.stored_episodes = {key: [] for key in self.experience_keys}\n self.current_episode = {key: [] for key in self.experience_keys}", "def reset(self):\r\n self.values = self.default_json_values()\r\n self.save()", "def reset_objects(self):\n self.objects.choices = []", "def reset(self):\n self._open_amount= 0\n self._last_price = 0", "def reset(self):\n self.x_mean_pr, self.x_cov_pr = None, None\n self.x_mean_sm, self.x_cov_sm = None, None\n self.xx_cov, self.xy_cov = None, None\n self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None\n self.fi_mean, self.fi_cov = None, None\n self.sm_mean, self.sm_cov = None, None\n self.D, self.N = None, None\n self.flags = {'filtered': False, 'smoothed': False}", "def reset(self):\n self._value = self._default_value", "def reset(self):\n self.potentials = None\n self.in_spike_counts = None\n self.out_spike_counts = None\n self.frequency_duration = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update offering details containers when new offering selected.
def offeringsChanged(self):
    self.clearOfferingRelatedItems()
    if self.lbxOfferings.selectedItems():
        # assumes that a max of one offering can be selected
        selected_offering = self.lbxOfferings.selectedItems()[0].text()
    else:
        selected_offering = None
    if self.parent_widget.service and \
            self.parent_widget.service.service_valid and self.contents:
        for content in self.contents:
            if selected_offering == content.id:
                # description
                if content.description:
                    self.lblDescription.setText(content.description)
                elif content.name:
                    self.lblDescription.setText(content.name)
                else:
                    self.lblDescription.setText(content.id)
                # service operations
                for service in self.parent_widget.service.service_operations:
                    self.cbRequest.addItem(service)
                # update other offering details...
                if content.time:
                    #print "SOS:365 (offering change) Time Rng", content.time
                    self.setTimeIntervalOffering((content.time[0],
                                                  content.time[1]))
                if content.bounding_box:
                    self.lblTL_X.setText(str(content.bounding_box[0]))
                    self.lblTL_Y.setText(str(content.bounding_box[1]))
                    self.lblBR_X.setText(str(content.bounding_box[2]))
                    self.lblBR_Y.setText(str(content.bounding_box[3]))
                    self.lblSRS.setText(str(content.bounding_box[4]))
                self.cbProcedure.addItem('')
                if content.procedure:
                    for pr in content.procedure:
                        self.cbProcedure.addItem(pr)
                self.cbResponseFormat.addItem('')
                if content.response_format:
                    for rf in content.response_format:
                        self.cbResponseFormat.addItem(rf)
                self.cbResponseMode.addItem('')
                if content.response_mode:
                    for rm in content.response_mode:
                        self.cbResponseMode.addItem(rm)
                self.cbResultModel.addItem('')
                if content.result_model:
                    for rd in content.result_model:
                        self.cbResultModel.addItem(rd)
                if content.observed_property:
                    for op in content.observed_property:
                        self.lbObservedProperty.addItem(op)
                self.cbFOI.addItem('')
                if content.feature_of_interest:
                    for foi in content.feature_of_interest:
                        self.cbFOI.addItem(foi)
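For reference, a hedged sketch of the offering object this handler expects; the attribute names are read directly from the code above, but the stand-in class and its sample values are hypothetical (in the real widget the object comes from the SOS service's parsed capabilities contents):

class OfferingStub(object):
    """Hypothetical stand-in exposing the attributes offeringsChanged() reads."""
    def __init__(self):
        self.id = 'WATER_LEVEL'
        self.name = 'Water level offering'
        self.description = 'River gauge water levels'
        self.time = ('2010-01-01T00:00:00Z', '2010-12-31T23:59:59Z')
        # indexed by the handler above as (tl_x, tl_y, br_x, br_y, srs)
        self.bounding_box = (16.0, -35.0, 33.0, -22.0, 'EPSG:4326')
        self.procedure = ['urn:sensor:gauge-01']
        self.response_format = ['text/xml;subtype="om/1.0.0"']
        self.response_mode = ['inline']
        self.result_model = ['om:Observation']
        self.observed_property = ['WaterLevel']
        self.feature_of_interest = ['station-01']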
[ "def offering_detail(request, course_sec_id):\n\n offering = get_object_or_404(Offering, course_sec_id=course_sec_id)\n\n # Is this offering already in ScheduleBuilder?\n try:\n builder = Builder.objects.get(profile=request.user.profile, offering=offering)\n scheduled = True\n except:\n pass\n\n # Allow instructors of a specific offering to override some course details\n if request.user.profile in [i.profile for i in offering.instructors.all()]:\n user_can_edit_offering = True\n\n if request.method == 'POST':\n course_edit_form = OfferingIntraEditForm(request.POST, instance=offering)\n if course_edit_form.is_valid():\n course_edit_form.save()\n messages.success(request, \"Course Offering details overridden\")\n return HttpResponseRedirect(reverse('offering_detail',args=[offering.course_sec_id]))\n\n else:\n\n '''\n The form's initial values are tricksy because the title and body displayed\n on the *Offering* are inherited from the parent Course object. But when the\n form is saved, it saves overrides into the Offering object itself. To avoid\n presenting a blank form, show inherited values *unless* the object has\n previously been overridden.\n '''\n\n if not offering.title:\n init_title = offering.course.long_title\n else:\n init_title = offering.title\n\n if not offering.title:\n init_description_override = offering.course.description\n else:\n init_description_override = offering.description_override\n\n course_edit_form = OfferingIntraEditForm(\n instance=offering,\n initial={'title': init_title, 'description_override': init_description_override}\n )\n\n\n return render_to_response(\n 'courses/offering_detail.html',\n locals(),\n context_instance=RequestContext(request)\n )", "def add_offering(self, offering):\n\n if offering:\n self.offerings.append(offering)", "def scrape_offering(self):\n\n if self.logged_in:\n no_apts = self.get_no_apartments()\n\n db_conn_status = db_connection.is_connected()\n\n if not db_conn_status:\n db_connection.connect()\n\n try:\n # For each apartment\n i = 1\n # For avoiding dangerous loops\n j = 0\n while i <= no_apts:\n\n if j >= 5:\n raise ApartmentException(\"Cannot get past apartment \\\"{0}\\\"\".format(apt_name))\n\n info = self.get_apartment_and_offer(i)\n if info is not None:\n apt_name = info[0]\n end_date_and_time = info[1]\n\n try:\n db_connection.set_is_offered(apt_name, end_date_and_time)\n # Only advance to next apartment if the current one was successfully scraped.\n i = i + 1\n j = 0\n\n except DatabaseException as e:\n j = j + 1\n print(\"Failure to insert some data: \" + str(e))\n\n except DatabaseException as e:\n print(str(e))\n\n finally:\n if not db_conn_status:\n db_connection.disconnect()\n\n else:\n # Apartments from current offering\n print(\"Cannot get offering. 
Not logged in.\")", "def updateItems(self):\n selected = self.userInput.selected()\n if selected:\n for item in self.items[selected.value()]:\n self.itemSelect.addOption(item)", "def episode_selected(self):\n\n # =-- If an Episode is playing then stop that player\n if self.episode_player_state == self.player_states[2]: self.episode_player_controller()\n\n # --- Just in case no EpiosdesListWidget items have been added yet\n # or the item added has no text yet ...\n if self.EpisodesListWidget.count() == 0: return\n try:\n selected_episode_id = self.EpisodesListWidget.currentItem().text()\n except AttributeError:\n return\n\n # --- Clean up any old entries that might be here\n for text_box in self.episode_details_values: text_box.setText(\"\")\n\n selected_episode_id = selected_episode_id.split(\"\\n\")[FIRST]\n selected_episode_id = selected_episode_id.split(\":\")[LAST]\n selected_episode_id = selected_episode_id.strip()\n\n for item in self.list_of_episodes:\n if str(item[\"id\"]) == selected_episode_id:\n episode_values = list(item[\"attributes\"].values())\n index = 0\n episode_stream_url = item[\"attributes\"][\"audio_url\"]\n for episode_value_text in self.episode_details_values:\n episode_value_text.setText(str(episode_values[index]))\n episode_value_text.setCursorPosition(0)\n index += 1\n break # No need to look ay further\n else:\n pass\n\n self.EpisodesListWidget.setWordWrap(True)\n\n # --- Insert the Episode player widget\n\n pixmap = QPixmap(os.path.join(RESOURCE_PATH, \"play.png\"))\n pixmap_resized = pixmap.scaled(150, 150, Qt.KeepAspectRatio)\n self.episode_player_button.setPixmap(pixmap_resized)\n self.EpisodePlayer = vlc.MediaPlayer(episode_stream_url)\n self.episode_player_state = self.player_states[1] # Media ready\n self.episode_player_label.setText(self.episode_player_state)", "def service_offering(self, service_offering):\n\n self._service_offering = service_offering", "def change_service_offering(self, api_client, serviceOfferingId):\n cmd = {'id': self.id, 'serviceofferingid': serviceOfferingId}\n return api_client.changeServiceForVirtualMachine(**cmd)", "def loadOfferings(self):\n if self.parent_widget.service and self.parent_widget.service.service_valid:\n self.removeOfferings() # clear current data\n self.contents = self.parent_widget.service.service.__dict__['contents']\n #print \"SOS:401 self.contents\", self.contents\n for content in self.contents:\n item = QtGui.QListWidgetItem(content.id)\n self.lbxOfferings.addItem(item)", "def UpdateData(self, event = None):\n #currentSelection = self.confList.GetStringSelection()\n #self.state.Edit(\"JconfSelection\", currentSelection)\n self.React()\n self.UpdateDisplay()\n return", "def update_item_view(self):\n try:\n selected = self.ui.items.selectedItems()[0].name\n except IndexError:\n return\n\n try:\n item = self.items.get_item(selected)\n except TypeError:\n logging.warning(\"Unable to load asset \"+selected)\n return\n\n inv_icon_file = self.items.get_item_icon(selected)\n if inv_icon_file is not None:\n icon = QPixmap.fromImage(ImageQt(inv_icon_file))\n else:\n image_file = self.items.get_item_image(selected)\n if image_file is not None:\n icon = QPixmap.fromImage(ImageQt(image_file))\n else:\n icon = QPixmap.fromImage(QImage.fromData(self.assets.items().missing_icon()))\n\n # last ditch\n try:\n icon = self.scale_image_icon(icon, 64, 64)\n self.ui.item_icon.setPixmap(icon)\n except TypeError:\n logging.warning(\"Unable to load item image: \"+selected)\n self.ui.item_icon.setPixmap(QPixmap())\n\n 
self.ui.short_desc.setText(generate_item_info(item[0]))\n\n # populate default variant table\n\n try:\n row = 0\n self.ui.info.setRowCount(len(item[0]))\n for key in sorted(item[0].keys()):\n text = str(key) + \": \" + str(item[0][key])\n table_item = QTableWidgetItem(text)\n table_item.setToolTip(text)\n self.ui.info.setItem(row, 0, table_item)\n row += 1\n except TypeError:\n self.ui.info.setRowCount(0)\n logging.error(\"No item data\")\n\n self.item_browse_select = selected", "def edit(self, data_mgr):\n reserved_words = [\"unknown\"]\n\n selection = ''\n while selection not in ('e', 'E'):\n selection = input(\"Edit: \\n[N]ame / [V]olumes / [A]uthor / \"\n \"[P]ublisher \\n[Alt]ernate Names /\"\n \"[C]ompletion Status / [E]nd: \").strip()\n # Change Name\n if selection in ('n', 'N'):\n print(\"Current Name: {0}\".format(self.name))\n series_name = input(\"Enter new series name or leave \"\n \"blank if unchanged: \")\n if series_name == \"\":\n print(\"Name not changed.\")\n elif series_name.lower() in reserved_words:\n print(\"'{0}' is a reserved word. Name not changed.\"\n .format(series_name))\n else:\n cur = data_mgr.query(\"Select name FROM Series WHERE \"\n \"name = '{0}'\"\n .format(series_name\n .replace(\"'\", \"''\")))\n row = cur.fetchall()\n if row:\n print(\"New name already present in database,\"\n \"not changed\")\n else:\n self.name = series_name\n print(\"Name changed to \\\"{0}\\\".\".format(series_name))\n\n # Change Volumes\n elif selection in ('v', 'V'):\n print(\"Volumes Owned: {0}\".format(self.get_volumes_owned()))\n if self.edit_volumes():\n return True\n\n # Change Author\n elif selection in ('a', 'A'):\n print(\"Current Author: {0}\".format(self.author))\n author = input(\"Enter author or leave blank if unchanged: \")\n if author == \"\":\n pass\n else:\n self.author = author\n print(\"Author changed to \\\"{0}\\\".\".format(author))\n\n # Change Publisher\n elif selection in ('p', 'P'):\n print(\"Current Publisher: {0}\".format(self.publisher))\n publisher = input(\"Enter publisher or leave blank \"\n \"if unchanged: \")\n if publisher == \"\":\n pass\n else:\n self.publisher = publisher\n print(\"Publisher changed to \\\"{0}\\\".\".format(publisher))\n\n # Change Alternate Names\n elif selection.lower() == \"alt\":\n print(\"Current Alt. Names: {0}\".format(self.alt_names))\n alt_names = input(\"Enter any alternate names \"\n \"for this series: \")\n if alt_names != \"\":\n self.alt_names = alt_names\n\n # Change Completion Status\n elif selection in ('c', 'C'):\n is_completed = input(\"Have you completed this series? (y/n) \"\n \"(Leave blank if unchanged): \").strip()\n if is_completed not in ('y', 'Y', 'n', 'N'):\n pass\n elif is_completed in ('y', 'Y'):\n self.is_completed = 1\n else:\n self.is_completed = 0\n\n print(\"----------------------------------------\")\n print(self.full_string())\n print(\"----------------------------------------\")\n\n save_series = input(\"Save changes? 
(y/N): \").strip()\n if save_series in ('y', 'Y'):\n self.update_database_entry(data_mgr)\n print(\"Series updated!\")\n\n return False", "def update_widgets(self):\n self.request_update = True", "def on_vendors_changed(self, vendors: list):\n self.update_vendors(vendors)\n self.update_vendors_ui()\n self.selected_vendor_index = self.vendor_combo_box.currentIndex()", "def update(self):\n print('Updating seating chart...')\n for period in self.periods:\n if period in self.class_lists:\n new_seating, version = self.new_tables(period)\n self.seating_chart[period] = new_seating\n\n # Verify success:\n if new_seating:\n print('Period {}'.format(period))\n for i in range(len(new_seating)):\n print('Table {}: {}'.format(i + 1, new_seating[i]))\n print('Version = {}'.format(version))\n else:\n print('Period {}: Failed to update seating.'.format(period))", "def podcast_selected(self):\n\n if self.episode_player_state == self.player_states[2]: self.episode_player_controller()\n\n # --- Just in case no PodcastListWidget items have been added yet\n # or the item added has no text yet ...\n if self.PodcastListWidget.count() == 0: return\n try: selected_podcast_id = self.PodcastListWidget.currentItem().text()\n except AttributeError: return\n\n # --- Clean out any old entries that might be laying around\n for text_box in self.podcast_details_values: text_box.setText(\"\")\n\n selected_podcast_id = selected_podcast_id.split(\"\\n\")[FIRST]\n selected_podcast_id = selected_podcast_id.split(\":\")[LAST]\n selected_podcast_id = selected_podcast_id.strip()\n\n for item in self.list_of_podcasts:\n if str(item[\"id\"]) == selected_podcast_id:\n podcast_values = list(item[\"attributes\"].values())\n index = 0\n for podcast_value_text in self.podcast_details_values:\n podcast_value_text.setText(str(podcast_values[index]))\n podcast_value_text.setCursorPosition(0)\n index += 1\n break # No need to look ay further\n else:\n pass\n\n # --- Populate the Episodes tab\n self.populate_episodes(selected_podcast_id)\n self.EpisodesListWidget.setIconSize(QSize(100, 100))", "def edit_appliance_details(*props): # pylint: disable=unused-argument\n pass", "def clearOfferingRelatedItems(self):\n self.lblDescription.setText('-')\n self.lblTL_X.setText('-')\n self.lblTL_Y.setText('-')\n self.lblBR_X.setText('-')\n self.lblBR_Y.setText('-')\n self.lblSRS.setText('-')\n self.lblEndTime = QtGui.QLabel('-')\n self.lblStartTime = QtGui.QLabel('-')\n #self.temporal_widget.resetTime() # STC widget\n self.cbProcedure.clear()\n self.cbRequest.clear()\n self.cbResponseFormat.clear()\n self.cbResponseMode.clear()\n self.cbResultModel.clear()\n self.lbObservedProperty.clear()\n self.cbFOI.clear()\n #self.cbTime.clear()\n #self.cbSpatial.clear()", "def update_pdfbox(self,pdfs=None,n_pdfs=None):\n self.PDFSelectionBox.update(pdfs=pdfs,n_pdfs=n_pdfs)", "def update_spikes(self):\n # full rebuild (to be safe):\n historical_spikes_pdata, historical_spikes_pc = build_active_spikes_plot_data_df(self.spikes_df, spike_geom=SpikeRenderingPyVistaMixin.spike_geom_cone.copy(), enable_debug_print=self.debug_logging)\n self.plots_data['spikes_pf_active'] = {'historical_spikes_pdata':historical_spikes_pdata, 'historical_spikes_pc':historical_spikes_pc}\n \n # Update just the values that could change:\n self.plots_data['spikes_pf_active']['historical_spikes_pdata']['render_opacity'] = self.spikes_df['render_opacity'].values\n # ?? 
Is this rebuild needed after updating the pdata to see the changes in the pc_data (which is what is actually plotted)???\n self.plots_data['spikes_pf_active']['historical_spikes_pc'] = self.plots_data['spikes_pf_active']['historical_spikes_pdata'].glyph(scale=False, geom=SpikeRenderingPyVistaMixin.spike_geom_cone.copy()) \n # spike_history_pdata['render_opacity'] = active_flat_df['render_opacity'].values\n \n if self.plots_data['spikes_pf_active']['historical_spikes_pc'].n_points >= 1:\n self.plots['spikes_pf_active'] = self.p.add_mesh(self.plots_data['spikes_pf_active']['historical_spikes_pc'], name='spikes_pf_active', scalars='rgb', rgb=True, show_scalar_bar=False, lighting=True, render=False)\n needs_render = True\n else:\n self.p.remove_actor(self.plots['spikes_pf_active'])\n needs_render = True\n\n if needs_render:\n self.p.render()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the offerings from the service metadata.
def loadOfferings(self):
    if self.parent_widget.service and self.parent_widget.service.service_valid:
        self.removeOfferings()  # clear current data
        self.contents = self.parent_widget.service.service.__dict__['contents']
        #print "SOS:401 self.contents", self.contents
        for content in self.contents:
            item = QtGui.QListWidgetItem(content.id)
            self.lbxOfferings.addItem(item)
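A hedged wiring sketch, assuming PyQt4 and a hypothetical OfferingsPanel class that holds the methods shown in this section; the signal connection is an assumption about how loadOfferings() and offeringsChanged() would typically be hooked together, not part of the original code:

import sys
from PyQt4 import QtGui

app = QtGui.QApplication(sys.argv)
panel = OfferingsPanel()  # hypothetical container widget defining the methods above
panel.lbxOfferings.itemSelectionChanged.connect(panel.offeringsChanged)
panel.loadOfferings()     # repopulates lbxOfferings from the capabilities contents
panel.show()
app.exec_()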
[ "def parse_offer(self, response):", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return api_client.listServiceOfferings(**cmd)", "def load_biothings(self):\n # load biothings schema\n self.mp = MappingParser()\n # loop through API metadata\n for _api, _info in metadata.items():\n # use the mapping parser module to load relationship of each API\n # into the network\n if _info.get('api_name') == 'CORD API':\n mapping_file = self._auto_generate_cord_mapping(_info.get('doc_type'))\n elif _info.get('api_name') == 'SEMMED API':\n mapping_file = self._auto_generate_semmed_mapping(_info.get('doc_type'))\n elif 'mapping_url' in _info:\n self.registry[_api] = {}\n mapping_file = Path.joinpath(CURRENT_PATH.parent,\n 'smartapi/schema', _api + '.json')\n else:\n continue\n self.mp.load_mapping(mapping_file, api=_api)\n self.registry[_api] = {\n 'mapping': self.mp.mapping,\n 'graph': self.mp.connect(),\n 'type': self.mp.type\n }\n self.G.add_edges_from(self.registry[_api]['graph'].edges(data=True))\n return self.G", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n constants.LOADBALANCER, self)", "def scrape_offering(self):\n\n if self.logged_in:\n no_apts = self.get_no_apartments()\n\n db_conn_status = db_connection.is_connected()\n\n if not db_conn_status:\n db_connection.connect()\n\n try:\n # For each apartment\n i = 1\n # For avoiding dangerous loops\n j = 0\n while i <= no_apts:\n\n if j >= 5:\n raise ApartmentException(\"Cannot get past apartment \\\"{0}\\\"\".format(apt_name))\n\n info = self.get_apartment_and_offer(i)\n if info is not None:\n apt_name = info[0]\n end_date_and_time = info[1]\n\n try:\n db_connection.set_is_offered(apt_name, end_date_and_time)\n # Only advance to next apartment if the current one was successfully scraped.\n i = i + 1\n j = 0\n\n except DatabaseException as e:\n j = j + 1\n print(\"Failure to insert some data: \" + str(e))\n\n except DatabaseException as e:\n print(str(e))\n\n finally:\n if not db_conn_status:\n db_connection.disconnect()\n\n else:\n # Apartments from current offering\n print(\"Cannot get offering. 
Not logged in.\")", "def _get_offers(self):\n try:\n offers = requests.get(\"{base_url}{offers}\".format(base_url=self.AWS_PRICE_BASE_URL,\n offers=self.AWS_REGION_OFFERS))\n\n if offers.status_code == 200:\n response = json.loads(offers.text)\n regions = response.get('regions')\n if regions:\n offer = regions.get(self.region).get(self.AWS_OFFERS_KEY)\n offers_url = \"{base_url}{offer}\".format(base_url=self.AWS_PRICE_BASE_URL,\n offer=offer)\n data = requests.get(offers_url)\n if data.status_code == 200:\n return data.text\n else:\n return json.dumps(dict())\n return False\n except requests.ConnectionError as exp:\n print(exp)\n exit(1)", "def _load_adapters():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), adapter_JSON)\n f = open(filepath, 'r')\n adapters = json.load(f)\n f.close()\n return sorted(adapters)", "def list(self):\r\n return self.partA_offers, self.partB_offers", "def _load_api_from_router(self):\n for description in FRITZ_DESCRIPTIONS:\n source = f\"{self.address}:{self.port}/{description}\"\n try:\n self.device_manager.add_description(source)\n except FritzResourceError:\n # resource not available:\n # this can happen on devices not providing\n # an igddesc-file.\n # ignore this\n # But if the \"tr64desc.xml\" file is missing the router\n # may not have TR-064 activated. In this case raise a\n # useful error-message.\n if description == FRITZ_TR64_DESC_FILE:\n raise FritzConnectionException(\n FRITZ_APPLICATION_ACCESS_DISABLED\n )\n self.device_manager.scan()\n self.device_manager.load_service_descriptions(self.address, self.port)", "def load_data_set(self) -> None:\n return", "def get_offer(self, data):\n payload = {}\n payload.update(self.generic_service)\n payload.update(self.product_service)\n\n r = requests.get(\"http://catalog.bizrate.com/services/catalog/v1/us/{0}\".format(\"offer\"), params=payload)\n print(\"URL: \")\n print(r.url)\n\n print(\"RESPONSE: \")\n print(r.json())\n\n return", "def parse_offer(url):\n log.info(url)\n html_parser = BeautifulSoup(get_content_for_url(url).content, \"html.parser\")\n offer_content = str(html_parser.body)\n poster_name = get_poster_name(offer_content)\n price, currency, add_id = parse_tracking_data(str(html_parser.head))\n if not all([add_id, poster_name]):\n log.info(\"Offer {0} is not available anymore.\".format(url))\n return\n region = parse_region(offer_content)\n if len(region) == 4:\n city, powiat, voivodeship, district = region\n elif len(region) == 3:\n city, voivodeship, district = region\n elif len(region) == 2:\n city, voivodeship = region\n district = None\n else:\n city, voivodeship, district = None, None, None\n data_dict = get_gpt_script(offer_content)\n result = {\n \"title\": get_title(offer_content),\n \"add_id\": add_id,\n \"price\": price,\n \"currency\": currency,\n \"city\": city,\n \"district\": district,\n \"voivodeship\": voivodeship,\n \"gps\": get_gps(offer_content),\n \"description\": parse_description(offer_content),\n \"poster_name\": poster_name,\n \"url\": url,\n \"date_added\": get_date_added(offer_content),\n \"date_added_readable\": dt.datetime.fromtimestamp(get_date_added(offer_content)).isoformat(),\n \"images\": get_img_url(offer_content),\n \"private_business\": data_dict.get(\"private_business\"),\n }\n flat_data = parse_flat_data(offer_content, data_dict)\n if flat_data and any(flat_data.values()):\n result.update(flat_data)\n return result", "def offeringsChanged(self):\n self.clearOfferingRelatedItems()\n if self.lbxOfferings.selectedItems():\n # assumes that a 
max of one offering can be selected\n selected_offering = self.lbxOfferings.selectedItems()[0].text()\n else:\n selected_offering = None\n if self.parent_widget.service and \\\n self.parent_widget.service.service_valid and self.contents:\n for content in self.contents:\n if selected_offering == content.id:\n # description\n if content.description:\n self.lblDescription.setText(content.description)\n elif content.name:\n self.lblDescription.setText(content.name)\n else:\n self.lblDescription.setText(content.id)\n # service operations\n for service in self.parent_widget.service.service_operations:\n self.cbRequest.addItem(service)\n # update other offering details...\n if content.time:\n #print \"SOS:365 (offering change) Time Rng\", content.time\n self.setTimeIntervalOffering((content.time[0],\n content.time[1]))\n if content.bounding_box:\n self.lblTL_X.setText(str(content.bounding_box[0]))\n self.lblTL_Y.setText(str(content.bounding_box[1]))\n self.lblBR_X.setText(str(content.bounding_box[2]))\n self.lblBR_Y.setText(str(content.bounding_box[3]))\n self.lblSRS.setText(str(content.bounding_box[4]))\n self.cbProcedure.addItem('')\n if content.procedure:\n for pr in content.procedure:\n self.cbProcedure.addItem(pr)\n self.cbResponseFormat.addItem('')\n if content.response_format:\n for rf in content.response_format:\n self.cbResponseFormat.addItem(rf)\n self.cbResponseMode.addItem('')\n if content.response_mode:\n for rm in content.response_mode:\n self.cbResponseMode.addItem(rm)\n self.cbResultModel.addItem('')\n if content.result_model:\n for rd in content.result_model:\n self.cbResultModel.addItem(rd)\n if content.observed_property:\n for op in content.observed_property:\n self.lbObservedProperty.addItem(op)\n self.cbFOI.addItem('')\n if content.feature_of_interest:\n for foi in content.feature_of_interest:\n self.cbFOI.addItem(foi)", "def test_offers_list(self):\n pass", "def test_offers_retrieve(self):\n pass", "def _loadViperServices(self):\n servicesPath = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"service\"\n )\n for serviceFile in os.listdir(servicesPath):\n if serviceFile.startswith(\"__\") or serviceFile.startswith(\".\"):\n continue\n\n serviceName = serviceFile.replace(\".py\", \"\")\n servicePath = os.path.join(\n servicesPath, serviceFile\n )\n\n if not os.path.isfile(servicePath):\n continue\n\n # importing service\n serviceSpec = importlib.util.spec_from_file_location(\n serviceName,\n servicePath\n )\n service = importlib.util.module_from_spec(serviceSpec)\n serviceSpec.loader.exec_module(service)\n\n # initializing service\n serviceInstance = service.Service(self)\n self.addService(\"viper\", serviceName, serviceInstance)", "def _set_aladdin_recommendations(self):\n\n import hashlib\n import json\n import requests\n from requests import RequestException\n from http import HTTPStatus\n from azure.cli.core import __version__ as version\n\n api_url = 'https://app.aladdin.microsoft.com/api/v1.0/suggestions'\n correlation_id = telemetry._session.correlation_id # pylint: disable=protected-access\n subscription_id = telemetry._get_azure_subscription_id() # pylint: disable=protected-access\n # Used for DDOS protection and rate limiting\n user_id = telemetry._get_user_azure_id() # pylint: disable=protected-access\n hashed_user_id = hashlib.sha256(user_id.encode('utf-8')).hexdigest()\n\n headers = {\n 'Content-Type': 'application/json',\n 'X-UserId': hashed_user_id\n }\n context = {\n 'versionNumber': version,\n 'errorType': self._get_error_type()\n }\n\n if 
telemetry.is_telemetry_enabled():\n if correlation_id:\n context['correlationId'] = correlation_id\n if subscription_id:\n context['subscriptionId'] = subscription_id\n\n parameters = self._normalize_parameters(self.parameters)\n parameters = [item for item in parameters if item not in ['--debug', '--verbose', '--only-show-errors']]\n query = {\n \"command\": self.command,\n \"parameters\": ','.join(parameters)\n }\n\n response = None\n try:\n response = requests.get(\n api_url,\n params={\n 'query': json.dumps(query),\n 'clientType': 'AzureCli',\n 'context': json.dumps(context)\n },\n headers=headers,\n timeout=1)\n telemetry.set_debug_info('AladdinResponseTime', response.elapsed.total_seconds())\n\n except RequestException as ex:\n logger.debug('Recommendation requests.get() exception: %s', ex)\n telemetry.set_debug_info('AladdinException', ex.__class__.__name__)\n\n recommendations = []\n if response and response.status_code == HTTPStatus.OK:\n for result in response.json():\n # parse the response and format the recommendation\n command, parameters, placeholders = result['command'],\\\n result['parameters'].split(','),\\\n result['placeholders'].split('♠')\n recommendation = 'az {} '.format(command)\n for parameter, placeholder in zip(parameters, placeholders):\n recommendation += '{} {} '.format(parameter, placeholder)\n recommendations.append(recommendation.strip())\n\n self.aladdin_recommendations.extend(recommendations)", "def load_accommodation():\n row_query = ListQuery()\n row_query.start_index = str(1)\n rows_feed = client.GetListFeed(key=app.config['SPREADSHEET_KEY'], visibility='public', projection='full', wksht_id=accommodation.id.text.split('/')[-1])\n\n records = []\n\n for row in rows_feed.entry:\n records.append ( Record ( spreadsheet_key=app.config['SPREADSHEET_KEY'],\n worksheet_id=accommodation.id.text.split('/')[-1],\n row_entry=row\n )\n )\n\n return [Accommodation(r) for r in records]", "def _load_metadata_of_missing_apps(self):\n for tarfile in os.listdir(constants.HELM_APP_ISO_INSTALL_PATH):\n # Get the app name from the tarball name\n # If the app has the metadata loaded already, by conductor restart,\n # then skip the tarball extraction\n app_name = None\n pattern = re.compile(\"^(.*)-([0-9]+\\.[0-9]+-[0-9]+)\")\n\n match = pattern.search(tarfile)\n if match:\n app_name = match.group(1)\n\n if app_name and \\\n app_name in self.apps_metadata[constants.APP_METADATA_APPS]:\n LOG.info(\"{} metadata already loaded, skip loading from \"\n \"the bundled tarball.\".format(app_name))\n continue\n\n # Proceed with extracting the tarball\n tarball_name = '{}/{}'.format(\n constants.HELM_APP_ISO_INSTALL_PATH, tarfile)\n\n with cutils.TempDirectory() as app_path:\n if not cutils.extract_tarfile(app_path, tarball_name):\n LOG.error(\"Failed to extract tar file {}.\".format(\n os.path.basename(tarball_name)))\n continue\n\n # If checksum file is included in the tarball, verify its contents.\n if not cutils.verify_checksum(app_path):\n LOG.error(\"Checksum validation failed for %s.\" % tarball_name)\n continue\n\n try:\n name, version, patches = \\\n self._kube_app_helper._verify_metadata_file(\n app_path, None, None)\n except exception.SysinvException as e:\n LOG.error(\"Extracting tarfile for %s failed: %s.\" % (\n tarball_name, str(e)))\n continue\n\n metadata_file = os.path.join(app_path,\n constants.APP_METADATA_FILE)\n if os.path.exists(metadata_file):\n with io.open(metadata_file, 'r', encoding='utf-8') as f:\n # The RoundTripLoader removes the superfluous quotes by 
default.\n # Set preserve_quotes=True to preserve all the quotes.\n # The assumption here: there is just one yaml section\n metadata = yaml.load(\n f, Loader=yaml.RoundTripLoader, preserve_quotes=True)\n\n if name and metadata:\n # Update metadata only if it was not loaded during conductor init\n # The reason is that we don't want to lose the modified version\n # by loading the default metadata from the bundled app.\n kube_app.AppOperator.update_and_process_app_metadata(\n self.apps_metadata, name, metadata, overwrite=False)\n\n # Prevent this function from running until conductor restart\n self._has_loaded_missing_apps_metadata = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a valid EPSG srsName according to OGC 09048r3
def get_valid_srs(self, srsURN): srs = None try: srs_items = srsURN.split(':') code = srs_items[len(srs_items) - 1] #print "SOS:427", srs_items, code if code and int(code) > 0: return 'urn:ogc:def:crs:EPSG::' + code # omit any version no. else: return 'urn:ogc:def:crs:EPSG::4326' except: self.raiseError(self, 'Unable to construct valid srsName from %s'\ % srsURN) return srs
[ "def check_epsg_code(str):\n crs = pyproj.CRS(str)\n return crs.to_epsg()", "def epsg_from_crs(crs): # -> int | None:\n ...", "def explicit_crs_from_epsg(crs=..., epsg=...): # -> CRS:\n ...", "def projection(self):\n try:\n return '{0} (EPSG:{1})'.format(self.dataset().GetProjection().split(',')[0].split('\"')[1],\\\n self.epsg())\n except:\n return ''", "def get_esriwkt(epsg):\n try:\n with urllib.request.urlopen(\"http://spatialreference.org/ref/epsg/{0}/esriwkt/\".format(epsg)) as response:\n return str(response.read()).strip(\"b\").strip(\"'\")\n except Exception:\n pass\n try:\n with urllib.request.urlopen(\n \"http://spatialreference.org/ref/sr-org/epsg{0}-wgs84-web-mercator-auxiliary-sphere/esriwkt/\".format(\n epsg)) as response:\n return str(response.read()).strip(\"b\").strip(\"'\")\n # sr-org codes are available at \"https://spatialreference.org/ref/sr-org/{0}/esriwkt/\".format(epsg)\n # for example EPSG:3857 = SR-ORG:6864 -> https://spatialreference.org/ref/sr-org/6864/esriwkt/ = EPSG:3857\n except Exception as e:\n logging.error(\"Could not find epsg code on spatialreference.org. Returning default WKT(epsg=4326).\")\n print(e)\n return 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295],UNIT[\"Meter\",1]]'", "def get_epsg(self):\n assert not (self.projection is None or len(self.projection) == 0), \"No projection is defined\"\n proj = osr.SpatialReference(wkt=self.projection)\n try:\n epsg = int(proj.GetAttrValue('AUTHORITY', 1))\n except ValueError:\n assert False, \"Failed to convert '%s' to epsg code\" % proj.GetAttrValue('AUTHORITY', 1)\n return epsg", "def proj_srs_convert(srs):\n res = osr.SpatialReference()\n epsg = srs.to_epsg()\n\n if epsg:\n res.ImportFromEPSG(epsg)\n else:\n proj4 = srs.to_proj4()\n res.ImportFromProj4(proj4)\n \n res.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n return res", "def srid_to_proj(srid):\n from django.contrib.gis.gdal import SpatialReference\n srs = SpatialReference(srid)\n return srs.proj.strip()", "def lookup_crs_name(station: str, crs_dict: dict) -> str:\n try:\n return crs_dict.get(station)\n except:\n return \"No CRS apps found\"", "def getPRJwkt(epsg):\n\n import urllib\n f=urllib.urlopen(\"http://spatialreference.org/ref/epsg/{0}/prettywkt/\".format(epsg))\n return f.read()", "def _getNameFromGraticule(graticule):\n\n lat = graticule['lat']\n lng = graticule['lng']\n\n return '%(lath)s%(lat)02d%(lngh)s%(lng)03d' % {\n 'lngh': 'W' if lng < 0 else 'E',\n 'lng': abs(lng),\n 'lath': 'S' if lat < 0 else 'N',\n 'lat': abs(lat)\n }", "def test_default_crs(self):\n x = geo_uri(\"geo:0,0,0;a=1;b=2;c=ab%2dcd\")\n x = geo_uri(\"geo:0,0,0\")\n self.assertEqual('wgs84', x.crs)\n self.assertTrue(isinstance(x, geouri.GeoURI_WGS84))\n self.assertIsNone(x.uncertainty)\n self.assertEqual(\"geo:0,0,0\", str(geo_uri(\"geo:0,0,0\")))", "def pt_organization_uri(name):\n\n\treturn 'organization/' + alphaNumeric(name.strip().lower(), '')", "def get_semester_name(semester):\n # TODO: wymuszanie formatu roku \"XXXX/YY\" zamiast \"XXXX\"\n if len(semester.year) != 7:\n return '(BŁĄD) {0} {1}'.format(semester.year, semester.get_type_display())\n return '{0} {1}'.format(semester.year, semester.get_type_display())", "def street_name(self):\n return self.generator.parse(\"{{name}}s {{street_suffix}}\")", "def test_urn(self):\n self.assertEqual(\"urn:ogc:def:crs:EPSG::4979\", geo_uri(\"geo:48.2010,16.3695,183\").crs_urn)\n 
self.assertEqual(\"urn:ogc:def:crs:EPSG::4326\", geo_uri(\"geo:48.198634,16.371648;crs=wgs84;u=40\").crs_urn)", "def getSpatialReference (self):\n return __spatialRef__.name", "def getNameBy3LetterCode(ISO3):\n return countriesBy3LetterCode[ISO3].name", "def get_utm_zone(lon: float, lat: float) -> str:\n utm_band = str((math.floor((lon + 180) / 6) % 60) + 1)\n if len(utm_band) == 1:\n utm_band = \"0\" + utm_band\n if lat >= 0:\n epsg_code = \"326\" + utm_band\n else:\n epsg_code = \"327\" + utm_band\n return f\"EPSG:{epsg_code}\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a calculator for the potential, given a slab
def calculator(self): self.logger.debug('Recieved a calculator access call') return potential.load(self.potentialname,self.element, self.slab)
[ "def select_calculator(model_name, n=150, size=(10, 40, 100)):\n a, b, c = size\n d_factor = 0.06 # for paracrystal models\n if model_name == 'sphere':\n calculator = build_model(\n 'sphere', n=n, radius=c)\n a = b = c\n elif model_name == 'sc_paracrystal':\n a = b = c\n dnn = c\n radius = 0.5*c\n calculator = build_model(\n 'sc_paracrystal', n=n,\n dnn=dnn, d_factor=d_factor, radius=(1-d_factor)*radius,\n background=0)\n elif model_name == 'fcc_paracrystal':\n a = b = c\n # nearest neigbour distance dnn should be 2 radius, but I think the\n # model uses lattice spacing rather than dnn in its calculations\n dnn = 0.5*c\n radius = sqrt(2)/4 * c\n calculator = build_model(\n 'fcc_paracrystal', n=n,\n dnn=dnn, d_factor=d_factor, radius=(1-d_factor)*radius,\n background=0)\n elif model_name == 'bcc_paracrystal':\n a = b = c\n # nearest neigbour distance dnn should be 2 radius, but I think the\n # model uses lattice spacing rather than dnn in its calculations\n dnn = 0.5*c\n radius = sqrt(3)/2 * c\n calculator = build_model(\n 'bcc_paracrystal', n=n,\n dnn=dnn, d_factor=d_factor, radius=(1-d_factor)*radius,\n background=0)\n elif model_name == 'cylinder':\n calculator = build_model(\n 'cylinder', n=n, qmax=0.3, radius=b, length=c)\n a = b\n elif model_name == 'ellipsoid':\n calculator = build_model(\n 'ellipsoid', n=n, qmax=1.0,\n radius_polar=c, radius_equatorial=b)\n a = b\n elif model_name == 'triaxial_ellipsoid':\n calculator = build_model(\n 'triaxial_ellipsoid', n=n, qmax=0.5,\n radius_equat_minor=a, radius_equat_major=b, radius_polar=c)\n elif model_name == 'parallelepiped':\n calculator = build_model(\n 'parallelepiped', n=n, length_a=a, length_b=b, length_c=c)\n else:\n raise ValueError(\"unknown model %s\"%model_name)\n\n return calculator, (a, b, c)", "def select_calculator(model_name, n=150, size=(10,40,100)):\n a, b, c = size\n if model_name == 'sphere':\n calculator = build_model('sphere', n=n, radius=c)\n a = b = c\n elif model_name == 'bcc_paracrystal':\n calculator = build_model('bcc_paracrystal', n=n, dnn=c,\n d_factor=0.06, radius=40)\n a = b = c\n elif model_name == 'cylinder':\n calculator = build_model('cylinder', n=n, qmax=0.3, radius=b, length=c)\n a = b\n elif model_name == 'ellipsoid':\n calculator = build_model('ellipsoid', n=n, qmax=1.0,\n radius_polar=c, radius_equatorial=b)\n a = b\n elif model_name == 'triaxial_ellipsoid':\n calculator = build_model('triaxial_ellipsoid', n=n, qmax=0.5,\n radius_equat_minor=a,\n radius_equat_major=b,\n radius_polar=c)\n elif model_name == 'parallelepiped':\n calculator = build_model('parallelepiped', n=n, a=a, b=b, c=c)\n else:\n raise ValueError(\"unknown model %s\"%model_name)\n\n return calculator, (a, b, c)", "def term(self, values):\n if len(values) == 3:\n factor1, op, factor2 = values\n if op == \"*\":\n return Multiply(factor1, factor2)\n else:\n return Divide(factor1, factor2)\n\n (num,) = values\n return num", "def _compute(eq, name, grid, component=None, reshape=True):\n if name not in data_index[\"desc.equilibrium.equilibrium.Equilibrium\"]:\n raise ValueError(\"Unrecognized value '{}'.\".format(name))\n assert component in [\n None,\n \"R\",\n \"phi\",\n \"Z\",\n ], f\"component must be one of [None, 'R', 'phi', 'Z'], got {component}\"\n\n components = {\n \"R\": 0,\n \"phi\": 1,\n \"Z\": 2,\n }\n\n label = data_index[\"desc.equilibrium.equilibrium.Equilibrium\"][name][\"label\"]\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n data = eq.compute(name, grid=grid)[name]\n\n if 
data_index[\"desc.equilibrium.equilibrium.Equilibrium\"][name][\"dim\"] > 1:\n if component is None:\n data = np.linalg.norm(data, axis=-1)\n label = \"|\" + label + \"|\"\n else:\n data = data[:, components[component]]\n label = \"(\" + label + \")_\"\n if component in [\"R\", \"Z\"]:\n label += component\n else:\n label += r\"\\phi\"\n\n label = (\n r\"$\"\n + label\n + \"~(\"\n + data_index[\"desc.equilibrium.equilibrium.Equilibrium\"][name][\"units\"]\n + \")$\"\n )\n\n if reshape:\n data = data.reshape((grid.num_theta, grid.num_rho, grid.num_zeta), order=\"F\")\n\n return data, label", "def calc(self, n1, op, n2):\n if op == \"*\":\n return n1 * n2\n if op == \"/\":\n return n1 // n2\n if op == \"+\":\n return n1 + n2\n if op == \"-\":\n return n1 - n2", "def create_scalings(self):\n\n self.f_r_vbf_names = [] # the RooFormulae that scale the components (VBF)\n self.f_r_ggf_names = [] # the RooFormulae that scale the components (GGF)\n\n def pow_to_mul_string(expr):\n \"\"\" Convert integer powers in an expression to Muls,\n like a**2 => a*a. Returns a string \"\"\"\n pows = list(expr.atoms(Pow))\n if any(not e.is_Integer for b, e in (i.as_base_exp() for i in pows)):\n raise ValueError(\"A power contains a non-integer exponent\")\n s = str(expr)\n repl = zip(pows, (Mul(* [b] * e, evaluate=False)\n for b, e in (i.as_base_exp() for i in pows)))\n for fr, to in repl:\n s = s.replace(str(fr), str(to))\n return s\n\n # loop on the GGF scalings\n for i, s in enumerate(self.ggf_formula.sample_list):\n f_name = 'f_ggfhhscale_sample_{0}'.format(i)\n f_expr = self.ggf_formula.coeffs[i] # the function that multiplies each sample\n\n # print f_expr\n # for ROOFit, this will convert expressions as a**2 to a*a\n s_expr = pow_to_mul_string(f_expr)\n\n couplings_in_expr = []\n if 'kl' in s_expr:\n couplings_in_expr.append('kl')\n if 'kt' in s_expr:\n couplings_in_expr.append('kt')\n\n # no constant expressions are expected\n if len(couplings_in_expr) == 0:\n raise RuntimeError('GGF HH : scaling expression has no coefficients')\n\n for idx, ce in enumerate(couplings_in_expr):\n # print '..replacing', ce\n symb = '@{}'.format(idx)\n s_expr = s_expr.replace(ce, symb)\n\n arglist = ','.join(couplings_in_expr)\n exprname = 'expr::{}(\\\"{}\\\" , {})'.format(f_name, s_expr, arglist)\n # print exprname\n self.modelBuilder.factory_(exprname) # the function that scales each VBF sample\n\n f_prod_name_pmode = f_name + '_r_gghh'\n prodname_pmode = 'prod::{}(r_gghh,{})'.format(f_prod_name_pmode, f_name)\n\n # the function that scales this production mode\n self.modelBuilder.factory_(prodname_pmode)\n\n # will just print out the values\n # self.modelBuilder.out.function(f_prod_name).Print(\"\")\n\n f_prod_name = f_prod_name_pmode + '_r'\n prodname = 'prod::{}(r,{})'.format(f_prod_name, f_prod_name_pmode)\n self.modelBuilder.factory_(prodname) # the function that scales this production mode\n # self.modelBuilder.out.function(f_prod_name).Print(\"\") ## will just print out the values\n\n self.f_r_ggf_names.append(f_prod_name) # bookkeep the scaling that has been created\n\n # loop on the VBF scalings\n for i, s in enumerate(self.vbf_formula.sample_list):\n f_name = 'f_vbfhhscale_sample_{0}'.format(i)\n f_expr = self.vbf_formula.coeffs[i] # the function that multiplies each sample\n\n # print f_expr\n # for ROOFit, this will convert expressions as a**2 to a*a\n s_expr = pow_to_mul_string(f_expr)\n\n couplings_in_expr = []\n if 'CV' in s_expr:\n couplings_in_expr.append('CV')\n if 'C2V' in s_expr:\n 
couplings_in_expr.append('C2V')\n if 'kl' in s_expr:\n couplings_in_expr.append('kl')\n\n # no constant expressions are expected\n if len(couplings_in_expr) == 0:\n raise RuntimeError('VBF HH : scaling expression has no coefficients')\n\n for idx, ce in enumerate(couplings_in_expr):\n # print '..replacing', ce\n symb = '@{}'.format(idx)\n s_expr = s_expr.replace(ce, symb)\n\n arglist = ','.join(couplings_in_expr)\n exprname = 'expr::{}(\\\"{}\\\" , {})'.format(f_name, s_expr, arglist)\n # print exprname\n self.modelBuilder.factory_(exprname) # the function that scales each VBF sample\n\n f_prod_name_pmode = f_name + '_r_qqhh'\n prodname_pmode = 'prod::{}(r_qqhh,{})'.format(f_prod_name_pmode, f_name)\n\n # the function that scales this production mode\n self.modelBuilder.factory_(prodname_pmode)\n\n # will just print out the values\n # self.modelBuilder.out.function(f_prod_name_pmode).Print(\"\")\n\n f_prod_name = f_prod_name_pmode + '_r'\n prodname = 'prod::{}(r,{})'.format(f_prod_name, f_prod_name_pmode)\n self.modelBuilder.factory_(prodname) # the function that scales this production mode\n # self.modelBuilder.out.function(f_prod_name).Print(\"\") # will just print out the values\n\n self.f_r_vbf_names.append(f_prod_name) # bookkeep the scaling that has been created", "def create_cobb_douglas(self, output, multiplier, exponents):\n def production_function(**goods):\n ret = multiplier * reduce(operator.mul,\n [goods[name] ** exponent\n for name, exponent in exponents.items()])\n return {output: ret}\n return production_function", "def constructSymbolicHubbard2(hdim,vdim,t,U):\n hdim = hdim*2\n nqubits = vdim*hdim\n # first horizontal line of sites\n firstLine = range(1,hdim-1)\n spinDownList = [x for x in firstLine if x % 2 == 1]\n coefficients=[]\n operators=[]\n # Generating the horizontal contributions to the hamiltonian\n for j in range(0,vdim):\n offset = j*hdim\n for i in spinDownList:\n #print(i)\n operators.append([i+offset,-i-2-offset])\n coefficients.append(-t)\n operators.append([i+1+offset,-i-3-offset])\n coefficients.append(-t) \n # periodic boundary conditions\n if hdim > 2:\n operators.append([hdim-1+offset,-1-offset])\n coefficients.append(-t)\n operators.append([hdim+offset,-2-offset])\n coefficients.append(-t) \n #print(\"spinDownList\",spinDownList)\n #print(\"horizontal contributions:\",operators)\n\n # Generating the vertical contributions to the hamiltonian\n # open boundary conditions\n firstLine = range(1,hdim+1)\n spinDownList = [x for x in firstLine if x % 2 == 1]\n #print(\"spinDownList\",spinDownList)\n for j in range(1,vdim):\n offset1 = (j-1)*hdim\n offset2 = j*hdim\n for i in spinDownList:\n #print(i)\n operators.append([i+offset1,-i-offset2])\n coefficients.append(-t)\n operators.append([i+1+offset1,-i-1-offset2])\n coefficients.append(-t) \n #print(\"vertical contributions:\",operators)\n \n # repulsion terms\n allQubits = range(1,nqubits+1)\n spinDownListAll = [x for x in allQubits if x % 2 == 1]\n for i in spinDownListAll:\n operators.append([i,-i,i+1,-i-1])\n coefficients.append(U)\n \n #print(\"repulsion contributions:\",operators)\n return operators, coefficients, nqubits", "def generate_operators_helper(maths_oper_drew_by_lot, num_of_calculations):\n if num_of_calculations > 0:\n tmp = random.sample(maths_operations, min(num_of_calculations, len(maths_operations)))\n maths_oper_drew_by_lot += tmp\n num_of_calculations -= len(tmp)\n generate_operators_helper(maths_oper_drew_by_lot, num_of_calculations)\n return maths_oper_drew_by_lot", "def 
init_operators(op_type, number=8, size=5, centers=5, sigma=.1):\n return [op_type(size, sigma = sigma, centers = centers)\n for i in range(number)]", "def calculate(self, op, a, b):\n if op == \"+\":\n return a + b\n elif op == \"-\":\n return a - b\n elif op == \"*\":\n return a * b\n elif op == \"/\":\n return a / b", "def build_calculator(\n _log,\n required_properties,\n force_handle,\n position_conversion,\n force_conversion,\n property_conversion,\n calculator,\n device,\n):\n _log.info(f\"Using {calculator}\")\n\n position_conversion = MDUnits.parse_mdunit(position_conversion)\n force_conversion = MDUnits.parse_mdunit(force_conversion)\n\n if calculator == \"schnet_calculator\":\n\n model = load_model(device=device)\n return SchnetPackCalculator(\n model,\n required_properties=required_properties,\n force_handle=force_handle,\n position_conversion=position_conversion,\n force_conversion=force_conversion,\n property_conversion=property_conversion,\n )\n else:\n raise NotImplementedError", "def get_spin_operators(d):\n eye = np.eye(d, dtype=complex)\n s = (d-1)/2.\n # print(s)\n sx = np.zeros([d, d], dtype=complex)\n sy = np.zeros([d, d], dtype=complex)\n sz = np.zeros([d, d], dtype=complex)\n\n for a in range(d):\n if a != 0:\n sx[a, a - 1] = np.sqrt((s + 1) * (2 * a) - (a + 1) * a) / 2\n sy[a, a - 1] = 1j * np.sqrt((s + 1) * (2 * a) - (a + 1) * a) / 2\n if a != d - 1:\n sx[a, a + 1] = np.sqrt((s + 1) * (2 * a + 2) - (a + 2) * (a + 1)) / 2\n sy[a, a + 1] = -1j * np.sqrt((s + 1) * (2 * a + 2) - (a + 2) * (a + 1)) / 2\n sz[a, a] = s - a\n if d == 2:\n sx *= 2\n sy *= 2\n sz *= 2\n return sx, sy, sz, eye", "def gear_mechanism_eval(outputs):\n\n\t# get number of gears and a list of radius\n\tradii = np.array([x[0] for x in outputs])\n\tplacements = np.array([x[1] for x in outputs])\n\t\n\treturn ((np.var(radii)*np.var(placements)*len(outputs)), )", "def sop(t_t): #tt_sop\n s=t_t.SOP() # actually returns isop in internal format '--1-001 1'.\n return cubes(s) #converts into '--1-001' format", "def calc(diff, pool, torment=5, charmed=1):\n root = wodDice.PoolCalc(pool, diff, torment, charmed)\n s = root.summary()\n result = {\n 'pool': pool,\n 'diff': diff,\n 'torment': torment,\n 'charmed': charmed,\n 'FailuresPercent': s['totalFail'],\n 'BotchesPercent': s['botch'],\n 'TormentPercent': s['torment'],\n 'Percent': s['success'][1:],\n 'expectedSuccesses': s['expectedSuccesses'],\n }\n return flask.json.jsonify(result)", "def bellman_operator(V, cp, return_policy=False):\n # === Simplify names, set up arrays === #\n R, w, Lambda_H, Lambda_E, Pi, beta, u, b = cp.R, cp.w, cp.Lambda_H,cp.Lambda_E, cp.Pi, cp.beta, cp.u, cp.b\n asset_grid, z_vals = cp.asset_grid, cp.z_vals\n new_V = np.empty(V.shape)\n new_h = np.empty(V.shape)\n new_l = np.empty(V.shape)\n z_idx = list(range(len(z_vals)))\n\n\n # === Linear interpolation of V along the asset grid === #\n #vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])\n vf = lambda a, i_z: np.interp(a, asset_grid, V[:, i_z])\n\n # === Solve r.h.s. 
of Bellman equation === #\n\n def do_bell(i_a):\n a = asset_grid[i_a]\n #print(a)\n for i_z, z in enumerate(z_vals):\n def obj(x): # objective function to be *minimized*\n y = sum(vf(x[0], j) * Pi[i_z, j] for j in z_idx)\n return - u(R*a +w*z*(1-x[1]) - x[0],x[1]) -x[0]*Lambda_H + z*x[1]*Lambda_E - beta * y \n bnds = ((b, cp.grid_max ),(0+1e-4,1- 1e-4))\n cons = ({'type': 'ineq', 'fun': lambda x: R * a + w*z*(1-x[1])-b -x[0]}, {'type': 'ineq', 'fun': lambda x: x[0]})\n h0 = [b, .438]\n #print(h0)\n h_star = optimize.minimize(obj, h0, bounds = bnds,constraints=cons)\n #h_star3= fminbound(obj, b, R * a + w*z + b)\n #print(obj(h_star.x[0]), obj(h_star3))\n if h_star.success != True:\n h_star = optimize.minimize(obj, h0, bounds = bnds,constraints=cons, options={'eps': 1.4901161193847656e-02, 'maxiter': 100, 'ftol': 1e-05})\n if h_star.success != True:\n print(h_star.message)\n #print(h_star.x[1],h_star.x[0])\n if h_star.x[1] == .4328:\n print(a)\n new_h[i_a, i_z],new_l[i_a, i_z], new_V[i_a, i_z] = h_star.x[0],h_star.x[1], -obj(h_star.x)\n if return_policy:\n return new_h[i_a,:], new_l[i_a, :]\n else:\n return new_V[i_a,:]\n\n rang = np.arange(len(asset_grid))\n Pool = ProcessingPool(96)\n new = Pool.map(do_bell, rang)\n #Pool.clear\n return np.asarray(new)", "def calc(operand_1, operand_2):\n return operand_2*operand_1", "def __init__(self,xsym,ax,Lmax,Mmax,lmax,parity='natural',ax2=None,psum=None,Lmin=None,Mmin=None,lsum=None): \n\n # set the defaults\n def default_if_None(val,deflt): \n if val is None: \n return deflt\n else:\n return val\n\n self.xsym=xsym\n self.ax =ax\n self.bx =default_if_None(ax2,ax)\n self.ax.show('radial axis 1')\n self.bx.show('radial axis 2')\n L0=default_if_None(Lmin,Lmax)\n M0=default_if_None(Mmin,Mmax)\n ls=default_if_None(lsum,2*lmax)+1\n ks=default_if_None(psum,self.ax.order()+self.bx.order())\n\n self.gaunt=GauntCoeffTable(2*lmax)\n self.bang=[]\n self.len=-1\n block_i0=0\n count=0\n for L in range(L0,Lmax+1):\n for M in range(M0,Mmax+1):\n for l1 in range(lmax+1):\n for l2 in range(lmax+1):\n if l1+l2>ls: continue\n if parity=='natural' and (L+l1+l2)%2==1: continue\n if parity=='unnatural' and (L+l1+l2)%2==0: continue\n if xsym!=0 and l1<l2: continue # skip exchange symmetric angular part\n self.bang.append(BasTwoAngle(L,M,l1,l2))\n ba=self.bang[-1]\n\n # generate product basis\n for e1 in self.ax.e:\n for e2 in self.bx.e:\n ba.brad.append(BasTwoRadial(e1.centrifugal(l1),e2.centrifugal(l2)))\n br=ba.brad[-1]\n for k1 in range(e1.n):\n for k2 in range(e2.n):\n count+=1\n br.k1.append(k1)\n br.k2.append(k2)\n itotal=block_i0+e1.i0+k1+self.ax.len()*(e2.i0+k2)\n# print 'block',L,M,l1,l2,itotal,block_i0\n self.len=max(self.len,itotal+1)\n br.i.append(itotal)\n block_i0=block_i0+self.ax.len()*self.bx.len() \n print 'total',self.len" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the results_dict to the database
def write_results(self,results_dict):
[ "def export_results_to_db(db_conn, output_dict):\n print_log_msg(log_str='Export core GF completeness results to TRAPID database.')\n cursor = db_conn.cursor()\n # Kind of dumb way to create the request, but it works.\n columns = ', '.join(sorted(output_dict))\n values = ', '.join([\"\\'{insert_value}\\'\".format(insert_value=output_dict[k]) for k in sorted(output_dict)])\n export_query = \"INSERT INTO completeness_results ({columns}) VALUES ({values})\".format(columns=columns, values=values)\n cursor.execute(export_query)\n db_conn.commit()", "def save_results(self):\n with CursorFromPool() as cur:\n for i, result in enumerate(self._parse_results(), start=1):\n logger.info('Inserting run {} - sample {} into results table...'.format(result.run_id, i))\n cur.execute(\"\"\"\n INSERT INTO results (run_id, assay_id, sample_role, sample_type, sample_id, result, units,\n result_status, username, flags, cntrl_cts, comments, dwp_id, mwp_id, mwp_position, start_ts,\n end_ts)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\", result)", "def save_report(results, srv):\n db = srv.connectDatabase('profile_results')\n db.commitOne({'results': results}, timestamp=True)", "def add_to_database(results):\n\n err = CLIENT.write(['%s,hashid=%s warnings=%d,errors=%d,status=\"%s\"' % (DB_NAME, results['hashid'], results['warnings'], results['errors'], results['status'])], {'db':DB_NAME}, protocol='line')\n if not err:\n log_to_file (\"[ERROR] %s fail to post to InfluxDB\" % (results['hashid']))", "def writeResultToFile(results, filename='all_searches.txt'):\n with open(filename, 'w') as f:\n for query in results:\n f.writelines(query.__repr__() + '\\n')", "def save_results(self, execution_id: str, results: QueryResults) -> None:\n if not (self.enabled and self.write):\n return\n _save_results(self.local_storage, execution_id, results)", "def write_database(db, database_file):\n with open(database_file, 'w') as f:\n for job_id, (filename, status, date) in db.items():\n f.write(\"{}\\t{}\\t{}\\t{}\\n\".format(filename, job_id, status, date))", "def saveToFile(dict):\n f = codecs.open(database_path, \"w\", \"utf-8\")\n f.write(str(dict))\n f.close()", "def saveAnalysisToMongo(self, recog_results, identity_real, ie):\n data = self.getAnalysisData(recog_results, identity_real, ie)", "def _save_result_leaderboard(self, results):\n result_leaderboard = dict()\n for res in results:\n result_leaderboard[res[\"question_id\"]] = {\n \"direct_answer\": res[\"pred_ans\"],\n \"multiple_choice\": \"\",\n }\n\n result_file = registry.get_path(\"result_dir\") + \"_leaderboard.json\"\n\n with open(result_file, \"w\") as f:\n json.dump(result_leaderboard, f)\n\n logging.info(f\"Saved results for leaderboard evaluation at {result_file}\")", "def _add_results(self, results, trial_id):\n for result in results:\n self.logger.debug(\"Appending result: %s\" % result)\n result[\"trial_id\"] = trial_id\n result_record = ResultRecord.from_json(result)\n result_record.save()", "def saveResults(self):\n fname = join(self.seriesOutputDir, 'results.json')\n with open(fname, 'w') as outputFile:\n outputFile.write(json.dumps(self.results))", "def save_results(self):\n\n # Save the results\n self.output_results = '{}_results.dat'.format(self.method)\n with open(self.output_results, 'w') as f:\n f.write(str(self.result))\n print('Results file saved to {}'.format(self.output_results))", "def write_aldb(self):\n pass", "def writeToMySQL(self, connection):\n pass # TODO -- write", "def save_rms_data(filename, 
results):\n with open(filename, \"w\") as f:\n json.dump(results, f)", "def write_game_scores(self):\n for game_scores_dict in self._data:\n try:\n sql = \"\"\"INSERT INTO GOG_SCRAPPER_DB.game_scores\n (title_sku, \n score_quote_datetime,\n score)\n VALUES(%s,%s,%s) \n \"\"\"\n val = (game_scores_dict[config.KEYNAME_GAME_SKU],\n datetime.now().strftime(config.DATETIME_FORMAT),\n game_scores_dict[config.KEYNAME_GAME_SCORE]\n )\n self.cursor.execute(\"SET SESSION MAX_EXECUTION_TIME=9999\")\n self.cursor.execute(sql, val)\n except Exception:\n pass", "def write_toDB(self, db, cursor):\n for handler in self.seqHandler_hash.values():\n for spectra in handler.spectras:\n spectra.save( db, cursor)", "def write(self):\n\n d = {} # to contain mappings of term to file cursor value\n with open(self.p_file, \"wb\") as f:\n for word, posting_list in self.dictionary.items():\n cursor = f.tell()\n d[word] = cursor # updating respective (term to file cursor value) mappings\n pickle.dump(posting_list, f, protocol=4)\n\n with open(self.d_file, \"wb\") as f:\n pickle.dump(d, f) # (term to file cursor value) mappings dictionary\n pickle.dump(self.doc_lengths, f) # document lengths regardless of zone/field types\n pickle.dump(self.docid_term_mappings, f) # (doc_id to K most common terms) mappings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CMS interface to reversion api helper function. Registers model for reversion only if reversion is available. Auto excludes publisher fields.
def reversion_register(model_class, fields=None, follow=(), format="xml", exclude_fields=None): if not 'reversion' in settings.INSTALLED_APPS: return if fields and exclude_fields: raise ValueError("Just one of fields, exclude_fields arguments can be passed.") opts = model_class._meta local_fields = opts.local_fields + opts.local_many_to_many if fields is None: fields = [field.name for field in local_fields] exclude_fields = exclude_fields or [] if 'publisher' in settings.INSTALLED_APPS: from publisher import Publisher if issubclass(model_class, Publisher): # auto exclude publisher fields exclude_fields += ['publisher_is_draft', 'publisher_public', 'publisher_state'] fields = filter(lambda name: not name in exclude_fields, fields) from cms.utils import reversion_hacks reversion_hacks.register_draft_only(model_class, fields, follow, format)
[ "def register_model_version_relation(self, version: Text,\n model_id: int,\n project_snapshot_id: int = None) -> ModelVersionRelationMeta:\n try:\n model_version_relation = MetaToTable.model_version_relation_to_table(version=version,\n model_id=model_id,\n project_snapshot_id=project_snapshot_id,\n store_type=type(self).__name__)\n model_version_relation.save()\n # update reference field\n if model_id is not None:\n model_relation = MongoModelRelation.objects(uuid=model_id).first()\n if model_relation.model_version_relation is None:\n model_relation.model_version_relation = [model_version_relation]\n else:\n model_relation.model_version_relation.append(model_version_relation)\n model_relation.save()\n\n if project_snapshot_id is not None:\n project_snapshot = MongoProjectSnapshot.objects(uuid=project_snapshot_id).first()\n if project_snapshot.model_version_relation is None:\n project_snapshot.model_version_relation = [model_version_relation]\n else:\n project_snapshot.model_version_relation.append(model_version_relation)\n project_snapshot.save()\n\n model_version_relation_meta = ModelVersionRelationMeta(version=version, model_id=model_id,\n project_snapshot_id=project_snapshot_id)\n return model_version_relation_meta\n except mongoengine.OperationError as e:\n raise AIFlowException('Registered ModelVersion (name={}) already exists. '\n 'Error: {}'.format(model_version_relation.version, str(e)))", "def modelcatalog(self, user):\n return super().modelcatalog(user)", "def _put(self, **kwargs):\n return super(VersionedModel, self).put(**kwargs)", "def version_model(self, user):\n self.version = datetime.now().strftime(\"%Y-%m-%d-%h-%M-%S\")\n self.generate_model_id(user)\n self.select_model(user=user)", "def register_model(name: str) -> None:\n # Add the model to the list of valid models.\n VALID_MODELS.append(name)", "def build_model(self):\n ...", "def handle_content_model_generic_relation(self, cms_config):\n for versionable in cms_config.versioning:\n inject_generic_relation_to_version(versionable.content_model)", "def test_project_reversion(admin_client):\n\n # create the first revision of Org\n with reversion.create_revision():\n key = mommy.make(Org, name=\"first name\")\n reversion.set_date_created(datetime(2017, 4, 30, tzinfo=pytz.UTC))\n # try to get the object from the database.\n key_from_db = Org.objects.get(pk=key.pk)\n assert key.name == key_from_db.name\n\n # change a field and save it.\n with reversion.create_revision():\n key_from_db.name = \"updated name\"\n key_from_db.save()\n reversion.set_date_created(datetime(2017, 5, 15, tzinfo=pytz.UTC))\n\n # change a field again and save it.\n with reversion.create_revision():\n key_from_db.name = \"no name\"\n key_from_db.save()\n reversion.set_date_created(datetime(2017, 6, 1, tzinfo=pytz.UTC))\n\n # create an unrelated project with just one revision.\n with reversion.create_revision():\n unrelated_key = mommy.make(Org, name=\"unrelated project\")\n reversion.set_date_created(datetime(2017, 7, 30, tzinfo=pytz.UTC))\n\n # refresh the value from the database.\n key.refresh_from_db()\n\n # django sanity check: the first object should equal the new one.\n assert key.name == \"no name\"\n assert key_from_db.name == \"no name\"\n\n # Get all the revisions for all projects.\n all_versions = Version.objects.get_for_model(Org)\n assert len(all_versions) == 4\n\n # Get only the revisions for the one that we made 3 revisions for.\n # (excludes the unrelated project.)\n versions = Version.objects.get_for_object(key)\n assert len(versions) 
== 3", "def __init__(self, model, watched_fields,\n created_tpl=None, update_tpl=None, delete_tpl=None,\n master_field=None):\n self.model = resolve_model(model, strict=True)\n self.watched_fields = fields_list(self.model, watched_fields)\n if master_field:\n self.master_field = master_field\n if created_tpl:\n self.created_tpl = rt.get_template(created_tpl)\n if update_tpl:\n self.update_tpl = rt.get_template(update_tpl)\n if delete_tpl:\n self.delete_tpl = rt.get_template(delete_tpl)", "def handle_admin_classes(self, cms_config):\n replace_admin_for_models(\n [versionable.content_model for versionable in cms_config.versioning],\n )", "def _need_to_reconstruct_model(self):\n raise NotImplementedError()", "def test_version_admin_registered(self):\n version_proxies = [\n model for model in admin.site._registry if issubclass(model, Version)\n ]\n source_models_in_proxies = [model._source_model for model in version_proxies]\n source_model_to_proxy = dict(zip(source_models_in_proxies, version_proxies))\n\n self.assertIn(PollContent, source_models_in_proxies)\n self.assertIn(\n VersionAdmin,\n admin.site._registry[source_model_to_proxy[PollContent]].__class__.mro()\n )\n\n self.assertIn(BlogContent, source_models_in_proxies)\n self.assertIn(\n VersionAdmin,\n admin.site._registry[source_model_to_proxy[BlogContent]].__class__.mro()\n )\n\n self.assertNotIn(Comment, source_models_in_proxies)", "def update_model(newModel, newAdmin=None):\n global reservationModel\n reservationModel = newModel\n from django.contrib import admin\n if not reservationModel in admin.site._registry:\n admin.site.register(reservationModel, DefaultReservationAdmin if not newAdmin else newAdmin)", "def test_is_content_model_versioned(self):\n extension = VersioningCMSExtension()\n extension.versionables = [VersionableItem(\n content_model=PollContent, grouper_field_name='poll',\n copy_function=default_copy\n )]\n\n self.assertTrue(extension.is_content_model_versioned(PollContent))", "def register_model(self, model, bundle):\n if model in self._model_registry:\n raise AlreadyRegistered('The model %s is already registered' \\\n % model)\n\n if bundle.url_params:\n raise Exception(\"A primary model bundle cannot have dynamic \\\n url_parameters\")\n\n self._model_registry[model] = bundle", "def get_model_spec(self, *args, **kwargs):", "def init_hook():\n # pylint: disable=unused-variable\n @signals.Restful.model_put.connect_via(all_models.Audit)\n @signals.Restful.model_deleted.connect_via(all_models.Audit)\n def handle_audit_permission_put(sender, obj, src=None, service=None):\n \"\"\"Make sure admins cannot delete/update archived audits\"\"\"\n # pylint: disable=unused-argument\n if obj.archived and not db.inspect(\n obj).get_history('archived', False).has_changes():\n raise Forbidden()\n\n # pylint: disable=unused-variable\n @signals.Restful.model_deleted.connect_via(all_models.Assessment)\n @signals.Restful.model_deleted.connect_via(all_models.AssessmentTemplate)\n @signals.Restful.model_posted.connect_via(all_models.Assessment)\n @signals.Restful.model_posted.connect_via(all_models.AssessmentTemplate)\n @signals.Restful.model_put.connect_via(all_models.Assessment)\n @signals.Restful.model_put.connect_via(all_models.AssessmentTemplate)\n @signals.Restful.model_put.connect_via(all_models.Snapshot)\n def handle_archived_object(sender, obj=None, src=None, service=None):\n \"\"\"Make sure admins cannot delete/update archived audits\"\"\"\n # pylint: disable=unused-argument\n if obj.archived:\n raise Forbidden()\n\n 
@signals.Restful.model_deleted.connect_via(all_models.Comment)\n @signals.Restful.model_deleted.connect_via(all_models.Document)\n @signals.Restful.model_deleted.connect_via(all_models.UserRole)\n @signals.Restful.model_posted.connect_via(all_models.Comment)\n @signals.Restful.model_posted.connect_via(all_models.Document)\n @signals.Restful.model_posted.connect_via(all_models.Snapshot)\n @signals.Restful.model_posted.connect_via(all_models.UserRole)\n def handle_archived_context(sender, obj=None, src=None, service=None):\n \"\"\"Make sure admins cannot delete/update archived audits\"\"\"\n # pylint: disable=unused-argument\n if (hasattr(obj, 'context') and\n hasattr(obj.context, 'related_object') and getattr(\n obj.context.related_object, 'archived', False)):\n raise Forbidden()\n\n @signals.Restful.model_posted.connect_via(all_models.Relationship)\n @signals.Restful.model_deleted.connect_via(all_models.Relationship)\n def handle_archived_relationships(sender, obj=None, src=None, service=None):\n \"\"\"Make sure users can not map objects to archived audits\"\"\"\n # pylint: disable=unused-argument\n if (getattr(obj, 'source_type', None) == 'Issue' or\n getattr(obj, 'destination_type', None) == 'Issue'):\n # Issues can be mapped even if audit is archived so skip the permission\n # check here\n return\n if (hasattr(obj, 'context') and\n hasattr(obj.context, 'related_object') and getattr(\n obj.context.related_object, 'archived', False)):\n raise Forbidden()", "def put(self, **kwargs):\n creating_new_model = not self.version_unifier_key\n\n if creating_new_model:\n version_unifier = VersionUnifier(parent=self._feaux_parent_key)\n self.version_unifier_key = version_unifier.put()\n\n else:\n self._reset_entity()\n\n self._parent_key = self.version_unifier_key\n return self._put(**kwargs)", "def handle_new_model(request, model_name):\n dbsession = DBSession()\n data = JSONAPIValidator(not_empty=True).to_python(request.body)\n item = COMPONENTS[model_name]['class'].from_dict(data, dbsession)\n if item:\n with transaction.manager:\n dbsession.add(item)\n dbsession.flush()\n item_data, item_included = item.as_dict(request=request)\n response = {'data': item_data}\n if item_included:\n response['included'] = filter_list(item_included)\n return response\n return {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a Key with given urlsafe
def get_key_from_urlsafe(urlsafe): return ndb.Key(urlsafe=urlsafe)
[ "def _make_cache_key(url, permanent=False):\n return \"WebTemplate:%s:%s\" % (url, {True: \"p\", False: \"t\"}[permanent])", "def encode_key_as_urlsafe(obj):\n return obj.urlsafe()", "def construct_unique_key_google(google_baseurl, params):\n google_unique_key = f\"UNIQUE_KEY---{str(google_baseurl)}---{str(params)}---{str(google_secrets.google_api_key)}\"\n\n return google_unique_key", "def construct_unique_key(baseurl, params):\n param_strings = []\n connector = '_'\n for k in params.keys():\n param_strings.append(f'{k}_{params[k]}')\n param_strings.sort()\n unique_key = baseurl + connector + connector.join(param_strings)\n return unique_key", "def encode_url_key(\n self,\n key: str,\n ) -> str:\n return force_unicode(key).replace('_', '-')", "def gen_api_key():\n m = hashlib.sha256()\n m.update(get_random_word(12))\n return unicode(m.hexdigest()[:12])", "def _create_key(self):\n return uuid.uuid4().hex", "def url_key(url: str) -> str:\n url_parts = urlparse(url)\n path = url_parts.path[:-1] if url_parts.path.endswith('/') else url_parts.path\n return f\"{url_parts.netloc}/{path}/{url_parts.query}\"", "def uglify_hapikey(url: str) -> str:\n url_parse = parse.urlparse(url)\n parse_query = parse.parse_qs(url_parse.query)\n if \"hapikey\" not in parse_query:\n return url\n parse_query[\"hapikey\"][0] = f\"{parse_query['hapikey'][0][0:4]}****\"\n parse_result = parse.ParseResult(\n scheme=url_parse.scheme,\n netloc=url_parse.netloc,\n path=url_parse.path,\n params=url_parse.params,\n query=parse.urlencode(parse_query, doseq=True, safe=\"*\"),\n fragment=url_parse.fragment,\n )\n return parse.urlunparse(parse_result)", "def generate_key(query_template, params):\n query_payload = str(query_template) + str(params)\n return hashlib.sha256(query_payload.encode('utf-8')).hexdigest()", "def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)", "def get_object_from_urlsafe(urlsafe):\n return get_key_from_urlsafe(urlsafe)", "def create_key():\n keys = f'{consumer_key}:{consumer_secret_key}'.encode('ascii')\n b64_encoded_keys = base64.b64encode(keys)\n b64_encoded_keys = b64_encoded_keys.decode('ascii')\n\n return b64_encoded_keys", "def test_generateKey(self):\n\n self.assertEqual(PartTestTemplate.generateTestKey('bob'), 'bob')\n self.assertEqual(PartTestTemplate.generateTestKey('bob%35'), 'bob35')\n self.assertEqual(PartTestTemplate.generateTestKey('bo b%35'), 'bob35')\n self.assertEqual(PartTestTemplate.generateTestKey('BO B%35'), 'bob35')\n self.assertEqual(PartTestTemplate.generateTestKey(' % '), '')\n self.assertEqual(PartTestTemplate.generateTestKey(''), '')", "def create_key(username):\n\n key = str(username) + str(datetime.datetime.now())\n msg = 'opendsa.cc.vt.edu'\n hash_key = hmac.new(key, msg, sha1)\n return hash_key.digest().encode('hex')", "def test_safe_key(self):\n self.assertEqual(\n safe_key(\"hello world\"),\n \"b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9\",\n )", "def build_url(self, base_url, params):\r\n url_substr_list = [base_url, '?key=', self.api_key]\r\n for param, value in params.iteritems():\r\n encoded_value = urllib.quote(str(value))\r\n url_substr_list.append('&')\r\n url_substr_list.extend([param, '=', encoded_value])\r\n return ''.join(url_substr_list)", "def key(self, key: any):\n self.suburl(str(key))\n return self", "def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return model class from any query. Note that the model needs to be imported once in the application
def get_model_class_from_query(query): return ndb.Model._lookup_model(query.kind)
[ "def query_class(self):\n return self.query_class_loader.get_class(name=self.query_class_name)", "def model_class(self):\n try:\n entity = self._only_full_mapper_zero(\"\")\n except Exception: # pragma: no cover\n class_ = None\n else:\n class_ = entity.mapper.class_\n\n return class_", "def query(self, model=None):\r\n return session_query(self.session, model or self.model)", "def query(self, model=None):\n return session_query(self.session, model or self.model)", "def model_instance(self) -> any:\n pass", "def get(self, **query):\n return self.model(self.repository.get(**query))", "def get_instance(self, model: Type[Model],\n fields: Dict[Field, Any]) -> Optional[Model]:\n table_name = model._alt_name or model.__name__.lower()\n sql_fields = []\n sql_values = []\n for field, value in fields.items():\n sql_field = f\"{field.name}=?\"\n sql_fields.append(sql_field)\n sql_values.append(value)\n\n # Determine the field of the queries.\n fields = []\n for field in model._fields.values():\n fields.append(field.name)\n fields = \", \".join(fields)\n\n # Send the query.\n filters = \" AND \".join(sql_fields)\n rows = self._execute(SELECT_QUERY.format(table_name=table_name,\n fields=fields, filters=filters), sql_values)\n\n # Loop over the rows.\n rows = self.cursor.fetchall()\n if len(rows) == 0 or len(rows) < 1:\n return None\n\n row = rows[0]\n instance_data = {}\n for i, field in enumerate(model._fields.values()):\n value = row[i]\n\n # Convert this SQL value to Python if necessary.\n if isinstance(value, bytes) and field.field_type is not bytes:\n # Unpickle the data.\n value = pickle.loads(value)\n\n instance_data[field.name] = value\n\n # Create an instance object.\n return model(**instance_data)", "def _class_from_model_type(hdulist):\n raise NotImplementedError(\n \"stdatamodels does not yet support automatic model class selection\")\n # from . import _defined_models as defined_models\n\n # if hdulist:\n # primary = hdulist[0]\n # model_type = primary.header.get('DATAMODL')\n\n # if model_type is None:\n # new_class = None\n # else:\n # new_class = defined_models.get(model_type)\n # else:\n # new_class = None\n\n # return new_class", "def get_object_for_this_type(self, **kwargs):\r\n return self.model_class().objects.get(**kwargs)", "def qc_model(self) -> qcel.models.common_models.Model:\n model = qcel.models.common_models.Model(method=self.method, basis=self.basis)\n return model", "def lookup_model(cls, model_name):\n return cls.driver.Model._decl_class_registry[model_name.capitalize()]", "def load(cls):\r\n\r\n try:\r\n return cls.objects.get()\r\n except cls.DoesNotExist: # pragma: no cover\r\n return cls()", "def get_model(self):\n return self._model", "def get_model(cObj, appname, modelname):\n #try:\n # module=__import__(appname)\n # model = getattr(getattr(module,'models'), modelname)\n # return model\n #except Exception, ex:\n # return None\n try:\n ctype = ContentType.objects.get(app_label=appname.lower(),\n model=modelname.lower())\n return ctype.model_class()\n except Exception, ex:\n return None", "def generate_model_from_table(self, table: sqlalchemy.Table) -> pydantic.BaseModel:", "def model_handler_cls(cls):", "def get_model(model_pk):\n return Model.objects.get(pk=model_pk)", "def get(self, cls, **kwargs):\n if kwargs:\n return self.sa_session.query(cls).filter_by(**kwargs).one()\n else:\n return self.sa_session.query(cls)", "def _get_model(self):\n # NOTE: need to import this here otherwise it causes a circular\n # Reference and doesn't work i.e. 
settings imports loggers\n # imports models imports settings...\n\n if self.model:\n return self.model\n try:\n from ttaa_base.models import System_Log\n self.model = System_Log\n return self.model\n except Exception as e:\n print(self.errors['model_not_found'])\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a Key with given urlsafe and get the object
def get_object_from_urlsafe(urlsafe): return get_key_from_urlsafe(urlsafe)
[ "def get_key_from_urlsafe(urlsafe):\n return ndb.Key(urlsafe=urlsafe)", "def encode_key_as_urlsafe(obj):\n return obj.urlsafe()", "def key(self, key: any):\n self.suburl(str(key))\n return self", "def _make_cache_key(url, permanent=False):\n return \"WebTemplate:%s:%s\" % (url, {True: \"p\", False: \"t\"}[permanent])", "def get_cache_key(instance, extra=None):\n return '%s.%s.%s' % (instance.__class__.__name__, instance.short_url, extra) if extra else '%s.%s' % (instance.__class__.__name__, instance.short_url)", "def construct_unique_key(baseurl, params):\n param_strings = []\n connector = '_'\n for k in params.keys():\n param_strings.append(f'{k}_{params[k]}')\n param_strings.sort()\n unique_key = baseurl + connector + connector.join(param_strings)\n return unique_key", "def create_new_key_url_pair(url):\n key = create_new_key()\n while key_url_dict.get(key):\n key = create_new_key()\n url_key_dict[url] = key\n key_url_dict[key] = create_new_url_object(url)\n return key", "def build_key(cls, user_id):\n key = ndb.Key(cls, user_id)\n return key", "def url_key(url: str) -> str:\n url_parts = urlparse(url)\n path = url_parts.path[:-1] if url_parts.path.endswith('/') else url_parts.path\n return f\"{url_parts.netloc}/{path}/{url_parts.query}\"", "def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)", "def get_shortened_url():\n url = request.args.get(\"url\")\n if not is_valid_url(url):\n return make_response(\"The url was not valid! Make sure to start the url with http:// or https://\", 404)\n key = url_key_dict.get(url)\n if key:\n if not expired(key):\n return make_response(prefix + key, 200)\n key_url_dict.pop(key, None)\n url_key_dict.pop(url, None)\n key = create_new_key_url_pair(url)\n return make_response(prefix + key, 200)\n key = create_new_key_url_pair(url)\n return make_response(prefix + key, 200)", "def get_apiauth_object_by_key(key):\n return API_Key.query.filter_by(key=key).first()", "def encode_url_key(\n self,\n key: str,\n ) -> str:\n return force_unicode(key).replace('_', '-')", "def build(self, key):\n if key == \"?\":\n return key\n if key.startswith(\"-\"):\n prefix = \"-\"\n key = key[1:]\n else:\n prefix = \"\"\n if key.startswith(self.shared_fields):\n return '%smaster__%s' % (prefix, key)\n else:\n return '%s%s' % (prefix, key)", "def _learn_cache_key(\n r: WSGIRequest, s: HttpResponse, t: int, c: BaseCache\n) -> str:\n r = _chop_querystring(r)\n r = _chop_cookies(r)\n return learn_cache_key(r, s, t, None, c)", "def get_key(struc):\r\n\r\n if not struc:\r\n return None\r\n\r\n key = struc.string_key()\r\n if not key:\r\n return None\r\n if key is True:\r\n key = struc.label\r\n else:\r\n key = struc.label + '/' + key\r\n return key", "def get_key(self, key, bucket_name=None):\n if not bucket_name:\n (bucket_name, key) = self.parse_s3_url(key)\n \n obj = self.get_resource_type('s3').Object(bucket_name, key)\n obj.load()\n return obj", "def key(obj):\n try:\n return obj.key()\n except AttributeError:\n return obj", "def construct_unique_key_google(google_baseurl, params):\n google_unique_key = f\"UNIQUE_KEY---{str(google_baseurl)}---{str(params)}---{str(google_secrets.google_api_key)}\"\n\n return google_unique_key" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all middleware classes
def get_middlewares(): middlewares = [] for middleware in settings.get('MIDDLEWARE_CLASSES', []): middlewares.append(load_class(middleware)()) return middlewares
[ "def all_adapters():\n return AdapterHandler().get_all_classes()", "def inspect_middleware(app: App) -> 'MiddlewareInfo':\n types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)\n\n type_infos = []\n for stack in types_:\n current = []\n for method in stack:\n _, name = _get_source_info_and_name(method)\n cls = type(method.__self__)\n _, cls_name = _get_source_info_and_name(cls)\n current.append(MiddlewareTreeItemInfo(name, cls_name))\n type_infos.append(current)\n middlewareTree = MiddlewareTreeInfo(*type_infos)\n\n middlewareClasses = []\n names = 'Process request', 'Process resource', 'Process response'\n for m in app._unprepared_middleware:\n fns = app_helpers.prepare_middleware([m], True, app._ASGI)\n class_source_info, cls_name = _get_source_info_and_name(type(m))\n methods = []\n for method, name in zip(fns, names):\n if method:\n real_func = method[0]\n source_info = _get_source_info(real_func)\n methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))\n m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)\n middlewareClasses.append(m_info)\n\n return MiddlewareInfo(\n middlewareTree, middlewareClasses, app._independent_middleware\n )", "def get_class_names(self):\n return self.request32('get_class_names')", "def _setup_middlewares(app):\n pass", "def all_handlers():\r\n r = []\r\n for oh in bpy.app.handlers: #so can also remove dupplicates\r\n try:\r\n for h in oh:\r\n r.append(h)\r\n except:\r\n pass\r\n return r", "def get_classes(self, ):\n return self.attrs.get(self.AttributeNames.CLASS, None)", "def load_middlewares(app):\n\n # Error handlers\n app.register_blueprint(mod_err)\n\n # Handles the service id checking\n get_user_id(app)\n\n # CORS\n CORS(app, allow_headers=app.config['ALLOWED_HEADERS'],\n origins=app.config['ALLOWED_ORIGINS'],\n methods=app.config['ALLOWED_METHODS'],\n support_credentials=True)", "def middleware(self, *args, **kwargs):\n kwargs[\"bp_group\"] = True\n\n def register_middleware_for_blueprints(fn):\n for blueprint in self.blueprints:\n blueprint.middleware(fn, *args, **kwargs)\n\n return register_middleware_for_blueprints", "def all_configurable_classes(self) -> TypingList[MetaHasTraits]:\n # Call explicitly the method on this class, to avoid infinite recursion\n # when a subclass calls this method in _classes_default().\n classes = NbGrader._classes_default(self)\n\n # include the coursedirectory\n classes.append(CourseDirectory)\n\n # include the authenticator\n classes.append(Authenticator)\n\n # include all the apps that have configurable options\n for _, (app, _) in self.subcommands.items():\n if len(app.class_traits(config=True)) > 0:\n classes.append(app)\n\n # include plugins that have configurable options\n for pg_name in plugins.__all__:\n pg = getattr(plugins, pg_name)\n if pg.class_traits(config=True):\n classes.append(pg)\n\n # include all preprocessors that have configurable options\n for pp_name in preprocessors.__all__:\n pp = getattr(preprocessors, pp_name)\n if len(pp.class_traits(config=True)) > 0:\n classes.append(pp)\n\n # include all the exchange actions\n for ex_name in exchange.__all__:\n ex = getattr(exchange, ex_name)\n if hasattr(ex, \"class_traits\") and ex.class_traits(config=True):\n classes.append(ex)\n\n # include all the default exchange actions\n for ex_name in exchange.default.__all__:\n ex = getattr(exchange, ex_name)\n if hasattr(ex, \"class_traits\") and ex.class_traits(config=True):\n classes.append(ex)\n\n # include all the converters\n for ex_name in 
converters.__all__:\n ex = getattr(converters, ex_name)\n if hasattr(ex, \"class_traits\") and ex.class_traits(config=True):\n classes.append(ex)\n\n return classes", "def classList():\n module_dir = dir(module)\n module_class = []\n for each in module_dir:\n if type(getattr(module, each)) == types.ClassType:\n module_class.append(each)\n return module_class", "def _get_classes_in_module(mod: Any) -> Iterable[Type[EventHandler]]:\n assert hasattr(mod, \"__all__\")\n for name in mod.__all__:\n cls = getattr(mod, name)\n try:\n if issubclass(cls, EventHandler):\n yield cls\n except TypeError:\n pass", "def find_instrument_classes():\n\n instrument_classes = []\n for name, obj in inspect.getmembers(instruments):\n if inspect.isclass(obj):\n instrument_classes.append(obj.__name__)\n\n return instrument_classes", "def dir_class(obj):\n names = dir(obj)\n names = [name for name in names if not name.startswith('_')]\n names = [name for name in names if not name.isupper()]\n request_class = [name for name in names if \"Req\" in name]\n response_class = [name for name in names if \"Resp\" in name]\n return request_class, response_class #返回接收包和发送包的对应类名称列表", "def get_controller_modules(self):\n controller_prefix = self.controller_prefix\n for controller_name in get_controllers(self.controller_prefix):\n if controller_name.startswith(u'_'): continue\n\n remove = controller_name in sys.modules\n\n controller_module = importlib.import_module(controller_name)\n yield controller_module\n\n if remove:\n sys.modules.pop(controller_name, None)", "def get_classes():\n session = Session()\n classes = session.query(Class)\n session.commit()\n\n return classes", "def getAllRemoteHandlers():\n return MetaRemoteModel.subclass_handlers", "def get_collection_classes():\n return CollectionMetaclass.collection_classes", "def get_forwarder_classnames():\n classnames = []\n for class_element in Forwarder.__subclasses__():\n classnames.append(class_element.__name__)\n return classnames", "def _load_modules(self):\n modules = []\n for cls in self._head_cls_list:\n logger.info(\"Load %s: %s\",\n dashboard_utils.DashboardHeadModule.__name__, cls)\n c = cls(self)\n dashboard_utils.ClassMethodRouteTable.bind(c)\n modules.append(c)\n return modules" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the roofline model for the given platforms. Returns: the achievable performance
def roofline(num_platforms, peak_performance, peak_bandwidth, intensity): assert isinstance(num_platforms, int) and num_platforms > 0 assert isinstance(peak_performance, numpy.ndarray) assert isinstance(peak_bandwidth, numpy.ndarray) assert isinstance(intensity, numpy.ndarray) assert (num_platforms == peak_performance.shape[0] and num_platforms == peak_bandwidth.shape[0]) achievable_perf = numpy.zeros((num_platforms, len(intensity))) for i in range(num_platforms): achievable_perf[i:] = numpy.minimum(peak_performance[i], peak_bandwidth[i] * intensity) return achievable_perf
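The record above pairs a docstring-derived query with its source function, which implements the classic roofline bound P(I) = min(P_peak, B * I). For illustration only (not part of the dataset), a minimal standalone sketch of that computation with made-up platform numbers could look like the following; every value below is an assumption chosen for the example.

import numpy as np

# Hypothetical platform specs (illustrative, not real hardware figures):
# peak compute in GFLOP/s and peak memory bandwidth in GB/s.
peak_performance = np.array([900.0, 4500.0])   # e.g. a CPU and a GPU
peak_bandwidth = np.array([100.0, 600.0])

# Arithmetic intensity sweep (FLOP/byte), log-spaced as in the function above.
intensity = np.logspace(-2, 6, num=9, base=2)

# Roofline: achievable performance is capped by compute or by bandwidth * intensity.
achievable = np.minimum(peak_performance[:, None],
                        peak_bandwidth[:, None] * intensity[None, :])

print(achievable.shape)  # (2 platforms, 9 intensity points)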
[ "def process(hw_platforms, sw_apps, xkcd):\n assert isinstance(hw_platforms, list)\n assert isinstance(sw_apps, list)\n assert isinstance(xkcd, bool)\n\n # arithmetic intensity\n arithmetic_intensity = numpy.logspace(START, STOP, num=N, base=2)\n # Hardware platforms\n platforms = [p[0] for p in hw_platforms]\n\n # Compute the rooflines\n achievable_perf = roofline(len(platforms),\n numpy.array([p[1] for p in hw_platforms]),\n numpy.array([p[2] for p in hw_platforms]),\n arithmetic_intensity)\n norm_achievable_perf = roofline(len(platforms),\n numpy.array([(p[1] * 1e3) / p[3]\n for p in hw_platforms]),\n numpy.array([(p[2] * 1e3) / p[3]\n for p in hw_platforms]),\n arithmetic_intensity)\n\n # Apps\n if sw_apps != []:\n apps = [a[0] for a in sw_apps]\n apps_intensity = numpy.array([a[1] for a in sw_apps])\n\n # Plot the graphs\n if xkcd:\n matplotlib.pyplot.xkcd()\n fig, axes = matplotlib.pyplot.subplots(1, 2)\n for axis in axes:\n axis.set_xscale('log', base=2)\n axis.set_yscale('log', base=2)\n axis.set_xlabel('Arithmetic Intensity (FLOP/byte)', fontsize=12)\n axis.grid(True, which='major')\n\n matplotlib.pyplot.setp(axes, xticks=arithmetic_intensity,\n yticks=numpy.logspace(1, 20, num=20, base=2))\n\n axes[0].set_ylabel(\"Achieveable Performance (GFLOP/s)\", fontsize=12)\n axes[1].set_ylabel(\"Normalized Achieveable Performance (MFLOP/s/$)\",\n fontsize=12)\n\n axes[0].set_title('Roofline Model', fontsize=14)\n axes[1].set_title('Normalized Roofline Model', fontsize=14)\n\n for idx, val in enumerate(platforms):\n axes[0].plot(arithmetic_intensity, achievable_perf[idx, 0:],\n label=val, marker='o')\n axes[1].plot(arithmetic_intensity, norm_achievable_perf[idx, 0:],\n label=val, marker='o')\n\n if sw_apps != []:\n color = matplotlib.pyplot.cm.rainbow(numpy.linspace(0, 1, len(apps)))\n for idx, val in enumerate(apps):\n for axis in axes:\n axis.axvline(apps_intensity[idx], label=val,\n linestyle='-.', marker='x', color=color[idx])\n\n for axis in axes:\n axis.legend()\n fig.tight_layout()\n matplotlib.pyplot.show()", "def test_mlperftiny_models(platform, board, workspace_dir, serial_number, model_name, project_type):\n if platform != \"zephyr\":\n pytest.skip(reason=\"Other platforms are not supported yet.\")\n\n use_cmsis_nn = False\n relay_mod, params, model_info = mlperftiny_get_module(model_name)\n target = tvm.micro.testing.get_target(platform, board)\n project_options = {\"config_main_stack_size\": 4000, \"serial_number\": serial_number}\n\n if use_cmsis_nn:\n project_options[\"cmsis_path\"] = os.getenv(\"CMSIS_PATH\")\n\n if model_name == \"ad\":\n predictor = predict_ad_labels_aot\n else:\n predictor = predict_labels_aot\n\n samples, labels = get_test_data(model_name, project_type)\n if project_type == \"host_driven\":\n with create_aot_session(\n platform,\n board,\n target,\n relay_mod,\n params,\n build_dir=workspace_dir,\n # The longest models take ~5 seconds to infer, but running them\n # ten times (with NUM_TESTING_RUNS_PER_SAMPLE) makes that 50\n timeout_override=server.TransportTimeouts(\n session_start_retry_timeout_sec=300,\n session_start_timeout_sec=150,\n session_established_timeout_sec=150,\n ),\n project_options=project_options,\n use_cmsis_nn=use_cmsis_nn,\n ) as session:\n aot_executor = tvm.runtime.executor.aot_executor.AotModule(\n session.create_aot_executor()\n )\n args = {\n \"session\": session,\n \"aot_executor\": aot_executor,\n \"input_data\": samples,\n \"runs_per_sample\": 10,\n }\n predicted_labels, runtimes = zip(*predictor(**args))\n\n avg_runtime = 
float(np.mean(runtimes) * 1000)\n print(f\"Model {model_name} average runtime: {avg_runtime}\")\n\n elif project_type == \"mlperftiny\":\n runtime = Runtime(\"crt\")\n executor = Executor(\n \"aot\", {\"unpacked-api\": True, \"interface-api\": \"c\", \"workspace-byte-alignment\": 8}\n )\n\n config = {\"tir.disable_vectorize\": True}\n if use_cmsis_nn:\n from tvm.relay.op.contrib import cmsisnn\n\n config[\"relay.ext.cmsisnn.options\"] = {\"mcpu\": target.mcpu}\n relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=target.mcpu)\n\n with tvm.transform.PassContext(opt_level=3, config=config):\n module = tvm.relay.build(\n relay_mod, target=target, params=params, runtime=runtime, executor=executor\n )\n\n temp_dir = tvm.contrib.utils.tempdir()\n model_tar_path = temp_dir / \"model.tar\"\n export_model_library_format(module, model_tar_path)\n workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)\n\n extra_tar_dir = tvm.contrib.utils.tempdir()\n extra_tar_file = extra_tar_dir / \"extra.tar\"\n with tarfile.open(extra_tar_file, \"w:gz\") as tf:\n with tempfile.TemporaryDirectory() as tar_temp_dir:\n model_files_path = os.path.join(tar_temp_dir, \"include\")\n os.mkdir(model_files_path)\n header_path = generate_c_interface_header(\n module.libmod_name,\n [model_info[\"input_name\"]],\n [model_info[\"output_name\"]],\n [],\n {},\n [],\n 0,\n model_files_path,\n {},\n {},\n )\n tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))\n\n create_header_file(\n \"output_data\",\n np.zeros(\n shape=model_info[\"output_shape\"],\n dtype=model_info[\"output_dtype\"],\n ),\n \"include/tvm\",\n tf,\n )\n\n input_total_size = 1\n input_shape = model_info[\"input_shape\"]\n for i in range(len(input_shape)):\n input_total_size *= input_shape[i]\n\n # float input\n if model_name == \"ad\":\n input_total_size *= 4\n\n template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects(platform))\n project_options.update(\n {\n \"extra_files_tar\": str(extra_tar_file),\n \"project_type\": project_type,\n \"board\": board,\n \"compile_definitions\": [\n f\"-DWORKSPACE_SIZE={workspace_size + 512}\", # Memory workspace size, 512 is a temporary offset\n # since the memory calculation is not accurate.\n f'-DTARGET_MODEL={MLPERF_TINY_MODELS[model_name][\"index\"]}', # Sets the model index for project compilation.\n f\"-DTH_MODEL_VERSION=EE_MODEL_VERSION_{model_name.upper()}01\", # Sets model version. 
This is required by MLPerfTiny API.\n f\"-DMAX_DB_INPUT_SIZE={input_total_size}\", # Max size of the input data array.\n ],\n }\n )\n\n if model_name != \"ad\":\n project_options[\"compile_definitions\"].append(\n f'-DOUT_QUANT_SCALE={model_info[\"quant_output_scale\"]}'\n )\n project_options[\"compile_definitions\"].append(\n f'-DOUT_QUANT_ZERO={model_info[\"quant_output_zero_point\"]}'\n )\n\n project = tvm.micro.project.generate_project_from_mlf(\n template_project_path, workspace_dir / \"project\", model_tar_path, project_options\n )\n project.build()\n project.flash()\n with project.transport() as transport:\n aot_transport_find_message(transport, MLPERFTINY_READY_MSG, timeout_sec=200)\n print(f\"Testing {model_name} on {_mlperftiny_get_name(transport)}.\")\n assert _mlperftiny_get_name(transport) == \"microTVM\"\n if model_name != \"ad\":\n accuracy = _mlperftiny_test_dataset(transport, [samples, labels], 100)\n print(f\"Model {model_name} accuracy: {accuracy}\")\n else:\n mean_error = _mlperftiny_test_dataset_ad(transport, [samples, None], 100)\n print(\n f\"\"\"Model {model_name} mean error: {mean_error}.\n Note that this is not the final accuracy number.\n To calculate that, you need to use sklearn.metrics.roc_auc_score function.\"\"\"\n )", "def get_rpi(season_games, team):\n def get_wp(team):\n \"\"\" Return team win percentage \"\"\"\n team_wins = season_games[season_games['Wteam'] == team]\n team_losses = season_games[season_games['Lteam'] == team]\n return float(len(team_wins)) / float(len(team_losses) + len(team_wins))\n\n def get_opponents(team):\n \"\"\" Return list of all opponents \"\"\"\n team_wins = season_games[season_games['Wteam'] == team]\n team_losses = season_games[season_games['Lteam'] == team]\n opponents = [] # to be a list of all game opponents\n [opponents.append(t) for t in team_wins['Lteam']]\n [opponents.append(t) for t in team_losses['Wteam']]\n return opponents\n\n def get_owp(team):\n \"\"\" Return float average opponent win percentage \"\"\"\n opponents = get_opponents(team)\n opponent_wps = [get_wp(t) for t in opponents]\n return float(np.sum(opponent_wps)) / float(len(opponents))\n\n def get_oowp(team):\n \"\"\" Return float opponent's average opponent win percentage \"\"\"\n opponents = get_opponents(team)\n opponent_owps = [get_owp(t) for t in opponents]\n return float(np.sum(opponent_owps)) / float(len(opponents))\n\n wp = get_wp(team)\n owp = get_owp(team)\n oowp = get_oowp(team)\n return (wp * 0.25) + (owp * 0.50) + (oowp * 0.25)", "def detect_platform(self):\r\n\t\t#no parameters\r\n\t\tfor x in self.platform :\r\n\t\t\t\r\n\t\t\tif (x[1] <= (self.vecteur_vitesse[1] + self.position[1]) or self.position[1] + 1 == x[1]) and self.position[1] <= x[1]:\r\n\t\t\t\tif self.position[0] >= x[0] and self.position[0] <= x[2] :\r\n\t\t\t\t\tself.position[1] = x[1] - 1 # -1 pour rester sur la plateforme\r\n\t\t\t\t\tself.sol = True\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tif self.position[1]-self.last_position+self.perso_hauteur_saut >= self.perso_degat_chutte:\r\n\t\t\t\t\t\t\tself.death()\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tfinally :\r\n\t\t\t\t\t\tself.last_position = self.position[1]\r\n\t\t\t\t\t\tif x[4] == 2 :\r\n\t\t\t\t\t\t\tself.death()\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tif x[4] == 5 :\r\n\t\t\t\t\t\t\tself.win()\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif x[4] == 3 and self.var == \"flee\":\r\n\t\t\t\t\t\t\tself.perso_hauteur_saut = self.perso_hauteur_saut*2\r\n\t\t\telif (x[3] >= (self.position[1] + self.vecteur_vitesse[1])) and 
self.position[1] >= x[3]:\r\n\t\t\t\tif self.position[0] >= x[0] and self.position[0] <= x[2] :\r\n\t\t\t\t\tself.position[1] = x[3] + 4 # +2 pour l'affichage\r\n\t\t\t\t\tself.vecteur_vitesse[1] = 0\r\n\t\t\t\t\tif x[4] == 2 :\r\n\t\t\t\t\t\tself.death()\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tif x[4] == 5 :\r\n\t\t\t\t\t\tself.win()\r\n\t\t\t\t\telif x[4] == 1 and self.var == \"strong\":\r\n\t\t\t\t\t\tself.roof = True\r\n\t\t\t\t\t\tself.grabbed_platform = x\r\n\t\t\t\r\n\t\t\tif ((self.position[0] - self.perso_vitesse) <= x[2] ) and self.position[0] > x[2]:\r\n\t\t\t\tif x[1] <= self.position[1] and self.position[1] <= x[3] + 1 :\r\n\t\t\t\t\tself.limit_left = False\r\n\t\t\t\t\tself.grinding = True\r\n\t\t\t\t\tif x[4] == 2 :\r\n\t\t\t\t\t\tself.death()\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tif x[4] == 5 :\r\n\t\t\t\t\t\tself.win()\r\n\t\t\t\t\tif self.var == \"strong\" :\r\n\t\t\t\t\t\tself.vecteur_vitesse[1] = -1\r\n\t\t\t\t\telse :\r\n\t\t\t\t\t\tself.vecteur_vitesse[1] = 0\r\n\t\t\t\t\tself.position[0] = x[2] + 2 #pygame est très soupe au lait... du coup j'ai ajusté...\r\n\t\t\tif ((self.perso_vitesse + self.position[0]) >= x[0] ) and self.position[0] < x[0]:\r\n\t\t\t\tif x[1] <= self.position[1] and self.position[1] <= x[3] + 1 :\r\n\t\t\t\t\tself.limit_right = False\r\n\t\t\t\t\tself.grinding = True\r\n\t\t\t\t\tif x[4] == 2 :\r\n\t\t\t\t\t\tself.death()\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tif x[4] == 5 :\r\n\t\t\t\t\t\tself.win()\r\n\t\t\t\t\tif self.var == \"strong\" :\r\n\t\t\t\t\t\tself.vecteur_vitesse[1] = -1\r\n\t\t\t\t\telse :\r\n\t\t\t\t\t\tself.vecteur_vitesse[1] = 0\t\t\t\t\r\n\t\t\t\t\tself.position[0] = x[0] - 1 #pygame est très soupe au lait... du coup j'ai ajusté...\r\n\t\t\r\n\t\tif self.roof :\r\n\t\t\tif self.position[0] < self.grabbed_platform[0] or self.position[0] > self.grabbed_platform[2] :\r\n\t\t\t\tself.roof = False\r\n\t\tif self.position[1] >= 674 :\r\n\t\t\tself.sol = True\r\n\t\telif self.position[1] <= 1 :\r\n\t\t\tself.vecteur_vitesse[1] = 0\r\n\t\tif self.position[0] <= 1 :\r\n\t\t\tself.limit_left = False\r\n\t\telif self.position[0] >= 1199 :\r\n\t\t\tself.limit_right = False", "def predict_qoe(self):\r\n\t\tfor prediction_metric in self.prediction_metrics:\r\n\t\t\tfor service in VIDEO_SERVICES:\r\n\t\t\t\tthese_players = [player for player in self.players if\\\r\n\t\t\t\t\tself.players[player]['service'] == service and self.players[player]['features'][prediction_metric] is not None]\r\n\t\t\t\t# Predictions are run in parallel, since this is fastest\r\n\t\t\t\tall_player_features = [self.players[player][\"features\"][prediction_metric] \\\r\n\t\t\t\t\tfor player in these_players]\r\n\t\t\t\t\r\n\t\t\t\tif all_player_features == []: continue\r\n\r\n\t\t\t\tif not self.use_perfect or prediction_metric != \"buffer\":\r\n\t\t\t\t\t# Call the prediction function\r\n\t\t\t\t\tpredicted_metrics = self.prediction_models[prediction_metric][service](np.array(all_player_features))\r\n\t\t\t\telse: # buffer and we want to use perfect information\r\n\t\t\t\t\t# get the buffers from the zmq stream\r\n\t\t\t\t\tstats_msg = None\r\n\t\t\t\t\twhile True:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tstats_msg = self.zmq_pull_socket.recv_pyobj(flags=zmq.NOBLOCK)\r\n\t\t\t\t\t\t\t# update players with new info\r\n\t\t\t\t\t\t\tfor player in stats_msg:\r\n\t\t\t\t\t\t\t\tfor k in stats_msg[player]:\r\n\t\t\t\t\t\t\t\t\tself.players[\"10.0.0.{}\".format(player+1)][\"ground_truth_values\"][k].append(\r\n\t\t\t\t\t\t\t\t\t\tstats_msg[player][k])\r\n\t\t\t\t\t\texcept 
zmq.ZMQError:\r\n\t\t\t\t\t\t\tbreak # No new messages\r\n\t\t\t\t\t# use most recent ground truth info for each player\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tpredicted_metrics = [self.players[player][\"ground_truth_values\"][\"buffer\"][-1] for player in self.players]\r\n\t\t\t\t\texcept IndexError:\r\n\t\t\t\t\t\t# no information yet -- just wait\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t# save predictions for other parts of the pipeline\r\n\t\t\t\tfor predicted_metric, player in zip(predicted_metrics, these_players):\r\n\t\t\t\t\tself.players[player][\"predictions\"][prediction_metric].append((time.time(), predicted_metric))\r\n\t\t\t\t# Log predictions for post-mortem analysis\r\n\t\t\t\tself.log_stat(\"pred\", [(player, prediction_metric, predicted_metric, time.time()) \r\n\t\t\t\t\tfor predicted_metric, player in zip(predicted_metrics, these_players)])", "def evaluate_models(num_splits=10):\n models = {\"Decision Tree\": tree.DecisionTreeClassifier(),\n \"Nearest Neighbor\": neighbors.KNeighborsClassifier(),\n \"Random Forest\": ensemble.RandomForestClassifier(),\n \"Linear SVM\": svm.SVC(kernel=\"linear\"), # the linear kernel shows best performance\n \"LDA\": discriminant_analysis.LinearDiscriminantAnalysis(),\n \"Neural Net\": neural_network.MLPClassifier(solver=\"lbfgs\")} # small datasets favor an lbfgs solver\n\n data = pd.read_csv(f\"features_{num_splits}_splits.csv\", index_col=[0])\n # All the models can achieve near perfect accuracy without normalization except for neural networks\n for feature in [\"Mean\", \"Variance\", \"Up\"]:\n data[feature] = (data[feature] - data[feature].mean())/data[feature].std()\n y = data[\"Class\"]\n x = data.drop([\"Class\"], axis=1)\n\n # performing the model testing\n x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=0.3, random_state=1)\n performance = {}\n sources = [\"droneA\", \"droneB\", \"wifi\", \"random-signal\"]\n for name, model in models.items():\n model.fit(x_train, y_train)\n predictions = model.predict(x_test)\n report = metrics.classification_report(predictions, y_test, output_dict=True, zero_division=0)\n # The report gives summary results that are not used, so those are filtered out\n performance[name] = {source: report[source] for source in sources}\n\n return performance", "def precompile_process():\r\n SystemParam.MODEL = \"Heisenberg\"\r\n #SystemParam.MODEL= \"Ising\"\r\n SystemParam.SYMMETRY = \"Z2\"\r\n SystemParam.USE_CUSTOM_RANDOM = False\r\n SystemParam.USE_REFLECTION = False\r\n SystemParam.NUM_OF_THREADS = None\r\n SystemParam.only_NN = True\r\n SystemParam.only_NNN = False", "def main():\n # import all the data\n # TODO: call the load_data() function here and load data from file\n\n \n train_red_x, train_red_y = load_data('hw2_winequality-red_train.npy')\n test_red_x, test_red_y = load_data('hw2_winequality-red_test.npy')\n train_white_x, train_white_y = load_data('hw2_winequality-white_train.npy')\n test_white_x, test_white_y = load_data('hw2_winequality-white_test.npy')\n \n \"\"\"\n n_train_red, _ = np.shape(train_red_x)\n n_test_red, _ = np.shape(test_red_x)\n n_train_white, _ = np.shape(train_white_x)\n n_test_white, _ = np.shape(test_white_x)\n \n \n\n \n partition_factor = 5\n \n for i in range(partition_factor):\n # Red wine\n partitioned_train_red_x = train_red_x[math.floor(n_train_red*(i/partition_factor)):math.floor(n_train_red*(i+1)/partition_factor), :]\n partitioned_train_red_y = 
train_red_y[math.floor(n_train_red*(i/partition_factor)):math.floor(n_train_red*(i+1)/partition_factor), :]\n partitioned_test_red_x = test_red_x[math.floor(n_test_red*(i/partition_factor)):math.floor(n_test_red*(i+1)/partition_factor), :]\n partitioned_test_red_y = test_red_y[math.floor(n_test_red*(i/partition_factor)):math.floor(n_test_red*(i+1)/partition_factor), :]\n\n red_wine_run(partitioned_train_red_x, partitioned_train_red_y, partitioned_test_red_x, partitioned_test_red_y, i+1)\n \n partitioned_train_red_y = bc.classify_real_result(partitioned_train_red_y)\n partitioned_test_red_y = bc.classify_real_result(partitioned_test_red_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(partitioned_train_red_x, partitioned_train_red_y)\n training_time = time.time() - training_start\n sf.validate_with_svm(partitioned_test_red_x, partitioned_test_red_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(partitioned_test_red_x, partitioned_test_red_y, clf0, clf1, clf2, i+1, \"Red wine\", training_time, test_start)\n # White wine\n partitioned_train_white_x = train_white_x[math.floor(n_train_white*(i/partition_factor)):math.floor(n_train_white*(i+1)/partition_factor),:]\n partitioned_train_white_y = train_white_y[math.floor(n_train_white*(i/partition_factor)):math.floor(n_train_white*(i+1)/partition_factor),:]\n partitioned_test_white_x = test_white_x[math.floor(n_test_white*(i/partition_factor)):math.floor(n_test_white*(i+1)/partition_factor),:]\n partitioned_test_white_y = test_white_y[math.floor(n_test_white*(i/partition_factor)):math.floor(n_test_white*(i+1)/partition_factor),:]\n\n white_wine_run(partitioned_train_white_x, partitioned_train_white_y, partitioned_test_white_x, partitioned_test_white_y, i+1)\n\n partitioned_train_white_y = bc.classify_real_result(partitioned_train_white_y)\n partitioned_test_white_y = bc.classify_real_result(partitioned_test_white_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(partitioned_train_white_x, partitioned_train_white_y)\n training_time = time.time()-training_start\n sf.validate_with_svm(partitioned_test_white_x, partitioned_test_white_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(partitioned_test_white_x, partitioned_test_white_y, clf0, clf1, clf2, i+1, \"White wine\", training_time, test_start)\n \n cf.add_lines_to_file(\"data_test_long.txt\", 5)\n \"\"\"\n # Tests\n time_red = time.time()\n red_wine_run(train_red_x, train_red_y, test_red_x, test_red_y)\n print(\"Time it took for code to run on red wine: {}\".format(time.time()-time_red))\n\n time_white = time.time()\n white_wine_run(train_white_x, train_white_y, test_white_x, test_white_y)\n print(\"Time it took for code to run on white wine: {}\".format(time.time()-time_white))\n \n \n \"\"\"\n start_time = time.time()\n train_red_y = bc.classify_real_result(train_red_y)\n test_red_y = bc.classify_real_result(test_red_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(train_red_x, train_red_y)\n training_time = time.time() - training_start\n sf.validate_with_svm(test_red_x, test_red_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(test_red_x, test_red_y, clf0, clf1, clf2, 1, 'Red wine', training_time, test_start)\n\n start_time = time.time()\n train_white_y = bc.classify_real_result(train_white_y)\n test_white_y = bc.classify_real_result(test_white_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(train_white_x, train_white_y)\n training_time 
= time.time()-training_start\n sf.validate_with_svm(test_white_x, test_white_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(test_white_x, test_white_y, clf0, clf1, clf2, 1, 'White wine', training_time, test_start)\n \"\"\"", "def get_physics_quality(self) -> float:\r\n\r\n self.start()\r\n self.communicate([{\"$type\": \"simulate_physics\",\r\n \"value\": False},\r\n {\"$type\": \"create_empty_environment\"}])\r\n\r\n avatar_id = \"A\"\r\n delta_theta = 15\r\n\r\n print(\"Writing the physics quality to the record.\")\r\n\r\n scale = TDWUtils.get_unit_scale(self.record)\r\n\r\n # Add the object.\r\n object_id = self.get_unique_id()\r\n self.communicate({\"$type\": \"add_object\",\r\n \"name\": self.record.name,\r\n \"url\": self.asset_bundle_path,\r\n \"scale_factor\": self.record.scale_factor,\r\n \"id\": object_id})\r\n\r\n # Scale the object to unit size.\r\n self.communicate({\"$type\": \"scale_object\",\r\n \"id\": object_id,\r\n \"scale_factor\": {\"x\": scale, \"y\": scale, \"z\": scale}})\r\n\r\n # Create the avatar.\r\n # Set the pass masks.\r\n self.communicate(TDWUtils.create_avatar(avatar_id=avatar_id))\r\n\r\n self.communicate({\"$type\": \"set_pass_masks\",\r\n \"avatar_id\": avatar_id,\r\n \"pass_masks\": [\"_id\"]})\r\n\r\n id_pass = []\r\n pink_pass = []\r\n for show_collider_hulls in [False, True]:\r\n x = 1.75\r\n y = 0.5\r\n z = 0\r\n\r\n # Show collider hulls.\r\n if show_collider_hulls:\r\n self.communicate({\"$type\": \"show_collider_hulls\",\r\n \"id\": object_id})\r\n\r\n # Reset the avatar.\r\n resp = self.communicate([{\"$type\": \"teleport_avatar_to\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": {\"x\": x, \"y\": y, \"z\": z}},\r\n {\"$type\": \"look_at\",\r\n \"avatar_id\": avatar_id,\r\n \"object_id\": object_id,\r\n \"use_centroid\": True},\r\n {\"$type\": \"send_images\",\r\n \"frequency\": \"always\"}])\r\n\r\n # Equatorial orbit.\r\n theta = 0\r\n while theta < 360:\r\n # Get the number of pixels that aren't black.\r\n img = np.array(Image.open(BytesIO(Images(resp[0]).get_image(0))))\r\n grade = (256 * 256) - np.sum(np.all(img == np.array([0, 0, 0]), axis=2))\r\n\r\n if show_collider_hulls:\r\n pink_pass.append(grade)\r\n else:\r\n id_pass.append(grade)\r\n\r\n theta += delta_theta\r\n\r\n # Get the new position.\r\n rad = radians(theta)\r\n x1 = cos(rad) * x - sin(rad) * z\r\n z1 = sin(rad) * x + cos(rad) * z\r\n\r\n # Teleport the avatar.\r\n # Look at the object.\r\n resp = self.communicate([{\"$type\": \"teleport_avatar_to\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": {\"x\": x1, \"y\": y, \"z\": z1}},\r\n {\"$type\": \"look_at\",\r\n \"avatar_id\": avatar_id,\r\n \"object_id\": object_id,\r\n \"use_centroid\": True}\r\n ])\r\n\r\n # Reset the avatar.\r\n resp = self.communicate([{\"$type\": \"teleport_avatar_to\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": {\"x\": x, \"y\": y, \"z\": z}},\r\n {\"$type\": \"look_at\",\r\n \"avatar_id\": avatar_id,\r\n \"object_id\": object_id,\r\n \"use_centroid\": True},\r\n {\"$type\": \"send_images\",\r\n \"frequency\": \"always\"}])\r\n # Polar orbit.\r\n theta = 0\r\n while theta < 360:\r\n # Get the number of pixels that aren't black.\r\n img = np.array(Image.open(BytesIO(Images(resp[0]).get_image(0))))\r\n grade = (256 * 256) - np.sum(np.all(img == np.array([0, 0, 0]), axis=2))\r\n\r\n if show_collider_hulls:\r\n pink_pass.append(grade)\r\n else:\r\n id_pass.append(grade)\r\n\r\n theta += delta_theta\r\n\r\n # Get the new position.\r\n rad = radians(theta)\r\n x1 = cos(rad) * x - 
sin(rad) * z\r\n y1 = (sin(rad) * x + cos(rad) * z) + y\r\n\r\n # Teleport the avatar.\r\n # Look at the object.\r\n resp = self.communicate([{\"$type\": \"teleport_avatar_to\",\r\n \"avatar_id\": avatar_id,\r\n \"position\": {\"x\": x1, \"y\": y1, \"z\": 0}},\r\n {\"$type\": \"look_at\",\r\n \"avatar_id\": avatar_id,\r\n \"object_id\": object_id,\r\n \"use_centroid\": True}\r\n ])\r\n\r\n grades = [1 - (float(h) / float(i)) for h, i in zip(pink_pass, id_pass)]\r\n\r\n physics_quality = float(sum(grades)) / len(grades)\r\n print(\"Physics quality: \" + str(physics_quality))\r\n\r\n # Kill the build.\r\n self.kill_build()\r\n return physics_quality", "def calculate_rdtie_of_project(projs):\n\tnew_projs = []\n\tfor proj in projs:\n\t\tif os.path.exists(\"../raw_data/\"+proj+\".csv\"):\n\t\t\tnew_projs.append(proj)\n\tprint(new_projs)\n\tprint(\"-------------------------\")\n\n\tresults_path = \"../experiment/results/\" # create directory \"experiment/results\" to save the results of each approach\n\tif not os.path.exists(results_path):\n\t\tos.makedirs(results_path)\n\n\t# ## using original predicted results (rank-based approach)\n\t# Do not remove any configurations\n\tprint(\"RDTie using Rank-based\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie(proj)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/rank_based_RDTie.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\t# ## using Classification (Random Forest, method=0)\n\t# Remove 90% tied configurations\n\tprint(\"RDTie using Classification (random forest)\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=0, frac=0.9)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/classification_RDTie.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n \n\t# ## using Random\n\t# Remove 90% tied configurations\n\tprint(\"RDTie using Random Deletion\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=1, frac=0.9)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/random_rank_RDTie.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\t# ## using Direct LTR\n\tprint(\"RDTie using direct LTR\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=2, frac=0.9)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/direct_ltr_RDTie.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\t# ## using ReConfig (method=1)\n\t# Remove 90% tied configurations\n\tprint(\"RDTie using ReConfig\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=3, frac=0.9)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/reconfig_RDTie.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n \n\t# ## using Outlier detection (One class svm)\n\t# Remove the configurations predicted as -1 (outlier)\n\tprint(\"RDTie using Outlier Detection (one class svm)\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_outlier(proj)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/outlier_detection_RDTie.txt\"\n\t\twith open(result_text, \"a\") as 
f:\n\t\t\tf.write(line)\n\n\n\n\t################################################################################\n\n\t## using RD* to calulate the rank-based(Nair et al.)\n\tprint(\"Evaluate rank-based by RD*\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RD_rankbased(proj)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/Nair_RD.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\t## using RD* to calulate the reconfig\n\tprint(\"Evaluate reconfig by RD*\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RD_reconfig(proj)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/ReConfig_RD.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\t# ## using reConfig with different filter ratios\n\tprint(\"RDTie using reConfig with the filter ratio of 80%\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=3, frac=0.8)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/reconfig_RDTie_0.8.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\tprint(\"RDTie using reConfig with the filter ratio of 70%\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=3, frac=0.7)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/reconfig_RDTie_0.7.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\n\tprint(\"RDTie using ReConfig with the filter ratio of 60%\")\n\tfor proj in new_projs:\n\t\tresults = calculate_RDTie_filter(proj, method=3, frac=0.6)\n\t\tline = proj+\":\"+str(results)+\"\\n\"\n\t\t# print(line)\n\t\tresult_text = \"../experiment/results/reconfig_RDTie_0.6.txt\"\n\t\twith open(result_text, \"a\") as f:\n\t\t\tf.write(line)\n\t#################################################################################", "def find_best_arm(self, pose):\n assert isinstance(pose, PoseStamped)\n self.tf_listener.waitForTransform(\"base_link\", pose.header.frame_id,\n rospy.Time.now(), rospy.Duration(1))\n newpose = self.tf_listener.transformPose(\"base_link\", pose)\n if newpose.pose.position.y > 0:\n return \"left_arm\"\n else:\n return \"right_arm\"", "def compare_observed_models_new(self):\n radii = [3,6]\n num_dists = len(radii)+1\n fl2dist_class = {}\n num_iters = 0\n fl2num_trips = {}\n #first element is hausdorff distance, second is sum hausdorff, third is dsn\n #these are measurements over all combinations of two different paths for a given fl\n # pick two paths at random. If they are the same, pick two paths at random again.\n fl2similarity_measures_mult = {}\n dist2tot_trips_mult = defaultdict(float)\n #these are the measurements for all fl pairs and all models. 
Do not only examine differing paths\n fl2similarity_measures = {}\n dist2tot_trips = defaultdict(float)\n\n dist2num_models = defaultdict(float)\n tot_models = 0.0\n\n for fl in self.fl2models:\n dist = self.node_dist(fl[0],fl[1])\n dist_class = len(radii)\n for i in range(len(radii)):\n if dist <= radii[i]:\n dist_class = i\n break\n fl2dist_class[fl] = dist_class\n models = self.fl2models[fl]\n num_models = len(models)\n probs = [0.0 for i in range(len(models))]\n model_array = []\n total_trips = 0.0\n model_i = 0\n for model in models:\n count = len(models[model])\n probs[model_i] += count\n total_trips += count\n model_array.append(model)\n #print \"Trips with model %d: %d\" % (model_i,count)\n model_i += 1\n dist2num_models[dist_class] += num_models*total_trips\n tot_models += num_models*total_trips\n dist2tot_trips[dist_class] += total_trips\n fl2num_trips[fl] = total_trips\n fl2similarity_measures[fl] = [0.0,0.0,0.0]\n if len(model_array) == 1:\n for i in range(3):\n fl2similarity_measures[fl][i] = 0.0\n continue\n dist2tot_trips_mult[dist_class] += total_trips\n probs = map(lambda x: x/total_trips,probs)\n diag_sum = sum(map(lambda x: x*x,probs))\n denom = 1.0-diag_sum\n weights = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i+1,num_models):\n weights[i][j] = (2*probs[i]*probs[j])/denom\n\n \"\"\"Calculate weighted similarity measures for different path measurements\"\"\"\n fl2similarity_measures_mult[fl] = [0.0,0.0,0.0]\n for i in range(len(model_array)):\n for j in range(i+1,len(model_array)):\n weight = weights[i][j]\n haus,ampsd,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, ampsd %.2f, dsn %.2f\" % (str((i,j)),haus,ampsd,dsn) \n fl2similarity_measures_mult[fl][0] += weight*haus\n fl2similarity_measures_mult[fl][1] += weight*ampsd\n fl2similarity_measures_mult[fl][2] += weight*dsn\n measures = fl2similarity_measures_mult[fl]\n #print \"Diff path overall: haus %.2f, ampsd %.2f, dsn %.2f\" % (measures[0],measures[1],measures[2])\n \"\"\"Reconfigure weights to correspond to all possible combinations\"\"\"\n weights_with_diag = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i,num_models):\n if i == j:\n weights_with_diag[i][j] = probs[i]*probs[i]\n else:\n weights_with_diag[i][j] = weights[i][j]*denom\n \"\"\"Calculate weighted similarity measures for any two paths, can be the same\"\"\"\n weight_sum = 0.0\n for i in range(num_models):\n weight_sum += sum(weights_with_diag[i])\n #print \"weight sum: %f\" % weight_sum\n for i in range(len(model_array)):\n for j in range(i,len(model_array)):\n weight = weights_with_diag[i][j]\n haus,ampsd,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, ampsd %.2f, dsn %.2f\" % (str((i,j)),haus,ampsd,dsn) \n fl2similarity_measures[fl][0] += weight*haus\n fl2similarity_measures[fl][1] += weight*ampsd\n fl2similarity_measures[fl][2] += weight*dsn\n measures = fl2similarity_measures[fl]\n\n #print \"overall: haus %.2f, ampsd %.2f, dsn %.2f\\n\" % (measures[0],measures[1],measures[2])\n num_iters += 1\n dist2haus = defaultdict(float)\n dist2ampsd = defaultdict(float)\n dist2dsn = defaultdict(float)\n dist2haus_mult = defaultdict(float)\n dist2ampsd_mult = defaultdict(float)\n dist2dsn_mult = defaultdict(float)\n tot_haus = 0.0\n tot_ampsd = 0.0\n tot_dsn = 0.0\n tot_haus_mult = 0.0\n tot_ampsd_mult = 0.0\n tot_dsn_mult = 0.0\n tot_mult_trips = 0.0\n 
tot_trips = 0.0\n for fl in fl2num_trips:\n num_trips = fl2num_trips[fl]\n tot_trips += num_trips\n dist_class = fl2dist_class[fl]\n if len(self.fl2models[fl]) > 1:\n mult_meas = fl2similarity_measures_mult[fl]\n weighted_haus = num_trips*mult_meas[0]\n weighted_ampsd = num_trips*mult_meas[1]\n weighted_dsn = num_trips*mult_meas[2] \n dist2haus_mult[dist_class] += weighted_haus \n dist2ampsd_mult[dist_class] += weighted_ampsd \n dist2dsn_mult[dist_class] += weighted_dsn \n tot_haus_mult += weighted_haus\n tot_ampsd_mult += weighted_ampsd\n tot_dsn_mult += weighted_dsn\n tot_mult_trips += num_trips\n meas = fl2similarity_measures[fl]\n weighted_haus = num_trips*meas[0]\n weighted_ampsd = num_trips*meas[1]\n weighted_dsn = num_trips*meas[2] \n dist2haus[dist_class] += weighted_haus\n dist2ampsd[dist_class] += weighted_ampsd\n dist2dsn[dist_class] += weighted_dsn\n tot_haus += weighted_haus\n tot_ampsd += weighted_ampsd\n tot_dsn += weighted_dsn\n for i in range(num_dists):\n num_trips_mult = dist2tot_trips_mult[i]\n num_trips = dist2tot_trips[i]\n dist2num_models[i] = dist2num_models[i]/num_trips\n dist2haus_mult[i] = dist2haus_mult[i]/num_trips_mult\n dist2ampsd_mult[i] = dist2ampsd_mult[i]/num_trips_mult\n dist2dsn_mult[i] = dist2dsn_mult[i]/num_trips_mult\n dist2haus[i] = dist2haus[i]/num_trips\n dist2ampsd[i] = dist2ampsd[i]/num_trips\n dist2dsn[i] = dist2dsn[i]/num_trips\n print \"\"\n if i == 0:\n print \"0 <= Radius <= %d\" % radii[0]\n elif i < len(radii):\n print \"%d < Radius <= %d\" % (radii[i-1],radii[i])\n else:\n print \"%d < Radius\" % (radii[-1]+1)\n print \"average number of models per fl pair: %.2f\" % dist2num_models[i]\n print \"%d trips for pairs with multiple paths\" % num_trips_mult\n print \"%d total trips\" % num_trips\n print \"Diff paths average hausdorff %.2f, average ampsd %.2f, average dsn %.2f\" % (dist2haus_mult[i],dist2ampsd_mult[i],dist2dsn_mult[i])\n print \"average hausdorff %.2f, average ampsd %.2f, average dsn %.2f\" % (dist2haus[i],dist2ampsd[i],dist2dsn[i])\n\n tot_models = tot_models/tot_trips\n tot_haus_mult = tot_haus_mult/tot_mult_trips\n tot_ampsd_mult = tot_ampsd_mult/tot_mult_trips\n tot_dsn_mult = tot_dsn_mult/tot_mult_trips\n tot_haus = tot_haus/tot_trips\n tot_ampsd = tot_ampsd/tot_trips\n tot_dsn = tot_dsn/tot_trips\n print \"\"\n print \"Overall\"\n print \"average number of models per fl pair: %.2f\" % tot_models\n print \"Diff paths average hausdorff %.2f, average ampsd %.2f, average dsn %.2f\" % (tot_haus_mult,tot_ampsd_mult,tot_dsn_mult)\n print \"average hausdorff %.2f, average ampsd %.2f, average dsn %.2f\" % (tot_haus,tot_ampsd,tot_dsn)\n return", "def build_separate_models():\n # NOTE: The models should be ready to be built here.\n # Also if you have the means of getting the data yourself then you can uncomment the below and get the info from\n # those functions (only do this if you made any changes or don't trust the data I provided).\n #team_df = team_model.get_model_data()\n #player_df = player_model.get_model_data()\n #elo_df = elo_ratings.get_elo().reset_index(drop=True)\n team_df = pd.read_csv(\"./data/team_model_data.csv\", index_col=0).reset_index(drop=True)\n player_df = pd.read_csv(\"./data/player_model_data.csv\", index_col=0).reset_index(drop=True)\n elo_df = pd.read_csv(\"./data/elo_df.csv\", index_col=0).reset_index(drop=True)\n\n # Add b2b from teams into the players model data\n player_df = player_df.merge(team_df[['game_id', 'home_b2b', 'away_b2b']], how='inner', on=['game_id'])\n\n # Add in elo probability to the 
team model\n team_df['elo_prob'] = elo_df['home_prob']\n\n # Train and Test the Team, Player, elo, and the meta\n build_team_model(team_df)\n build_player_model(player_df)\n build_meta_clf(team_df, player_df, elo_df)", "def set_engines(N=0):\n global reachs,pdrs,sims,intrps,bmcs,n_proc,abs_ratio,ifbip,bmcs1, if_no_bip, allpdrs,allbmcs\n bmcs1 = [9] #BMC3\n #for HWMCC we want to set N = \n if N == 0:\n N = n_proc = os.sysconf(os.sysconf_names[\"SC_NPROCESSORS_ONLN\"])\n## N = 4 # this was for hwmcc15\n N = n_proc = 2*N\n## N = n_proc = 8 ### simulate 4 processors for HWMCC - turn this off a hwmcc.\n else:\n n_proc = N\n## print 'n_proc = %d'%n_proc\n #strategy is to use 2x number of processors \n if N <= 1:\n reachs = [24]\n pdrs = [7]\n## bmcs = [30]\n bmcs = [9]\n intrps = []\n sims = []\n slps = [18]\n elif N <= 2:\n reachs = [24]\n pdrs = [7]\n bmcs = [46,47]\n intrps = []\n sims = []\n slps = [18]\n elif N <= 4: #this will be the operative one for hwmcc'15\n reachs = [24] #reachy\n pdrs = [7,34] #prdm pdr_abstract\n if if_no_bip:\n allpdrs = pdrs = [7,19] #pdrm pdrmm\n bmcs = [46,47,2] #bmc3 bmc3 -S\n intrps = [23] #interp_m\n sims = [26] #Rarity_sim\n slps = [18] #sleep\n# 0.PDR, 1.INTERPOLATION, 2.BMC, 3.SIMULATION,\n# 4.REACHX, 5.PRE_SIMP, 6.simple, 7.PDRM, 8.REACHM, 9.BMC3\n# 10.Min_ret, 11.For_ret, 12.REACHP, 13.REACHN 14.PDRseed 15.prove_part_2,\n# 16.prove_part_3, 17.verify, 18.sleep, 19.PDRMm, 20.prove_part_1,\n# 21.run_parallel, 22.INTRP_bwd, 23.Interp_m 24.REACHY 25.REACHYc 26.Rarity Sim 27.simplify\n# 28.speculate, 29.quick_sec, 30.bmc3 -S, 31.BMC2 32.extract -a 33.extract 34.pdr_abstract\n# 35.par_scorr, 36.dsat, 37.iprove\n\n# BIPS = 0.PDR, 1.INTERPOLATION, 2.BMC, 14.PDRseed, 22.INTRP_bwd, 34.pdr_abstract\n# also reparam which uses ,reparam \n\n elif N <= 8: #used for HWMCC'15\n reachs = [24] #REACHY\n allpdrs = pdrs = [7,34,14] #PDRM pdr_abstract PDR_seed\n## intrps = [41,23,1] #Interp_m\n intrps = [23] #rkb\n allbmcs = bmcs = [46,47,9,2] #BMC3 bmc3 -S BMC \n if if_no_bip:\n allpdrs = pdrs = [7,19] #PDRM PDRMm\n intrps = allintrps = [41,23] #Interp_m\n bmcs = allbmcs = [46,47,9,38]\n sims = [26] #Rarity_Sim\n slps = [18] #sleep\n else:\n reachs = [24] #REACHY REACHX\n pdrs = allpdrs\n## pdrs = [7,34,14,19,0] #PDRM pdr_abstract PDR_seed PDRMm PDR\n## pdrs = allpdrs =[7,34,14]\n## intrps = [41,23,1] #Interp_m INTERPOLATION\n## intrps = [41,23] #rkb\n intrps = [23,1] #Interp_m INTERPOLATION\n intrps = [23] #rkb\n bmcs = allbmcs #allbmcs = [9,30,2,31,38,46,47]\n if if_no_bip:\n allpdrs = pdrs = [7,19] #PDRM PDRMm\n intrps = allintrps = [41,23] #Interp_m\n reachs = [24] #REACHY\n bmcs = [46,47,9,38] \n sims = [26] #Rarity_Sim\n slps = [18] #sleep\n print 'No. 
engines = %d,%d '%(N,n_proc)\n print 'pdrs = %s'%str(pdrs)\n print 'bmcs = %s'%str(bmcs)", "def eval_mario_winrate(model, evals, level, vis_on):\n game_instance = games.mario.Mario(model, evals, np.random.randint(0, 2 ** 16), level=level, vis_on=vis_on,\n use_visualization_tool=True)\n results = game_instance.run(advanced_results=True)\n print(\"Mario winrate (avg dist): {}\".format(results))\n return results", "def run_avg_results():\n\n # List of logs to be measured (tested)\n items = [\"logs_2017-06-23_14-16-00\",\n \"logs_2017-06-23_14-16-59\",\n \"logs_2017-06-23_14-17-58\",\n \"logs_2017-06-23_14-18-48\",\n \"logs_2017-06-23_14-19-39\"]\n\n results = []\n game = \"2048\"\n evals = 1000\n for item in items:\n prefix = \"C:/Users/Jan/Documents/GitHub/general-ai/Experiments/best_models_repeats/2048/MLP+ES/\"\n postfix = \"/best/best_0.json\"\n file_name = prefix + item + postfix\n logdir = prefix + item\n\n # SELECT PROPER MODEL\n model = MLP.load_from_file(file_name, game)\n # model = EchoState.load_from_file(file_name, game)\n\n # RUN MODEL\n # 2048\n result = run_2048_extended(model, evals)\n\n # MARIO\n # result = eval_mario_winrate(model=model, evals=evals, level=\"spikes\", vis_on=False)\n\n # ALHAMBRA\n # First element is result of our model (rest are original models from previous work)\n # result = eval_alhambra_avg_score(model, evals)[0]\n\n # TORCS\n # For reinforcement learning, please run model separately (tensorflow needs to be restarted)\n results.append(result)\n\n results = np.array(results)\n file_name = \"{}_stats_{}.txt\".format(game, utils.miscellaneous.get_pretty_time())\n with open(file_name, \"w\") as f:\n f.write(\"--GAME {} STATISTICS-- {} trainings of the same model\".format(game.upper(), len(items)))\n f.write(os.linesep)\n f.write(\"Model: {}\".format(model.get_name()))\n f.write(os.linesep)\n f.write(\"Total games: {} (for each model)\".format(evals))\n f.write(os.linesep)\n f.write(\"MAX TEST: {}\".format(np.max(results)))\n f.write(os.linesep)\n f.write(\"AVG TEST: {}\".format(np.mean(results)))\n f.write(os.linesep)\n f.write(\"MIN TEST: {}\".format(np.min(results)))", "def compare_observed_models(self):\n num_iters = 0\n tot_ovr_trips_mult_paths = 0.0\n fl2num_trips = {}\n #first element is hausdorff distance, second is sum hausdorff, third is dsn\n fl2similarity_measures = {}\n for fl in self.fl2models:\n models = self.fl2models[fl]\n num_models = len(models)\n probs = [0.0 for i in range(len(models))]\n model_array = []\n total_trips = 0.0\n model_i = 0\n for model in models:\n count = len(models[model])\n probs[model_i] += count\n total_trips += count\n model_array.append(model)\n #print \"Trips with model %d: %d\" % (model_i,count)\n model_i += 1\n if len(model_array) == 1:\n continue\n tot_ovr_trips_mult_paths += total_trips\n fl2num_trips[fl] = total_trips\n probs = map(lambda x: x/total_trips,probs)\n diag_sum = sum(map(lambda x: x*x,probs))\n denom = 1.0-diag_sum\n weights = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i+1,num_models):\n weights[i][j] = (2*probs[i]*probs[j])/denom\n # \"\"\"\n fl2similarity_measures[fl] = [0.0,0.0,0.0]\n for i in range(len(model_array)):\n for j in range(i+1,len(model_array)):\n weight = weights[i][j]\n haus,sum_haus,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, sum_haus %.2f, dsn %.2f\" % (str((i,j)),haus,sum_haus,dsn) \n fl2similarity_measures[fl][0] += weight*haus\n fl2similarity_measures[fl][1] += 
weight*sum_haus\n fl2similarity_measures[fl][2] += weight*dsn\n measures = fl2similarity_measures[fl]\n #\"\"\"\n \"\"\"\n for i in range(len(model_array)):\n print \"path %d\" % i\n self.draw_grid(model_array[i])\n weights_with_diag = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i,num_models):\n if i == j:\n weights_with_diag[i][j] = probs[i]*probs[i]\n else:\n weights_with_diag[i][j] = weights[i][j]*denom\n fl2similarity_measures[fl] = [0.0,0.0,0.0]\n weight_sum = 0.0\n for i in range(num_models):\n #for j in range(num_models):\n # sys.stdout.write(\"%.3f \" % weights_with_diag[i][j])\n #print \"\"\n weight_sum += sum(weights_with_diag[i])\n #print \"weight sum: %f\" % weight_sum\n for i in range(len(model_array)):\n for j in range(i,len(model_array)):\n weight = weights_with_diag[i][j]\n haus,sum_haus,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, sum_haus %.2f, dsn %.2f\" % (str((i,j)),haus,sum_haus,dsn) \n fl2similarity_measures[fl][0] += weight*haus\n fl2similarity_measures[fl][1] += weight*sum_haus\n fl2similarity_measures[fl][2] += weight*dsn\n measures = fl2similarity_measures[fl]\n \"\"\"\n #print \"overall: haus %.2f, sum_haus %.2f, dsn %.2f\" % (measures[0],measures[1],measures[2])\n #print \"\"\n #if num_iters > 6:\n # break\n num_iters += 1\n overall_haus = 0.0\n overall_sum_haus = 0.0\n overall_dsn = 0.0\n for fl in fl2num_trips:\n if len(self.fl2models[fl]) == 1:\n continue\n num_trips = fl2num_trips[fl]\n meas = fl2similarity_measures[fl]\n overall_haus += num_trips*meas[0]\n overall_sum_haus += num_trips*meas[1]\n overall_dsn += num_trips*meas[2]\n overall_haus = overall_haus/tot_ovr_trips_mult_paths\n overall_sum_haus = overall_sum_haus/tot_ovr_trips_mult_paths\n overall_dsn = overall_dsn/tot_ovr_trips_mult_paths\n print \"\\naverage hausdorff %.2f, average sum hausdorff %.2f, average dsn %.2f\" % (overall_haus,overall_sum_haus,overall_dsn)\n return", "def compute_optimum(self):\n assert self.sample_dataframe is not None and len(\n self.sample_dataframe) > 0\n\n slo_type = self.sample_dataframe['slo_type'].iloc[0]\n if slo_type == 'latency':\n perf_arr = 1. / self.sample_dataframe['qos_value']\n else:\n perf_arr = self.sample_dataframe['qos_value']\n\n perf_over_cost = perf_arr / self.sample_dataframe['cost']\n return np.max(perf_over_cost)", "def generate_linearized_ROM():\n from sofacontrol.baselines.rompc.rompc_utils import TPWL2LinearROM\n\n tpwl_file = 'tpwl_model_snapshots'\n linrom_file = 'rompc_model'\n\n tpwl_loc = join(path, '{}.pkl'.format(tpwl_file))\n save_loc = join(path, '{}.pkl'.format(linrom_file))\n TPWL2LinearROM(tpwl_loc, save_loc)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes the hw_platforms and sw_apps to plot the roofline.
def process(hw_platforms, sw_apps, xkcd): assert isinstance(hw_platforms, list) assert isinstance(sw_apps, list) assert isinstance(xkcd, bool) # arithmetic intensity arithmetic_intensity = numpy.logspace(START, STOP, num=N, base=2) # Hardware platforms platforms = [p[0] for p in hw_platforms] # Compute the rooflines achievable_perf = roofline(len(platforms), numpy.array([p[1] for p in hw_platforms]), numpy.array([p[2] for p in hw_platforms]), arithmetic_intensity) norm_achievable_perf = roofline(len(platforms), numpy.array([(p[1] * 1e3) / p[3] for p in hw_platforms]), numpy.array([(p[2] * 1e3) / p[3] for p in hw_platforms]), arithmetic_intensity) # Apps if sw_apps != []: apps = [a[0] for a in sw_apps] apps_intensity = numpy.array([a[1] for a in sw_apps]) # Plot the graphs if xkcd: matplotlib.pyplot.xkcd() fig, axes = matplotlib.pyplot.subplots(1, 2) for axis in axes: axis.set_xscale('log', base=2) axis.set_yscale('log', base=2) axis.set_xlabel('Arithmetic Intensity (FLOP/byte)', fontsize=12) axis.grid(True, which='major') matplotlib.pyplot.setp(axes, xticks=arithmetic_intensity, yticks=numpy.logspace(1, 20, num=20, base=2)) axes[0].set_ylabel("Achieveable Performance (GFLOP/s)", fontsize=12) axes[1].set_ylabel("Normalized Achieveable Performance (MFLOP/s/$)", fontsize=12) axes[0].set_title('Roofline Model', fontsize=14) axes[1].set_title('Normalized Roofline Model', fontsize=14) for idx, val in enumerate(platforms): axes[0].plot(arithmetic_intensity, achievable_perf[idx, 0:], label=val, marker='o') axes[1].plot(arithmetic_intensity, norm_achievable_perf[idx, 0:], label=val, marker='o') if sw_apps != []: color = matplotlib.pyplot.cm.rainbow(numpy.linspace(0, 1, len(apps))) for idx, val in enumerate(apps): for axis in axes: axis.axvline(apps_intensity[idx], label=val, linestyle='-.', marker='x', color=color[idx]) for axis in axes: axis.legend() fig.tight_layout() matplotlib.pyplot.show()
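For context only (again, an illustration rather than part of the record), the function above expects hw_platforms entries shaped as (name, peak GFLOP/s, peak GB/s, price in $), where the fourth field drives the per-dollar normalized plot, and sw_apps entries as (name, arithmetic intensity). A hypothetical call could be set up as follows; all numbers are invented for the sketch.

# Hypothetical inputs matching the tuple layout read by process():
hw_platforms = [
    ("CPU-A", 900.0, 100.0, 500.0),     # (name, GFLOP/s, GB/s, price)
    ("GPU-B", 4500.0, 600.0, 1500.0),
]
sw_apps = [("stencil", 0.5), ("dense-gemm", 32.0)]  # (name, FLOP/byte)

# process(hw_platforms, sw_apps, xkcd=False)  # would draw the two roofline panels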
[ "def initialize_geometries(self):\n process = self.dataselector.selected_process\n\n if process == 'Diffractie (Kd)':\n # Lines from location to breakwater head\n for name in self.result_locations['Naam'].array:\n self.elements[name], = self.ax.plot([], [], color='grey', lw=0.75)\n # Width between breakwater heads perp to flow direction\n self.elements['Beq'], = self.ax.plot([], [], color='k', lw=1.25)\n # Representative Wave length\n self.elements['Lr'], = self.ax.plot([], [], color='k', lw=1.25)\n\n elif process == 'Transmissie (Kt)':\n for breakwater in self.breakwaters.itertuples():\n # Shading area per breakwater\n self.elements[breakwater.Index] = self.ax.add_patch(mplPolygon([(0, 0), (0, 0)], color='grey', lw=0.75, alpha=0.2))\n # Text at breakwater\n pt = breakwater.geometry.interpolate(breakwater.geometry.length / 2)\n rotation = np.degrees(geometry.get_orientation(breakwater.geometry, pt))\n self.elements[f'vb_{breakwater.Index}'] = self.ax.text(pt.x, pt.y, '', rotation=(rotation+90) % 180 - 90, va='bottom', ha='center')\n \n elif process == 'Lokale Golfgroei (Hs,lg)':\n # Fetch lines\n for name in self.result_locations['Naam'].array:\n self.elements[name], = self.ax.plot([], [], color='grey', lw=0.75)\n\n elif process == 'Golfbreking (-)':\n # Wave direction lines\n for name in self.result_locations['Naam'].array:\n self.elements[name], = self.ax.plot([], [], color='grey', lw=0.75)\n\n # If no data visualisation\n if process == '':\n self.mapwidget.set_visible('support_locations')\n self.mapwidget.set_visible('result_locations')\n self.canvas.draw_idle()\n\n else:\n # Scatter\n self.resultxy = [np.array([row.geometry.x, row.geometry.y]) for row in self.result_locations.sort_values(by='Naam').itertuples()]\n self.rotations = np.zeros(len(self.resultxy))\n self.values = np.zeros(len(self.resultxy))\n self.markerpath = np.array([[0.0, -0.14], [0.4, -0.36], [0.0, 0.5], [-0.4, -0.36], [0.0, -0.14]])\n theta = np.linspace(0, 2*np.pi, 50)\n self.circlepath = np.c_[np.cos(theta) * 0.3, np.sin(theta) * 0.3]\n\n self.elements['scatter'] = PatchCollection(\n [PathPatch(matplotlib.path.Path(self.markerpath * 300 + crd[None, :]), facecolor='none', edgecolor='k') for crd in self.resultxy])\n self.ax.add_collection(self.elements['scatter'])\n \n self.set_location_values(np.zeros(len(self.result_locations)))\n self.mapwidget.remove_plot_element('support_locations')\n self.mapwidget.remove_plot_element('result_locations')", "def plot_fig(file_path, file_type, area, xp, obs_topo, calc_topo, obs_grav, calc_grav, obs_mag, calc_mag, layer_count,\n layer_lock_list, plotx_list,ploty_list, densities, absolute_densities, reference_densities, segy_plot_list,\n well_list, well_name_list,t_canvas, d_canvas, nt_canvas, model_aspect, use_tight_layout, poly_alpha, fs,\n ms, lw, font_type, layer_colors, draw_polygons, draw_layers, draw_floating_layers, draw_colorbar,\n draw_xy_data, xy_size, xy_color, colorbar_x, colorbar_y, colorbar_size_x, colorbar_size_y,\n layer_line_width, layer_alpha, grav_rms_value, mag_rms_value, grav_y_min, grav_y_max, xy_list_save,\n draw_wells, wells, well_fs, well_line_width, draw_faults, faults):\n ### FUTURE:\n # 1. 
external_lines\n\n # SET DEFAULT PLOTTING PARAMS\n\n # FIGURE FONT SIZE\n fs = fs\n\n # FONT TYPE\n plt.rc('font', family=font_type, size=fs)\n\n # MARKER SIZE\n ms = ms\n\n # LINE WIDTH\n lw = lw\n\n # AXIS TICK FONT SIZE\n plt.rc('xtick', labelsize=8.0)\n plt.rc('ytick', labelsize=8.0)\n\n # AXIS TICK POSITIONS'\n rcParams['xtick.direction'] = 'out'\n rcParams['ytick.direction'] = 'out'\n\n # SET FONT TYPE AS ONE WHICH IS EDITABLE BY VECTOR PROGRAMS ILLUSTRATOR/INKSCAPE ETC\n rcParams['pdf.fonttype'] = 42\n\n # GRID LINE STYLE\n #plt.rc('grid', c='0.5', ls='-', lw=1)\n\n # DIR CONTAINING BOREHOLE ICON\n\n borehole_dir = os.path.dirname(os.path.abspath(__file__))+'../docs/icons/'\n\n # NUMBER OF ROWS IN PLOT\n if t_canvas is True and d_canvas is True and nt_canvas is True:\n num_rows = 12 # %DETERMINES AXES SIZING\n if t_canvas is False and d_canvas is True and nt_canvas is True:\n num_rows = 11 # DETERMINES AXES SIZING\n if t_canvas is True and d_canvas is False and nt_canvas is True:\n num_rows = 11 # DETERMINES AXES SIZING\n if t_canvas is True and d_canvas is True and nt_canvas is False:\n num_rows = 11 # DETERMINES AXES SIZING\n if t_canvas is False and d_canvas is False and nt_canvas is True:\n num_rows = 10 # DETERMINES AXES SIZING\n if t_canvas is True and d_canvas is False and nt_canvas is False:\n num_rows = 10 # DETERMINES AXES SIZING\n if t_canvas is False and d_canvas is True and nt_canvas is False:\n num_rows = 10 # DETERMINES AXES SIZING\n if t_canvas is False and d_canvas is False and nt_canvas is False:\n num_rows = 9 # DETERMINES AXES SIZING\n\n # DETERMINE AXES\n if obs_topo:\n y_start_topo = np.append(obs_topo[:, 1], calc_grav).min() - abs(np.append(obs_topo[:, 1], calc_topo).min()/20)\n y_end_topo = np.append(obs_topo[:, 1], calc_grav).max() + (np.append(obs_topo[:, 1], calc_topo).max()/20)\n\n if obs_grav:\n y_start_grav = np.append(obs_grav[:, 1], calc_grav).min() - abs(np.append(obs_grav[:, 1], calc_grav).min()/20)\n y_end_grav = np.append(obs_grav[:, 1], calc_grav).max() + (np.append(obs_grav[:, 1], calc_grav).max()/20)\n\n if obs_mag:\n y_start_mag = np.append(obs_mag[:, 1], calc_mag).min() - abs(np.append(obs_mag[:, 1], calc_mag).min()/20)\n y_end_mag = np.append(obs_mag[:, 1], calc_mag).max() + abs(np.append(obs_mag[:, 1], calc_mag).max()/20)\n\n x_start_model, x_end_model, y_end_model, y_start_model = np.array(area)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # START FIGURE PLOTTING\n\n fig = plt.figure(dpi=720, facecolor='w', edgecolor='k', figsize=(11.69, 8.27))\n row_counter = 0\n\n # PLOT TOPOGRAPHY CANVAS\n if t_canvas and obs_topo:\n ax1 = plt.subplot2grid((num_rows, 1), (row_counter, 0), rowspan=1, colspan=1)\n row_counter += 1\n\n # AXIS OPTIONS\n plt.ylabel('Topography. 
(km)', fontsize=fs)\n plt.xlim(x_start_model, x_end_model)\n plt.ylim(y_start_topo, y_end_topo)\n ax1.set_xticks([])\n\n # PLOT TOPO DATA'\n ax1.scatter(obs_topo[:, 0], obs_topo[:, 1], marker='o', color='b', s=ms)\n\n # SET AXIS POSTIONS\n ax1.spines['right'].set_color('none')\n ax1.spines['bottom'].set_position('center')\n ax1.spines['top'].set_position('center')\n ax1.tick_params(axis='x', which='both', labelbottom='off', labeltop='off')\n ax1.tick_params(axis='y', which='both', left='on', right='off', labelright='off')\n\n # PLOT GRAVITY CANVAS\n if d_canvas and obs_grav:\n ax2 = plt.subplot2grid((num_rows, 1), (row_counter, 0), rowspan=1, colspan=1)\n row_counter += 1\n\n # AXIS OPTIONS\n plt.ylabel('Bouguer Anomaly (mGal)', fontsize=fs, labelpad=-1)\n plt.xlim(x_start_model, x_end_model)\n plt.ylim(grav_y_min, grav_y_max)\n ax2.set_xticklabels([])\n\n # PLOT GRAVITY DATA\n if obs_grav:\n ax2.scatter(obs_grav[:, 0], obs_grav[:, 1], marker='o', color='b', s=ms)\n\n # SET AXIS POSITIONS\n ax2.spines['right'].set_color('none')\n ax2.spines['bottom'].set_position('center')\n ax2.spines['top'].set_position('center')\n ax2.tick_params(axis='x', which='both', labelbottom='off', labeltop='off')\n ax2.tick_params(axis='y', which='both', left='on', right='off', labelright='off')\n\n if calc_grav is not None:\n ax2.plot(xp*0.001, calc_grav, color='r', linewidth=lw)\n\n # RMS VALUE\n if grav_rms_value != 0:\n ax2.annotate('RMS misfit: ' + str(grav_rms_value), xy=(10, 60), xytext=(10, 60),\n fontsize=fs, horizontalalignment='left', clip_on=False)\n\n # PLOT MAGNETIC CANVAS\n if nt_canvas and obs_mag:\n ax3 = plt.subplot2grid((num_rows, 1), (row_counter, 0), rowspan=1, colspan=1)\n row_counter += 1\n\n # AXIS OPTIONS\n plt.ylabel('Magnetic \\n Anom. (nT)', fontsize=fs)\n plt.xlim(x_start_model, x_end_model)\n plt.ylim(y_start_mag, y_end_mag)\n ax3.set_xticks([])\n\n # PLOT MAGNETIC DATA'\n if obs_mag:\n ax3.scatter(obs_mag[:, 0], obs_mag[:, 1], marker='o', color='b', s=ms)\n\n # SET AXIS POSITIONS\n ax3.spines['right'].set_color('none')\n ax3.spines['bottom'].set_position('center')\n ax3.spines['top'].set_position('center')\n ax3.tick_params(axis='x', which='both', labelbottom='off', labeltop='off')\n ax3.tick_params(axis='y', which='both', left='on', right='off', labelright='off')\n\n if calc_mag is not None:\n ax3.plot(xp*0.001, calc_mag, color='r', linewidth=lw)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # FORWARD MODEL PANEL\n ax4 = plt.subplot2grid((num_rows, 1), (row_counter, 0), rowspan=6, colspan=1)\n ax4.set_aspect(model_aspect)\n plt.tick_params(\n axis='x', # CHANGES APPLY TO THE X-AXIS\n which='both', # BOTH MAJOR AND MINOR TICKS ARE AFFECTED\n bottom='on', # TICKS ALONG THE BOTTOM EDGE ARE OFF\n top='off', # TICKS ALONG THE TOP EDGE ARE OFF\n labelbottom='on') # LABELS ALONG THE BOTTOM EDGE ARE OFF\n\n # PLOT SEISMIC DATA\n for s in range(0, len(segy_plot_list)):\n if segy_plot_list[s]:\n ax4.add_image(copy.copy(segy_plot_list[s]))\n\n if draw_faults is True:\n # PLOT FAULTS\n for i in range(0, len(faults)-1):\n fault = faults[i][0]\n # CHECK IF FAULT IS SET AS VISIBLE; IF YES, THEN PLOT\n if fault.get_visible() == True:\n x = fault.get_xdata()\n y = fault.get_ydata()\n ax4.plot(x, y, color='k', linewidth=0.5, zorder=1, alpha=1.0)\n else:\n continue\n\n if draw_wells:\n # PLOT WELL DATA\n 
for w in range(0, len(well_list)):\n if well_name_list[w] == \"None\" or well_name_list[w] == []:\n continue\n else:\n if not wells[w][0].get_visible():\n continue\n else:\n well_data = np.array(well_list[w])\n y1 = well_data[0, 1].astype(float)* -1.\n y2 = well_data[-1, -1].astype(float)+(y1.astype(float))\n well_x_location = well_data[1, 1]\n wellx = (well_x_location, well_x_location)\n welly = (y1, y2)\n ax4.plot(wellx, welly, linestyle='-', linewidth=well_line_width, color='black')\n\n ax4.annotate(well_name_list[w], xy=(well_x_location, -0.5),\n xytext=(well_x_location, y1-0.1),\n fontsize=well_fs, weight='bold',\n horizontalalignment='center', color='black',\n bbox=dict(boxstyle=\"round,pad=.2\", fc=\"0.8\",\n ec='None'), clip_on=True)\n # PLOT WELL HORIZONS\n for i in range(2, len(well_data)):\n y = [well_data[i, 1].astype(float)-well_data[0, 1].astype(float),\n well_data[i, 1].astype(float)-well_data[0, 1].astype(float)]\n x = [well_data[1, 1].astype(float)-1, well_data[1, 1].astype(float)+1]\n ax4.plot(x, y, linestyle='-', linewidth=well_line_width, color='black')\n horizon_y_pos = well_data[i, 1].astype(float)-well_data[0, 1].astype(float) + 0.01\n horizon = well_data[i, 0].astype(str)\n\n # ALTERNATE POSITION OF ODDs/EVENs TO TRY AND AVOID OVERLAP\n if i % 2 == 0:\n horizon_x_pos = well_data[1, 1].astype(float) - 3.05\n ax4.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),\n xytext=(horizon_x_pos, horizon_y_pos), fontsize=well_fs,\n weight='bold', horizontalalignment='left', verticalalignment='top',\n color='black', bbox=dict(boxstyle=\"round,pad=.4\", fc=\"0.8\",\n ec='None'), clip_on=True)\n else:\n horizon_x_pos = well_data[1, 1].astype(float) + 3.05\n ax4.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),\n xytext=(horizon_x_pos, horizon_y_pos), fontsize=well_fs,\n weight='bold', horizontalalignment='right',\n verticalalignment='bottom', color='black',\n bbox=dict(boxstyle=\"round,pad=.4\", fc=\"0.8\", ec='None'), clip_on=True)\n\n # PLOT LAYER POLYGONS\n if draw_polygons is True:\n for i in range(layer_count, -1, -1):\n # CREATE POLYGONS\n if layer_lock_list[i] == 0 and i >= 1:\n # CREATE POLYGON\n plotx_polygon = np.append(np.array(plotx_list[i]), np.array(plotx_list[i-1])[::-1])\n ploty_polygon = np.append(np.array(ploty_list[i]), np.array(ploty_list[i-1])[::-1])\n else:\n plotx_polygon = np.array(plotx_list[i])\n ploty_polygon = np.array(ploty_list[i])\n\n # DEFINE COLOR MAP\n colormap = cm.coolwarm\n cnorm = colors.Normalize(vmin=-0.8, vmax=0.8)\n colormap = cm.ScalarMappable(norm=cnorm, cmap=colormap)\n\n # CREATE POLYGON FILL\n if densities[i] != 0 and absolute_densities == True:\n next_color = colormap.to_rgba(0.001*densities[i] - 0.001*reference_densities[i])\n elif densities[i] != 0:\n next_color = colormap.to_rgba(0.001 * densities[i])\n else:\n next_color = colormap.to_rgba(0.0)\n\n # DRAW\n ax4.fill(plotx_polygon, ploty_polygon, color=next_color, alpha=poly_alpha, closed=True, ec='none', zorder=1)\n\n # PLOT LAYER LINES\n if draw_layers is True:\n for i in range(layer_count, -1, -1):\n # CREATE POLYGONS\n if layer_lock_list[i] == 0 and i >= 1:\n plotx = np.array(plotx_list[i])\n ploty = np.array(ploty_list[i])\n ax4.plot(plotx, ploty, color=layer_colors[i], linewidth=layer_line_width, alpha=layer_alpha, zorder=1)\n else:\n pass\n\n if draw_floating_layers is True:\n for i in range(layer_count, -1, -1):\n # CREATE POLYGONS\n if layer_lock_list[i] == 1 and i >= 1:\n plotx = np.append(plotx_list[i], plotx_list[i][0])\n ploty = np.append(ploty_list[i], 
ploty_list[i][0])\n ax4.plot(plotx, ploty, color=layer_colors[i], linewidth=layer_line_width, alpha=layer_alpha, zorder=1)\n else:\n pass\n\n # DRAW OTHER XY DATA e.g. EARTHQUAKE HYPOCENTERS\n if draw_xy_data:\n for i in range(0, len(xy_list_save)):\n if xy_list_save[i]:\n xy = xy_list_save[i]\n ax4.scatter(xy[:, 0], xy[:, 1], marker='o', edgecolors='none', facecolors=xy_color,\n s=xy_size, gid=i, alpha=1.0, zorder=2)\n\n # PLOT EXTERNAL LINES\n # for i in range(0, len(external_lines)):\n # # READ FILE PATH OF LINE'\n # line_file_path = external_lines[i]\n # # DRAW LINE ON FIGURE'\n # draw_line(line_file_path, ax3)\n\n # AXIS OPTIONS\n plt.xlabel('Distance (km)', fontsize=fs, labelpad=-1)\n plt.ylabel('Depth (Km)', fontsize=fs, labelpad=-1)\n plt.xlim(x_start_model, x_end_model)\n plt.ylim(y_end_model, y_start_model)\n ax4.spines['top'].set_color('none')\n ax4.tick_params(axis='x', which='both', labelbottom='on', labeltop='off')\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # SET AXIS DIMENSIONS SO X AXIS IS THE SAME AS THE MODEL PLOT\n pos1 = ax4.get_position()\n if t_canvas and obs_topo:\n pos2 = ax1.get_position()\n ax1.set_position([pos1.x0, pos2.y0, pos1.width, pos2.height])\n if d_canvas and obs_grav:\n pos2 = ax2.get_position()\n ax2.set_position([pos1.x0, pos2.y0, pos1.width, pos2.height*2])\n if nt_canvas and obs_mag:\n pos2 = ax3.get_position()\n ax3.set_position([pos1.x0, pos2.y0, pos1.width, pos2.height])\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # PLOT COLOR MAP\n if draw_colorbar:\n colormap = cm.coolwarm\n cnorm = colors.Normalize(vmin=-0.8, vmax=0.8)\n C = cm.ScalarMappable(cmap=colormap, norm=cnorm)\n C._A = []\n c_cax = plt.axes([colorbar_x, colorbar_y, colorbar_size_x, colorbar_size_y])\n cbar = plt.colorbar(C, ticks=[-0.8, -0.4, 0.0, 0.4, 0.8], orientation=\"horizontal\", cax=c_cax)\n cbar.set_label('Density contrast ($g/cm^{3}$)', fontsize=fs, labelpad=-1)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n # WRITE OUT FIG\n if use_tight_layout:\n if file_type == \"svg\":\n plt.savefig(file_path+'.'+file_type, bbox_inches='tight', dpi=720, format='svg')\n elif file_type == \"pdf\":\n plt.savefig(file_path+'.'+file_type, bbox_inches='tight', dpi=720, format='pdf')\n elif file_type == \"ps\":\n plt.savefig(file_path+'.'+file_type, bbox_inches='tight', dpi=720, format='ps')\n elif file_type == \"eps\":\n plt.savefig(file_path+'.'+file_type, bbox_inches='tight', dpi=720, format='eps')\n elif file_type == \"png\":\n plt.savefig(file_path+'.'+file_type, bbox_inches='tight', dpi=720, format='png')\n else:\n if file_type == \"svg\":\n plt.savefig(file_path+'.'+file_type, dpi=720, format='svg')\n elif file_type == \"pdf\":\n plt.savefig(file_path+'.'+file_type, dpi=720, format='pdf')\n elif file_type == \"ps\":\n plt.savefig(file_path+'.'+file_type, dpi=720, format='ps')\n elif file_type == \"eps\":\n plt.savefig(file_path+'.'+file_type, dpi=720, format='eps')\n elif file_type == \"png\":\n 
plt.savefig(file_path+'.'+file_type, dpi=720, format='png')\n return", "def plot_collector_probes(self,external_cp=None):\n probes=self.parse_file()\n \n rminusrsep0=self.get_rminusrsep(probenum=0)\n zminuszsep1=self.get_zminuszsep(probenum=1)\n \n fig = mpl.pyplot.figure(figsize=[6,8])\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212,sharex=ax1)\n \n # add dashed lines for another case if external_cp is specified\n if external_cp != None:\n ext = external_cp\n ext_probes = ext.parse_file()\n ext_rminusrsep0 = ext.get_rminusrsep(probenum=0)\n ext_zminuszsep1 = ext.get_zminuszsep(probenum=1)\n ax2.plot(ext_rminusrsep0,ext_probes[0]['IMPFLUX_IDF'],'r--',lw=4,label='ITF DIII-D')\n ax2.plot(ext_rminusrsep0,ext_probes[0]['IMPFLUX_ODF'],'b--',lw=4,label='OTF DIII-D')\n #ax1.plot(ext_zminuszsep1,ext_probes[1]['IMPFLUX_IDF'],'r--',lw=4,label='ITF DIII-D')\n #ax1.plot(ext_zminuszsep1,ext_probes[1]['IMPFLUX_ODF'],'b--',lw=4,label='OTF DIII-D')\n # plot midplane probe profile only here, since it represents a \"good\" signal\n ax1.plot(ext_rminusrsep0,ext_probes[0]['IMPFLUX_IDF'],'r--',lw=4,label='ITF DIII-D')\n ax1.plot(ext_rminusrsep0,ext_probes[0]['IMPFLUX_ODF'],'b--',lw=4,label='OTF DIII-D')\n \n # main plots\n ax2.plot(rminusrsep0,probes[0]['IMPFLUX_IDF'],'r',lw=4,label='ITF')\n ax2.plot(rminusrsep0,probes[0]['IMPFLUX_ODF'],'b',lw=4,label='OTF')\n ax2.set_ylabel('Calc W flux to probe (/m2/s)',size=16)\n ax2.set_xlabel('R-Rsep (cm)',size=16)\n ax2.tick_params(axis='both',which='major',labelsize=12)\n ax2.legend(loc='upper right',fontsize=14,edgecolor='w')\n ax2.set_title('Midplane probe: 3cm',fontsize=16)\n \n ax1.plot(zminuszsep1,probes[1]['IMPFLUX_IDF'],'r',lw=4,label='ITF')\n ax1.plot(zminuszsep1,probes[1]['IMPFLUX_ODF'],'b',lw=4,label='OTF')\n ax1.set_ylabel('Calc W flux to probe (/m2/s)',size=16)\n ax1.set_xlabel('Z-Zsep (cm)',size=16)\n ax1.tick_params(axis='both',which='major',labelsize=12)\n ax1.legend(loc='upper right',fontsize=14,edgecolor='w')\n ax1.set_title('Crown probe: 3cm',fontsize=16)\n \n #ax1.set_xlim([3,14])\n #ax1.set_xlim([2,10])\n ax1.set_xlim([0,14])\n ax1.set_ylim([0,2e21])\n ax2.set_ylim([0,2e21])\n \n mpl.pyplot.subplots_adjust(hspace=0.4)\n mpl.pyplot.show()", "def old_run_plots(self,params):\n lw = 2\n \n \n # Plot voltage at soma and dendrites (apical proximal and distal)\n pylab.figure(1)\n pylab.plot(h.tvec,h.vsoma,lw=lw,c='k',label='v_soma')\n #pylab.plot(h.tvec,h.vdend,lw=lw,c='r',label='v_dend')\n #pylab.plot(h.tvec,h.vdend2,lw=lw,c='b',label='v_dend2')\n pylab.xlim(h.tstart-20,h.tstop+20)\n pylab.ylim(-120,40)\n # If optogenetics were included, draw blocks for times that illumination occurred in appropriate colours \n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n for pulsenum in range(opexp[1][6]): \n pulse_start = opexp[1][2]+pulsenum*(opexp[1][3]+opexp[1][4])\n self.plot_optogenetic(opsin,pulse_start,pulse_start+opexp[1][3],yoffset=40)\n # once we've plotted an activation for one area, that should be sufficient i.e. we don't need to plot apical *and* soma, only the first \n # TODO: think how to extend this to allow for different areas to be indicated i.e. 
ChR in soma vs ChR in apical dendritic arbor\n break\n pylab.title('V')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('V (mV)')\n \n \"\"\"\n # Plot currents at soma and i_syn\n pylab.figure(2)\n pylab.plot(h.tvec,h.isyn,lw=lw,c='g',label='i_syn')\n pylab.plot(h.tvec,h.isoma,lw=lw,c='k',label='i_soma')\n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n h('objref list_i_opsin')\n h('list_i_opsin = new List()')\n h('list_i_opsin.append(i_%s)'%opsin)\n pylab.plot(h.tvec,h.list_i_opsin.object(0),color=opsin_dict[opsin]['color'],label='i_%s'%opsin)\n break\n pylab.xlim(h.tstart-20,h.tstop+20)\n #pylab.ylim(-3,6)\n pylab.title('I')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('I (nA)')\n \"\"\"\n \n if params['expname'] is not None:\n savename = params['expname']\n pylab.figure(1)\n pylab.savefig(savename+'_voltage.png')\n #pylab.figure(2)\n #pylab.savefig(savename+'_current.png')\n print \"Saved figures under %s*.png\"%savename\n pylab.close('all')\n else:\n pylab.show()", "def updatelines(self):\n if self.wavefunction is not None:\n self.rewavefunctionlines.set_ydata(real(self.wavefunction))\n self.imwavefunctionlines.set_ydata(imag(self.wavefunction))\n if self.grid is None:\n self.rewavefunctionlines.set_xdata(arange(len(self.wavefunction)))\n self.imwavefunctionlines.set_xdata(arange(len(self.wavefunction)))\n else:\n self.rewavefunctionlines.set_xdata(self.grid)\n self.imwavefunctionlines.set_xdata(self.grid)\n #self.force_redraw()", "def main():\n rs_fcfs = fcfs.run(processes)\n rs_sjf = sjf.run(processes)\n rs_pr = priority.run(processes)\n rs_prp = priority_preemptive.run(processes)\n rs_srtf = srtf.run(processes)\n rs_rr = rr.run(processes)\n\n print('\\n FCFS')\n table.plot(rs_fcfs['processes'])\n graph.plot_gantt(rs_fcfs)\n\n print('\\n SJF')\n table.plot(rs_sjf['processes'])\n graph.plot_gantt(rs_sjf)\n\n print('\\n PR')\n table.plot(rs_pr['processes'])\n graph.plot_gantt(rs_pr)\n\n print('\\n PRP')\n table.plot(rs_prp['processes'])\n graph.plot_gantt(rs_prp)\n\n print('\\n SRTF')\n table.plot(rs_srtf['processes'])\n graph.plot_gantt(rs_srtf)\n\n print('\\n RR')\n table.plot(rs_rr['processes'])\n graph.plot_gantt(rs_rr)\n\n graph.plot_comparision(\n [rs_fcfs, rs_sjf, rs_srtf, rs_pr, rs_prp, rs_rr])", "def add_curves_Wyoming(ax,datetime,station,linewidth=1.0,LH_Tdepend=False):\n from siphon.simplewebservice.wyoming import WyomingUpperAir\n\n date = datetime\n station = station\n df = WyomingUpperAir.request_data(date, station)\n pressure = df['pressure'].values\n Temp = df['temperature'].values\n Temp_dew = df['dewpoint'].values\n altitude = df['height'].values\n q = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp_dew*units('degC')),pressure*units('mbar'))\n q = mpcalc.specific_humidity_from_mixing_ratio(q)\n qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp*units('degC')),pressure*units('mbar'))\n \n # specific energies\n if LH_Tdepend == False:\n mse = 
mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),q)\n mse_s = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),qs)\n dse = mpcalc.dry_static_energy(altitude*units('meter'),Temp*units('degC'))\n else:\n # A short course in cloud physics, Roger and Yau (1989)\n Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 - \n 0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation\n #Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion\n \n mse = Cp_d*T + g*altitude + Lvt*q\n mse_s = Cp_d*T + g*altitude + Lvt*qs\n dse = mpcalc.dry_static_energy(altitude,T)\n \n # adding curves on the main axes\n ax.plot(dse.magnitude, pressure, 'k', linewidth=linewidth)\n ax.plot(mse.magnitude, pressure, 'b', linewidth=linewidth)\n ax.plot(mse_s.magnitude, pressure, 'r', linewidth=linewidth)", "def generate_plot(platforms, output_file):\n labels = []\n values = []\n for platform in platforms:\n name = platform['name']\n adapted_price = platform['adjusted_price']\n price = platform['original_price']\n if price > 2000:\n continue #i.e. skip\n if len(name)>15:\n name=platform['abbreviation']\n #This needs to be changed in the demo\n labels.insert(0,u\"{0}\\n$ {1}\\n$ {2}\".format(name, price, round(adapted_price,2)))\n values.insert(0, adapted_price)\n\n #define the size of the bar and size of the graph \n width = 0.3\n ind = np.arange(len(values))\n fig = plt.figure(figsize=(len(labels) * 1.8, 10))\n\n ax = fig.add_subplot(1, 1, 1)\n ax.bar(ind, values, width, align='center')\n\n # Format the X and Y axis labels. Also set the ticks on the x-axis slightly\n # farther apart and give then a slight tilting effect.\n plt.ylabel('Adjusted price')\n plt.xlabel('Year / Console')\n ax.set_xticks(ind + 0.3)\n ax.set_xticklabels(labels)\n fig.autofmt_xdate()\n plt.grid(True)\n\n #plt.show(dpi=72) \n #uncomment if you want to save the file\n plt.savefig(output_file, dpi=72)", "def plot_launching_window(self):\n # Find date of window opening\n if not self.launching_dates:\n self.launching_dates = self.log[self.log.key == 'launch_window_open'].datetime.tolist()\n\n # Plot lines\n for date in self.launching_dates:\n plt.axvline(date, linewidth=2, color='r', alpha=0.5)", "def courbe_A_O_MobiYes2():\n\tolsr = preproc.preprocXspeed_YNbOvhd('output.2b.MOBILITY.OLSR.txt')\n\taodv = preproc.preprocXspeed_YNbOvhd('output.2b.MOBILITY.AODV.txt')\n\n\tfig = plt.figure()\n\n\tplt.plot( olsr[0], olsr[1], marker='o', markerfacecolor='b', markersize=2, color='b', linewidth=1, label=\"OLSR\")\n\tplt.plot( aodv[0], aodv[1], marker='o', markerfacecolor='red', markersize=2, color='red', linewidth=1, label=\"AODV\")\n\n\t#print(olsr)\n\n\tplt.legend()\n\n\tplt.yticks(np.arange(3000, 17000, 1000))\n\n\tfig.suptitle('Nombre de paquets Overhead en fonction de la vitesse max', fontsize=12)\n\tplt.xlabel('Vitesse max (m/s)', fontsize=10)\n\tplt.ylabel('#Paquets Overhead', fontsize=10)\n\n\tplt.savefig('courbes/courbe_OLSR_AODV_avecMobi_Over.Vit.svg',format='svg', dpi=1200)", "def plot_run(data, time, starttime, runs, rnum, gaugeno, inputg, winnum, winsize, grid, *pred):\r\n \r\n fig, (ax1, ax2) = plt.subplots(2,sharex=True, sharey=False,figsize=(12,10))\r\n \r\n title = 'Run # %s' % rnum\r\n \r\n if pred:\r\n title =title + ', Predicted: %sm' % np.around(pred[0],2)\r\n \r\n ax1.set_title(title, fontsize=20,)\r\n \r\n fig.add_subplot(111, frameon=False) # used for centering the y-axis label\r\n \r\n ax1.plot(time[(rnum,inputg)]/60, data[(rnum,inputg)], label=\"Gauge # %s\" % str(inputg), 
color='blue')\r\n ax1.grid(True)\r\n ax1.legend(loc='upper left')\r\n \r\n \r\n #Plot window of data used and reference line for threshold. \r\n start = starttime[winnum][runs.index(rnum)]*grid\r\n end = (start + (winsize[winnum]-1)*grid)\r\n ax1.axvline(start/60, color ='red', ls='--', lw=1, alpha = 0.8)\r\n ax1.axvline(end/60, color ='red', ls='--', lw=1, alpha = 0.8)\r\n\r\n ax2.plot(time[(rnum,gaugeno)]/60, data[(rnum,gaugeno)], label='Gauge # %s' % str(gaugeno), color='blue')\r\n ax2.grid(True)\r\n ax2.legend(loc='upper left')\r\n \r\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False) # used for centering the y-axis label\r\n plt.xlabel('Minutes after quake', fontsize=16)\r\n plt.ylabel('Surface elevation (meters)', fontsize=16)", "def plotData(self) :\n \n # plot the data!\n if len(self.figwindows) == 0 :\n self.figwindows.append(plotgui.PlotWindow())\n self.figwindows[0].move(0,0)\n self.figwindows.append(plotgui.PlotWindow())\n self.figwindows[1].move(400, 0)\n self.figwindows.append(plotgui.PlotWindow())\n self.figwindows[2].move(800, 0)\n self.figwindows.append(plotgui.PlotWindow())\n self.figwindows[3].move(1200, 0)\n \n self.traces = []\n \n fig = self.figwindows[0].init_plot()\n self.traces.append(self.Trace(fig, self.ts, self.ps, 'b-','Position'))\n fig.hold(True)\n self.traces.append(self.Trace(fig, self.ts, self.target_ps, 'r--','Target Position'))\n fig.legend(loc=2)\n fig.xaxis.label.set_text('Time (s)')\n fig.yaxis.label.set_text('Position (encoder tics)')\n fig.title.set_text('Position Tracking')\n # NOTE: additional properties of the plot (text size, etc) are set using \n # the matplotlibrc file in the project folder.\n \n self.figwindows[0].render_plot()\n self.figwindows[0].show()\n \n fig = self.figwindows[1].init_plot()\n #fig.plot(ts, vs, 'c-', label='Velocity')\n fig.hold(True)\n self.traces.append(self.Trace(fig, self.ts, self.target_vs, 'r--','Target Velocity'))\n self.traces.append(self.Trace(fig, self.ts, self.cmd_vs, 'g-', 'Command Velocity'))\n fig.legend(loc=2)\n fig.xaxis.label.set_text('Time (s)')\n fig.yaxis.label.set_text('Velocity (encoder tics/min)')\n fig.title.set_text('Velocity Tracking')\n \n self.figwindows[1].render_plot()\n self.figwindows[1].show()\n \n fig = self.figwindows[2].init_plot()\n self.traces.append(self.Trace(fig, self.ts, self.ps, 'b-', 'Encoder Position'))\n fig.hold(True)\n self.traces.append(self.Trace(fig, self.ts, self.motor_ps, 'g-', 'Motor Step Position'))\n fig.legend(loc=2)\n fig.xaxis.label.set_text('Time (s)')\n fig.yaxis.label.set_text('Position (encoder tics)')\n fig.title.set_text('Motor Reported Location')\n \n self.figwindows[2].render_plot()\n self.figwindows[2].show()\n \n fig = self.figwindows[3].init_plot()\n self.traces.append(self.Trace(fig, self.ts, self.pos_error_derivs, 'b-', 'Position Error Derivative'))\n fig.xaxis.label.set_text('Time (s)')\n fig.yaxis.label.set_text('Error change (tics/update)')\n fig.title.set_text('Position Error Derivative')\n \n self.figwindows[3].render_plot()\n self.figwindows[3].show()", "def courbe802_11():\n\tsimulationTime = 2 # Temps total de la simulation ici deux secondes\n\n\tdata_6Mb = preproc.preprocX_distYRRate('output.6Mb.txt', simulationTime)\n\tdata_54Mb = preproc.preprocX_distYRRate('output.54Mb.txt', simulationTime)\n\tdata_802_11g = preproc.preprocX_distYRRate('output.802.11g.txt', simulationTime)\n\tdata_802_11n = preproc.preprocX_distYRRate('output.802.11n.txt', simulationTime)\n\n\tfig = plt.figure()\n\n\tplt.plot( 
data_6Mb[0], data_6Mb[1], marker='v', markerfacecolor='m', markersize=2, color='r', linewidth=1, label=\"802.11a_6Mbps \")\n\tplt.plot( data_54Mb[0], data_54Mb[1], marker='^', markerfacecolor='g', markersize=2, color='r', linewidth=1, label=\"802.11a_54Mbps\")\n\tplt.plot( data_802_11g[0], data_802_11g[1], marker='o', markerfacecolor='b', markersize=2, color='b', linewidth=1, label=\"802.11g\")\n\tplt.plot( data_802_11n[0], data_802_11n[1], marker='o', markerfacecolor='g', markersize=2, color='g', linewidth=1, label=\"802.11n\")\n\n\tplt.legend()\n\n\tplt.yticks(np.arange(0, 65, 5))\n\n\tfig.suptitle('Debit en reception en fonction de la distance', fontsize=12)\n\tplt.xlabel('Distance (m)', fontsize=10)\n\tplt.ylabel('Debit en reception (Mbps)', fontsize=10)\n\n\tplt.savefig('courbes/courbe_802.2_DebReceptio__Dist.svg',format='svg', dpi=1200)", "def plot_detection(self):\n import matplotlib.pyplot as plt\n import ipywidgets as ipy\n\n # Define mask\n sf = self._sf\n win_size = 10\n mask = self.get_mask()\n highlight = self._data * mask\n highlight = np.where(highlight == 0, np.nan, highlight)\n highlight_filt = self._data_filt * mask\n highlight_filt = np.where(highlight_filt == 0, np.nan, highlight_filt)\n\n n_epochs = int((self._data.shape[-1] / sf) / win_size)\n times = np.arange(self._data.shape[-1]) / sf\n\n # Define xlim and xrange\n xlim = [0, win_size]\n xrng = np.arange(xlim[0] * sf, (xlim[1] * sf + 1), dtype=int)\n\n # Plot\n fig, ax = plt.subplots(figsize=(12, 4))\n plt.plot(times[xrng], self._data[0, xrng], \"k\", lw=1)\n plt.plot(times[xrng], highlight[0, xrng], \"indianred\")\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"Amplitude (uV)\")\n fig.canvas.header_visible = False\n fig.tight_layout()\n\n # WIDGETS\n layout = ipy.Layout(width=\"50%\", justify_content=\"center\", align_items=\"center\")\n\n sl_ep = ipy.IntSlider(\n min=0,\n max=n_epochs,\n step=1,\n value=0,\n layout=layout,\n description=\"Epoch:\",\n )\n\n sl_amp = ipy.IntSlider(\n min=25,\n max=500,\n step=25,\n value=150,\n layout=layout,\n orientation=\"horizontal\",\n description=\"Amplitude:\",\n )\n\n dd_ch = ipy.Dropdown(\n options=self._ch_names, value=self._ch_names[0], description=\"Channel:\"\n )\n\n dd_win = ipy.Dropdown(\n options=[1, 5, 10, 30, 60],\n value=win_size,\n description=\"Window size:\",\n )\n\n dd_check = ipy.Checkbox(\n value=False,\n description=\"Filtered\",\n )\n\n def update(epoch, amplitude, channel, win_size, filt):\n \"\"\"Update plot.\"\"\"\n n_epochs = int((self._data.shape[-1] / sf) / win_size)\n sl_ep.max = n_epochs\n xlim = [epoch * win_size, (epoch + 1) * win_size]\n xrng = np.arange(xlim[0] * sf, (xlim[1] * sf), dtype=int)\n # Check if filtered\n data = self._data if not filt else self._data_filt\n overlay = highlight if not filt else highlight_filt\n try:\n ax.lines[0].set_data(times[xrng], data[dd_ch.index, xrng])\n ax.lines[1].set_data(times[xrng], overlay[dd_ch.index, xrng])\n ax.set_xlim(xlim)\n except IndexError:\n pass\n ax.set_ylim([-amplitude, amplitude])\n\n return ipy.interact(\n update, epoch=sl_ep, amplitude=sl_amp, channel=dd_ch, win_size=dd_win, filt=dd_check\n )", "def _update_plot(self):\n # global ptr\n\n self.ptr += 1\n\n t_initial = time.time()\n tic = time.perf_counter()\n # received_data = []\n received_data = self.rx_tx_ctrl.zedboard.read_temperature_sensor(AppConstants.number_sensors)\n # print(received_data[0])\n # print(received_data[1])\n # print(received_data[2])\n\n # sensor 1\n self.sensor_1[:-1] = self.sensor_1[1:]\n self.sensor_1[-1] = 
received_data[0]\n if self.plot_graph.chBox_1.isChecked():\n self.plot_sensor_1.setData(self.sensor_1, pen=AppConstants.plot_colors['s1']) # 'r') # QPen(QColor(255, 0, 255)), width=1) # 'r')\n self.plot_sensor_1.setPos(self.ptr, 0)\n else:\n self.plot_sensor_1.setData(self.sensor_1, pen=None)\n self.plot_sensor_1.setPos(self.ptr, 0)\n\n # sensor 2\n self.sensor_2[:-1] = self.sensor_2[1:]\n self.sensor_2[-1] = (received_data[1])\n if self.plot_graph.chBox_2.isChecked():\n self.plot_sensor_2.setData(self.sensor_2, pen=AppConstants.plot_colors['s2'])\n self.plot_sensor_2.setPos(self.ptr, 0)\n else:\n self.plot_sensor_2.setData(self.sensor_2, pen=None)\n self.plot_sensor_2.setPos(self.ptr, 0)\n\n # self.plot_sensor_2.setData(self.sensor_2, pen='g')\n\n # sensor 3\n self.sensor_3[:-1] = self.sensor_3[1:]\n self.sensor_3[-1] = (received_data[2])\n if self.plot_graph.chBox_3.isChecked():\n self.plot_sensor_3.setData(self.sensor_3, pen=AppConstants.plot_colors['s3'])\n self.plot_sensor_3.setPos(self.ptr, 0)\n else:\n self.plot_sensor_3.setData(self.sensor_3, pen=None)\n self.plot_sensor_3.setPos(self.ptr, 0)\n\n # self.plot_sensor_3.setData(self.sensor_3, pen='y')\n #\n # sensor 4\n self.sensor_4[:-1] = self.sensor_4[1:]\n self.sensor_4[-1] = received_data[3]\n if self.plot_graph.chBox_4.isChecked():\n self.plot_sensor_4.setData(self.sensor_4, pen=AppConstants.plot_colors['s4'])\n self.plot_sensor_4.setPos(self.ptr, 0)\n else:\n self.plot_sensor_4.setData(self.sensor_4, pen=None)\n self.plot_sensor_4.setPos(self.ptr, 0)\n\n #\n # sensor 5\n self.sensor_5[:-1] = self.sensor_5[1:]\n self.sensor_5[-1] = (received_data[4])\n if self.plot_graph.chBox_5.isChecked():\n self.plot_sensor_5.setData(self.sensor_5, pen=AppConstants.plot_colors['s5'])\n self.plot_sensor_5.setPos(self.ptr, 0)\n else:\n self.plot_sensor_5.setData(self.sensor_5, pen=None)\n self.plot_sensor_5.setPos(self.ptr, 0)\n # # self.plot_sensor_2.setData(self.sensor_2, pen='g')\n #\n # sensor 6\n self.sensor_6[:-1] = self.sensor_6[1:]\n self.sensor_6[-1] = (received_data[5])\n if self.plot_graph.chBox_6.isChecked():\n self.plot_sensor_6.setData(self.sensor_6, pen=AppConstants.plot_colors['s6'])\n self.plot_sensor_6.setPos(self.ptr, 0)\n else:\n self.plot_sensor_6.setData(self.sensor_6, pen=None)\n self.plot_sensor_6.setPos(self.ptr, 0)\n #\n # sensor 7\n self.sensor_7[:-1] = self.sensor_7[1:]\n self.sensor_7[-1] = received_data[6]\n if self.plot_graph.chBox_7.isChecked():\n self.plot_sensor_7.setData(self.sensor_7, pen=AppConstants.plot_colors['s7'])\n self.plot_sensor_7.setPos(self.ptr, 0)\n else:\n self.plot_sensor_7.setData(self.sensor_7, pen=None)\n self.plot_sensor_7.setPos(self.ptr, 0)\n #\n # sensor 8\n self.sensor_8[:-1] = self.sensor_8[1:]\n self.sensor_8[-1] = (received_data[7])\n if self.plot_graph.chBox_8.isChecked():\n self.plot_sensor_8.setData(self.sensor_8, pen=AppConstants.plot_colors['s8'])\n self.plot_sensor_8.setPos(self.ptr, 0)\n else:\n self.plot_sensor_8.setData(self.sensor_8, pen=None)\n self.plot_sensor_8.setPos(self.ptr, 0)\n\n # # self.plot_sensor_2.setData(self.sensor_2, pen='g')\n #\n # sensor 9\n self.sensor_9[:-1] = self.sensor_9[1:]\n self.sensor_9[-1] = (received_data[8])\n if self.plot_graph.chBox_9.isChecked():\n self.plot_sensor_9.setData(self.sensor_9, pen=AppConstants.plot_colors['s9'])\n self.plot_sensor_9.setPos(self.ptr, 0)\n else:\n self.plot_sensor_9.setData(self.sensor_9, pen=None)\n self.plot_sensor_9.setPos(self.ptr, 0)\n # # self.plot_sensor_3.setData(self.sensor_3, pen='y')\n #\n # sensor 
10\n self.sensor_10[:-1] = self.sensor_10[1:]\n self.sensor_10[-1] = received_data[9]\n if self.plot_graph.chBox_10.isChecked():\n self.plot_sensor_10.setData(self.sensor_10, pen=AppConstants.plot_colors['s10'])\n self.plot_sensor_10.setPos(self.ptr, 0)\n else:\n self.plot_sensor_10.setData(self.sensor_10, pen=None)\n self.plot_sensor_10.setPos(self.ptr, 0)\n #\n # sensor 11\n self.sensor_11[:-1] = self.sensor_11[1:]\n self.sensor_11[-1] = (received_data[10])\n if self.plot_graph.chBox_11.isChecked():\n self.plot_sensor_11.setData(self.sensor_11, pen=AppConstants.plot_colors['s11'])\n self.plot_sensor_11.setPos(self.ptr, 0)\n else:\n self.plot_sensor_11.setData(self.sensor_11, pen=None)\n self.plot_sensor_11.setPos(self.ptr, 0)\n # # self.plot_sensor_2.setData(self.sensor_2, pen='g')\n #\n # sensor 12\n self.sensor_12[:-1] = self.sensor_12[1:]\n self.sensor_12[-1] = (received_data[11])\n if self.plot_graph.chBox_12.isChecked():\n self.plot_sensor_12.setData(self.sensor_12, pen=AppConstants.plot_colors['s12'])\n self.plot_sensor_12.setPos(self.ptr, 0)\n else:\n self.plot_sensor_12.setData(self.sensor_12, pen=None)\n self.plot_sensor_12.setPos(self.ptr, 0)\n #\n # sensor 13\n self.sensor_13[:-1] = self.sensor_13[1:]\n self.sensor_13[-1] = received_data[12]\n if self.plot_graph.chBox_13.isChecked():\n self.plot_sensor_13.setData(self.sensor_13, pen=AppConstants.plot_colors['s13'])\n self.plot_sensor_13.setPos(self.ptr, 0)\n else:\n self.plot_sensor_13.setData(self.sensor_13, pen=None)\n self.plot_sensor_13.setPos(self.ptr, 0)\n #\n # sensor 14\n self.sensor_14[:-1] = self.sensor_14[1:]\n self.sensor_14[-1] = (received_data[13])\n if self.plot_graph.chBox_14.isChecked():\n self.plot_sensor_14.setData(self.sensor_14, pen=AppConstants.plot_colors['s14'])\n self.plot_sensor_14.setPos(self.ptr, 0)\n else:\n self.plot_sensor_14.setData(self.sensor_14, pen=None)\n self.plot_sensor_14.setPos(self.ptr, 0)\n # # self.plot_sensor_2.setData(self.sensor_2, pen='g')\n #\n # sensor 15\n self.sensor_15[:-1] = self.sensor_15[1:]\n self.sensor_15[-1] = (received_data[14])\n if self.plot_graph.chBox_15.isChecked():\n self.plot_sensor_15.setData(self.sensor_15, pen=AppConstants.plot_colors['s15'])\n self.plot_sensor_15.setPos(self.ptr, 0)\n else:\n self.plot_sensor_15.setData(self.sensor_15, pen=None)\n self.plot_sensor_15.setPos(self.ptr, 0)\n #\n # sensor 16\n self.sensor_16[:-1] = self.sensor_16[1:]\n self.sensor_16[-1] = (received_data[15])\n if self.plot_graph.chBox_16.isChecked():\n self.plot_sensor_16.setData(self.sensor_16, pen=AppConstants.plot_colors['s16'])\n self.plot_sensor_16.setPos(self.ptr, 0)\n else:\n self.plot_sensor_16.setData(self.sensor_16, pen=None)\n self.plot_sensor_16.setPos(self.ptr, 0)\n\n # value to LCD display\n self.plot_graph.lcd.display(received_data[0])\n t_final = time.time()\n toc = time.perf_counter()\n print(\"Plot time: \", t_final-t_initial)\n print(f\"Plot update time {toc-tic:0.4f} sec\")", "def drawActogram(self):\n try:\n sender = ''.join([x for x in self.sender().text()\n if x.isnumeric()])\n print('plotting ' + sender)\n sender = int(sender)-1\n\n time_ = []\n status = []\n\n with open(self.name[sender].text(), 'rb') as f:\n for buff in iter(lambda: f.read(8), b''):\n anteroom_tuple = struct.unpack('=If', buff)\n time_.append(anteroom_tuple[0])\n status.append(anteroom_tuple[1])\n\n time_ = np.asarray(time_)/(24*3600) + 719163 - 5/24\n status = np.asarray(status)\n\n days = np.floor(time_)\n x = (time_ - days) * 24\n y = status + (days[-1] - days)\n\n self.win = 
pg.GraphicsWindow()\n pg.setConfigOptions(antialias=True)\n self.p1 = self.win.addPlot()\n\n for i in range(int(days[0]), int(days[-1]) + 1):\n self.p1.plot(x[days == i], y[days == i], pen='r')\n self.p1.plot(x[days == i-1] + 24, # double-plot\n y[days == i-1] + 1, pen='r')\n self.p1.plot(x[days == int(days[-1])] + 24, # double-plot\n y[days == int(days[-1])] + 1, pen='r') # last day\n \n # Set axis layout\n self.xax = self.p1.getAxis('bottom')\n self.xax.setTickSpacing(24, 2)\n self.yax = self.p1.getAxis('left') \n self.p1.showGrid(x=True, y=True)\n\n except FileNotFoundError:\n print('No file')", "def plot_data(self, frame_ordering):\n\n self.ax.set_ylim(0, self.rank_frames.number + 1)\n self.y = array(range(1, self.rank_frames.number + 1))\n if frame_ordering == \"chronological\":\n if self.line_chronological is not None:\n self.line_chronological.remove()\n self.x = array(self.rank_frames.frame_ranks)\n plt.ylabel('Frame numbers ordered chronologically')\n plt.gca().invert_yaxis()\n plt.xlabel('Quality')\n self.line_chronological, = plt.plot(self.x, self.y, lw=1, color='blue')\n plt.grid(True)\n else:\n if self.line_quality is not None:\n self.line_quality.remove()\n self.x = array(\n [self.rank_frames.frame_ranks[i] for i in self.rank_frames.quality_sorted_indices])\n plt.ylabel('Frame numbers ordered by quality')\n plt.gca().invert_yaxis()\n plt.xlabel('Quality')\n self.line_quality, = plt.plot(self.x, self.y, lw=1, color='green')\n plt.grid(True)\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()", "def init_line_plot():\n _fig, axis = plt.subplots()\n axis.set_title('Line histogram (BGR)')\n axis.set_xlabel('Bin')\n axis.set_ylabel('Frequency (num of pixels)')\n axis.set_xlim(0, const.MAX_PIXEL_VAL-1)\n axis.set_ylim(0, 54000)\n\n line_blue, = axis.plot(const.FULL_BINS, np.zeros((const.MAX_PIXEL_VAL,)), color='b', label='Blue')\n line_green, = axis.plot(const.FULL_BINS, np.zeros((const.MAX_PIXEL_VAL,)), color='g', label='Green')\n line_red, = axis.plot(const.FULL_BINS, np.zeros((const.MAX_PIXEL_VAL,)), color='r', label='Red')\n\n axis.legend()\n\n return line_blue, line_green, line_red", "def _init_plots(self):\n handle_dict = {}\n nans = np.zeros((1, 2), dtype=float)\n nans.fill(np.nan)\n n_steps = self.data_config['sequence_length'] - 1\n ########################################################################\n # Configuration dictionaries\n ########################################################################\n for config in [self.run_config, self.train_config, self.model_config, self.data_config]:\n plot_config(self.vis, config)\n ########################################################################\n # Total free energy, conditional log likelihood, KL divergence\n ########################################################################\n handle_dict['fe'] = plot_line(self.vis, nans, np.ones((1, 2)), legend=['Train', 'Val'],\n title='Total Free Energy', xlabel='Epochs',\n ylabel='Free Energy (Nats)', xformat='log', yformat='log')\n handle_dict['cll'] = plot_line(self.vis, nans, np.ones((1, 2)), legend=['Train', 'Val'],\n title='Total Conditional Log Likelihood', xlabel='Epochs',\n ylabel='Conditional Log Likelihood (Nats)',\n xformat='log', yformat='log')\n handle_dict['kl'] = plot_line(self.vis, nans, np.ones((1, 2)), legend=['Train', 'Val'],\n title='Total KL Divergence', xlabel='Epochs',\n ylabel='KL Divergence (Nats)', xformat='log', yformat='log')\n ########################################################################\n # Per step free energy, conditional 
log likelihood, KL divergence\n ########################################################################\n step_legend = []\n for split in ['Train', 'Val']:\n for step_num in range(1, n_steps + 1):\n step_legend.append(split + ', Step ' + str(step_num))\n handle_dict['fe_step'] = plot_line(self.vis,\n nans.repeat(n_steps, 1),\n np.ones((1, 2 * n_steps)),\n legend=step_legend,\n title='Per Step Free Energy',\n xlabel='Epochs',\n ylabel='Free Energy (Nats)',\n xformat='log', yformat='log')\n handle_dict['cll_step'] = plot_line(self.vis,\n nans.repeat(n_steps, 1),\n np.ones((1, 2 * n_steps)),\n legend=step_legend,\n title='Per Step Conditional Log Likelihood',\n xlabel='Epochs',\n ylabel='Conditional Log Likelihood (Nats)',\n xformat='log', yformat='log')\n handle_dict['kl_step'] = plot_line(self.vis,\n nans.repeat(n_steps, 1),\n np.ones((1, 2 * n_steps)),\n legend=step_legend,\n title='Per Step KL Divergence',\n xlabel='Epochs',\n ylabel='KL Divergence (Nats)',\n xformat='log', yformat='log')\n ########################################################################\n # Latent distribution parameter magnitudes\n ########################################################################\n it_legend = []\n for split in ['Train', 'Val']:\n for it_num in range(self.train_config['inference_iterations']+1):\n it_legend.append(split + ', Iteration ' + str(it_num))\n handle_dict['post_mean'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Approx. Posterior Mean Magnitude',\n xlabel='Epochs', ylabel='Mean Mag.',\n xformat='log', yformat='log')\n handle_dict['post_log_var'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Approx. 
Posterior Log Variance Magnitude',\n xlabel='Epochs', ylabel='Log Variance Mag.',\n xformat='log', yformat='log')\n handle_dict['prior_mean'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Prior Mean Magnitude',\n xlabel='Epochs', ylabel='Mean Mag.',\n xformat='log', yformat='log')\n handle_dict['prior_log_var'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Prior Log Variance Magnitude',\n xlabel='Epochs', ylabel='Log Variance Mag.',\n xformat='log', yformat='log')\n ########################################################################\n # Inference gradient magnitudes\n ########################################################################\n it_legend = []\n for split in ['Train', 'Val']:\n for it_num in range(self.train_config['inference_iterations']+1):\n it_legend.append(split + ', Iteration ' + str(it_num))\n handle_dict['mean_grad'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Mean Gradient Magnitude',\n xlabel='Epochs', ylabel='Mean Gradient Mag.',\n xformat='log', yformat='log')\n handle_dict['log_var_grad'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n title='Log Variance Gradient Magnitude',\n xlabel='Epochs', ylabel='Log Variance Gradient Mag.',\n xformat='log', yformat='log')\n ########################################################################\n # Model parameter gradient magnitudes\n ########################################################################\n handle_dict['param_grad'] = plot_line(self.vis, nans, np.ones((1, 2)),\n legend=['Inf.', 'Gen.'],\n title='Parameter Gradient Mag.',\n xlabel='Epochs', ylabel='Parameter Gradient',\n xformat='log', yformat='log')\n ########################################################################\n # Inference improvement\n ########################################################################\n it_legend = []\n for split in ['Train', 'Val']:\n for it_num in range(1, self.train_config['inference_iterations']+1):\n it_legend.append(split + ', Iteration ' + str(it_num))\n handle_dict['inf_improvement'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations'], 1),\n np.ones((1, 2*self.train_config['inference_iterations'])),\n legend=it_legend,\n title='Inference Improvement',\n xlabel='Epochs', ylabel='Relative Improvement (%)',\n xformat='log', yformat='linear')\n ########################################################################\n # Misc.\n ########################################################################\n it_legend = []\n for split in ['Train', 'Val']:\n for it_num in range(self.train_config['inference_iterations']+1):\n it_legend.append(split + ', Iteration ' + str(it_num))\n handle_dict['lr'] = plot_line(self.vis, nans, np.ones((1, 2)), legend=['Inf.', 'Gen.'],\n title='Learning Rates', xlabel='Epochs',\n ylabel='Learning Rate', xformat='log', yformat='log')\n handle_dict['out_log_var'] = plot_line(self.vis,\n nans.repeat(self.train_config['inference_iterations']+1, 1),\n np.ones((1, 2 * (self.train_config['inference_iterations']+1))),\n legend=it_legend,\n 
title='Output Log Variance',\n xlabel='Epochs', ylabel='Output Log Variance',\n xformat='log', yformat='linear')\n ########################################################################\n return handle_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a CSV file and returns a list of row_len-ary tuples
def read_file(filename, row_len, csv_name):
    assert isinstance(row_len, int)
    elements = []
    try:
        in_file = open(filename, 'r', encoding='utf-8') \
            if filename is not None else sys.stdin
        reader = csv.reader(in_file, dialect='excel')
        for row in reader:
            if len(row) != row_len:
                print(f"Error: Each row in {csv_name} must "
                      f"contain exactly {row_len} entries!",
                      file=sys.stderr)
                sys.exit(1)
            element = tuple([row[0]] + [float(r) for r in row[1:]])
            elements.append(element)
        if filename is not None:
            in_file.close()
    except IOError as ex:
        print(ex, file=sys.stderr)
        sys.exit(1)
    return elements
[ "def get_csv_rows(file_path):\n # log.info(\"Module: {} Function: {}\".format(__name__, sys._getframe().f_code.co_name))\n list = []\n # log.info(file_path)\n if not os.path.isfile(file_path):\n err_msg = \"File does not exist: {}\".format(file_path)\n raise Exception(err_msg)\n with open(file_path, mode='r') as infile:\n reader = csv.reader(infile)\n for row in reader:\n list.append(row)\n # input()\n return list", "def read_csv_file_values(csv_file_path_name, has_header=True):\n with open(csv_file_path_name) as fd:\n reader = csv.reader(fd)\n\n # Skip the very first header row.\n if has_header:\n next(reader)\n\n rows = [values for values in reader]\n\n return rows", "def read_list(csv_file):\n try:\n with open(csv_file) as csvfile:\n reader = csv.reader(csvfile, dialect='excel', quoting=csv.QUOTE_NONNUMERIC)\n datalist = []\n datalist = list(reader)\n return datalist\n except IOError as (errno, strerror):\n print(\"I/O error({0}): {1}\".format(errno, strerror)) \n return", "def csvparser(cfile):\r\n\r\n csvreader = csv.reader(cfile, delimiter = ',')\r\n tile = []\r\n count = 0\r\n for row in csvreader:\r\n if count == 0:\r\n count += 1\r\n continue\r\n else:\r\n tile.append(row[1:])\r\n return tile", "def convert_CSV_to_list(self, filepath):\n outdata = []\n #open file path and run csv reader, add to list row by row\n with open(filepath, 'r') as fin:\n reader = csv.reader(fin)\n #outdata = list[reader]\n for row in reader:\n outdata.append(row)\n #spit data out to data loader\n return outdata", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def read_csv(path):\n with open(path, 'r', encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\",\")\n return list(reader)", "def read_csv_file1(filename):\n f = open(filename)\n data = []\n for row in csv.reader(f):\n data.append(row)\n print(data) \n f.close()", "def count_csv_rows(csvfile):\n csvfile.seek(0)\n csvreader = csv.reader(csvfile, delimiter=',')\n count = 0\n for row in csvreader:\n count += 1\n csvfile.seek(0)\n print(\"row count=\", count)\n return count", "def import_csv(in_csv, delimit=','):\n with open(in_csv, encoding='utf-8') as source:\n sourcereader = csv.reader(source, delimiter=delimit)\n data_list = []\n for row in sourcereader:\n data_list.append(row)\n return data_list", "def read_csv_to_list (file, encoding=\"utf8\"):\r\n\r\n try:\r\n \r\n with open(file, encoding=encoding) as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n return data\r\n \r\n except:\r\n print(\"Unexpected error:\", sys.exc_info()[0]) \r\n return None", "def csv_to_list(filepath,col=u\"0\",header=None):\n df = pandas.read_csv(filepath,header=header)\n result = df.iloc[:,int(col)].values.tolist()\n BuiltIn().log(\"Return %d values from `%s`\" % (len(result),filepath))\n return result", "def read_csv(csv_in_path: str) -> tuple[set[str], TDictRows]:\n available_activities: set[str] = set()\n in_data: list[TStrDict] = []\n\n with codecs.open(csv_in_path, \"r\", encoding=\"utf-8-sig\") as f_in:\n reader = csv.DictReader(f_in)\n\n validate_input_csv(reader.fieldnames)\n\n for row in reader:\n # TODO: break out block into a function.\n\n original_activities_str = row[ACTIVITIES_KEY]\n if original_activities_str is None:\n raise ValueError(\n f\"The {ACTIVITIES_KEY} column is present but blank.\"\n \" Fix the formatting of your CSV. 
Got row: \\n {row}\"\n )\n activities_list = process_activities(original_activities_str)\n\n available_activities.update(activities_list)\n\n row[\"activities_list\"] = activities_list # type: ignore\n in_data.append(row)\n\n return available_activities, in_data", "def yield_csv_rows(csv_filename, csv_flavor = COMMA_DELIM):\n with open(csv_filename, 'r') as csvfile:\n spamreader = csv.reader(csvfile, **csv_flavor)\n for row in spamreader:\n yield row", "def read_file(filename, tuple_type=None):\n\n with open(filename) as f:\n # Ignore header\n next(f)\n\n rows = csv.reader(f, delimiter=',', quotechar='\"')\n #yield from map(convert_to_tuple, rows, [tuple_type]) # Need to map tuple_type to a list of same length..\n yield from map(functools.partial(convert_to_tuple, tuple_type=tuple_type), rows)", "def readinMATRIX(csvpath):\n\n G = []\n with open (csvpath, 'rb') as csvfile:\n myreader = csv.reader(csvfile)\n for row in myreader:\n G.append(row)\n return G", "def read_initial(input_file):\n return [\n [(0 if cell == \"\" else int(cell)) for cell in row]\n for row in csv.reader(open(input_file))\n ]", "def read_data(path):\n infile = open(path)\n infile.readline() # discard headers\n for line in infile:\n line = line.strip()\n row = line.split(',')\n print(row)", "def data_reader(fp):\n try:\n reader = csv.reader(fp)\n except:\n db.FailedToReadCSV(Exception)\n \n x = []\n\n for x in reader:\n if (len(x) < 2):\n continue\n if ( not x[0].strip() ) or ( x[0].startswith('#') ):\n continue\n yield x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a column to a list of sentences
def add_column(sentences, columns):
    new_sentences = []
    for sentence, column in zip(sentences, columns):
        new_sentences.append(
            [tup + [col] for tup, col in zip(sentence, column)]
        )
    return new_sentences
[ "def get_column(sentences, i):\n columns = []\n for sentence in sentences:\n columns.append([tup[i] for tup in sentence])\n return columns", "def add_tokenized_column(self, df, column_name_to_tokenize):\n COL = column_name_to_tokenize\n df_with_tokens = df.assign(**{f'tokens_{COL}': lambda df: df[COL].apply(lambda x: str(x).split())})\n return df_with_tokens", "def column_multiline(xs: Iterable[str]) -> str:\n spaces = f\"\\n{' ':^6s} | \"\n return \"| \" + next(xs) + spaces + f\"{spaces}\".join(xs)", "def format_column_list(self, column, regex=r' +'):\n\n c = self.__check_column(column)\n if c:\n self.df[c] = list(map(lambda x: re.split(regex, x), self.df[c]))", "def newCol(self, df, name, list):\n df[name] = list\n return df", "def __add_annotation(self, df, col_text, current_index, annotations):\n spans = []\n for label, items in annotations.items():\n if items:\n item_list = [\n i.strip() for i in items.split(self.delimiter) if i.strip() != \"\"\n ]\n matcher = PhraseMatcher(self.nlp.vocab, attr=self.attr)\n matcher.add(label, [self.nlp(item) for item in item_list])\n doc = self.nlp(df[col_text][current_index])\n matches = matcher(doc)\n spans_new = []\n for match_id, start, end in matches:\n span = Span(doc, start, end, label=\"\")\n spans_new.append(span)\n spans_filtered = spacy.util.filter_spans(spans_new)\n spans.extend(\n [(span.start_char, span.end_char, label) for span in spans_filtered]\n )\n else:\n continue\n entities = {\"entities\": spans}\n df.at[current_index, \"annotations\"] = (df[col_text][current_index], entities)", "def add_recommendations_column():\n open_db_connection()\n add_column = \"ALTER TABLE plant_data ADD primary_recommendations varchar\"\n cursor.execute(add_column)\n add_column = \"ALTER TABLE plant_data ADD secondary_recommendations varchar\"\n cursor.execute(add_column)\n close_db_connection()", "def split_rows(sentences, column_names):\r\n new_sentences = []\r\n texts=[]\r\n root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']\r\n start = [dict(zip(column_names, root_values))]\r\n for sentence in sentences:\r\n info=[]\r\n rows = sentence.split('\\n')\r\n sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']\r\n sentence = start + sentence\r\n new_sentences.append(sentence)\r\n if \"newdoc id\" in rows[0]: # beginnings of new docs\r\n info.append(rows[1])\r\n info.append(rows[2])\r\n texts.append(info)\r\n else:\r\n info.append(rows[0])\r\n info.append(rows[1])\r\n texts.append(info)\r\n return new_sentences, texts", "def _add_sentence_tokens(self, data: pd.DataFrame) -> pd.DataFrame:\n tokenizer = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n data[_Column.TOKENS.name] = data[_Column.MESSAGE.name].apply(tokenizer.tokenize)\n return data", "def add_sentence(self, sentence):\n self.texts.append(sentence)", "def addColumn(self, column_list, position=None):\r\n # Empty column to fill up sbtab_dataset with ''\r\n empty_list = []\r\n\r\n # If new column is too small, add empty entries to new column\r\n if len(column_list) < (len(self.sbtab_dataset.dict)-1):\r\n for i in range((len(self.sbtab_dataset.dict) - 1) - len(column_list)):\r\n column_list.append('')\r\n\r\n # If new column is too long, add empty entries to sbtab_dataset\r\n elif len(column_list) > (len(self.sbtab_dataset.dict) - 1):\r\n for i in range(len(self.sbtab_dataset.dict[0])):\r\n empty_list.append('')\r\n for i in range(len(column_list) - (len(self.sbtab_dataset.dict) - 1)):\r\n self.value_rows.append(empty_list)\r\n 
empty_list = copy.deepcopy(empty_list)\r\n\r\n # If no position is set, add new column to the end\r\n if not position:\r\n for i, row in enumerate(self.value_rows):\r\n row.append(column_list[i+1])\r\n self.columns_dict[column_list[0]] = len(self.columns)\r\n self.columns = self.columns_dict.keys()\r\n else:\r\n for i, row in enumerate(self.value_rows):\r\n row.insert(position - 1, column_list[i + 1])\r\n self.columns_dict[column_list[0]] = position - 1\r\n self.columns = self.columns_dict.keys()\r\n\r\n # Update object\r\n self.update()", "def opinion_paragraph_sents():\n sid = SentimentIntensityAnalyzer()\n\n sentlist = []\n for op in article.opinions():\n for paragraph in op.fulltext.splitlines():\n sentlist.append([round(sid.polarity_scores(paragraph)['compound'] *100, 2), op.source, paragraph])\n\n df = pd.DataFrame(sentlist, columns=['score', 'source', 'paragraph'])\n df.to_csv('./dataframes/sents_opinion_paragraphs.csv')\n\n return sentlist", "def add_interpunction(self):\n for par in self.paragraphs:\n for observ in par.observations:\n\n # add a . after each sentence\n observ.observation_new += \".\"\n\n # add a capital letter at the beginning\n first_word = observ.observation_new.split()[0]\n # check if the first word is a number\n if not represents_integer(first_word):\n # first word is not an integer, so add capital letter\n observ.observation_new = observ.observation_new[0].capitalize() + observ.observation_new[1:]", "def preprocessed_sentences_sql(query='''SELECT * FROM sentences2;'''):\n\n try:\n params = config.config()\n # Connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # Create a new cursor\n cur = conn.cursor()\n\n nlp_sentences = pd.read_sql_query(query, conn)\n # Close the cursor and connection to so the server can allocate\n # bandwidth to other requests\n cur.close()\n conn.close()\n\n # Add REGEX columns to data.\n nlp_sentences['words_as_string'] = nlp_sentences['words'].apply(lambda x: ','.join(map(str, x)))\n\n nlp_sentences['words_as_string'] = nlp_sentences['words_as_string']\\\n .replace(r'\\W{4,}', '', regex=True)\\\n .replace(',,,', 'comma_sym', regex=True)\\\n .replace(',', ' ', regex=True)\\\n .replace('comma_sym', ', ', regex=True)\\\n .replace('-LRB- ', '(', regex=True)\\\n .replace('LRB', '(', regex=True)\\\n .replace(' -RRB-', r')', regex=True)\\\n .replace('RRB', r')', regex=True)\\\n .replace('-RRB', r')', regex=True)\n\n # REGEX Values\n\n nlp_sentences = utils.find_re(nlp_sentences, find_val = 'dms_regex',\\\n search_col = 'words_as_string', new_col_name = 'dms_regex')\n nlp_sentences = utils.find_re(nlp_sentences, find_val = 'dd_regex',\\\n search_col = 'words_as_string', new_col_name = 'dd_regex')\n nlp_sentences = utils.find_re(nlp_sentences, find_val = 'digits_regex',\\\n search_col = 'words_as_string', new_col_name = 'digits_regex')\n\n # Format words to lowercase\n nlp_sentences['words_l'] = nlp_sentences['words']\\\n .astype(str).str.lower().transform(ast.literal_eval)\n\n return nlp_sentences\n\n except Exception as e:\n print(e)\n print('No SQL found. 
If you have a tsv file, try using preprocessed_sentences_tsv().')", "def add_new_column(header, rows, column_name, column_generator):\n updated_rows = []\n for row in rows:\n mutable_row = list(row)\n mutable_row.append(column_generator(row))\n updated_rows.append(mutable_row)\n mutable_header = list(header)\n mutable_header.append(column_name)\n return mutable_header, updated_rows", "def AddCol(Lst, addcol):\r\n NumCols = len(Lst[0])\r\n startLen = len(Lst)\r\n addcolLen = len(addcol)\r\n if addcolLen > startLen:\r\n for i in range(addcolLen-startLen):\r\n Lst.append([''for i in range(NumCols)])\r\n j = 0\r\n for i in range(len(Lst)):\r\n if j < len(addcol):\r\n Lst[i].append(addcol[j])\r\n j+=1\r\n else:\r\n Lst[i].append('')\r\n return Lst", "def createColumns(self):\n self.tableWidget.insertColumn(self.tableWidget.columnCount())", "def transform(self, corpus):\n text = []\n for sentence in corpus:\n text.append(\" \".join(sentence.astype(str)))\n df = container.DataFrame(text, generate_metadata=True)\n\n # create metadata for the text feature columns\n for column_index in range(df.shape[1]):\n col_dict = dict(df.metadata.query((metadata_base.ALL_ELEMENTS, column_index)))\n col_dict['structural_type'] = type(1.0)\n col_dict['name'] = 'fastlvm_' + str(column_index)\n col_dict['semantic_types'] = ('http://schema.org/Text',\n 'https://metadata.datadrivendiscovery.org/types/Attribute')\n df.metadata = df.metadata.update((metadata_base.ALL_ELEMENTS, column_index), col_dict)\n return df", "def bag_of_words(df, col):\n\tseries = df[col]\n\twrds \n\treturn wrds" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a column of information from sentences
def get_column(sentences, i):
    columns = []
    for sentence in sentences:
        columns.append([tup[i] for tup in sentence])
    return columns
[ "def separate_columns(parser_regex, line_text):\n raw_columns = re.search(parser_regex, line_text).groups()\n return [c.strip() for c in raw_columns]", "def get_doc_text(self, doc_name):\n doc_sents = []\n doc_sents_start_idx = []\n\n tmp = self._db_select(\"SELECT id FROM document WHERE name = \\\"{0}\\\"\".format(doc_name))\n if tmp:\n doc_id = tmp[0][0]\n doc_sent_query = \"SELECT stable_id FROM context WHERE type = \\\"sentence\\\" and stable_id like \\\"{0}::%\\\"\".format(doc_name)\n doc_sent_info = self._db_select(doc_sent_query)\n doc_sent_query = \"SELECT text FROM sentence WHERE document_id = {0}\".format(str(doc_id))\n doc_sent_text = self._db_select(doc_sent_query)\n\n if doc_sent_info and doc_sent_text:\n doc_text = \"\"\n for i in range(len(doc_sent_info)):\n if i < len(doc_sent_info) - 1:\n\n doc_text = doc_text + doc_sent_text[i][0]\n doc_sents.append(doc_sent_text[i][0])\n\n id_info = doc_sent_info[i][0].split(\":\")\n doc_sents_start_idx.append(id_info[3])\n end_index = int(id_info[4])\n\n id_info = doc_sent_info[i+1][0].split(\":\")\n start_index = int(id_info[3])\n\n diff = start_index - end_index\n if diff > 0:\n for j in range(diff):\n doc_text = doc_text + \" \"\n\n doc_text = doc_text + doc_sent_text[len(doc_sent_text)-1][0]\n\n # adding information of the last sentence\n id_info = doc_sent_info[len(doc_sent_info) - 1][0].split(\":\")\n doc_sents.append(doc_sent_text[len(doc_sent_text)-1][0])\n doc_sents_start_idx.append(int(id_info[3]))\n\n return doc_text, doc_sents, doc_sents_start_idx\n return \"\", [], []\n return \"\", [], []", "def get_sentences(*arg):\n\tsql_query =\"\"\"\n\tSELECT edited_message FROM twitch;\n\t\"\"\"\n\tmessages = pd.read_sql_query(sql_query,con)\n\tmessages['edited_message']=messages['edited_message'].apply(lambda x:re.split('[^a-z0-9]',x.lower()))\n\tif len(arg)!=0:\n\t\tmessages['edited_message']=messages['edited_message'].apply(english_stemmer)\n\tsentences = list(messages['edited_message'])\n\treturn sentences", "def sentence_entities(sentence):\n\n\n nlp = Rating.nlp_load(sentence)\n return [(ent.text, ent.label_) for ent in nlp.ents]", "def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. 
Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences", "def get_sentence_data(data_file):\n # Read sentences from file\n sents = []\n with open(data_file) as file:\n for line in file:\n # add them as arrays to make expansion easier\n sents.append(line.strip().split())\n \n # Get binary feature vects (d) and labels (l) from sents\n d = []\n l = []\n for line in sents:\n vect = numpy.zeros(feature_size)\n for i in line[1:]:\n i = i.split(\":\")\n word = i[0]\n value = i[1]\n #print word, value\n try:\n vect[features_index[word]] = float(value)\n except:\n pass\n l.append(line[0])\n d.append(vect)\n \n return d, l", "def text_error_cols(text): \n po = ParseOptions(min_null_count=0, max_null_count=999)\n en_dir = Dictionary() # open the dictionary only once\n sent = Sentence(text, en_dir, po)\n linkages = sent.parse()\n if sent.null_count() == 0 :\n return []\n else:\n error_cols=[]\n iws=[]\n for lkg in linkages:\n words=[w for w in lkg.words()]\n #desc(lkg)\n for k,w in enumerate(words):\n if is_no_link_ward(w):\n if k in iws:\n break\n else:\n iws.append(k)\n js=text_words2col_begin_end(text,words)\n error_cols.append(js[k-1])\n return error_cols", "def extract_text(bug_id, df, is_one):\n\n row = df.loc[df['bug_id'] == int(bug_id)].iloc[0]\n\n if is_one:\n short_desc = row.one_short_desc\n description = row.one_desc\n both = row.one_both\n return [short_desc, description, both]\n else:\n short_desc = row.bi_short_desc\n description = row.bi_desc\n both = row.bi_both\n return [short_desc, description, both]", "def getTextTagged(dataset):\n \n# nlp=spacy.load('fr_core_news_sm') #Load the pre-existed french model of spacy\n data={\"Name\":dataset[\"Name\"],\"TextTagged\":[]}\n texttagged=[]\n for text in dataset[\"Text\"]:\n ret=tagging(text,nlp)\n texttagged.append(ret[0])\n data[\"TextTagged\"]=texttagged\n return pd.DataFrame(data)", "def split_rows(sentences, column_names):\r\n new_sentences = []\r\n texts=[]\r\n root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']\r\n start = [dict(zip(column_names, root_values))]\r\n for sentence in sentences:\r\n info=[]\r\n rows = sentence.split('\\n')\r\n sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']\r\n sentence = start + sentence\r\n new_sentences.append(sentence)\r\n if \"newdoc id\" in rows[0]: # beginnings of new docs\r\n info.append(rows[1])\r\n info.append(rows[2])\r\n texts.append(info)\r\n else:\r\n info.append(rows[0])\r\n info.append(rows[1])\r\n texts.append(info)\r\n return new_sentences, texts", "def opinion_paragraph_sents():\n sid = SentimentIntensityAnalyzer()\n\n sentlist = []\n for op in article.opinions():\n for paragraph in op.fulltext.splitlines():\n sentlist.append([round(sid.polarity_scores(paragraph)['compound'] *100, 2), op.source, paragraph])\n\n df = pd.DataFrame(sentlist, columns=['score', 'source', 'paragraph'])\n df.to_csv('./dataframes/sents_opinion_paragraphs.csv')\n\n return sentlist", "def get_sent(self, num):\n \n assert num in self.res.sent_num.values, \"Sentence index is out of range for this dataset\"\n this_sentence = self.res[self.res.sent_num == num]\n \n star_if_true = lambda 
boolean: '*' if boolean else ''\n check_if_true = lambda boolean: '✓' if boolean else ''\n printout = pd.DataFrame({'true': self.tagset[this_sentence.y_true],\n 'predict': self.tagset[this_sentence.y_predict],\n 'correct?': (this_sentence.y_true == this_sentence.y_predict) \\\n .map(check_if_true).values,\n 'oov?': this_sentence.oov.map(star_if_true).values,\n 'ambiguous?': this_sentence.ambig.map(star_if_true).values},\n index = this_sentence.token,)\n print(printout)", "def _interview_text_data(self, caption):\n text_data = []\n for item_list in caption:\n text_data.append(item_list['text'])\n return ' '.join(text_data)", "def get_column_contents(self, col, include_whitespace=False):\n out = []\n capture = False\n capture_row = 0\n current = []\n for tok in self:\n if tok.aligned and tok.col == col:\n capture_row = tok.row\n capture = True\n elif (tok.aligned and tok.col != col) or (capture and tok.row != capture_row):\n capture = False\n if current:\n out.append(current)\n current = []\n\n if capture and (include_whitespace or not tok.is_whitespace()):\n current.append(tok)\n if current:\n out.append(current)\n return out", "def _processline (self, line) :\n uttid, text = re.split (r\"\\t\", line.strip ())\n spkrid = uttid[0:5]\n gender = uttid[0].lower ()\n text = self._cleantext (text)\n\n return uttid, text, gender, spkrid", "def sentence_factory(markup):\n sentence = []\n for line in markup.split(\"\\n\"):\n line = line.strip()\n if not line:\n continue\n token, offset, ner, lemma = line.split()\n sentence.append({\n \"word\": token,\n \"CharacterOffsetBegin\": offset,\n \"NER\": ner,\n \"lemma\": lemma,\n })\n return sentence", "def extractStrings(self):\n def extractRow(row):\n return [entry.text for entry in row]\n return [extractRow(row) for row in self.array2d]", "def _GetColumn(data, token):\n last_newline = data.rfind('\\n', 0, token.lexpos)\n if last_newline < 0:\n last_newline = 0\n column = token.lexpos - last_newline\n return column", "def extract_from_file(filename):\n\n data_table = pd.DataFrame.from_csv(\"data.csv\")\n\n #This is line is just extracting the raw twitter posts\n raw_body_text = data_table['raw_body_text']\n \n #sentiment_category = data_table['sentiment']\n author_follower_count = data_table['author_followers_count']\n is_reshare = data_table['is_reshare']\n loc = data_table['location']\n return raw_body_text,author_follower_count, is_reshare, loc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a tag sequence from conll format to another format.
def tags_from_conll(tags, scheme='bio'):
    def entity_span_from_conll(entity_span, scheme=scheme):
        if not entity_span:
            return entity_span
        # Logic are performed in order of precedence.
        if 'e' in scheme:
            entity_span[-1] = 'E' + entity_span[-1][1:]
        if 'b' in scheme:
            entity_span[0] = 'B' + entity_span[0][1:]
        if 's' in scheme and len(entity_span) == 1:
            entity_span[0] = 'S' + entity_span[0][1:]
        if 'i' in scheme:
            for i in range(1, len(entity_span) - 1):
                entity_span[i] = 'I' + entity_span[i][1:]
        return entity_span

    new_tags = tags[:]
    if not new_tags:
        return new_tags
    if isinstance(tags[0], str):
        new_tags = [new_tags]

    for k, sent_tag in enumerate(new_tags):
        i = 0
        for j, tag in enumerate(sent_tag):
            flag = False
            if tag[0] in 'BO':  # 'O' and 'B' indicates the end of previous sequence
                flag = True
            # If two tags are different, 'I' is also an indicator of separation
            elif tag[0] == 'I' and j and sent_tag[j - 1][1:] != tag[1:]:
                flag = True
            if flag:
                sent_tag[i:j] = entity_span_from_conll(sent_tag[i:j], scheme=scheme)
                i = j + (tag[0] == 'O')  # If tag is not 'O', we should include it in following sequence
                continue
        sent_tag[i:] = entity_span_from_conll(sent_tag[i:], scheme=scheme)

    if isinstance(tags[0], str):
        new_tags = new_tags[0]
    return new_tags
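A small illustrative sketch of the conversion, using assumed example tag sequences (the scheme string selects which of the B/I/E/S prefixes are rewritten):

    tags_from_conll(['I-PER', 'I-PER', 'O', 'I-LOC'], scheme='bio')
    # ['B-PER', 'I-PER', 'O', 'B-LOC']
    tags_from_conll(['I-PER', 'I-PER', 'O', 'I-LOC'], scheme='bioes')
    # ['B-PER', 'E-PER', 'O', 'S-LOC']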
[ "def tags_to_conll(tags):\n def entity_span_to_conll(entity_span, prev_is_same_entity=False):\n if not entity_span:\n return entity_span\n for i in range(len(entity_span)):\n entity_span[i] = 'I' + entity_span[i][1:]\n if prev_is_same_entity:\n entity_span[0] = 'B' + entity_span[0][1:]\n return entity_span\n\n new_tags = tags[:]\n if not new_tags:\n return new_tags\n if isinstance(tags[0], str):\n new_tags = [new_tags]\n\n for k, sent_tag in enumerate(new_tags):\n i = 0\n for j, tag in enumerate(sent_tag):\n if tag[0] in 'OBS':\n prev_is_same_entity = i and (sent_tag[i - 1][1:] == sent_tag[i][1:])\n # print(i, j, sent_tag[i-1], sent_tag[i], sent_tag[i - 1][1:] == tag[1:])\n sent_tag[i:j] = entity_span_to_conll(sent_tag[i:j], prev_is_same_entity=prev_is_same_entity)\n i = j + (tag[0] == 'O')\n else:\n continue\n prev_is_same_entity = i and i <= j and (sent_tag[i - 1][1:] == sent_tag[i][1:])\n sent_tag[i:] = entity_span_to_conll(sent_tag[i:], prev_is_same_entity=prev_is_same_entity)\n\n if isinstance(tags[0], str):\n new_tags = new_tags[0]\n return new_tags", "def _iob1_to_iob2(tags: List[str]) -> List[str]:\n # https://gist.github.com/allanj/b9bd448dc9b70d71eb7c2b6dd33fe4ef\n\n result = []\n for i, tag in enumerate(tags):\n if tag == \"O\":\n result.append(\"O\")\n continue\n\n split = tag.split(\"-\")\n if len(split) != 2 or split[0] not in [\"I\", \"B\"]:\n raise ValueError(\"Invalid IOB1 sequence\")\n\n if split[0] == \"B\":\n result.append(tag)\n elif i == 0 or tags[i - 1] == \"O\":\n result.append(\"B\" + tag[1:])\n elif tags[i - 1][1:] == tag[1:]:\n result.append(tag)\n else:\n result.append(\"B\" + tag[1:])\n\n return result", "def tag2ot(ote_tag_sequence):\n n_tags = len(ote_tag_sequence)\n ot_sequence = []\n beg, end = -1, -1\n for i in range(n_tags):\n tag = ote_tag_sequence[i]\n if tag == 'S':\n ot_sequence.append((i, i))\n elif tag == 'B':\n beg = i\n elif tag == 'E':\n end = i\n if end > beg > -1:\n ot_sequence.append((beg, end))\n beg, end = -1, -1\n return ot_sequence", "def tag2ts(ts_tag_sequence):\n n_tags = len(ts_tag_sequence)\n ts_sequence, sentiments = [], []\n beg, end = -1, -1\n for i in range(n_tags):\n ts_tag = ts_tag_sequence[i]\n # current position and sentiment\n eles = ts_tag.split('-')\n if len(eles) == 2:\n pos, sentiment = eles\n else:\n pos, sentiment = 'O', 'O'\n if sentiment != 'O':\n # current word is a subjective word\n sentiments.append(sentiment)\n if pos == 'S':\n # singleton\n ts_sequence.append((i, i, sentiments[0]))\n sentiments = []\n elif pos == 'B':\n beg = i\n elif pos == 'E':\n end = i\n # schema1: only the consistent sentiment tags are accepted\n # that is, all of the sentiment tags are the same\n if end > beg > -1 and len(set(sentiments)) == 1:\n ts_sequence.append((beg, end, sentiment))\n sentiments = []\n beg, end = -1, -1\n return ts_sequence", "def data_to_conll(sentences):\n new_sentences = []\n for sentence in sentences:\n tags = [tup[-1] for tup in sentence]\n new_tags = tags_to_conll(tags)\n new_sentences.append([\n tup[:-1] + [tag] for tup, tag in zip(sentence, new_tags)\n ])\n return new_sentences", "def _decode_seq_tags(self):\n if self.decode_group_size > 0:\n raise NotImplementedError('Unsupported cnn group for CRF')\n else:\n self._decode_with_seq_encodes()\n # self._decode_cnn_pooling_all()\n # self._decode_sim_WX_B()\n self._compute_seqtag_scores_and_loss()\n self._add_weight_decay_regularizer()", "def iob2(tags):\n # print(\"before tags:{}\".format(tags))\n for i, tag in enumerate(tags):\n # 
print(\"i:{}\\ttag:{}\".format(i, tag))\n if tag == 'O':\n continue\n split = tag.split('-')\n if len(split) != 2 or split[0] not in ['I', 'B']:\n return False\n if split[0] == 'B':\n continue\n elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n elif tags[i - 1][1:] == tag[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n # print(\"after tags:{}\\n\".format(tags))\n # exit()\n return True", "def test_SeqLike_interconversion():\n seq = \"TCGCACACTGCA\"\n\n a1 = SeqLike(seq, \"dna\").nt().to_str()\n a2 = SeqLike(seq, \"dna\").aa().to_str()\n assert isinstance(a1, str)\n assert isinstance(a2, str)\n\n b1 = SeqLike(seq, \"dna\").nt().to_seq()\n b2 = SeqLike(seq, \"dna\").aa().to_seq()\n assert isinstance(b1, Seq)\n assert isinstance(b2, Seq)\n\n c1 = SeqLike(seq, \"dna\").nt().to_seqrecord()\n c2 = SeqLike(seq, \"dna\").aa().to_seqrecord()\n assert isinstance(c1, SeqRecord)\n assert isinstance(c2, SeqRecord)\n\n seq1 = SeqLike(seq, \"dna\").nt()\n seq2 = SeqLike(seq, \"dna\").aa()\n d1 = seq1.to_onehot()\n d2 = seq2.to_onehot()\n assert isinstance(d1, np.ndarray)\n assert isinstance(d2, np.ndarray)\n\n e1 = SeqLike(d1, \"dna\", alphabet=seq1.alphabet).to_str()\n e2 = SeqLike(d2, \"aa\", alphabet=seq2.alphabet).to_str()\n assert e1 == a1\n assert e2 == a2\n # when interconverting, should the letter_annotations be empty?\n assert seq2.letter_annotations == {}\n seq3 = SeqLike(seq2, \"dna\")\n seqnums = [str(i + 1) for i in range(len(seq3))]\n assert seq3.letter_annotations[\"seqnums\"] == seqnums\n assert seq3[:2].letter_annotations[\"seqnums\"] == seqnums[:2]", "def toIOB(self, tags):\n for i in range(len(tags)):\n tag = tags[i]\n if tag[0] == 'S':\n tags[i] = 'B'+tag[1:]\n elif tag[0] == 'E':\n tags[i] = 'I'+tag[1:]\n return tags", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def _convert_seq(sequence, new_class):\n return [new_class(obj) for obj in sequence]", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def taggedsents_to_conll(sentences):\n for sentence in sentences:\n yield from taggedsent_to_conll(sentence)\n yield \"\\n\\n\"", "def translate(rec):\n # Truncate to nearest codon\n end = len(rec.seq) // 3 * 3\n rec.seq = rec.seq[:end].translate()\n rec.description = f\"translated {rec.description}\"\n return rec", "def convert(mdi_model):\r\n\r\n timer = timer_m.Timer()\r\n reporter_m.info(\"Converting MDI to TAG ...\")\r\n\r\n tag_model = tag_m.TAG()\r\n\r\n # type conversions\r\n mdi_model.tags_to_type(mdi_m.MDIFreeTag)\r\n\r\n # tag_data\r\n for num_tag in range(len(mdi_model.tags)):\r\n\r\n tag_data = MDIToModel._to_tag_data(mdi_model, num_tag)\r\n tag_model.tags.append(tag_data)\r\n\r\n # header\r\n MDIToModel._calc_tag_header(tag_model, mdi_model)\r\n\r\n time = timer.time()\r\n reporter_m.info(\"Converting MDI to TAG DONE (time={})\".format(time))\r\n\r\n return tag_model", "def __convert_first_level_tags(self, chunk, tag):\n\n html_tag = self.first_level_tags[tag]\n if html_tag == '<blockquote>':\n for index, line in enumerate(chunk):\n line = line + '<br>'\n chunk[index] = line\n\n chunk = list(map(lambda elem: elem[len(tag):], chunk))\n if html_tag in ('<ul>', '<ol>'):\n chunk = [\n 
self.__enclose_in_html_tag(elem, '<li>') for elem in chunk\n ]\n chunk[0] = html_tag + chunk[0]\n chunk[-1] = chunk[-1] + self.__create_closing_html_tag(html_tag)\n return chunk", "def ChangedSequence(data, seq_constructor=Sequence):\n return seq_constructor(str(data).replace('.','-'))", "def rna_to_dna(self):\n self.seq = self.seq.replace(\"U\", \"T\")", "def collapse_tags(self, my_etree):\n chars = []\n is_tag_start = False # True if inside tag\n tag_start_node = None # Pointer to current node. \n tag_start_char = '['\n tag_end_char = ']'\n\n # For every node with text\n for node,text in self._itertext(my_etree):\n # Go through each node's text character by character\n for i,c in enumerate(text):\n if c == tag_start_char: # Tag is starting!\n assert not is_tag_start # Better not already be inside a tag!\n is_tag_start = True \n tag_start_node = node \n chars = []\n elif c == tag_end_char: # Tag is ending\n assert is_tag_start # Better have seen a tag start!\n is_tag_start = False\n # If tag_start_node is the same as current node, then we don't need to do anything\n # But otherwise:\n if node != tag_start_node:\n # Tag started in different node, so move all the chars we've encountered since then\n # to the tag_start_node\n chars.append(c)\n tag_start_node.text += ''.join(chars)\n node.text = text[i+1:] # Remove characters from this node\n else:\n # Normal text character\n if is_tag_start and node != tag_start_node:\n # Need to save these chars to append to text in openbrac_node\n chars.append(c)\n\n # If we're here, that means we've consumed all the text in the current node.\n # Check if this node was part of a tag, yet did not start the tag\n if is_tag_start and node!= tag_start_node:\n # Need to remove this text completely as we've saved all of it inside chars for moving\n # into the start_node\n node.text = \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a tag sequence to conll format from our format.
def tags_to_conll(tags):
    def entity_span_to_conll(entity_span, prev_is_same_entity=False):
        if not entity_span:
            return entity_span
        for i in range(len(entity_span)):
            entity_span[i] = 'I' + entity_span[i][1:]
        if prev_is_same_entity:
            entity_span[0] = 'B' + entity_span[0][1:]
        return entity_span

    new_tags = tags[:]
    if not new_tags:
        return new_tags
    if isinstance(tags[0], str):
        new_tags = [new_tags]

    for k, sent_tag in enumerate(new_tags):
        i = 0
        for j, tag in enumerate(sent_tag):
            if tag[0] in 'OBS':
                prev_is_same_entity = i and (sent_tag[i - 1][1:] == sent_tag[i][1:])
                # print(i, j, sent_tag[i-1], sent_tag[i], sent_tag[i - 1][1:] == tag[1:])
                sent_tag[i:j] = entity_span_to_conll(sent_tag[i:j], prev_is_same_entity=prev_is_same_entity)
                i = j + (tag[0] == 'O')
            else:
                continue
        prev_is_same_entity = i and i <= j and (sent_tag[i - 1][1:] == sent_tag[i][1:])
        sent_tag[i:] = entity_span_to_conll(sent_tag[i:], prev_is_same_entity=prev_is_same_entity)

    if isinstance(tags[0], str):
        new_tags = new_tags[0]
    return new_tags
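An illustrative sketch with assumed BIOES-style input tags, showing the IOB1-style output (a 'B-' prefix only appears where two same-type entities are adjacent):

    tags_to_conll(['B-PER', 'E-PER', 'O', 'S-LOC'])
    # ['I-PER', 'I-PER', 'O', 'I-LOC']
    tags_to_conll(['S-PER', 'S-PER'])
    # ['I-PER', 'B-PER']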
[ "def _iob1_to_iob2(tags: List[str]) -> List[str]:\n # https://gist.github.com/allanj/b9bd448dc9b70d71eb7c2b6dd33fe4ef\n\n result = []\n for i, tag in enumerate(tags):\n if tag == \"O\":\n result.append(\"O\")\n continue\n\n split = tag.split(\"-\")\n if len(split) != 2 or split[0] not in [\"I\", \"B\"]:\n raise ValueError(\"Invalid IOB1 sequence\")\n\n if split[0] == \"B\":\n result.append(tag)\n elif i == 0 or tags[i - 1] == \"O\":\n result.append(\"B\" + tag[1:])\n elif tags[i - 1][1:] == tag[1:]:\n result.append(tag)\n else:\n result.append(\"B\" + tag[1:])\n\n return result", "def tags_from_conll(tags, scheme='bio'):\n def entity_span_from_conll(entity_span, scheme=scheme):\n if not entity_span:\n return entity_span\n # Logic are performed in order of precedence.\n if 'e' in scheme:\n entity_span[-1] = 'E' + entity_span[-1][1:]\n if 'b' in scheme:\n entity_span[0] = 'B' + entity_span[0][1:]\n if 's' in scheme and len(entity_span) == 1:\n entity_span[0] = 'S' + entity_span[0][1:]\n if 'i' in scheme:\n for i in range(1, len(entity_span) - 1):\n entity_span[i] = 'I' + entity_span[i][1:]\n return entity_span\n\n new_tags = tags[:]\n if not new_tags:\n return new_tags\n if isinstance(tags[0], str):\n new_tags = [new_tags]\n\n for k, sent_tag in enumerate(new_tags):\n i = 0\n for j, tag in enumerate(sent_tag):\n flag = False\n if tag[0] in 'BO': # 'O' and 'B' indicates the end of previous sequence\n flag = True\n # If two tags are different, 'I' is also an indicator of separation\n elif tag[0] == 'I' and j and sent_tag[j - 1][1:] != tag[1:]:\n flag = True\n if flag:\n sent_tag[i:j] = entity_span_from_conll(sent_tag[i:j], scheme=scheme)\n i = j + (tag[0] == 'O') # If tag is not 'O', we should include it in following sequence\n continue\n sent_tag[i:] = entity_span_from_conll(sent_tag[i:], scheme=scheme)\n\n if isinstance(tags[0], str):\n new_tags = new_tags[0]\n return new_tags", "def _decode_seq_tags(self):\n if self.decode_group_size > 0:\n raise NotImplementedError('Unsupported cnn group for CRF')\n else:\n self._decode_with_seq_encodes()\n # self._decode_cnn_pooling_all()\n # self._decode_sim_WX_B()\n self._compute_seqtag_scores_and_loss()\n self._add_weight_decay_regularizer()", "def tag2ot(ote_tag_sequence):\n n_tags = len(ote_tag_sequence)\n ot_sequence = []\n beg, end = -1, -1\n for i in range(n_tags):\n tag = ote_tag_sequence[i]\n if tag == 'S':\n ot_sequence.append((i, i))\n elif tag == 'B':\n beg = i\n elif tag == 'E':\n end = i\n if end > beg > -1:\n ot_sequence.append((beg, end))\n beg, end = -1, -1\n return ot_sequence", "def data_to_conll(sentences):\n new_sentences = []\n for sentence in sentences:\n tags = [tup[-1] for tup in sentence]\n new_tags = tags_to_conll(tags)\n new_sentences.append([\n tup[:-1] + [tag] for tup, tag in zip(sentence, new_tags)\n ])\n return new_sentences", "def tag2ts(ts_tag_sequence):\n n_tags = len(ts_tag_sequence)\n ts_sequence, sentiments = [], []\n beg, end = -1, -1\n for i in range(n_tags):\n ts_tag = ts_tag_sequence[i]\n # current position and sentiment\n eles = ts_tag.split('-')\n if len(eles) == 2:\n pos, sentiment = eles\n else:\n pos, sentiment = 'O', 'O'\n if sentiment != 'O':\n # current word is a subjective word\n sentiments.append(sentiment)\n if pos == 'S':\n # singleton\n ts_sequence.append((i, i, sentiments[0]))\n sentiments = []\n elif pos == 'B':\n beg = i\n elif pos == 'E':\n end = i\n # schema1: only the consistent sentiment tags are accepted\n # that is, all of the sentiment tags are the same\n if end > beg > -1 and 
len(set(sentiments)) == 1:\n ts_sequence.append((beg, end, sentiment))\n sentiments = []\n beg, end = -1, -1\n return ts_sequence", "def toIOB(self, tags):\n for i in range(len(tags)):\n tag = tags[i]\n if tag[0] == 'S':\n tags[i] = 'B'+tag[1:]\n elif tag[0] == 'E':\n tags[i] = 'I'+tag[1:]\n return tags", "def iob2(tags):\n # print(\"before tags:{}\".format(tags))\n for i, tag in enumerate(tags):\n # print(\"i:{}\\ttag:{}\".format(i, tag))\n if tag == 'O':\n continue\n split = tag.split('-')\n if len(split) != 2 or split[0] not in ['I', 'B']:\n return False\n if split[0] == 'B':\n continue\n elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n elif tags[i - 1][1:] == tag[1:]:\n continue\n else: # conversion IOB1 to IOB2\n tags[i] = 'B' + tag[1:]\n # print(\"after tags:{}\\n\".format(tags))\n # exit()\n return True", "def taggedsents_to_conll(sentences):\n for sentence in sentences:\n yield from taggedsent_to_conll(sentence)\n yield \"\\n\\n\"", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def test_SeqLike_interconversion():\n seq = \"TCGCACACTGCA\"\n\n a1 = SeqLike(seq, \"dna\").nt().to_str()\n a2 = SeqLike(seq, \"dna\").aa().to_str()\n assert isinstance(a1, str)\n assert isinstance(a2, str)\n\n b1 = SeqLike(seq, \"dna\").nt().to_seq()\n b2 = SeqLike(seq, \"dna\").aa().to_seq()\n assert isinstance(b1, Seq)\n assert isinstance(b2, Seq)\n\n c1 = SeqLike(seq, \"dna\").nt().to_seqrecord()\n c2 = SeqLike(seq, \"dna\").aa().to_seqrecord()\n assert isinstance(c1, SeqRecord)\n assert isinstance(c2, SeqRecord)\n\n seq1 = SeqLike(seq, \"dna\").nt()\n seq2 = SeqLike(seq, \"dna\").aa()\n d1 = seq1.to_onehot()\n d2 = seq2.to_onehot()\n assert isinstance(d1, np.ndarray)\n assert isinstance(d2, np.ndarray)\n\n e1 = SeqLike(d1, \"dna\", alphabet=seq1.alphabet).to_str()\n e2 = SeqLike(d2, \"aa\", alphabet=seq2.alphabet).to_str()\n assert e1 == a1\n assert e2 == a2\n # when interconverting, should the letter_annotations be empty?\n assert seq2.letter_annotations == {}\n seq3 = SeqLike(seq2, \"dna\")\n seqnums = [str(i + 1) for i in range(len(seq3))]\n assert seq3.letter_annotations[\"seqnums\"] == seqnums\n assert seq3[:2].letter_annotations[\"seqnums\"] == seqnums[:2]", "def convert(mdi_model):\r\n\r\n timer = timer_m.Timer()\r\n reporter_m.info(\"Converting MDI to TAG ...\")\r\n\r\n tag_model = tag_m.TAG()\r\n\r\n # type conversions\r\n mdi_model.tags_to_type(mdi_m.MDIFreeTag)\r\n\r\n # tag_data\r\n for num_tag in range(len(mdi_model.tags)):\r\n\r\n tag_data = MDIToModel._to_tag_data(mdi_model, num_tag)\r\n tag_model.tags.append(tag_data)\r\n\r\n # header\r\n MDIToModel._calc_tag_header(tag_model, mdi_model)\r\n\r\n time = timer.time()\r\n reporter_m.info(\"Converting MDI to TAG DONE (time={})\".format(time))\r\n\r\n return tag_model", "def __convert_first_level_tags(self, chunk, tag):\n\n html_tag = self.first_level_tags[tag]\n if html_tag == '<blockquote>':\n for index, line in enumerate(chunk):\n line = line + '<br>'\n chunk[index] = line\n\n chunk = list(map(lambda elem: elem[len(tag):], chunk))\n if html_tag in ('<ul>', '<ol>'):\n chunk = [\n self.__enclose_in_html_tag(elem, '<li>') for elem in 
chunk\n ]\n chunk[0] = html_tag + chunk[0]\n chunk[-1] = chunk[-1] + self.__create_closing_html_tag(html_tag)\n return chunk", "def to_conll_format(self):\n self._change_coref_values()\n lines = []\n header = '#begin document ({}); part {}\\n'.format(self.file_id, self.part)\n lines.append(header)\n for i in range(len(self.sents)):\n sent = self.sents[i]\n for row_dict in sent:\n row_vals = [self.file_id,\n str(int(self.part)),\n row_dict['word_num'],\n row_dict['word'],\n row_dict['pos'],\n row_dict['parse'],\n row_dict['lemma'],\n '-',\n row_dict['sense'],\n '-',\n row_dict['ne'],\n '-',\n row_dict['coref']\n ]\n line = '\\t'.join(row_vals) + '\\n'\n lines.append(line)\n lines.append('\\n')\n lines.append('#end document\\n')\n return ''.join(lines)", "def get_annotated_sequence(segments, seg_to_seq, linkers=\"GSGPG\", N_tag=\"\", C_tag=\"\"):\n\n seg_to_seq = seq_to_seq_map(seg_to_seq)\n N = len(segments) \n if u.is_str(linkers):\n linkers = [linkers]*(N-1)\n \n assert len(linkers)==N-1, (\"Length of linkers must be one less than the number of segments.\"+\n \"Is {NL}, but should be {N}\".format(NL=len(linkers), N=N))\n \n max_seg_len = max([len(seg_to_seq[s]) for s in segments])\n \n aa_segments = [seg_to_seq[s].ljust(max_seg_len).replace('-', '') +\"\\t|\"+s for s in segments]\n \n lines = [N_tag] + list(u.roundrobin(aa_segments, linkers)) + [C_tag]\n lines = \"\\n\".join(lines)\n return lines", "def collapse_tags(self, my_etree):\n chars = []\n is_tag_start = False # True if inside tag\n tag_start_node = None # Pointer to current node. \n tag_start_char = '['\n tag_end_char = ']'\n\n # For every node with text\n for node,text in self._itertext(my_etree):\n # Go through each node's text character by character\n for i,c in enumerate(text):\n if c == tag_start_char: # Tag is starting!\n assert not is_tag_start # Better not already be inside a tag!\n is_tag_start = True \n tag_start_node = node \n chars = []\n elif c == tag_end_char: # Tag is ending\n assert is_tag_start # Better have seen a tag start!\n is_tag_start = False\n # If tag_start_node is the same as current node, then we don't need to do anything\n # But otherwise:\n if node != tag_start_node:\n # Tag started in different node, so move all the chars we've encountered since then\n # to the tag_start_node\n chars.append(c)\n tag_start_node.text += ''.join(chars)\n node.text = text[i+1:] # Remove characters from this node\n else:\n # Normal text character\n if is_tag_start and node != tag_start_node:\n # Need to save these chars to append to text in openbrac_node\n chars.append(c)\n\n # If we're here, that means we've consumed all the text in the current node.\n # Check if this node was part of a tag, yet did not start the tag\n if is_tag_start and node!= tag_start_node:\n # Need to remove this text completely as we've saved all of it inside chars for moving\n # into the start_node\n node.text = \"\"", "def translate(rec):\n # Truncate to nearest codon\n end = len(rec.seq) // 3 * 3\n rec.seq = rec.seq[:end].translate()\n rec.description = f\"translated {rec.description}\"\n return rec", "def to_tags(seq_len: int, spans: List[Span]) -> list:\n tags = [\"O\"] * seq_len\n for span in spans:\n pos = span.start\n if pos < seq_len:\n tags[pos] = \"B-{0}\".format(span.label)\n pos += 1\n while pos < min(span.end + 1, seq_len):\n tags[pos] = \"I-{0}\".format(span.label)\n pos += 1\n\n return tags" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert sentences to conll format. Can also be used to convert a sequence of tags to conll format.
def data_to_conll(sentences):
    new_sentences = []
    for sentence in sentences:
        tags = [tup[-1] for tup in sentence]
        new_tags = tags_to_conll(tags)
        new_sentences.append([
            tup[:-1] + [tag] for tup, tag in zip(sentence, new_tags)
        ])
    return new_sentences
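A minimal sketch with assumed input, where each token is a list whose last element is the tag:

    sentences = [[['John', 'B-PER'], ['Smith', 'E-PER'], ['visited', 'O']]]
    data_to_conll(sentences)
    # [[['John', 'I-PER'], ['Smith', 'I-PER'], ['visited', 'O']]]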
[ "def taggedsents_to_conll(sentences):\n for sentence in sentences:\n yield from taggedsent_to_conll(sentence)\n yield \"\\n\\n\"", "def tags_to_conll(tags):\n def entity_span_to_conll(entity_span, prev_is_same_entity=False):\n if not entity_span:\n return entity_span\n for i in range(len(entity_span)):\n entity_span[i] = 'I' + entity_span[i][1:]\n if prev_is_same_entity:\n entity_span[0] = 'B' + entity_span[0][1:]\n return entity_span\n\n new_tags = tags[:]\n if not new_tags:\n return new_tags\n if isinstance(tags[0], str):\n new_tags = [new_tags]\n\n for k, sent_tag in enumerate(new_tags):\n i = 0\n for j, tag in enumerate(sent_tag):\n if tag[0] in 'OBS':\n prev_is_same_entity = i and (sent_tag[i - 1][1:] == sent_tag[i][1:])\n # print(i, j, sent_tag[i-1], sent_tag[i], sent_tag[i - 1][1:] == tag[1:])\n sent_tag[i:j] = entity_span_to_conll(sent_tag[i:j], prev_is_same_entity=prev_is_same_entity)\n i = j + (tag[0] == 'O')\n else:\n continue\n prev_is_same_entity = i and i <= j and (sent_tag[i - 1][1:] == sent_tag[i][1:])\n sent_tag[i:] = entity_span_to_conll(sent_tag[i:], prev_is_same_entity=prev_is_same_entity)\n\n if isinstance(tags[0], str):\n new_tags = new_tags[0]\n return new_tags", "def convert_cn_sentence(self, sentence, reverse=False):\n cutted_sentence = list(jieba.cut(sentence))\n words_seq = []\n for word in cutted_sentence:\n if word in self.word_set:\n words_seq.append(word)\n else:\n for ch in word:\n if ch in self.word_set:\n words_seq.append(ch)\n elif is_chinese(ch):\n words_seq.append(self.UNK)\n else:\n words_seq.append(self.PUNC)\n index_seq = []\n for word in words_seq:\n index_seq.append(self.word2index[word])\n # input sentence may need to reverse\n if reverse:\n words_seq.reverse()\n index_seq.reverse()\n return words_seq, index_seq", "def to_conll_format(self):\n self._change_coref_values()\n lines = []\n header = '#begin document ({}); part {}\\n'.format(self.file_id, self.part)\n lines.append(header)\n for i in range(len(self.sents)):\n sent = self.sents[i]\n for row_dict in sent:\n row_vals = [self.file_id,\n str(int(self.part)),\n row_dict['word_num'],\n row_dict['word'],\n row_dict['pos'],\n row_dict['parse'],\n row_dict['lemma'],\n '-',\n row_dict['sense'],\n '-',\n row_dict['ne'],\n '-',\n row_dict['coref']\n ]\n line = '\\t'.join(row_vals) + '\\n'\n lines.append(line)\n lines.append('\\n')\n lines.append('#end document\\n')\n return ''.join(lines)", "def _convert_task_to_conversations(self, model: str):\n self._print_progress(\n f'Converting task data to conversations format for {model}'\n )\n config = self._get_task_conversion_config(model)\n\n with capture_output():\n parser = convert_task_setup_args()\n parser.set_params(**config)\n opt = parser.parse_args(args=[])\n convert_task_data(opt)", "def tag_sentence(self, sentence):\n fp_lapos = os.path.expanduser('~/cltk_data/multilingual/software/lapos')\n fp_model = os.path.expanduser('~/cltk_data/{0}/model/{1}_models_cltk/taggers/pos'.format(self.language, self.language)) # rel from Lapos dir\n try:\n lapos_command = 'cd {0} && echo \"{1}\" | ./lapos -t -m {2}'.format(fp_lapos, sentence, fp_model)\n p_out = subprocess.check_output(lapos_command,\n shell=True,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n except subprocess.CalledProcessError as cp_err:\n logger.error('Lapos call failed. 
Check installation.')\n logger.error(sentence)\n print(cp_err)\n raise\n\n # Parse output from Lapos\n # TODO: Make this cleaner/faster\n output_list = p_out.split('\\n')\n output_list_filtered = [l for l in output_list if not l.startswith('loading the models')]\n output_list_filtered = [l for l in output_list_filtered if not l == 'done']\n output_list_filtered = [l for l in output_list_filtered if l]\n\n for line in output_list_filtered:\n word_tags = line.split(' ')\n tagged_sentence = []\n for word_tag in word_tags:\n word, tag = word_tag.split('/')\n word_tag_tuple = (word, tag)\n tagged_sentence.append(word_tag_tuple)\n\n return tagged_sentence", "def preprocess_sentences(sentences, vocab):\n # Add sentence boundaries, canonicalize, and handle unknowns\n words = flatten([\"<s>\"] + s + [\"</s>\"] for s in sentences)\n words = [canonicalize_word(w, wordset=vocab.word_to_id)\n for w in words]\n return np.array(vocab.words_to_ids(words))", "def _assemble_conversion(stmt):\n reactants = [_assemble_agent_str(r) for r in stmt.obj_from]\n products = [_assemble_agent_str(r) for r in stmt.obj_to]\n sb = SentenceBuilder()\n if stmt.subj is not None:\n subj_str = _assemble_agent_str(stmt.subj)\n sb.append(subj_str)\n sb.append(' catalyzes the conversion of ')\n sb.append_as_list(reactants)\n sb.append(' into ')\n sb.append_as_list(products)\n else:\n sb.append_as_list(reactants)\n sb.append(' is converted into ')\n sb.append_as_list(products)\n sb.make_sentence()\n return sb", "def breakdown_to_sentences(self, sentance_size=80):\n sentence_representation = []\n for element in self.corpus:\n z = map(''.join, zip(*[iter(element)] * sentance_size))\n w_split = [i.split() for i in z]\n sentence_representation += w_split\n return sentence_representation", "def tags_from_conll(tags, scheme='bio'):\n def entity_span_from_conll(entity_span, scheme=scheme):\n if not entity_span:\n return entity_span\n # Logic are performed in order of precedence.\n if 'e' in scheme:\n entity_span[-1] = 'E' + entity_span[-1][1:]\n if 'b' in scheme:\n entity_span[0] = 'B' + entity_span[0][1:]\n if 's' in scheme and len(entity_span) == 1:\n entity_span[0] = 'S' + entity_span[0][1:]\n if 'i' in scheme:\n for i in range(1, len(entity_span) - 1):\n entity_span[i] = 'I' + entity_span[i][1:]\n return entity_span\n\n new_tags = tags[:]\n if not new_tags:\n return new_tags\n if isinstance(tags[0], str):\n new_tags = [new_tags]\n\n for k, sent_tag in enumerate(new_tags):\n i = 0\n for j, tag in enumerate(sent_tag):\n flag = False\n if tag[0] in 'BO': # 'O' and 'B' indicates the end of previous sequence\n flag = True\n # If two tags are different, 'I' is also an indicator of separation\n elif tag[0] == 'I' and j and sent_tag[j - 1][1:] != tag[1:]:\n flag = True\n if flag:\n sent_tag[i:j] = entity_span_from_conll(sent_tag[i:j], scheme=scheme)\n i = j + (tag[0] == 'O') # If tag is not 'O', we should include it in following sequence\n continue\n sent_tag[i:] = entity_span_from_conll(sent_tag[i:], scheme=scheme)\n\n if isinstance(tags[0], str):\n new_tags = new_tags[0]\n return new_tags", "def preprocess(in_sentence, language):\r\n # TODO: Implement Function\r\n # if language is english\r\n start = \"SENTSTART \"\r\n end = \" SENTEND\"\r\n out_sentence = in_sentence.strip().lower()\r\n \r\n if language == \"e\":\r\n out_sentence = re.sub(r'([,:;()\\-+<>=.?!*/\"])',r' \\1 ',out_sentence)\r\n\r\n \r\n if language == \"f\":\r\n out_sentence = re.sub(r'([,:;()\\-+<>=.?!*/\"])',r' \\1 ',out_sentence)\r\n\r\n #for l', I think this we do not 
have to do this step since next step covers this\r\n out_sentence = re.sub(r'(\\b)(l\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for consonant assume y is not a consonant\r\n out_sentence = re.sub(r'(\\b)([aeiouqwrtypsdfghjklzxcvbnm]\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for que\r\n out_sentence = re.sub(r'(\\b)(qu\\')(\\w+)',r'\\1\\2 \\3',out_sentence)\r\n #for on and il\r\n out_sentence = re.sub(r'(\\w+)(\\')(on|il)(\\b)',r'\\1\\2 \\3\\4',out_sentence)\r\n #for d’abord, d’accord, d’ailleurs, d’habitude special cases\r\n out_sentence = re.sub(r'(d\\') (abord|accord|ailleurs|habitude)(\\b)',r'\\1\\2\\3',out_sentence)\r\n \r\n out_sentence = start + out_sentence + end\r\n out_sentence = re.sub(r' {2,}',r' ',out_sentence) \r\n return out_sentence", "def convert(self, token_tml):\n sents = []\n cur_sent = []\n last_sent = -1\n for line in open(token_tml):\n line = line.strip()\n if not line:\n continue\n fn, sent_id, tok_id, \\\n surface_form, tmlTag, tmlTagId, tmlTagLoc = [eval(v) for v in line.split('|||')]\n cur_ent = [tok_id,\n surface_form,\n self.consolidate_fact_value(fn, sent_id, tmlTagId) \\\n if (tmlTag == 'EVENT')\\\n else \"_\"]\n\n if sent_id != last_sent:\n if cur_sent:\n toks = nlp(unicode(\" \".join([word[1] for word in cur_sent])))\n dep_feats = self.get_dep_feats(toks, cur_sent)\n sents.append([fb_feat + dep_feat\n for (fb_feat, dep_feat) in zip(cur_sent, dep_feats)])\n cur_sent = [cur_ent]\n else:\n cur_sent.append(cur_ent)\n last_sent = sent_id\n\n return '\\n\\n'.join(['\\n'.join(['\\t'.join(map(str, word))\n for word in sent])\n for sent in sents\n if len(sent) > self.sentence_threshold]) + \"\\n\\n\" # filter short sentences", "def tokenize(self, document):\n raw_text = '\\n'.join(document.text) if isinstance(document.text, list) else document.text\n conllu_output_string = ''\n tokenizer = self.nlp.generate_tokenizer(self.lang)\n for par_id, text in enumerate(raw_text.split('\\n')):\n conllu_output_string += self._reldi_tokenizer(self.nlp.process[self.type](tokenizer, text, self.lang), par_id + 1)\n\n document.conll_file = conll.CoNLLFile(input_str=conllu_output_string)", "def format_output(doc):\n sentences = []\n for sent in doc.sents:\n verbs = [w.text for w in sent if w.pos_ == 'VERB']\n sentences.append(ujson.dumps(verbs))\n return tuple(sentences)", "def get_sentences_from_unparsed_text(doc, save_in_dir):\n\n # Delete previous parsing results (if existing)\n if os.path.exists(config.PARSE_RESULTS_PATH+'/coref.conll'):\n os.remove(config.PARSE_RESULTS_PATH+'/coref.conll')\n if os.path.exists(config.PARSE_RESULTS_PATH+'/coref.html'):\n os.remove(config.PARSE_RESULTS_PATH+'/coref.html')\n \n input_type = 'cat'\n CorZu_type = 'CorZu'\n \n # Command line string\n # Parse document and store results in /CorZu_results \n cmd = \"%(type)s %(filename)s | \" \\\n \"python %(parse_path)s/ParZu_NEW/parzu -q -o conll > \"\\\n \"%(parse_res_path)s/parsed.conll && \"\\\n \"python %(parse_path)s/%(corzu)s/extract_mables_from_conll.py \"\\\n \"%(parse_res_path)s/parsed.conll > \"\\\n \"%(parse_res_path)s/markables.txt && \"\\\n \"python %(parse_path)s/%(corzu)s/corzu.py \"\\\n \"%(parse_res_path)s/markables.txt \"\\\n \"%(parse_res_path)s/parsed.conll > \"\\\n \"%(parse_res_path)s/coref.conll \"\\\n \"&& python %(parse_path)s/%(corzu)s/conll_to_html.py \"\\\n \"%(parse_res_path)s/coref.conll > \"\\\n \"%(parse_res_path)s/coref.html\" % {'corzu':CorZu_type, 'type':input_type, 'filename': doc,'parse_path':config.PARSER_PATH, 'parse_res_path':config.PARSE_RESULTS_PATH}\n \n\n # 
Execute\n process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n stdout, stderr = process.communicate()\n \n # Catch parsing errors from ParZu or CorZu by checking for output files\n if not os.path.isfile(config.PARSE_RESULTS_PATH+'/parsed.conll'):\n raise IOError('Sorry, CorZu failed. Coref.conll file does not exist. Try another document.')\n else:\n with open(config.PARSE_RESULTS_PATH+'/parsed.conll', \"r\") as infile:\n infile = infile.read()\n if len(infile)<1:\n raise IOError('Sorry, ParZu failed. No parsing results.')\n \n if not os.path.isfile(config.PARSE_RESULTS_PATH+'/coref.conll'):\n raise IOError('Sorry, CorZu failed. Coref.conll file does not exist. Try another document.')\n \n \n # Open the parsed result file, split at sentence boarders and get single sentences\n with open(config.PARSE_RESULTS_PATH+'/coref.conll', \"r\") as infile:\n infile = infile.read()\n sentences = infile.split('\\n\\n')[:-1]\n \n # If filename for saving is given, save parsing results\n if save_in_dir:\n shutil.copy2(config.PARSE_RESULTS_PATH+'/coref.conll', save_in_dir)\n \n return sentences", "def _assemble_conversion(stmt):\n reactants = _join_list([_assemble_agent_str(r) for r in stmt.obj_from])\n products = _join_list([_assemble_agent_str(r) for r in stmt.obj_to])\n\n if stmt.subj is not None:\n subj_str = _assemble_agent_str(stmt.subj)\n stmt_str = '%s catalyzes the conversion of %s into %s' % \\\n (subj_str, reactants, products)\n else:\n stmt_str = '%s is converted into %s' % (reactants, products)\n return _make_sentence(stmt_str)", "def split_into_sentences(text):\n if \".)\" in text: text = text.replace(\".)\", \"<prd>)\")\n sentences = text.split(\".\")\n text = text.replace(\"<prd>\", \".\")\n for s in sentences:\n s = s.replace(\"<prd>\", \".\")\n return sentences", "def make_sentence(self):\n self.sentence = _make_sentence(self.sentence)", "def find_conclusion_sentences(self):\n for sentence in self.knowledge:\n new_mines=sentence.known_mines()\n new_safes=sentence.known_safes()\n if len(new_mines)>0:\n for mine in new_mines:\n self.mark_mine(mine)\n elif len(new_safes)>0:\n for safe in new_safes:\n self.mark_safe(safe)\n else:\n continue #skips next lines and goes to next sentence\n # if known_mines or safes is successful, all cells are marked mine or safe\n # then \"concluded\" sentence can be removed from knowledge base\n self.knowledge.remove(sentence) # only runs when if or elif is true because of \"continue\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a listener function for the given target.
def listen(
    target: Any, identifier: str, fn: Callable[..., Any], *args: Any, **kw: Any
) -> None:

    _event_key(target, identifier, fn).listen(*args, **kw)
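A hedged usage sketch matching the signature above; the target object and event name are assumptions chosen only for illustration:

    def on_connect(*event_args):
        print("connected")

    listen(some_engine, "connect", on_connect)  # some_engine: an assumed event target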
[ "def listens_for(\n target: Any, identifier: str, *args: Any, **kw: Any\n) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n\n def decorate(fn: Callable[..., Any]) -> Callable[..., Any]:\n listen(target, identifier, fn, *args, **kw)\n return fn\n\n return decorate", "def listen(self, target):\n LOG.debug(\"Listen to %s\", target)\n listener = ProtonListener(self)\n self._ctrl.add_task(ListenTask(target, listener))\n return listener", "def add_listener(self, listener):\n self.__listeners.append(listener)", "def add_event_listener(self, name, func=None):\n\n def decorator(func):\n self._listeners.setdefault(name, []).append(func)\n return func\n\n if func is not None:\n decorator(func)\n return None\n else:\n return decorator", "def listen(self, executor, target, callback):\n executor.listen(target, callback)\n self._cancel_cb = lambda: executor.stop_listening(target, callback)", "def register_listener(self, *listeners):\n for listener in listeners:\n for event_type in listener.event_callbacks:\n self.__dispatchers[event_type].register(listener)", "def registerProxyListener(self, listener):\n # type: (IProxyListener) -> ()", "def register_operation(self, op_name, target_fn):\n self._operations[op_name] = target_fn", "def on(self, event, listener):\n self.__events[event].append(listener)\n self.emit('newListener', event, listener)", "def on(self, event, f=None):\n\n def _on(f):\n # Fire 'new_listener' *before* adding the new listener!\n self.emit('new_listener', event, f)\n\n # Add the necessary function\n evts = event.split(\" \")\n for evt in evts:\n self._events[evt].append(f)\n\n # Return original function so removal works\n return f\n\n if f is None:\n return _on\n else:\n return _on(f)", "def register(kind, listener):\n assert isinstance(listener, Listener)\n kind = _guard_kind(kind)\n _registered[kind].append(listener)", "def set_listener(self, listener):\n self.__listener = listener", "def register(target: Target) -> Target:\n from . import cli\n\n global ALL_TARGETS\n\n ALL_TARGETS += (target,)\n cli.TARGETS_TYPE.choices += (target.__name__,) # type: ignore\n return target", "def attach_listener(self, route, callback):\n pass", "def add_listener(self, cb, event):\n self.event_listeners[event].append(cb)", "def add_event_listener(\n self,\n name: Optional[str],\n fn: Callable[[str, Mapping[str, Any]], Awaitable[Any]]\n ) -> Callable[[], None]:\n self.event_listeners[name].append(fn)\n\n return lambda: self.event_listeners[name].remove(fn)", "def registerHttpListener(self, listener):\n # type: (IHttpListener) -> None", "def register_listener(self):\n self._clear_node_listener = self._node.add_on_changed_listener(\n self._on_node_updated\n )", "def on(self, event, listener, calls=0):\r\n new_listener = Listener(0, listener, calls)\r\n if event in self.__events:\r\n self.__events[event].append(new_listener)\r\n else:\r\n self.__events[event] = [new_listener]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate a function as a listener for the given target + identifier.
def listens_for(
    target: Any, identifier: str, *args: Any, **kw: Any
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:

    def decorate(fn: Callable[..., Any]) -> Callable[..., Any]:
        listen(target, identifier, fn, *args, **kw)
        return fn

    return decorate
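The decorator form, again with an assumed target object and event name:

    @listens_for(some_engine, "connect")  # some_engine: an assumed event target
    def on_connect(*event_args):
        print("connected")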
[ "def listen(\n target: Any, identifier: str, fn: Callable[..., Any], *args: Any, **kw: Any\n) -> None:\n\n _event_key(target, identifier, fn).listen(*args, **kw)", "def listen(name, highlander=False, singleton=False):\n def decorator(func):\n\n listeners = _events.get(name, [])\n if highlander:\n listeners = (func,)\n else:\n assert not isinstance(listeners, tuple), \\\n 'A listener has already claimed this event {}'.format(name)\n listeners.append(func)\n _events[name] = listeners\n\n wrapper, wrapped = undecorate(func)\n\n @functools.wraps(wrapped)\n def wrap(*args, **kwargs):\n result = wrapped(*args, **kwargs)\n if singleton:\n _events[name].remove(func)\n return result\n\n return wrapper(wrap)\n return decorator", "def add_event_listener(self, name, func=None):\n\n def decorator(func):\n self._listeners.setdefault(name, []).append(func)\n return func\n\n if func is not None:\n decorator(func)\n return None\n else:\n return decorator", "def on(self, event, f=None):\n\n def _on(f):\n # Fire 'new_listener' *before* adding the new listener!\n self.emit('new_listener', event, f)\n\n # Add the necessary function\n evts = event.split(\" \")\n for evt in evts:\n self._events[evt].append(f)\n\n # Return original function so removal works\n return f\n\n if f is None:\n return _on\n else:\n return _on(f)", "def addHandler(identifier, handler): #@NoSelf", "def on(obj, event=None):\n def wrap(funk):\n obj.on(event or funk.__name__, funk)\n return funk\n return wrap", "def add_event_handler(self, func, mask=(IN_ATTRIB | IN_CREATE)):\n self.handle_hook.append((func, mask))", "def build_decorator(cls, what):\n def _decorator(self, func):\n \"\"\"\n Actual hook decorator\n \"\"\"\n HookRegistry().register(self._when, what, func) # pylint: disable=protected-access\n return func\n _decorator.__name__ = _decorator.fn_name = what\n setattr(cls, what, _decorator)", "def _role_listener_deco(\n *, priority: int = 0\n) -> Callable[[Callable[..., T]], Callable[..., T]]:\n\n def decorator(func: Callable[..., T]) -> Callable[..., T]:\n func._listener_priority = priority # type: ignore[attr-defined]\n return func\n\n return decorator", "def attach(object, name):\n def decorator(func):\n setattr(object, name, func)\n return func\n return decorator", "def extends(id):\n def wrapper(fn):\n fn._extension_point = id\n return fn\n return wrapper", "def listener(self, *packet_types, **kwds):\n def listener_decorator(handler_func):\n self.register_packet_listener(handler_func, *packet_types, **kwds)\n return handler_func\n\n return listener_decorator", "def register_trigger(self, event_name):\r\n\r\n def decorator(f):\r\n self.add_trigger(event_name, f)\r\n return f\r\n\r\n return decorator", "def create_handle_decorator(registry, filter=Always()):\n assert isinstance(filter, CLIFilter)\n\n def handle(*keys, **kw):\n save_before = kw.pop('save_before', lambda e: True)\n\n # Chain the given filter to the filter of this specific binding.\n if 'filter' in kw:\n kw['filter'] = kw['filter'] & filter\n else:\n kw['filter'] = filter\n\n def decorator(handler_func):\n @registry.add_binding(*keys, **kw)\n @wraps(handler_func)\n def wrapper(event):\n if save_before(event):\n event.cli.current_buffer.save_to_undo_stack()\n handler_func(event)\n return handler_func\n return decorator\n return handle", "def add_event_listener(\n self,\n name: Optional[str],\n fn: Callable[[str, Mapping[str, Any]], Awaitable[Any]]\n ) -> Callable[[], None]:\n self.event_listeners[name].append(fn)\n\n return lambda: 
self.event_listeners[name].remove(fn)", "def set_hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n @wraps(f)\n def set_hook_wrapper(self, **kwargs):\n f(self, **kwargs)\n self.attribution_model.is_hooked = True\n\n return set_hook_wrapper", "def delegate(attribute_name, method_names):\n # hack for python 2.7 as nonlocal is not available\n d = {\n 'attribute': attribute_name,\n 'methods': method_names\n }\n\n def decorator(cls):\n attribute = d['attribute']\n if attribute.startswith(\"__\"):\n attribute = \"_\" + cls.__name__ + attribute\n for name in d['methods']:\n setattr(cls, name, eval(\"lambda self, *a, **kw: \"\n \"self.{0}.{1}(*a, **kw)\".format(attribute, name)))\n return cls\n return decorator", "def endpoint_id_arg(*args, **kwargs):\n\n def decorate(f, **kwargs):\n \"\"\"\n Work of actually decorating a function -- wrapped in here because we\n want to dispatch depending on how this is invoked\n \"\"\"\n metavar = kwargs.get(\"metavar\", \"ENDPOINT_ID\")\n f = click.argument(\"endpoint_id\", metavar=metavar, type=click.UUID)(f)\n return f\n\n return detect_and_decorate(decorate, args, kwargs)", "def decorator(ctx, target_file_name: list):\n cli_factory: CliFactory = ctx.obj['factory']\n if len(target_file_name) == 0:\n target_file_name = [\"__init__\"]\n # set initial state\n append_functions = 0\n append_content = []\n\n # append target functions\n for file_name in target_file_name:\n _export_decorator: azfs.az_file_client.ExportDecorator = cli_factory.load_export_decorator(file_name)\n newly_added, tmp_append_content = _load_functions(_export_decorator)\n append_functions += newly_added\n append_content.extend(tmp_append_content)\n\n # read `az_file_client.py`\n az_file_client_content = _read_az_file_client_content()\n\n # append newly added content\n az_file_client_content.extend(append_content)\n\n # over-write `az_file_client.py`\n _write_az_file_client_content(az_file_client_content)\n click.echo(f\"{append_functions} functions are successfully added.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the given option. Might raise a ValueError.
def _set_option(msat_config, name, value):
    check = mathsat.msat_set_option(msat_config, name, value)
    if check != 0:
        raise PysmtValueError("Error setting the option '%s=%s'" % (name,value))
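A minimal sketch of calling this helper, assuming the standard mathsat Python bindings and an illustrative option name:

    cfg = mathsat.msat_create_config()            # assumed binding call
    _set_option(cfg, "model_generation", "true")  # raises PysmtValueError if the option is rejected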
[ "def set(self, *args, **kargs):\n self.set_option(*args, **kargs)", "def set_option(self, key, value):\n self.options.set(key, value)", "def set_server_option(self, option): # real signature unknown; restored from __doc__\n pass", "def setOption(name, value):\n \n if _fwk is not None:\n logging.warning(\"psana.setOption() called after DataSource(), has no effect\")\n \n _options[name] = str(value)", "def set_option(self, option, value):\n var_name = option['value'].replace('_values', '')\n setattr(self, var_name, value)\n # Logs.debug(\"set option \",self.get_option(option))", "def set(self, section, option, value=None):\r\n self._validate_value_types(option=option, value=value)\r\n super(ConfigParser, self).set(section, option, value)", "def __set_opt(option):\n return \"--\" + option", "def setsockopt(self, option, value):\n raise NotImplementedError", "def set_option(self, section, option, value, write=False):\n if not self.has_section(section):\n self.add_section(section)\n if isinstance(value, basestring):\n value = to_unicode(value)\n if value.startswith(' ') or value.endswith(' '):\n value = \"%(ws)s%(value)s%(ws)s\" % {\"value\" : value,\n \"ws\" : self.mrk_ws}\n RawConfigParser.set(self, section, str(option), value)\n if write:\n self.write()", "def set_option(self, varname, value):\n option = self.get_option_by_varname(varname)\n option.set_value(value)", "def set(self, section, option, value):\r\n if value.__class__ != str:\r\n value = str(value) # this will prevent floating point inaccuracies from being saved to file\r\n else:\r\n value = repr(value)\r\n if not self.has_section(section):\r\n raise ConfigParser.NoSectionError(section)\r\n if not self.has_option(section, option):\r\n raise ConfigParser.NoOptionError(option, section)\r\n ConfigParser.RawConfigParser.set(self, section, option, value)", "def set_string_option(self, option, value, default, fmt):\n\n if value is None:\n self._options[option] = (value, default)\n elif type(value) is types.StringType:\n self._options[option] = (value, fmt % value)\n else:\n Errors.OptionError('%s=%s' % (option, value,))", "def set_option(a_option_name, a_value):\n if a_value is None or str(a_value) == \"\":\n execute(\"let &\" + a_option_name + \" = \\\"\\\"\")\n else:\n execute(\"let &\" + a_option_name + \" = \\\"\" + str(a_value) + \"\\\"\")", "def set_option_provider(self, option_provider):\n self.__option_provider = option_provider", "def test_set_option(self, debug_session, tdevice):\n debug_session.connect()\n\n debug_session.set_option(tdevice[\"option\"], True)", "def _do_option(self, line):\n if line.startswith('option verbosity'):\n self._verbosity = int(line[len('option verbosity '):])\n self._write('ok')\n else:\n self._write('unsupported')", "def set_option(self, key: str, value: aiowamp.WAMPType) -> None:\n if self.options is None:\n self.options = {key: value}\n return\n\n self.options[key] = value", "def setoption(self, name, value):\n\n if name.upper() == \"STAGECERTIFICATEFILE\":\n if not(self._validate_certificatefile(\"STAGECERTIFICATEFILE\", value)):\n return True # value found, but not set\n else:\n # reset all key/cert data, it might change now\n self.privatekey = None\n self.fingerprint = None\n self.certificate = None\n self.publickeyxml = None\n\n if name.upper() == \"TIMEOUT\" and (not isint(value) or int(value) < 1 or int(value) > 100):\n print_error(\"TIMEOUT should be 1 <= TIMEOUT <= 100\")\n return True # value found, but not set\n if name.upper() == \"RETRIES\" and (not isint(value) or int(value) < 0 
or int(value) > 100):\n print_error(\"RETRIES should be 0 <= RETRIES <= 100\")\n return True # value found, but not set\n\n return ModuleBase.setoption(self, name, value)", "def setOption(self, key, value):\n if self.readyMoves:\n log.warning(\n \"Options set after 'readyok' are not sent to the engine\",\n extra={\"task\": self.defname},\n )\n if key == \"cores\":\n self.optionQueue.append(\"cores %s\" % value)\n elif key == \"memory\":\n self.optionQueue.append(\"memory %s\" % value)\n elif key.lower() == \"ponder\":\n self.__setPonder(value == 1)\n else:\n self.optionQueue.append(\"option %s=%s\" % (key, value))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
After a call to solve() yielding UNSAT, returns the unsat core as a set of formulae
def get_unsat_core(self):
    self._check_unsat_core_config()
    if self.options.unsat_cores_mode == "all":
        terms = mathsat.msat_get_unsat_core(self.msat_env())
        if terms is None:
            raise InternalSolverError(
                mathsat.msat_last_error_message(self.msat_env()))
        return set(self.converter.back(t) for t in terms)
    else:
        return self.get_named_unsat_core().values()
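A hedged sketch of typical use through a pySMT MathSAT solver; solver construction is elided and enabling unsat cores is assumed:

    from pysmt.shortcuts import Symbol, Not

    solver = ...  # MathSAT-backed pySMT solver with unsat cores enabled (creation elided)
    a = Symbol("a")
    solver.add_assertion(a)
    solver.add_assertion(Not(a))
    if not solver.solve():
        core = solver.get_unsat_core()  # e.g. {a, (! a)}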
[ "def stickel_method(U: Set[Equation], ac_symbol: Function) -> SubstituteTerm:\n # Gather all variables for fresh var calculation\n ALL_VARS = vars_from_equations(U)\n original_from_generalized : Dict[Variable, Term] = dict()\n\n def generalize_term(t: Term) -> Variable:\n \"\"\"\n Returns a generalized variable for every\n term that's not a variable.\n \"\"\"\n vt = t\n if isinstance(t, Variable):\n original_from_generalized[t] = t\n else:\n vt = None\n for gen_var, og_term in original_from_generalized.items():\n if t == og_term:\n vt = gen_var\n break\n if vt is None:\n vt = fresh_variable(ALL_VARS)\n ALL_VARS.add(vt)\n original_from_generalized[vt] = t\n return vt\n\n var_count = Counter()\n # Go through each equation\n for e in U:\n LS, RS = flatten_equation(e, ac_symbol)\n # print(\"LS\", LS)\n # print(\"RS\", RS)\n\n # Generalize left and right sides\n LS_VARS = [generalize_term(t) for t in LS]\n RS_VARS = [generalize_term(t) for t in RS]\n\n # Calculate multiplicity\n VARS_IN_EQ = set(LS_VARS).union(set(RS_VARS))\n for x in VARS_IN_EQ:\n num = LS_VARS.count(x) - RS_VARS.count(x)\n var_count[x] += num\n\n # Create the equation with variable coeficients\n # being the counts above\n sympy_expression = 0\n var_map: Dict[sympy.core.Symbol, Variable] = dict()\n for x, count in var_count.items():\n # Construct Z3 variable\n sympy_var = symbols(x.symbol + \"_0\", integer=True, positive=True)\n var_map[sympy_var] = x\n\n # Construct part of expression\n sympy_expression += count * sympy_var\n\n\n # Determine the ordering of the diophantine solver output\n sympy_ordering = list(sympy_expression.expand(force=True).free_symbols)\n sympy_ordering.sort(key=default_sort_key)\n\n # Solve diophantine equation\n # print(original_from_generalized)\n # print(sympy_expression)\n basis_vector = diop_linear(sympy_expression)\n basis_tables = generate_basis_table(basis_vector)\n\n sigma = False\n while not sigma:\n # Generate the basis table\n basis_table = next(basis_tables)\n # print(basis_table)\n\n # Create variables representing each row\n row_vars = n_fresh_variables(ALL_VARS, len(basis_table))\n ALL_VARS = ALL_VARS.union(set(row_vars))\n\n # Craft intermediate substitution from basis table\n sub_basis: Dict[Variable, Term] = dict()\n for column, sympy_var in enumerate(sympy_ordering):\n term = None\n for i, row in enumerate(basis_table):\n if row[column] == 0:\n continue\n row_var = row_vars[i]\n for _ in range(row[column]):\n if term is None:\n term = row_var\n else: # z_2 + z_4\n term = ac_symbol(term, row_var)\n sub_basis[var_map[sympy_var]] = term\n\n # [TODO] [IN PROGRESS] Unify variables in the generalized terms with\n # their counterparts in the original terms.\n # print(sub_basis)\n new_eqs = set()\n for gen_var, basis_var in sub_basis.items():\n rhs = original_from_generalized[gen_var]\n new_eqs.add(Equation(\n basis_var,\n rhs\n ))\n sigma = syntactic_unification(new_eqs)\n\n\n # Currently returning one posisble unifier but we can keep generating\n # using the basis vector\n return {sigma}", "def solve(self):\r\n\r\n # A pre-allocation for the matrix used to solve the system\r\n matrix = []\r\n\r\n # Each unknown must be put into a list so sympy can solve it\r\n unknowns_list = list(self.dict_of_variables.keys())\r\n\r\n # Each equation (except for the 'Total') will be appended to the matrix. 
This is done to allow for the user\r\n # or the code (when this feature is added) to easily double check the variables for accuracy\r\n for key, equation in self.equations_dict.items():\r\n if key != 'Total':\r\n matrix.append(equation)\r\n\r\n # sympy does it's thing and returns a dict in the form of {symbol: solution}\r\n solutions = sp.solve(matrix, unknowns_list, dict=True)\r\n\r\n # This loop updates the dict_of_variables with the newly solved values for each\r\n for solutions_set in solutions:\r\n\r\n # This is done because the solutions are given in a list containing a dictionary: [{}], which is weird\r\n for count in range(len(solutions_set)):\r\n\r\n # The newly solved variables can be used to solve other ControlVolumes\r\n self.dict_of_variables[unknowns_list[count]] = solutions_set[unknowns_list[count]]", "def calc_a_sat(self):\n\n # get the flux data at 100kW\n flux_data = extract_mcnp('n', self.experiment.P)\n\n # sum to only energy dependent (exclude the first cos group)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n\n # get response functions\n responses = response_data()\n\n # this pulls only the rfs for the gold foil tube\n response_functions = []\n for name, response in responses.items():\n if 'au' in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n\n # fold the rfs and the flux together, convert to uCi / atom\n a_sat_atom = np.sum(response_functions * flux, axis=1) * (1 / 3.7E4)\n\n # only care about the ones that match the experiment\n self.a_sat_atom = a_sat_atom[:self.experiment.n]\n\n return", "def components(self):\n if self.connective == None:\n ret = (self, None)\n if self.connective == \"not\":\n if self.subformula1.subformula2 == None:\n # literal\n ret = (self, None)\n else:\n (comp1, comp2) = self.subformula1.components()\n ret = (comp1.negate(), comp2.negate())\n else:\n if self.connective == \"and\":\n ret = (self.subformula1, self.subformula2)\n elif self.connective == \"or\":\n ret = (self.subformula1, self.subformula2)\n elif self.connective == \"impl\":\n ret = (self.subformula1.negate(), self.subformula2)\n elif self.connective == \"implr\":\n ret = (self.subformula1, self.subformula2.negate())\n elif self.connective == \"nand\":\n ret = (self.subformula1.negate(), self.subformula2.negate())\n elif self.connective == \"nor\":\n ret = (self.subformula1.negate(), self.subformula2.negate())\n elif self.connective == \"nimpl\":\n ret = (self.subformula1, self.subformula2.negate())\n elif self.connective == \"nimplr\":\n ret = (self.subformula1.negate(), self.subformula2)\n\n return copy.deepcopy(ret)", "def all_equations(self):\n return self._r_all_equations(self)", "def main(u):\n print '=== Testing exact solution: %s ===' % u(t)\n print \"Initial conditions u(0)=%s, u'(0)=%s:\" % \\\n (u(t).subs(t, 0), sym.diff(u(t), t).subs(t, 0))\n # Method of manufactured solution requires fitting f\n global f # source term in the ODE\n f = sym.simplify(ode_source_term(u))\n # Residual in discrete equations (should be 0)\n print 'residual step1:', residual_discrete_eq_step1(u)\n print 'residual:', residual_discrete_eq(u)", "def test_exam_lsolve1(self):\n \n x = symbol('x'); \n eq = (3*x+5 == numeric(8));\n return [str(item) for item in lsolve([eq], [x])];", "def unify(state: ProofState,\n equations: List[Tuple[Term, Term]],\n only_schematic_vars: bool = True) -> Optional[Tuple[Mapping[str, Term], bool]]:\n\n applied_notation = False\n\n def unify_modulo_notation(state: ProofState, left: Term, right: Term) -> 
Optional[List[Tuple[Term, Term]]]:\n \"\"\"\n Additional unification rules\n if the heads of left and right are different,\n try to apply notation axiom to one of them\n \"\"\"\n\n nonlocal applied_notation\n\n if not isinstance(left, Application) or \\\n not isinstance(right, Application):\n return None\n\n result = NotationProver.rewrite_to_same_head_symbol(state.composer, left, right, with_proof=False)\n if result is None:\n return None\n _, left, _, right = result\n\n assert left.symbol == right.symbol, f\"{left} != {right}\"\n applied_notation = True\n\n return [(left, right)]\n\n subst = Unification.unify(\n equations,\n variable_class=SchematicVariable if only_schematic_vars else Metavariable,\n # newer schematic variable are used as substitution variables\n # with higher priority than older schematic variables\n variable_order=(lambda v1, v2: v1 > v2) if only_schematic_vars else (lambda v1, v2: True),\n # add extra unification algorithm for notations\n additional_unifier=lambda t1, t2: unify_modulo_notation(state, t1, t2),\n )\n\n if subst is not None:\n if not Tactic.check_schematic_substitution(state, subst):\n return None\n return subst, applied_notation\n\n return None", "def get_named_unsat_core(self):\n self._check_unsat_core_config()\n if self.options.unsat_cores_mode == \"named\":\n\n assumptions = mathsat.msat_get_unsat_assumptions(self.msat_env())\n pysmt_assumptions = set(self.converter.back(t) for t in assumptions)\n\n res = {}\n n_ass_map = self._named_assertions_map()\n cnt = 0\n for key in pysmt_assumptions:\n if key in n_ass_map:\n (name, formula) = n_ass_map[key]\n if name is None:\n name = \"_a_%d\" % cnt\n cnt += 1\n res[name] = formula\n return res\n\n else:\n return dict((\"_a%d\" % i, f)\n for i,f in enumerate(self.get_unsat_core()))", "def satdemo(trace=None):\n\n print()\n print((\"*\" * mult))\n print(\"Satisfiers Demo\")\n print((\"*\" * mult))\n\n folmodel()\n\n formulas = [\n \"(boy x)\",\n \"(x = x)\",\n \"((boy x) or (girl x))\",\n \"((boy x) and (girl x))\",\n \"(love x adam)\",\n \"(love adam x)\",\n \"(not (x = adam))\",\n \"some z22. (love z22 x)\",\n \"some y. (love x y)\",\n \"all y. ((girl y) implies (love y x))\",\n \"all y. ((girl y) implies (love x y))\",\n \"all y. ((girl y) implies ((boy x) and (love x y)))\",\n \"((boy x) and all y. ((girl y) implies (love y x)))\",\n \"((boy x) and all y. ((girl y) implies (love x y)))\",\n \"((boy x) and some y. ((girl y) and (love x y)))\",\n \"((girl x) implies (dog x))\",\n \"all y. ((dog y) implies (x = y))\",\n \"(not some y. (love x y))\",\n \"some y. 
((love y adam) and (love x y))\",\n ]\n\n if trace:\n print(m2)\n\n for fmla in formulas:\n g2.purge()\n print(\n (\n \"The satisfiers of '%s' are: %s\"\n % (fmla, m2.satisfiers(fmla, \"x\", g2, trace))\n )\n )", "def get_equilibrium_T(self):\n return self.equations.get_equilibrium_T()", "def _calculate_inv_trans_equations(self):\n x1, x2, x3 = symbols(\"x1, x2, x3\", cls=Dummy, reals=True)\n x, y, z = symbols(\"x, y, z\", cls=Dummy)\n\n equations = self._transformation(x1, x2, x3)\n\n solved = solve([equations[0] - x,\n equations[1] - y,\n equations[2] - z], (x1, x2, x3), dict=True)[0]\n solved = solved[x1], solved[x2], solved[x3]\n self._transformation_from_parent_lambda = \\\n lambda x1, x2, x3: tuple(i.subs(list(zip((x, y, z), (x1, x2, x3)))) for i in solved)", "def compute_Manufactured_solution(Expression='(1/(sigma*sqrt(2*pi)))*exp(-((t-mu)**2)/(2*sigma**2))',\n sigma=0.5, mu=0.5, Domain='[-1.5,2.5]',\n solver='euler', Nsteps = 100, erase='No' \n ):\n \n from sympy import symbols, diff, integrate, Rational, lambdify, exp, sin, cos\n t = symbols('t')\n #### Turn strings into python expressions ####\n f = eval(Expression)\n [t0, tend] = eval(Domain)\n solver = eval(solver)\n #### Perform needed differentiations based on the differential equation ####\n dfdt = diff(f, t)\n d2fdt = diff(dfdt, t)\n d3fdt = diff(d2fdt, t)\n RHS = d3fdt + dfdt*d2fdt + f\n #### Create Python functions of f, RHS and needed differentiations of f ####\n f = lambdify([t], f)\n dfdt = lambdify([t], dfdt)\n d2fdt = lambdify([t], d2fdt)\n RHS = lambdify([t], RHS)\n \n #### Discretize time ####\n time = np.linspace(t0, tend, Nsteps + 1)\n \n def func(y, t):\n \"\"\" Function that returns the dfn/dt of the differential equation f + f''*f + f''' = RHS\n as a system of 1st order equation; f = f1 \n f1' = f2 \n f2' = f3\n f3' = RHS - f1 - f2*f3\n \n Args:\n y(array): solutian array [f1, f2, f3] at time t\n t(time): current time\n\n Returns:\n yout(array): differantiation array [f1', f2', f3'] at time t\n \"\"\"\n yout = np.zeros_like(y)\n yout[:] = [y[1], y[2], RHS(t) - y[0]- y[1]*y[2]]\n \n return yout\n \n #### Solve for fnumeric and evaluate fanalytic on the domain: ####\n y0 = np.array([f(t0), dfdt(t0), d2fdt(t0)]) #initial values calculated from the differentiations of f at t0\n u = solver(func, y0, time)\n fnumeric = u[:,0]\n fanalytic = np.zeros_like(fnumeric)\n i = 0\n for t in time:\n fanalytic[i] = f(t)\n i = i + 1\n \n if erase=='Yes':\n plt.figure()\n print \"hello\"\n \n #### Create plots and generate BytesIo plot data ####\n plt.figure()\n legends = []\n plt.plot(time, fanalytic,'g')\n \n \n plt.plot(time, fnumeric, 'r--')\n analyticlegend = 'fanalytic'#, sigma = ' + str(sigma) + ', mu = ' + str(mu)\n numericlegend = 'fnumeric'#, solver = ' + solvername + ', Nsteps = ' + NstepsStr\n legends.append(analyticlegend)\n legends.append(numericlegend)\n plt.title('Normal distribution')\n plt.xlabel('t')\n plt.ylabel('f(t)')\n plt.legend(legends,loc='best',frameon=False)\n \n # Make Matplotlib write to BytesIO file object and grab\n # return the object's string\n from io import BytesIO\n figfile = BytesIO()\n plt.savefig(figfile, format='png')\n figfile.seek(0) # rewind to beginning of file\n import base64\n figdata_png = base64.b64encode(figfile.getvalue())\n figfile = BytesIO()\n plt.savefig(figfile, format='svg')\n figfile.seek(0)\n figdata_svg = '<svg' + figfile.getvalue().split('<svg')[1]\n figdata_svg = unicode(figdata_svg,'utf-8')\n return figdata_png#, figdata_svg", "def solve_equations(self, period):\n 
___SOLVE_EQUATIONS___", "def Resolve(splu,RHS):\n\t# array 2D -> array 1D\n\tf2 = RHS.ravel()\n\n\t# Solving the linear system\n\tx = lg.lsqr(splu.tocsc(),f2)\n\n\treturn x[0].reshape(RHS.shape)", "def _coefficients(self):\n\n self.first_eq = self.general_solution.subs(t, self.t0) - self.x0\n self.second_eq = self.general_solution.subs(t, self.t1) - self.x1\n\n self.__make_Cs()\n self.__make_equations()\n\n coefficients = solve(self.equations, self.Cs)\n self.coefficients = coefficients", "def return_quadratic_cost_function_expansion_variables(self):\n # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.\n l = list(\n map(\n lambda x,u: u.T * self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.\n lx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.\n lu = list(\n map(\n lambda x,u: self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.\n lux = list(\n map(\n lambda x,u: np.matrix(np.zeros((1,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.\n lxu = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.\n luu = list(\n map(\n lambda x,u: self.R*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.\n lxx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n return(l,lx,lu,lux,lxu,luu,lxx)", "def quadratic_solver(coefficients):\n complex_nums=[complex(i) for i in coefficients] #Make sure we can get all soln, not just reals\n a=complex_nums[0]\n b=complex_nums[1]\n c=complex_nums[2]\n\n lead = -b/a\n discriminant = np.sqrt(b**2-4*a*c)\n tail = discriminant/a\n\n x1 = lead+tail\n x2 = lead-tail\n return x1 #How do I return both x1 and x2", "def solve(equation):\r\n\r\n if not validator.is_valid(equation):\r\n raise Invalid(\"not valid\")\r\n #make haircut for the minuses\r\n equation = solver_helper.minuses_haircut(equation)\r\n #strip the expression from it's brackets if necessary\r\n # if an expression needs to be striped twice then it's\r\n # invalid equation\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n equation = solver_helper.strip_outer_brackets(equation)\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n raise Exception(\"unnecessary brackets on an expression: (\" +\r\n str(equation) + \")\")\r\n #make a list\r\n lst = solver_helper.make_a_list(equation)\r\n #(on the list)\r\n\r\n # while there are expressions, solve them\r\n # (expression is an equation in between brackets)\r\n\r\n i = finder.find_expression(lst)\r\n while i != -1:\r\n res = solve(lst[i])\r\n lst[i] = res\r\n i = finder.find_expression(lst)\r\n\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n #while len(lst) > 1 or lst[0] is not an expression\r\n #find the strongest operator and operate\r\n 
lst = clear_from_operators(lst)\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n if len(lst) > 1:\r\n raise Exception(\"an operator is missing between two expressions\")\r\n return lst[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
After a call to solve() yielding UNSAT, returns the unsat core as a dict of names to formulae
def get_named_unsat_core(self): self._check_unsat_core_config() if self.options.unsat_cores_mode == "named": assumptions = mathsat.msat_get_unsat_assumptions(self.msat_env()) pysmt_assumptions = set(self.converter.back(t) for t in assumptions) res = {} n_ass_map = self._named_assertions_map() cnt = 0 for key in pysmt_assumptions: if key in n_ass_map: (name, formula) = n_ass_map[key] if name is None: name = "_a_%d" % cnt cnt += 1 res[name] = formula return res else: return dict(("_a%d" % i, f) for i,f in enumerate(self.get_unsat_core()))
[ "def stickel_method(U: Set[Equation], ac_symbol: Function) -> SubstituteTerm:\n # Gather all variables for fresh var calculation\n ALL_VARS = vars_from_equations(U)\n original_from_generalized : Dict[Variable, Term] = dict()\n\n def generalize_term(t: Term) -> Variable:\n \"\"\"\n Returns a generalized variable for every\n term that's not a variable.\n \"\"\"\n vt = t\n if isinstance(t, Variable):\n original_from_generalized[t] = t\n else:\n vt = None\n for gen_var, og_term in original_from_generalized.items():\n if t == og_term:\n vt = gen_var\n break\n if vt is None:\n vt = fresh_variable(ALL_VARS)\n ALL_VARS.add(vt)\n original_from_generalized[vt] = t\n return vt\n\n var_count = Counter()\n # Go through each equation\n for e in U:\n LS, RS = flatten_equation(e, ac_symbol)\n # print(\"LS\", LS)\n # print(\"RS\", RS)\n\n # Generalize left and right sides\n LS_VARS = [generalize_term(t) for t in LS]\n RS_VARS = [generalize_term(t) for t in RS]\n\n # Calculate multiplicity\n VARS_IN_EQ = set(LS_VARS).union(set(RS_VARS))\n for x in VARS_IN_EQ:\n num = LS_VARS.count(x) - RS_VARS.count(x)\n var_count[x] += num\n\n # Create the equation with variable coeficients\n # being the counts above\n sympy_expression = 0\n var_map: Dict[sympy.core.Symbol, Variable] = dict()\n for x, count in var_count.items():\n # Construct Z3 variable\n sympy_var = symbols(x.symbol + \"_0\", integer=True, positive=True)\n var_map[sympy_var] = x\n\n # Construct part of expression\n sympy_expression += count * sympy_var\n\n\n # Determine the ordering of the diophantine solver output\n sympy_ordering = list(sympy_expression.expand(force=True).free_symbols)\n sympy_ordering.sort(key=default_sort_key)\n\n # Solve diophantine equation\n # print(original_from_generalized)\n # print(sympy_expression)\n basis_vector = diop_linear(sympy_expression)\n basis_tables = generate_basis_table(basis_vector)\n\n sigma = False\n while not sigma:\n # Generate the basis table\n basis_table = next(basis_tables)\n # print(basis_table)\n\n # Create variables representing each row\n row_vars = n_fresh_variables(ALL_VARS, len(basis_table))\n ALL_VARS = ALL_VARS.union(set(row_vars))\n\n # Craft intermediate substitution from basis table\n sub_basis: Dict[Variable, Term] = dict()\n for column, sympy_var in enumerate(sympy_ordering):\n term = None\n for i, row in enumerate(basis_table):\n if row[column] == 0:\n continue\n row_var = row_vars[i]\n for _ in range(row[column]):\n if term is None:\n term = row_var\n else: # z_2 + z_4\n term = ac_symbol(term, row_var)\n sub_basis[var_map[sympy_var]] = term\n\n # [TODO] [IN PROGRESS] Unify variables in the generalized terms with\n # their counterparts in the original terms.\n # print(sub_basis)\n new_eqs = set()\n for gen_var, basis_var in sub_basis.items():\n rhs = original_from_generalized[gen_var]\n new_eqs.add(Equation(\n basis_var,\n rhs\n ))\n sigma = syntactic_unification(new_eqs)\n\n\n # Currently returning one posisble unifier but we can keep generating\n # using the basis vector\n return {sigma}", "def get_unsat_core(self):\n self._check_unsat_core_config()\n if self.options.unsat_cores_mode == \"all\":\n\n terms = mathsat.msat_get_unsat_core(self.msat_env())\n if terms is None:\n raise InternalSolverError(\n mathsat.msat_last_error_message(self.msat_env()))\n return set(self.converter.back(t) for t in terms)\n else:\n return self.get_named_unsat_core().values()", "def solve(self):\r\n\r\n # A pre-allocation for the matrix used to solve the system\r\n matrix = []\r\n\r\n # Each unknown must be 
put into a list so sympy can solve it\r\n unknowns_list = list(self.dict_of_variables.keys())\r\n\r\n # Each equation (except for the 'Total') will be appended to the matrix. This is done to allow for the user\r\n # or the code (when this feature is added) to easily double check the variables for accuracy\r\n for key, equation in self.equations_dict.items():\r\n if key != 'Total':\r\n matrix.append(equation)\r\n\r\n # sympy does it's thing and returns a dict in the form of {symbol: solution}\r\n solutions = sp.solve(matrix, unknowns_list, dict=True)\r\n\r\n # This loop updates the dict_of_variables with the newly solved values for each\r\n for solutions_set in solutions:\r\n\r\n # This is done because the solutions are given in a list containing a dictionary: [{}], which is weird\r\n for count in range(len(solutions_set)):\r\n\r\n # The newly solved variables can be used to solve other ControlVolumes\r\n self.dict_of_variables[unknowns_list[count]] = solutions_set[unknowns_list[count]]", "def subs(self, substitution_dict):\r\n\r\n # This list is created to store equations that are no longer useful and remove them\r\n # This occurs when an equation (generally an info equation) is used in another ControlVolume which implies that\r\n # all variables in that equation have been solved and it cannot provide any new relationships to the system\r\n remove_equation_list = []\r\n\r\n # For each solved variable in substitution_dict\r\n for substitution, solution in substitution_dict.items():\r\n\r\n # The substitution must meet 3 characteristics: it must exist in the current ControlVolume as a variable,\r\n # the variable in the ControlVolume must be unknown (a sympy Symbol, not a value), and the substitution must\r\n # be solved (it itself has zero unknowns in the form of sympy Symbols)\r\n if substitution in self.dict_of_variables and type(self.dict_of_variables[substitution]) == sp.Symbol and \\\r\n len(solution.atoms(sp.Symbol)) < 1:\r\n\r\n # If this is true, then the ControlVolume can remove the variable from it's dict_of_variables as it has\r\n # already been solved and doesn't need to be solved again. 
The total unknowns decreases by one\r\n self.dict_of_variables.pop(substitution)\r\n self.unknowns -= 1\r\n\r\n # Each equation needs to substitute the unknown for it's solved solution using the sympy subs() method\r\n for key, equation in self.equations_dict.items():\r\n self.equations_dict[key] = equation.subs(substitution, solution)\r\n\r\n # This if statement checks if the equation has become irrelevant (nothing to solve, just 0)\r\n # If the equation lacks unknowns, it will be removed from the equations_dict for the ControlVolume\r\n if len(self.equations_dict[key].atoms(sp.Symbol)) == 0:\r\n remove_equation_list.append(key)\r\n\r\n # This loop removes every equation that is no longer useful\r\n for key in remove_equation_list:\r\n self.equations_dict.pop(key)\r\n\r\n # After a substitution is done, the degrees of freedom have likely changed, so the ControlVolume will update it\r\n self.degrees_of_freedom_update()", "def _solve(self):\n data = {}\n for n in xrange(NUM_SAMPLES):\n # accumulate and solve system of equations\n # component equations\n equations = reduce(list.__add__, (component.equations() for component in\n self.components), [])\n # one KCL equation per node in the circuit (excluding ground node)\n KCL = {}\n for component in self.components:\n component.KCL_update(KCL)\n equations.extend([KCL[node] for node in KCL if node is not self.gnd])\n # assert that ground voltage is 0\n equations.append([(1, self.gnd)])\n # solve system of equations\n data[n * T] = solve_equations(equations)\n # step components, providing them the solution for the current time step\n for component in self.components:\n component.step(data[n * T])\n return data", "def unify(state: ProofState,\n equations: List[Tuple[Term, Term]],\n only_schematic_vars: bool = True) -> Optional[Tuple[Mapping[str, Term], bool]]:\n\n applied_notation = False\n\n def unify_modulo_notation(state: ProofState, left: Term, right: Term) -> Optional[List[Tuple[Term, Term]]]:\n \"\"\"\n Additional unification rules\n if the heads of left and right are different,\n try to apply notation axiom to one of them\n \"\"\"\n\n nonlocal applied_notation\n\n if not isinstance(left, Application) or \\\n not isinstance(right, Application):\n return None\n\n result = NotationProver.rewrite_to_same_head_symbol(state.composer, left, right, with_proof=False)\n if result is None:\n return None\n _, left, _, right = result\n\n assert left.symbol == right.symbol, f\"{left} != {right}\"\n applied_notation = True\n\n return [(left, right)]\n\n subst = Unification.unify(\n equations,\n variable_class=SchematicVariable if only_schematic_vars else Metavariable,\n # newer schematic variable are used as substitution variables\n # with higher priority than older schematic variables\n variable_order=(lambda v1, v2: v1 > v2) if only_schematic_vars else (lambda v1, v2: True),\n # add extra unification algorithm for notations\n additional_unifier=lambda t1, t2: unify_modulo_notation(state, t1, t2),\n )\n\n if subst is not None:\n if not Tactic.check_schematic_substitution(state, subst):\n return None\n return subst, applied_notation\n\n return None", "def get_values():\n fcts = {\"arccos\": \"acos\",\n \"arcsin\": \"asin\",\n \"arctan\": \"atan\",\n \"conj\": \"conjugate\",\n \"abs\": \"Abs\",\n \"int\": \"integrate\",\n \"des\": \"apart\"\n }\n\n operators = {}\n\n constants = {\"i\": \"I\",\n \"j\": \"J\",\n \"inf\": \"oo\",\n \"ipi\": \"I*pi\",\n \"e\": \"E\"}\n\n advanced = {\"Laplace\": lambda __wild_sym__:\n 
laplace_transform(parse_expr(str(__wild_sym__)), parse_expr(\"t\"),\n parse_expr(\"s\"), noconds=True),\n \"Linv\": lambda __wild_sym__:\n inverse_laplace_transform(parse_expr(str(__wild_sym__)), parse_expr(\"s\"),\n parse_expr(\"t\"), noconds=True),\n \"step\": lambda __wild_sym__: Heaviside(__wild_sym__),\n \"dirac\": lambda __wild_sym__: DiracDelta(__wild_sym__),\n \"sym\": lambda __wild_sym__:\n Symbol(str(__wild_sym__)),\n }\n advanced[\"L\"] = advanced[\"Laplace\"]\n\n return fcts, operators, constants, advanced", "def get_solution(self):\n assert (\n self.state == self.STATE_SOLVED_OK\n ), \"solver failed, no solution available\"\n retval = {}\n if hasattr(self, \"X\") and isinstance(self.X, dict):\n for k in self.X:\n retval[k] = self.X[k].varValue\n return retval", "def get_solverinfo(self):\n sinfo = {\"state\": \"finished\"}\n if self._xml_root is not None:\n\n ############################################################\n # check solver status, number of SAT calls and program size\n ############################################################\n for message in self._xml_root.iterfind(\"message\"):\n tagtext = message.find(\"text\")\n if tagtext is not None:\n msgtxt = tagtext.text\n if msgtxt is None:\n continue;\n\n ####################\n # solver status\n ####################\n # when using mathsat, then SIGINT leads to false positives. Take into account\n # <text>SMT2 solver returned error message: \"Interrupted by signal: 2\"</text>\n if \"SMT2 solver returned error message\" in msgtxt:\n sinfo[\"state\"] = \"interrupted\"\n\n ######################\n # number of SAT calls\n ######################\n # expect: 4302 variables, 24772 clauses\n match = re.search(r\"(\\d+) variables, (\\d+) clauses\", msgtxt)\n if match:\n sinfo[\"numvariables\"] = int(match.group(1))\n sinfo[\"numclauses\"] = int(match.group(2))\n\n ######################\n # backend time\n ######################\n # expect: Runtime decision procedure: 0.461s\n match = re.search(r\"Runtime decision procedure: (\\d+\\.?\\d*)s\", msgtxt)\n if match:\n sinfo[\"runtime\"] = float(match.group(1))\n\n ######################\n # backend time\n ######################\n # expect: \"Running SMT2 QF_AUFBV using MathSAT\" or similar\n match = re.search(r\"Running (.*)\", msgtxt)\n if match:\n sinfo[\"backend\"] = match.group(1)\n\n #######################\n # size of program\n #######################\n # expect: size of program expression: 2554 steps\n match = re.search(r\"size of program expression: (\\d+) steps\", msgtxt);\n if match:\n sinfo[\"programsize\"] = int(match.group(1))\n\n # enough for now\n return sinfo", "def _decompose(poly, *symbols):\n result, indices, N = {}, {}, len(symbols)\n\n for i, sym in enumerate(symbols):\n indices[sym] = i\n\n poly = sympify(poly).expand()\n\n if poly.is_Add:\n terms = poly.args\n else:\n if poly.is_Number:\n return { (0,) * N : poly }\n else:\n terms = [ poly ]\n\n for term in terms:\n if not term.has_any_symbols(*symbols):\n coeff, monom = term, (0,) * N\n else:\n if term.is_Mul:\n factors = term.args\n else:\n factors = [ term ]\n\n coeff, monom = S.One, [0] * N\n\n for factor in factors:\n if factor.has_any_symbols(*symbols):\n if factor.is_Pow:\n if factor.exp.is_Integer:\n b, e = factor.base, factor.exp.p\n\n if b.is_Symbol and e > 0:\n monom[indices[b]] += e\n continue\n elif factor.is_Symbol:\n monom[indices[factor]] += 1\n continue\n\n raise PolynomialError(\"Can't decompose %s\" % factor)\n else:\n coeff *= factor\n\n monom = tuple(monom)\n\n if 
result.has_key(monom):\n coeff += result[monom]\n\n if not coeff:\n del result[monom]\n continue\n\n result[monom] = coeff\n\n if not result:\n return { (0,) * N : S.One }\n else:\n return result", "def _complementarities(self):\n tmp = {}\n for complementarity in self.__complementarities:\n expr = eval(\"self.solver.problem.\" + complementarity)\n tmp[complementarity] = self.solver.problem._lambdify_factory(expr.subs(self.solver.problem._subs))\n\n return tmp", "def sifts_best_unps(structure: Structure) -> Dict[str, str]:\n \n chain_to_best_unp = {}\n \n query = \"SELECT pdbid,uniprot_acc,mapping_pdb_chain FROM sifts_mappings_pdb_uniprot_best_isoforms where pdbid=%(pdbid)s ORDER BY mapping_pdb_chain\"\n with PDBMapSQLdb() as db:\n db.activate_dict_cursor()\n db.execute(query,{'pdbid': structure.id})\n for row in db.fetchall():\n chain_to_best_unp[row['mapping_pdb_chain']] = row['uniprot_acc']\n \n return chain_to_best_unp", "def get_variables(self) -> dict:\n tmp = self.formula.split()\n var = {}\n for t in tmp:\n if t not in ['!', '->', '(', ')']:\n var[t] = False\n return var", "async def inchi_to_formula(self, inchi):\n props = await self.get_props_from_inchi(inchi)\n if props:\n for prop in props:\n if prop['urn']['label'] == 'Molecular Formula':\n return prop['value']['sval']", "def parse_equation(equation: str):\n terms = parse_equation_terms(equation)\n\n # Construct standardised and code representations of the equation\n template = re.sub(r'\\s+', ' ', term_re.sub('{}', equation))\n equation = template.format(*[str(t) for t in terms])\n code = template.format(*[t.code for t in terms])\n\n # `symbols` stores the final symbols and is successively updated in the\n # loop below\n symbols = {}\n\n # `functions` keeps track of functions seen, to avoid duplicating entries\n # in `symbols`\n functions = {}\n\n for term in terms:\n symbol = Symbol(name=term.name,\n type=term.type,\n lags=term.index,\n leads=term.index,\n equation=None,\n code=None)\n\n name = symbol.name\n\n if symbol.type == Type.FUNCTION:\n # Function previously encountered: Test for equality against the\n # previous entry\n if name in functions:\n assert symbol == functions[name]\n # Otherwise, store\n else:\n symbols[name] = symbol\n functions[name] = symbol\n continue\n\n # Update endogenous variables with the equation and code information\n # from above\n if symbol.type == Type.ENDOGENOUS:\n symbol = symbol._replace(equation=equation, code=code)\n\n symbols[name] = symbols.get(name, symbol).combine(symbol)\n\n return list(symbols.values())", "def main(u):\n print '=== Testing exact solution: %s ===' % u(t)\n print \"Initial conditions u(0)=%s, u'(0)=%s:\" % \\\n (u(t).subs(t, 0), sym.diff(u(t), t).subs(t, 0))\n # Method of manufactured solution requires fitting f\n global f # source term in the ODE\n f = sym.simplify(ode_source_term(u))\n # Residual in discrete equations (should be 0)\n print 'residual step1:', residual_discrete_eq_step1(u)\n print 'residual:', residual_discrete_eq(u)", "def transform_into_CNF(dicts):\n\n terminals, rules = dicts\n\n for terminal_LHS in terminals.keys():\n if len(terminal_LHS.split(\" \")) > 1:\n raise NotImplementedError\n\n # variable used in order to track the use of intermediary rules (see\n # normalization)\n normalization_tracker = 0\n\n # it may be the case that the normaliztion process will produce a dictionary\n # items that start with the already existing keys. 
In order to avoid\n # overwriting the current key, value pair a temporary dictionary is created\n temporary_rules_dict = defaultdict(list)\n\n # Given the fact that the rules' dictionary has right-hand sides as the keys,\n # and that some RHS are too long for CFG, those RHS do not constitute a valid\n # key and thus must be deleted\n keys_to_remove = []\n\n # the list will be used to store unit productions, i.e. rules such as A -> B\n unit_productions = []\n\n for RHS, LHS in rules.items():\n # print(LHS, \"->\", RHS)\n\n RHS_symbols = RHS.split(\" \")\n if len(RHS_symbols) == 1:\n # find a chain that leads the LHS to a terminal symbol\n # unit_productions.append(LHS, RHS)\n # pass\n # zaimplementować: może być na sam koniec; jeżeli na koniec to\n # usuń regułę PP -> NP\n # print(LHS, RHS_symbols)\n\n if terminals.get(RHS_symbols[0]):\n # get() method returns None if there is no such key\n # check whether the right-hand side of the rule is in any\n # rule that results in a terminal symbol\n # consider the example:\n # PP -> NP (1)\n # NP -> 'I' (2)\n # (1) & (2) --> PP -> 'I'\n # print(RHS_symbols[0], terminals.get(RHS_symbols[0]))\n\n # Given the example above the new right-hand side of the rule\n # that produces a terminal symbol is the left-hand side of the\n # unit production\n terminals[LHS[0]] = terminals.pop(RHS_symbols[0])\n\n else:\n # this block handles the situation as in the example below:\n # AP -> Adj (1)\n # Adj -> AP2 (2)\n # AP2 -> 'piękny' (3)\n # (1) & (2) & (3) --> AP -> 'piękny'\n # The previous block wouldn't work because the AP is not on the\n # left-hand side of any rule resulting in a terminal symbol\n print(LHS[0], RHS_symbols[0])\n # TODO: zaimplementować tak, żeby Adj ostatecznie przechodził\n # w 'piękny'\n # pass\n # start constructing a chain of rules\n\n elif len(RHS_symbols) > 2:\n # normalization is needed\n\n keys_to_remove.append(RHS)\n\n while len(RHS_symbols) > 2:\n tmp_RHS_list = RHS_symbols[:2]\n tmp_LHS = \"X\" + str(normalization_tracker)\n\n temporary_rules_dict[\" \".join(tmp_RHS_list)].append(tmp_LHS)\n\n RHS_symbols.pop(0)\n RHS_symbols[0] = tmp_LHS\n\n normalization_tracker += 1\n\n\n # this is the link between the original input dictionary and the\n # temporary dict\n # note that use the first and only element of the LHS list\n temporary_rules_dict[\" \".join(RHS_symbols)].append(LHS[0])\n\n # integrate temporary dictionary with the original (input) one:\n for RHS_tmp, LHS_tmp in temporary_rules_dict.items():\n rules[RHS_tmp].append(LHS_tmp[0])\n\n # delete the rules that contain too long a right-hand side\n for key in keys_to_remove:\n rules.pop(key, None)\n # print(terminals)\n return rules", "def get_atomic_formula(out_data=None, log_data=None, restart_data=None,\n scfout_data=None, dat_data=None):\n atomic_formula = {}\n parameters = {'symbols': list, 'positions': list, 'scaled_positions': list,\n 'magmoms': list, 'cell': list}\n datas = [out_data, log_data, restart_data, scfout_data, dat_data]\n atoms_unitvectors = None\n atoms_spncrd_unit = 'ang'\n atoms_unitvectors_unit = 'ang'\n for data in datas:\n # positions unit save\n if 'atoms_speciesandcoordinates_unit' in data:\n atoms_spncrd_unit = data['atoms_speciesandcoordinates_unit']\n # cell unit save\n if 'atoms_unitvectors_unit' in data:\n atoms_unitvectors_unit = data['atoms_unitvectors_unit']\n # symbols, positions or scaled_positions\n if 'atoms_speciesandcoordinates' in data:\n atoms_spncrd = data['atoms_speciesandcoordinates']\n # cell\n if 'atoms_unitvectors' in data:\n 
atoms_unitvectors = data['atoms_unitvectors']\n # pbc\n if 'scf_eigenvaluesolver' in data:\n scf_eigenvaluesolver = data['scf_eigenvaluesolver']\n # ???\n for openmx_keyword in data.keys():\n for standard_keyword in parameters.keys():\n if openmx_keyword == standard_keyword:\n atomic_formula[standard_keyword] = data[openmx_keyword]\n\n atomic_formula['symbols'] = [i[1] for i in atoms_spncrd]\n\n openmx_spncrd_keyword = [[i[2], i[3], i[4]] for i in atoms_spncrd]\n # Positions\n positions_unit = atoms_spncrd_unit.lower()\n positions = np.array(openmx_spncrd_keyword, dtype=float)\n if positions_unit == 'ang':\n atomic_formula['positions'] = positions\n elif positions_unit == 'frac':\n scaled_positions = np.array(openmx_spncrd_keyword, dtype=float)\n atomic_formula['scaled_positions'] = scaled_positions\n elif positions_unit == 'au':\n positions = np.array(openmx_spncrd_keyword, dtype=float) * Bohr\n atomic_formula['positions'] = positions\n\n # If Cluster, pbc is False, else it is True\n atomic_formula['pbc'] = scf_eigenvaluesolver.lower() != 'cluster'\n\n # Cell Handling\n if atoms_unitvectors is not None:\n openmx_cell_keyword = atoms_unitvectors\n cell = np.array(openmx_cell_keyword, dtype=float)\n if atoms_unitvectors_unit.lower() == 'ang':\n atomic_formula['cell'] = openmx_cell_keyword\n elif atoms_unitvectors_unit.lower() == 'au':\n atomic_formula['cell'] = cell * Bohr\n\n # If `positions` and `scaled_positions` are both given, delete `scaled_..`\n if atomic_formula.get('scaled_positions') is not None and \\\n atomic_formula.get('positions') is not None:\n del atomic_formula['scaled_positions']\n return atomic_formula", "def solve(variables: List[str], formula: List[Clause]) -> Mapping[int, bool]:\n partial_assignment = partial_solve({}, formula)\n return create_total_assignment(variables, partial_assignment)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the most generic, yet compatible type between ty1 and ty2
def _most_generic(self, ty1, ty2): if ty1 == ty2: return ty1 assert ty1 in [types.REAL, types.INT], str(ty1) assert ty2 in [types.REAL, types.INT], str(ty2) return types.REAL
[ "def get_precedent_type(type1, type2):\n if type1 == complex or type2 == complex:\n return complex\n\n if type1 == float or type2 == float:\n return float\n\n return int", "def _merge_tensor_type(old_type, obtained_type, backend):\n if old_type is None:\n return obtained_type\n # Update the inferred dtype.\n if obtained_type is None:\n return old_type\n # One of old_type or obtained_type must be a TensorType because that's when\n # the caller's pattern_map2 would call _merge_tensor_type.\n if not isinstance(old_type, instructions.TensorType):\n raise ValueError('Type mismatch: Expected struct type {}, got {}'.format(\n old_type, obtained_type))\n if not isinstance(obtained_type, instructions.TensorType):\n raise ValueError('Type mismatch: Expected tensor type {}, got {}'.format(\n old_type, obtained_type))\n dtype = old_type.dtype\n obtained_dtype = obtained_type.dtype\n if dtype is None:\n dtype = obtained_dtype\n elif obtained_dtype is not None:\n dtype = backend.merge_dtypes(dtype, obtained_dtype)\n # Update the inferred shape.\n shape = old_type.shape\n obtained_shape = obtained_type.shape\n if shape is None:\n shape = obtained_shape\n elif obtained_shape is not None:\n shape = backend.merge_shapes(shape, obtained_shape)\n return instructions.TensorType(dtype, shape)", "def generalize(self, type):\n free_types = self.free_types(type)\n if not free_types: return type\n return types.polymorphic(free_types, type)", "def _same_type(type1, type2):\n return type1.qname() == type2.qname()", "def _get_compatible_spec(value_or_spec1, value_or_spec2):\n spec1 = _get_spec_for(value_or_spec1)\n spec2 = _get_spec_for(value_or_spec2)\n\n # pylint: disable=protected-access\n common = spec1._without_tensor_names().most_specific_common_supertype(\n [spec2._without_tensor_names()])\n if common is None:\n raise TypeError(f\"No common supertype of {spec1} and {spec2}.\")\n return common", "async def infer_type_generic_compare(engine, x, y):\n await engine.assert_same('type', x, y)\n return Bool()", "def get(*py_types):\n if len(py_types) == 0:\n return TypeVar.get()\n\n if len(py_types) == 1:\n py_type = py_types[0]\n if isinstance(py_type, Type):\n return py_type\n if isinstance(py_type, list):\n return ListType.get(Type.get(*py_type))\n if isinstance(py_type, tuple):\n return ProductType.get(*py_type)\n\n if py_type == ():\n return UnitType.get()\n\n return PyType.get(py_type)\n\n return ProductType.get(*py_types)", "def of_type(typ: Type):\n if typ == Type.Integer:\n return AbstractInt()\n elif typ == Type.Float:\n return AbstractFloat()\n elif typ == Type.Bool:\n return AbstractBool()\n elif typ.is_tuple:\n return tuple(Value.of_type(t) for t in typ.tuple_elems())\n elif typ.is_tensor:\n v = Value.of_type(typ.tensor_elem_type)\n if isinstance(v, AbstractTensor):\n return AbstractTensor(\n (_core_cost[\"assumed_vector_size\"],) + v.shp, v.typ\n )\n return AbstractTensor((_core_cost[\"assumed_vector_size\"],), Value.typeof(v))\n raise ValueError(f\"Unknown type {typ}\")", "def values_to_type(left, right):\n ltype = types.value_to_type(left)\n rtype = types.value_to_type(right)\n\n # Both are Python\n if not isinstance(ltype, ir.Type) and not isinstance(rtype, ir.Type):\n if ltype is float or rtype is float:\n return float\n\n return int\n\n # At least 1 is IR\n ltype = types.type_to_ir_type(ltype)\n rtype = types.type_to_ir_type(ltype)\n\n if ltype is types.float64 or rtype is types.float64:\n return types.float64\n\n if ltype is types.float32 or rtype is types.float32:\n return types.float32\n\n if 
ltype is types.int64 or rtype is types.int64:\n return types.int64\n\n return types.int32", "def type_type_consistent(type_a: Type, type_b: Type) -> bool:\n return type_a == type_b", "def JoinTypes(types):\n queue = collections.deque(types)\n seen = set()\n new_types = []\n while queue:\n t = queue.popleft()\n if isinstance(t, pytd.UnionType):\n queue.extendleft(reversed(t.type_list))\n elif isinstance(t, pytd.NothingType):\n pass\n elif t not in seen:\n new_types.append(t)\n seen.add(t)\n\n if len(new_types) == 1:\n return new_types.pop()\n elif any(isinstance(t, pytd.AnythingType) for t in new_types):\n return pytd.AnythingType()\n elif new_types:\n return pytd.UnionType(tuple(new_types)) # tuple() to make unions hashable\n else:\n return pytd.NothingType()", "def promote_type(orig_type, new_type):\n\n if not new_type:\n return orig_type\n\n if not orig_type:\n return new_type\n\n try:\n orig_type = orig_type.__name__\n except AttributeError:\n pass\n\n try:\n new_type = new_type.__name__\n except AttributeError:\n pass\n\n type_precidence = ['unknown', 'int', 'float', 'date', 'time', 'datetime', 'str', 'bytes', 'unicode']\n\n # TODO This will fail for dates and times.\n\n if type_precidence.index(new_type) > type_precidence.index(orig_type):\n return new_type\n else:\n return orig_type", "def _unify_types(*, param_type, arg_type, bindings, scope):\n if isinstance(param_type, ast.TypeParameter):\n if param_type in bindings:\n reified_param_type = bindings[param_type]\n if reified_param_type == arg_type:\n return reified_param_type\n else:\n bindings[param_type] = arg_type\n return arg_type\n elif isinstance(param_type, ast.ReifiedType):\n if (isinstance(arg_type, ast.ReifiedType) and\n param_type.class_ == arg_type.class_ and\n len(param_type.type_arguments) ==\n len(arg_type.type_arguments)):\n type_arguments = [\n _unify_types(p, a, bindings, scope)\n for p, a in zip(\n param_type.type_arguments,\n arg_type.type_arguments,\n )\n ]\n return ast.ReifiedType(\n mark=param_type.mark,\n class_=param_type.class_,\n type_arguments=type_arguments,\n )\n elif param_type == arg_type:\n return arg_type\n raise scope.error(\n f'binding {arg_type} to {param_type} failed '\n f'({bindings})')", "def unify(t1, t2):\n\n a = prune(t1)\n b = prune(t2)\n if isinstance(a, TypeVariable):\n if a != b:\n if occurs_in_type(a, b):\n raise InferenceError(\"recursive unification\")\n a.instance = b\n elif isinstance(a, TypeOperator) and isinstance(b, TypeVariable):\n unify(b, a)\n elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator):\n if a.name != b.name or len(a.types) != len(b.types):\n raise InferenceError(\"Type mismatch: {0} != {1}\".format(str(a), str(b)))\n for p, q in zip(a.types, b.types):\n unify(p, q)\n else:\n assert 0, \"Not unified\"", "def unify(t1, t2):\n\n a = prune(t1)\n b = prune(t2)\n if isinstance(a, TypeVariable):\n if a != b:\n if occurs_in_type(a, b):\n raise InferenceError(\"recursive unification\")\n a.instance = b\n elif isinstance(b, TypeVariable):\n unify(b, a)\n elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator):\n if len(a.types) != len(b.types):\n raise InferenceError(\"Type mismatch: {0} != {1}\".format(type(a), type(b)))\n else:\n if a.name != b.name:\n raise InferenceError(\"Type mismatch: {0} != {1}\".format(a.name, b.name))\n try:\n for p, q in zip(a.types, b.types):\n unify(p, q)\n except InferenceError:\n raise\n elif isinstance(a, MultiType) and isinstance(b, MultiType):\n if len(a.types) != len(b.types):\n raise InferenceError(\"Type mismatch: 
{0} != {1}\".format(type(a), type(b)))\n for p, q in zip(a.types, b.types):\n unify(p, q)\n elif isinstance(b, MultiType):\n return unify(b, a)\n elif isinstance(a, MultiType):\n types = []\n for t in a.types:\n try:\n t_clone = fresh(t, {})\n b_clone = fresh(b, {})\n unify(t_clone, b_clone)\n types.append(t)\n except InferenceError as e:\n pass\n if types:\n if len(types) == 1:\n a.instance = types[0]\n unify(a.instance, b)\n else:\n # too many overloads are found, so extract as many information as we can, and leave the remaining\n # over-approximated\n def try_unify(t, ts):\n if isinstance(t, TypeVariable):\n return\n if any(isinstance(tp, TypeVariable) for tp in ts):\n return\n for i, tt in enumerate(t.types):\n its = [prune(tp.types[i]) for tp in ts]\n if any(isinstance(it, TypeVariable) for it in its):\n continue\n it0 = its[0]\n it0ntypes = len(it0.types)\n if all(((it.name == it0.name) and (len(it.types) == it0ntypes)) for it in its):\n ntypes = [TypeVariable() for _ in range(it0ntypes)]\n new_tt = TypeOperator(it0.name, ntypes)\n new_tt.__class__ = it0.__class__\n unify(tt, new_tt)\n try_unify(prune(tt), [prune(it) for it in its])\n try_unify(b, types)\n else:\n #print(\"while unifying:\")\n #print(\"-\", str(a))\n #print(\"-\", str(b))\n raise InferenceError(\"Not unified {} and {}, no overload found\".format(type(a), type(b)))\n else:\n raise RuntimeError(\"Not unified {} and {}\".format(type(a), type(b)))", "def can_be_cast_to(_type1, _type2):\n if isinstance(_type2, Any):\n return True\n elif _type1 == _type2:\n return True\n elif _type2 == String():\n return can_to_str(_type1)\n elif isinstance(_type2, Enum):\n return isinstance(_type1, String) or isinstance(_type2, Enum)\n elif isinstance(_type1, Object) and isinstance(_type2, Object):\n if not _type2.strict_checking:\n return True\n else:\n for prop_name, prop_type in _type2.props.iteritems():\n if prop_name not in _type1.props:\n return False\n if not can_be_cast_to(_type1.props[prop_name], prop_type):\n return False\n return True\n return False", "def new_number_type(a, b):\n if isinstance(a, mt.TlFloat) or isinstance(b, mt.TlFloat):\n return mt.TlFloat\n else:\n return mt.TlInt", "def _parameterized_type(self, base_type: Any, parameters):\n if self._matches_named_type(base_type, \"typing.Literal\"):\n return _pytd_literal(parameters, self.aliases)\n elif self._matches_named_type(base_type, \"typing.Annotated\"):\n return _pytd_annotated(parameters)\n assert not any(isinstance(p, types.Pyval) for p in parameters), parameters\n arg_is_paramspec = False\n is_callable = False\n if self._matches_named_type(base_type, \"builtins.tuple\"):\n if len(parameters) == 2 and parameters[1] is self.ELLIPSIS:\n parameters = parameters[:1]\n builder = pytd.GenericType\n else:\n builder = pytdgen.heterogeneous_tuple\n elif self._matches_named_type(base_type, \"typing.Concatenate\"):\n assert parameters\n builder = pytd.Concatenate\n elif self._matches_named_type(base_type, \"typing.Callable\"):\n if parameters[0] is self.ELLIPSIS:\n parameters = (pytd.AnythingType(),) + parameters[1:]\n if parameters and isinstance(parameters[0], pytd.NamedType):\n if parameters[0].name in self.paramspec_names:\n arg_is_paramspec = True\n is_callable = True\n builder = pytdgen.pytd_callable\n elif pytdgen.is_any(base_type):\n builder = lambda *_: pytd.AnythingType()\n else:\n assert parameters\n builder = pytd.GenericType\n self._check_for_illegal_parameters(base_type, parameters, is_callable)\n parameters = self._remove_unsupported_features(parameters, 
is_callable)\n if arg_is_paramspec:\n # Hack - Callable needs an extra arg for paramspecs\n return builder(base_type, parameters, arg_is_paramspec)\n else:\n return builder(base_type, parameters)", "def unify(self, other):\n return Type.engine.unify(self, other)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the signature of the given term.
def _get_signature(self, term, args): decl = mathsat.msat_term_get_decl(term) tag = mathsat.msat_decl_get_tag(self.msat_env(), decl) try: return self.term_sig[tag](term, args) except KeyError: raise ConvertExpressionError("Unsupported expression:", mathsat.msat_term_repr(term))
[ "def _get_signature(self, req):\r\n sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')\r\n if sig is None and 'Authorization' in req.headers:\r\n auth_str = req.headers['Authorization']\r\n sig = auth_str.partition(\"Signature=\")[2].split(',')[0]\r\n\r\n return sig", "def signature(self):\n return parser.CliSignature.from_signature(\n self.func_signature,\n extra=itertools.chain(self._process_alt(self.alt), self.extra))", "def get_signature(self):\n\t\treturn self.signature_value;", "def computeSignature(self, image, signature=...) -> signature:\n ...", "def signature(self) -> Signature:\n return self.__signature__", "def key_signature(self, sf, mi):\n pass", "def _get_full_signature_list(self):\n return self._interpreter.GetSignatureDefs()", "def show_signature(self, name, arity):", "def parse(cls, expr: str) -> \"Signature\":\n return _parse_and_convert(expr, rule_name=\"onlySignature\")", "def getSignature(self) -> str:\n return self.__transactionSignature", "def get_signature_string(self):\n return \"_\".join(self._signature)", "def signature_path(self):\n return self._signature_path", "def signature(s):\n t = list(s)\n t.sort()\n t = \"\".join(t)\n return t", "def input_signature(self):\n return _raw_util.raw_regenerate_peak2_sptr_input_signature(self)", "def signature_hint(self):\n return bytes(self.public_key().ed25519[-4:])", "def find_itemname_from_signature(signature):\n return ''.join(re.split(r\"[\\(\\[\\s]\", signature)[0])", "def input_signature(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_input_signature(self)", "def term_frequency(self, term):\n return self._frequencies[re.sub(r'\\W+', '', term).lower()]", "def getSignature(firmware_data):\r\n start = firmware_data[:-2].rfind('\\x00') + 1\r\n ret = firmware_data[start:]\r\n if not 'Version' in ret or not 'Date' in ret:\r\n raise Exception(\"Invalid signature\")\r\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a function for the given op. This is used in the construction of back_fun, to simplify the code.
def _back_adapter(self, op): def back_apply(term, args): return op(*args) return back_apply
[ "def _e_op_func(self, e_op):\n if isinstance(e_op, Qobj):\n return lambda t, ado_state: (ado_state.rho * e_op).tr()\n elif isinstance(e_op, QobjEvo):\n return lambda t, ado_state: e_op.expect(t, ado_state.rho)\n elif callable(e_op):\n return e_op\n raise TypeError(f\"{e_op!r} has unsupported type {type(e_op)!r}.\")", "def create_operation(model, fieldname, operator, argument, relation=None):\n opfunc = OPERATORS.get(operator)\n field = getattr(model, relation or fieldname, None)\n if opfunc and field:\n return opfunc(field, argument)", "def get_func_graphs(op):\n\n def _get_func_graph_for_branch(name_attr_list, cached_attr_name=None):\n \"\"\"Generates and returns a FuncGraph for the given branch.\"\"\"\n func_graph = None\n if cached_attr_name is not None:\n func_graph = getattr(op, cached_attr_name, None)\n inputs = op.inputs[1:] # First input is pred.\n if func_graph is None:\n input_shapes = [t.shape for t in inputs]\n func_graph = util.get_func_graph(op, input_shapes, name_attr_list.name)\n for external_t, internal_t in zip(inputs, func_graph.inputs):\n handle_data_util.copy_handle_data(external_t, internal_t)\n func_graph.function_captures.reset_captures(inputs, func_graph.inputs)\n # Link the op so that the gradient code can use it.\n func_graph._forward_cond = op\n return func_graph\n\n if op.type in [\"If\", \"StatelessIf\"]:\n return (_get_func_graph_for_branch(\n op.get_attr(\"then_branch\"), \"_true_graph\"),\n _get_func_graph_for_branch(\n op.get_attr(\"else_branch\"), \"_false_graph\"))\n elif op.type in [\"Case\", \"StatelessCase\"]:\n return [_get_func_graph_for_branch(branch_fn, \"_branch_graph_{}\".format(i))\n for i, branch_fn in enumerate(op.get_attr(\"branches\"))]\n else:\n raise ValueError(\"Unsupported op type: {}\".format(op.type))", "def apply_unary_op(self, op: Operation) -> \"Lambda\":\n new = self.contextually_paren(op, from_right=True)\n\n return replace(\n new,\n tree=op.render_ast(self.tree),\n body_str=op.render_str(new),\n last_op=op,\n )", "def comp_function(model, fun=None, var=None, out=None, name=None, runtime=0):\n model_new = model.copy()\n\n ## Dispatch to core builder for consistent behavior\n fun, var, out, name, runtime = _comp_function_data(\n model, fun, var, out, name, runtime\n )\n\n ## Add new function\n model_new.functions.append(Function(fun, var, out, name, runtime))\n\n model_new.update()\n return model_new", "def createFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def apply_binary_op(\n self, op: Operation, other: Any, from_right: bool = False\n ) -> \"Lambda\":\n new = self.contextually_paren(op, from_right)\n\n other_ast = subscript(name(\"closure\"), constant(len(self.closure)))\n new_closure = self.closure.chain([other])\n\n if from_right:\n new_tree = op.render_ast(other_ast, new.tree)\n body_str = op.render_str(repr(other), new)\n else:\n new_tree = op.render_ast(new.tree, other_ast)\n body_str = op.render_str(new, repr(other))\n\n return replace(\n new,\n tree=new_tree,\n closure=new_closure,\n body_str=body_str,\n last_op=op,\n )", "def _binary_op(result_name, func_name, arg1_name, arg2_name):\n funcs = {'add': '+', 'sub': '-', 'mul': '*', 'div': '/'}\n return f\"{result_name} = {arg1_name} {funcs[func_name]} {arg2_name}\"", "def create_train_function(self):\n action_prob = self.model.output\n\n action_one_hot_placeholder = back.placeholder(shape=(None, self.num_actions),\n name=\"action_one_hot\")\n\n discounted_reward_placeholder = back.placeholder(shape=(None, ),\n name='discount_reward')\n\n log_prob = 
back.sum(action_one_hot_placeholder * back.log(action_prob), axis=1)\n\n loss = back.mean(- log_prob * discounted_reward_placeholder)\n\n adam = keras.optimizers.Adam(lr=self.alpha)\n\n updates = adam.get_updates(params=self.model.trainable_weights,\n loss=loss)\n\n self.train_fcn = back.function(inputs=[self.model.input, action_one_hot_placeholder, discounted_reward_placeholder],\n outputs=[],\n updates=updates)", "def operation(op: Operation):\n\n def decorator(func: t.Callable):\n @functools.wraps(func)\n def wrapper(self: DataFrame, *args, **kwargs):\n if self.last_op == Operation.INIT:\n self = self._convert_leaf_to_cte()\n self.last_op = Operation.NO_OP\n last_op = self.last_op\n new_op = op if op != Operation.NO_OP else last_op\n if new_op < last_op or (last_op == new_op == Operation.SELECT):\n self = self._convert_leaf_to_cte()\n df: t.Union[DataFrame, GroupedData] = func(self, *args, **kwargs)\n df.last_op = new_op # type: ignore\n return df\n\n wrapper.__wrapped__ = func # type: ignore\n return wrapper\n\n return decorator", "def register_unary_op(op):\n to_lower_op = make_unary_op(op)\n cuda_lower(op, MaskedType)(to_lower_op)", "def op_left(op):\n\n def method(self, other):\n return op(self.value, value_left(self, other))\n\n return method", "def _make_ufunc(ufunc):\n def new_ufunc(array, *args):\n return apply_ufunc(ufunc, array, args)\n return new_ufunc", "def add_operation(self, name, op_func):\n \n self.operations[name] = op_func", "def _generate_pack_op(self):\n obj = self.original_fn.__self__ if self.is_method else None\n fn = self.original_fn.pack_fn\n key = f\"{id(obj)}_{id(fn)}\"\n if self.is_method:\n setattr(obj, self.pack_fn_name, PackFunc(fn, key, obj))\n else:\n fn.__globals__[self.pack_fn_name] = PackFunc(fn, key, obj)", "def operatorConvert(op):\n operator = BitArray(3)\n if op == '+':\n return operator\n elif op == '-':\n operator.invert(2)\n return operator\n elif op == '*': \n operator.invert(1)\n return operator\n elif op == '/':\n operator.invert([1, 2])\n return operator\n elif op == 'mod':\n operator.invert(0)\n return operator\n elif op == '^':\n operator.invert([0, 2])\n return operator\n elif op == 'nck':\n operator.invert([0, 1])\n return operator\n elif op == '%':\n operator.invert([0, 1, 2])\n return operator", "def _create_binary_operator(operator_func, description, list_kword=None):\n\n class _BinaryOperatorImpl(_BinaryOperator):\n \"\"\"Implements a binary operator specfication.\"\"\"\n\n def __init__(self, rvalue, key=None, description=description,\n default=DEFAULT_NOT_SET):\n _BinaryOperator.__init__(self, description, default, operator_func,\n rvalue, key, list_kword)\n return _BinaryOperatorImpl", "def op(operator):\n return a(Token('OP', operator)) >> tok_to_value", "def _Id_make_comparison_function(op):\n def decorate(fn):\n def cmp_fn(self, other):\n try:\n return op((str(self._prefix), self._seqno),\n (str(other._prefix), other._seqno))\n except AttributeError:\n # fall back to safe comparison as `str`\n gc3libs.log.debug(\n \"Wrong job ID: comparing '%s' (%s) with '%s' (%s)\"\n % (self, type(self), other, type(other)))\n return op(str(self), str(other))\n return cmp_fn\n return decorate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a PySMT formula into a MathSat Term. This function might throw an InternalSolverError exception if an error occurs during conversion.
def convert(self, formula): # Rewrite to avoid UF with bool args rformula = self._ufrewriter.walk(formula) res = self.walk(rformula) if mathsat.MSAT_ERROR_TERM(res): msat_msg = mathsat.msat_last_error_message(self.msat_env()) raise InternalSolverError(msat_msg) if rformula != formula: warn("MathSAT convert(): UF with bool arguments have been translated") return res
[ "def make_sympy(self, xml=None): # lint-amnesty, pylint: disable=too-many-statements\n\n if self.the_sympy:\n return self.the_sympy\n\n if xml is None:\t # root\n if not self.is_mathml():\n return my_sympify(self.expr)\n if self.is_presentation_mathml():\n cmml = None\n try:\n cmml = self.cmathml\n xml = etree.fromstring(str(cmml))\n except Exception as err:\n if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: # lint-amnesty, pylint: disable=unsupported-membership-test\n msg = \"Illegal math expression\"\n else:\n msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n else:\n xml = etree.fromstring(self.expr)\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n return self.the_sympy\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def op_plus(*args):\n return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]\n\n def op_times(*args):\n return reduce(operator.mul, args)\n\n def op_minus(*args):\n if len(args) == 1:\n return -args[0]\n if not len(args) == 2: # lint-amnesty, pylint: disable=unneeded-not\n raise Exception('minus given wrong number of arguments!')\n #return sympy.Add(args[0],-args[1])\n return args[0] - args[1]\n\n opdict = {\n 'plus': op_plus,\n 'divide': operator.div, # lint-amnesty, pylint: disable=no-member\n 'times': op_times,\n 'minus': op_minus,\n 'root': sympy.sqrt,\n 'power': sympy.Pow,\n 'sin': sympy.sin,\n 'cos': sympy.cos,\n 'tan': sympy.tan,\n 'cot': sympy.cot,\n 'sinh': sympy.sinh,\n 'cosh': sympy.cosh,\n 'coth': sympy.coth,\n 'tanh': sympy.tanh,\n 'asin': sympy.asin,\n 'acos': sympy.acos,\n 'atan': sympy.atan,\n 'atan2': sympy.atan2,\n 'acot': sympy.acot,\n 'asinh': sympy.asinh,\n 'acosh': sympy.acosh,\n 'atanh': sympy.atanh,\n 'acoth': sympy.acoth,\n 'exp': sympy.exp,\n 'log': sympy.log,\n 'ln': sympy.ln,\n }\n\n def parse_presentation_symbol(xml):\n \"\"\"\n Parse <msub>, <msup>, <mi>, and <mn>\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'msub':\n return '_'.join([parse_presentation_symbol(y) for y in xml])\n elif tag == 'msup':\n return '^'.join([parse_presentation_symbol(y) for y in xml])\n raise Exception('[parse_presentation_symbol] unknown tag %s' % tag)\n\n # parser tree for Content MathML\n tag = gettag(xml)\n\n # first do compound objects\n\n if tag == 'apply':\t\t# apply operator\n opstr = gettag(xml[0])\n if opstr in opdict:\n op = opdict[opstr] # pylint: disable=invalid-name\n args = [self.make_sympy(expr) for expr in xml[1:]]\n try:\n res = op(*args)\n except Exception as err:\n self.args = args # pylint: disable=attribute-defined-outside-init\n self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name\n raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) # lint-amnesty, pylint: disable=raise-missing-from\n return res\n else:\n raise Exception('[formula]: unknown operator tag %s' % (opstr))\n\n elif tag == 'list':\t\t# square bracket list\n if gettag(xml[0]) == 'matrix':\n return self.make_sympy(xml[0])\n else:\n return [self.make_sympy(expr) for expr in xml]\n\n elif tag == 'matrix':\n return sympy.Matrix([self.make_sympy(expr) for expr in xml])\n\n elif tag == 'vector':\n return [self.make_sympy(expr) for expr in xml]\n\n # atoms are below\n\n elif 
tag == 'cn':\t\t\t# number\n return sympy.sympify(xml.text)\n\n elif tag == 'ci':\t\t\t# variable (symbol)\n if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):\t # subscript or superscript\n usym = parse_presentation_symbol(xml[0])\n sym = sympy.Symbol(str(usym))\n else:\n usym = six.text_type(xml.text)\n if 'hat' in usym:\n sym = my_sympify(usym)\n else:\n if usym == 'i' and self.options is not None and 'imaginary' in self.options:\t # i = sqrt(-1)\n sym = sympy.I\n else:\n sym = sympy.Symbol(str(usym))\n return sym\n\n else:\t\t\t\t# unknown tag\n raise Exception('[formula] unknown tag %s' % tag)", "def parse_mathml_rhs(node, var_table=None, logger=None,\n number_post_processor=None, derivative_post_processor=None):\n def parsex(node):\n \"\"\"\n Parses a mathml expression.\n \"\"\"\n def chain(kind, node, unary=None):\n \"\"\"\n Parses operands for chained operators (for example plus, minus,\n times and division).\n\n The argument ``kind`` must be the myokit expression type being\n parsed, ``node`` is a DOM node and ``unary``, if given, should be\n the unary expression type (unary Plus or unary Minus).\n \"\"\"\n ops = []\n node = dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n n = len(ops)\n if n < 1:\n raise MathMLError('Operator needs at least one operand.')\n if n < 2:\n if unary:\n return unary(ops[0])\n else:\n raise MathMLError('Operator needs at least two operands')\n ex = kind(ops[0], ops[1])\n for i in xrange(2, n):\n ex = kind(ex, ops[i])\n return ex\n # Start parsing\n name = node.tagName\n if name == 'apply':\n # Brackets, can be ignored in an expression tree.\n return parsex(dom_child(node))\n elif name == 'ci':\n # Reference\n var = str(node.firstChild.data).strip()\n if var_table:\n try:\n var = var_table[var]\n except KeyError:\n logger.warn('Unable to resolve reference to <' + str(var)\n + '>.')\n return myokit.Name(var)\n elif name == 'diff':\n # Derivative\n # Check time variable\n bvar = dom_next(node, 'bvar')\n if derivative_post_processor:\n derivative_post_processor(parsex(dom_child(bvar, 'ci')))\n # Check degree, if given\n d = dom_child(bvar, 'degree')\n if d is not None:\n d = parsex(dom_child(d, 'cn')).eval()\n if not d == 1:\n raise MathMLError('Only derivatives of degree one are'\n ' supported.')\n # Create derivative and return\n x = dom_next(node, 'ci')\n if x is None:\n raise MathMLError('Derivative of an expression found: only'\n ' derivatives of variables are supported.')\n return myokit.Derivative(parsex(x))\n elif name == 'cn':\n # Number\n number = parse_mathml_number(node, logger)\n if number_post_processor:\n return number_post_processor(node, number)\n return number\n #\n # Algebra\n #\n elif name == 'plus':\n return chain(myokit.Plus, node, myokit.PrefixPlus)\n elif name == 'minus':\n return chain(myokit.Minus, node, myokit.PrefixMinus)\n elif name == 'times':\n return chain(myokit.Multiply, node)\n elif name == 'divide':\n return chain(myokit.Divide, node)\n #\n # Functions\n #\n elif name == 'exp':\n return myokit.Exp(parsex(dom_next(node)))\n elif name == 'ln':\n return myokit.Log(parsex(dom_next(node)))\n elif name == 'log':\n if dom_next(node).tagName != 'logbase':\n return myokit.Log10(parsex(dom_next(node)))\n else:\n return myokit.Log(\n parsex(dom_next(dom_next(node))),\n parsex(dom_child(dom_next(node))))\n elif name == 'root':\n # Check degree, if given\n next = dom_next(node)\n if next.tagName == 'degree':\n # Degree given, return x^(1/d) unless d is 2\n d = 
parsex(dom_child(next))\n x = parsex(dom_next(next))\n if d.is_literal() and d.eval() == 2:\n return myokit.Sqrt(x)\n return myokit.Power(x, myokit.Divide(myokit.Number(1), d))\n else:\n return myokit.Sqrt(parsex(next))\n elif name == 'power':\n n2 = dom_next(node)\n return myokit.Power(parsex(n2), parsex(dom_next(n2)))\n elif name == 'floor':\n return myokit.Floor(parsex(dom_next(node)))\n elif name == 'ceiling':\n return myokit.Ceil(parsex(dom_next(node)))\n elif name == 'abs':\n return myokit.Abs(parsex(dom_next(node)))\n elif name == 'quotient':\n n2 = dom_next(node)\n return myokit.Quotient(parsex(n2), parsex(dom_next(n2)))\n elif name == 'rem':\n n2 = dom_next(node)\n return myokit.Remainder(parsex(n2), parsex(dom_next(n2)))\n #\n # Trigonometry\n #\n elif name == 'sin':\n return myokit.Sin(parsex(dom_next(node)))\n elif name == 'cos':\n return myokit.Cos(parsex(dom_next(node)))\n elif name == 'tan':\n return myokit.Tan(parsex(dom_next(node)))\n elif name == 'arcsin':\n return myokit.ASin(parsex(dom_next(node)))\n elif name == 'arccos':\n return myokit.ACos(parsex(dom_next(node)))\n elif name == 'arctan':\n return myokit.ATan(parsex(dom_next(node)))\n #\n # Redundant trigonometry (CellML includes this)\n #\n elif name == 'csc':\n # Cosecant: csc(x) = 1 / sin(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Sin(parsex(dom_next(node))))\n elif name == 'sec':\n # Secant: sec(x) = 1 / cos(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Cos(parsex(dom_next(node))))\n elif name == 'cot':\n # Contangent: cot(x) = 1 / tan(x)\n return myokit.Divide(myokit.Number(1),\n myokit.Tan(parsex(dom_next(node))))\n elif name == 'arccsc':\n # ArcCosecant: acsc(x) = asin(1/x)\n return myokit.ASin(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arcsec':\n # ArcSecant: asec(x) = acos(1/x)\n return myokit.ACos(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n elif name == 'arccot':\n # ArcCotangent: acot(x) = atan(1/x)\n return myokit.ATan(myokit.Divide(myokit.Number(1),\n parsex(dom_next(node))))\n #\n # Hyperbolic trigonometry (CellML again)\n #\n elif name == 'sinh':\n # Hyperbolic sine: sinh(x) = 0.5 * (e^x - e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'cosh':\n # Hyperbolic cosine: cosh(x) = 0.5 * (e^x + e^-x)\n x = parsex(dom_next(node))\n return myokit.Multiply(myokit.Number(0.5), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'tanh':\n # Hyperbolic tangent: tanh(x) = (e^2x - 1) / (e^2x + 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Minus(e2x, myokit.Number(1)),\n myokit.Plus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic trigonometry (CellML...)\n #\n elif name == 'arcsinh':\n # Inverse hyperbolic sine: asinh(x) = log(x + sqrt(1 + x*x))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Sqrt(myokit.Plus(\n myokit.Number(1), myokit.Multiply(x, x)))))\n elif name == 'arccosh':\n # Inverse hyperbolic cosine:\n # acosh(x) = log(x + sqrt(x + 1) * sqrt(x - 1))\n x = parsex(dom_next(node))\n return myokit.Log(myokit.Plus(x, myokit.Multiply(myokit.Sqrt(\n myokit.Plus(x, myokit.Number(1))), myokit.Sqrt(\n myokit.Minus(x, myokit.Number(1))))))\n elif name == 'arctanh':\n # Inverse hyperbolic tangent:\n # atanh(x) = 0.5 * (log(1 + x) - log(1 - x))\n x = parsex(dom_next(node))\n return 
myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), x)), myokit.Log(\n myokit.Minus(myokit.Number(1), x))))\n #\n # Hyperbolic redundant trigonometry (CellML...)\n #\n elif name == 'csch':\n # Hyperbolic cosecant: csch(x) = 2 / (exp(x) - exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Minus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'sech':\n # Hyperbolic secant: sech(x) = 2 / (exp(x) + exp(-x))\n x = parsex(dom_next(node))\n return myokit.Divide(myokit.Number(2), myokit.Plus(\n myokit.Exp(x), myokit.Exp(myokit.PrefixMinus(x))))\n elif name == 'coth':\n # Hyperbolic cotangent:\n # coth(x) = (exp(2*x) + 1) / (exp(2*x) - 1)\n x = parsex(dom_next(node))\n e2x = myokit.Exp(myokit.Multiply(myokit.Number(2), x))\n return myokit.Divide(myokit.Plus(e2x, myokit.Number(1)),\n myokit.Minus(e2x, myokit.Number(1)))\n #\n # Inverse hyperbolic redundant trigonometry (CellML has a lot to answer\n # for...)\n #\n elif name == 'arccsch':\n # Inverse hyperbolic cosecant:\n # arccsch(x) = log(sqrt(1 + 1/x^2) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Sqrt(myokit.Number(1),\n myokit.Power(xi, myokit.Number(2))), xi))\n elif name == 'arcsech':\n # Inverse hyperbolic secant:\n # arcsech(x) = log(sqrt(1/x - 1) * sqrt(1/x + 1) + 1/x)\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Log(myokit.Plus(myokit.Multiply(\n myokit.Sqrt(myokit.Minus(xi, myokit.Number(1))),\n myokit.Sqrt(myokit.Plus(xi, myokit.Number(1)))), xi))\n elif name == 'arccoth':\n # Inverse hyperbolic cotangent:\n # arccoth(x) = 0.5 * (log(1 + 1/x) - log(1 - 1/x))\n xi = myokit.Divide(myokit.Number(1), parsex(dom_next(node)))\n return myokit.Multiply(myokit.Number(0.5), myokit.Minus(\n myokit.Log(myokit.Plus(myokit.Number(1), xi)),\n myokit.Log(myokit.Minus(myokit.Number(1), xi))))\n #\n # Logic\n #\n elif name == 'and':\n return chain(myokit.And, node)\n elif name == 'or':\n return chain(myokit.Or, node)\n elif name == 'not':\n return chain(None, node, myokit.Not)\n elif name == 'eq' or name == 'equivalent':\n n2 = dom_next(node)\n return myokit.Equal(parsex(n2), parsex(dom_next(n2)))\n elif name == 'neq':\n n2 = dom_next(node)\n return myokit.NotEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'gt':\n n2 = dom_next(node)\n return myokit.More(parsex(n2), parsex(dom_next(n2)))\n elif name == 'lt':\n n2 = dom_next(node)\n return myokit.Less(parsex(n2), parsex(dom_next(n2)))\n elif name == 'geq':\n n2 = dom_next(node)\n return myokit.MoreEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'leq':\n n2 = dom_next(node)\n return myokit.LessEqual(parsex(n2), parsex(dom_next(n2)))\n elif name == 'piecewise':\n # Piecewise contains at least one piece, optionally contains an\n # \"otherwise\". 
Syntax doesn't ensure this statement makes sense.\n conds = []\n funcs = []\n other = None\n piece = dom_child(node)\n while piece:\n if piece.tagName == 'otherwise':\n if other is None:\n other = parsex(dom_child(piece))\n elif logger:\n logger.warn('Multiple <otherwise> tags found in'\n ' <piecewise> statement.')\n elif piece.tagName == 'piece':\n n2 = dom_child(piece)\n funcs.append(parsex(n2))\n conds.append(parsex(dom_next(n2)))\n elif logger:\n logger.warn('Unexpected tag type in <piecewise>: '\n + '<' + piece.tagName + '>.')\n piece = dom_next(piece)\n if other is None:\n other = myokit.Number(0)\n # Create string of if statements\n args = []\n f = iter(funcs)\n for c in conds:\n args.append(c)\n args.append(f.next())\n args.append(other)\n return myokit.Piecewise(*args)\n #\n # Constants\n #\n elif name == 'pi':\n return myokit.Number('3.14159265358979323846')\n elif name == 'exponentiale':\n return myokit.Exp(myokit.Number(1))\n elif name == 'true':\n # This is corrent, even in Python True == 1 but not True == 2\n return myokit.Number(1)\n elif name == 'false':\n return myokit.Number(0)\n #\n # Unknown/unhandled elements\n #\n else:\n if logger:\n logger.warn('Unknown element: ' + name)\n ops = []\n node = dom_child(node) if dom_child(node) else dom_next(node)\n while node:\n ops.append(parsex(node))\n node = dom_next(node)\n return myokit.UnsupportedFunction(name, ops)\n # Remove math node, if given\n if node.tagName == 'math':\n node = dom_child(node)\n #TODO: Check xmlns?\n return parsex(node)", "def conversion_by_law(self, t, m):\n def f(x):\n if isinstance(x, t):\n return f(m.__call__(x))\n elif isinstance(x, Expression):\n return x.__class__(*(f(s) for s in x.scope))\n else:\n return x\n\n return Formula(f(self.expression))", "def to_13wpm(self):\r\n #Replace Ci by:(-bi,wi)(ConjunctiveNormalForm((-x1∧-x2)↔bi),∞)\r\n\r\n if formula.is_13wpm():\r\n print(\"is (1,3) formula\")\r\n return formula\r\n\r\n formula13 = WCNFFormula()\r\n #print(formula13.num_vars)\r\n\r\n \"\"\"Soft to 1\"\"\"\r\n for clause in self.soft:\r\n if len(clause[1])>1:\r\n #print(clause)\r\n #print(type(clause)) POS 0 = peso POS 1 literales\r\n aux=formula13.new_var()\r\n formula13.add_clause([-aux],weight=clause[0])\r\n clause[1].append(aux)\r\n formula13.add_clause([formula13.new_var()],weight=clause[0])\r\n formula13.add_clause([formula13.new_var()],weight=clause[0])\r\n else:\r\n formula13.add_clause([formula13.new_var()],weight=clause[0])\r\n\r\n #formula13.add_clause([formula13.new_var()], clause[0])\r\n\r\n \"\"\" Hard to 3\"\"\"\r\n for clause in self.hard:\r\n #print(clause)\r\n #print(type(clause))\r\n aux\r\n i=0\r\n if len(clause)>3:\r\n partitions=(len(clause)/2)\r\n while i < partitions:\r\n\r\n if i!=0 or i!=int(partitions)-1:#First and last partition are different\r\n newclause=clause[:1] #Just 1 literal for intermedial partitions\r\n last_aux=-aux\r\n newclause.append(last_aux)\r\n aux1=formula13.new_var()\r\n newclause.append(aux1)\r\n aux=aux1\r\n formula13._add_clause(newclause,weight=TOP_WEIGHT)\r\n clause=clause[1:]\r\n i+=1\r\n\r\n else: #First and last partition would have 2 literales\r\n aux1=formula13.new_var()\r\n aux=aux1\r\n newclause=clause[1][:2]\r\n newclause.append(aux)\r\n formula13.add_clause(newclause,weight=TOP_WEIGHT)\r\n clause=clause[1][2:]\r\n i+=1\r\n\r\n else:\r\n formula13.add_clause(clause,weight=TOP_WEIGHT)\r\n print(formula13.is_13wpm())\r\n return formula13\r\n\r\n\r\n\r\n \"\"\" if len(literals) == 2:\r\n new = self.new_var()\r\n print(new)\r\n 
print(type(new))\r\n\r\n formula13.add_clause([-new], 1)\r\n\r\n new_clause = tuple(TOP_WEIGHT, literals+new)\r\n formula13.add_clause(new_clause)\r\n\r\n if len(clause[1]) > 3:\r\n pass\r\n\r\n else:\r\n formula13.add_clause([clause[0]], TOP_WEIGHT)\"\"\"", "def parse_mathml(s):\n import xml.dom.minidom\n x = xml.dom.minidom.parseString(s)\n return parse_mathml_rhs(dom_child(x))", "def math_formula(self) -> str:\n formula = self.random.choice(MATH_FORMULAS)\n return formula", "async def inchi_to_formula(self, inchi):\n props = await self.get_props_from_inchi(inchi)\n if props:\n for prop in props:\n if prop['urn']['label'] == 'Molecular Formula':\n return prop['value']['sval']", "def formulaConv(vect, formula):\n try:\n from sympy import lambdify, symbols\n except:\n print('Please install sympy to convert channel ')\n X = symbols('X')\n expr = lambdify(X, formula, modules='numpy', dummify=False)\n return expr(vect)", "def eval_formula(self, formula, a, b, c, d):\n if a == \"\": a = 0.0\n if b == \"\": b = 0.0\n if c == \"\": c = 0.0\n if d == \"\": d = 0.0\n try:\n a = float(a)\n except:\n raise ValueError, _(\"'a' value must be a float number\")\n try:\n b = float(b)\n except:\n raise ValueError, _(\"'b' value must be a float number\")\n try:\n c = float(c)\n except:\n raise ValueError, _(\"'c' value must be a float number\")\n try:\n d = float(d)\n except:\n raise ValueError, _(\"'d' value must be a float number\")\n # spaces are erased\n sre.sub(\"[ ]\",\"\",formula)\n # operators and varibles are replaced\n formula = formula.replace(\"+\", \" + \")\n formula = formula.replace(\"-\", \" - \")\n formula = formula.replace(\"*\", \" * \")\n formula = formula.replace(\"/\", \" / \")\n formula = formula.replace(\"^\", \" ** \")\n formula = formula.replace(\"(\", \" ( \")\n formula = formula.replace(\")\", \" ) \")\n formula = formula.replace(\"a\", str(a))\n formula = formula.replace(\"b\", str(b))\n formula = formula.replace(\"c\", str(c))\n formula = formula.replace(\"d\", str(d))\n formula = formula.replace(\"p\", \"3.1415926\")\n _list_formula = formula.split(\" \")\n _formula2 = \"\"\n for oper in _list_formula:\n try:\n _float_oper= str(float(oper))\n _formula2 = _formula2 + _float_oper\n except ValueError:\n _formula2 = _formula2 + oper\n _g = {\"__builtins__\":{}}\n try:\n return eval(_formula2, _g)\n except:\n raise ValueError, _(\"Invalid formula\")", "def set_formula(self, formula):\n self.formula = formula\n return self", "def tautology(formula):\n return onallvaluations(formula)", "def parse_formula(iter):\n if iter.is_structure():\n # this is a nested formula\n type = TypeFormula\n key = iter.peek().get_word()\n next(iter)\n if key[0] in reserved:\n raise ValueError('Error: Formula must not start with reserved '\n 'char!')\n children = parse_list_template(parse_formula, iter)\n else:\n # non nested formula\n key = iter.get_word()\n children = []\n if key[0] == '?':\n key = parse_variable(iter)\n type = TypeVariable\n else:\n type = TypeConstant\n return Formula(key, children, type)", "def encode_as_formula(rule: InferenceRule) -> Formula:\n formula = str(rule.conclusion)\n for assumption in reversed(rule.assumptions):\n formula = \"({assum}->{f})\".format(assum=assumption, f=formula)\n return Formula.parse(formula)\n # Task 6.4a", "def compile_math(math):\n if isinstance(math, str):\n math = (\n math\n .replace('&&', 'and')\n .replace('||', 'or')\n .replace('^', '**')\n )\n\n model = evalidate.base_eval_model.clone()\n model.nodes.extend(VALID_MATH_EXPRESSION_NODES)\n 
model.allowed_functions.extend(MATHEMATICAL_FUNCTIONS.keys())\n\n math_node = evalidate.Expr(math, model=model)\n compiled_math = compile(math_node.node, '<math>', 'eval')\n return compiled_math", "def eval_formula(formula, valuation):\n# recursively evaluates the formula according to the clause formula.name\n return eval_switch[formula[0]](formula.value, valuation)", "def _type_to_msat(self, tp):\n if tp.is_bool_type():\n return self.boolType\n elif tp.is_real_type():\n return self.realType\n elif tp.is_int_type():\n return self.intType\n elif tp.is_function_type():\n stps = [self._type_to_msat(x) for x in tp.param_types]\n rtp = self._type_to_msat(tp.return_type)\n msat_type = mathsat.msat_get_function_type(self.msat_env(),\n stps,\n rtp)\n if mathsat.MSAT_ERROR_TYPE(msat_type):\n msat_msg = mathsat.msat_last_error_message(self.msat_env())\n raise InternalSolverError(msat_msg)\n return msat_type\n elif tp.is_array_type():\n i = self._type_to_msat(tp.index_type)\n e = self._type_to_msat(tp.elem_type)\n msat_type = mathsat.msat_get_array_type(self.msat_env(), i, e)\n if mathsat.MSAT_ERROR_TYPE(msat_type):\n msat_msg = mathsat.msat_last_error_message(self.msat_env())\n raise InternalSolverError(msat_msg)\n return msat_type\n elif tp.is_bv_type():\n return mathsat.msat_get_bv_type(self.msat_env(), tp.width)\n elif tp.is_custom_type():\n return mathsat.msat_get_simple_type(self.msat_env(), str(tp))\n else:\n raise NotImplementedError(\"Usupported type for '%s'\" % tp)", "def transformation_to_pell(eq):\n\n\n var, coeff, diop_type = classify_diop(eq)\n if diop_type == \"quadratic\":\n return _transformation_to_pell(var, coeff)", "def gemmEquations(self, node, makeEquations): \n nodeName = node.output[0]\n \n # Get inputs\n inputName1, inputName2, inputName3 = node.input\n shape1 = self.shapeMap[inputName1]\n shape2 = self.shapeMap[inputName2]\n shape3 = self.shapeMap[inputName3]\n input1 = self.varMap[inputName1]\n input2 = self.constantMap[inputName2]\n input3 = self.constantMap[inputName3]\n \n self.shapeMap[nodeName] = self.shapeMap[inputName3]\n if makeEquations:\n \n # Pad shape if needed\n if len(shape1) == 1:\n shape1 = [1] + shape1\n input1 = input1.reshape(shape1)\n elif shape1[1] == 1:\n shape1 = shape1[::-1]\n input1 = input1.reshape(shape1)\n if len(shape3) == 1:\n shape3 = [1] + shape3\n input3 = input3.reshape(shape3)\n if shape1[0] != shape3[0]:\n shape3 = shape3[::-1]\n input3 = input3.reshape(shape3)\n\n # Assume that first input is variables, second is Matrix for MatMul, and third is bias addition\n assert shape1[-1] == shape2[0]\n assert shape1[0] == shape3[0]\n assert shape2[1] == shape3[1]\n\n # Create new variables\n self.shapeMap[nodeName] = self.shapeMap[node.input[2]]\n outputVariables = self.makeNewVariables(nodeName)\n outputVariables = outputVariables.reshape(shape3)\n # Generate equations\n for i in range(shape1[0]):\n for j in range(shape2[1]):\n e = MarabouUtils.Equation()\n for k in range(shape1[1]):\n e.addAddend(input2[k][j], input1[i][k])\n\n # Put output variable as the last addend last\n e.addAddend(-1, outputVariables[i][j])\n e.setScalar(-input3[i][j])\n self.addEquation(e)", "def convert_sympy_cell_to_py_cell(cell: str, var_dict: dict) -> str:\n acc = []\n lines = cell.split(\"\\n\")\n for line in lines:\n # try:\n if \"=\" in line:\n lhs, rhs = line.split(\"=\", 1)\n obj_str = rhs.strip()\n if test_for_sympy_eqn(obj_str, var_dict):\n sym_obj = get_sympy_obj(obj_str, var_dict)\n lhs = sym_obj.lhs\n rhs = sym_obj.rhs\n acc.append(str(lhs) + \"=\" + 
str(rhs))\n elif test_for_sympy_expr(obj_str, var_dict):\n sym_obj = get_sympy_obj(obj_str, var_dict)\n acc.append(lhs + \"=\" + str(sym_obj))\n else:\n acc.append(line)\n else:\n obj_str = line.strip()\n if test_for_sympy_eqn(obj_str, var_dict):\n sym_obj = get_sympy_obj(obj_str, var_dict)\n lhs = sym_obj.lhs\n rhs = sym_obj.rhs\n acc.append(str(lhs) + \"=\" + str(rhs))\n elif test_for_sympy_symbol(obj_str, var_dict):\n sym_obj = get_sympy_obj(obj_str, var_dict)\n acc.append(str(sym_obj))\n elif test_for_sympy_expr(obj_str, var_dict):\n raise ValueError(\n f\"The result of a sympy expr must be assigned to a new variable, e.g. x = {line}\"\n )\n else:\n acc.append(line)\n # except:\n # raise ValueError(f\"%%render sympy: Should only be used for a cell filled with sympy objects, not: {line}\")\n return \"\\n\".join(acc)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a pySMT type into a MathSAT type.
def _type_to_msat(self, tp): if tp.is_bool_type(): return self.boolType elif tp.is_real_type(): return self.realType elif tp.is_int_type(): return self.intType elif tp.is_function_type(): stps = [self._type_to_msat(x) for x in tp.param_types] rtp = self._type_to_msat(tp.return_type) msat_type = mathsat.msat_get_function_type(self.msat_env(), stps, rtp) if mathsat.MSAT_ERROR_TYPE(msat_type): msat_msg = mathsat.msat_last_error_message(self.msat_env()) raise InternalSolverError(msat_msg) return msat_type elif tp.is_array_type(): i = self._type_to_msat(tp.index_type) e = self._type_to_msat(tp.elem_type) msat_type = mathsat.msat_get_array_type(self.msat_env(), i, e) if mathsat.MSAT_ERROR_TYPE(msat_type): msat_msg = mathsat.msat_last_error_message(self.msat_env()) raise InternalSolverError(msat_msg) return msat_type elif tp.is_bv_type(): return mathsat.msat_get_bv_type(self.msat_env(), tp.width) elif tp.is_custom_type(): return mathsat.msat_get_simple_type(self.msat_env(), str(tp)) else: raise NotImplementedError("Usupported type for '%s'" % tp)
[ "def _msat_type_to_type(self, tp):\n if mathsat.msat_is_bool_type(self.msat_env(), tp):\n return types.BOOL\n elif mathsat.msat_is_rational_type(self.msat_env(), tp):\n return types.REAL\n elif mathsat.msat_is_integer_type(self.msat_env(), tp):\n return types.INT\n else:\n check_arr, idx_type, val_type = \\\n mathsat.msat_is_array_type(self.msat_env(), tp)\n if check_arr != 0:\n i = self._msat_type_to_type(idx_type)\n e = self._msat_type_to_type(val_type)\n return types.ArrayType(i, e)\n\n check_bv, bv_width = mathsat.msat_is_bv_type(self.msat_env(), tp)\n if check_bv != 0:\n return types.BVType(bv_width)\n\n # It must be a function type, currently unsupported\n raise NotImplementedError(\"Function types are unsupported\")", "def bv2pysmt(bv, boolean=False, parse_shifts_rotations=False, env=None):\n msg = \"unknown conversion of {} to a pySMT type\".format(type(bv).__name__)\n\n if not hasattr(bv2pysmt, \"_counter\"):\n bv2pysmt._counter = -1\n bv2pysmt._counter += 1\n\n ## debugging\n # print(f\"{' '*bv2pysmt._counter}bv2pysmt({bv}, boolean={boolean}, parse_shifts_rotations={parse_shifts_rotations})\")\n\n if env is None:\n env = environment.reset_env()\n fm = env.formula_manager\n\n # preprocessing bv\n\n if isinstance(bv, int):\n bv2pysmt._counter -= 1\n return bv\n if isinstance(bv, property.Property):\n bv = bv.val\n\n while True:\n # _get_base_op_expr/doit/BvIdentity might return a Variable or Constant\n if isinstance(bv, operation.PartialOperation):\n bv = bv._get_base_op_expr()\n elif isinstance(bv, operation.SecondaryOperation):\n bv = bv.doit(eval_sec_ops=True)\n elif isinstance(bv, operation.BvIdentity):\n bv = bv.args[0]\n else:\n break\n\n assert isinstance(bv, core.Term)\n\n pysmt_bv = None\n\n if isinstance(bv, core.Variable):\n if boolean:\n assert bv.width == 1\n pysmt_bv = fm.Symbol(bv.name, env.type_manager.BOOL())\n else:\n pysmt_bv = fm.Symbol(bv.name, env.type_manager.BVType(bv.width))\n\n elif isinstance(bv, core.Constant):\n if boolean:\n assert bv.width == 1\n pysmt_bv = fm.Bool(bool(bv))\n else:\n pysmt_bv = fm.BV(bv.val, bv.width)\n\n elif isinstance(bv, operation.Operation):\n if boolean:\n assert bv.width == 1\n\n if type(bv) in [operation.BvNot, operation.BvAnd, operation.BvOr, operation.BvXor]:\n # -- Operations that requires boolean arguments to output a boolean---\n args = [bv2pysmt(a, boolean, parse_shifts_rotations, env) for a in bv.args]\n\n if type(bv) == operation.BvNot:\n if boolean:\n pysmt_bv = fm.Not(*args)\n else:\n pysmt_bv = fm.BVNot(*args)\n elif type(bv) == operation.BvAnd:\n if boolean:\n pysmt_bv = fm.And(*args)\n else:\n pysmt_bv = fm.BVAnd(*args)\n elif type(bv) == operation.BvOr:\n if boolean:\n pysmt_bv = fm.Or(*args)\n else:\n pysmt_bv = fm.BVOr(*args)\n else:\n assert type(bv) == operation.BvXor\n if boolean:\n pysmt_bv = fm.Xor(*args)\n else:\n pysmt_bv = fm.BVXor(*args)\n\n elif type(bv) == operation.Ite:\n # fm.Ite can either output a boolean or a BV, but\n # fm.Ite always requires a Boolean type for args[0] and\n # bv2pysmt(bv.args[0], boolean=True, ...) 
might cause an error\n args = [None for _ in range(len(bv.args))]\n try:\n args[0] = bv2pysmt(bv.args[0], True, parse_shifts_rotations, env)\n except Exception as e:\n raise e\n # args[0] = bv2pysmt(bv.args[0], False, parse_shifts_rotations, env)\n # if args[0].get_type().is_bv_type():\n # args[0] = fm.Equals(args[0], fm.BV(1, 1))\n args[1:] = [bv2pysmt(a, boolean, parse_shifts_rotations, env) for a in bv.args[1:]]\n pysmt_bv = fm.Ite(*args)\n\n else:\n # -- Operations that don't require boolean arguments to output a boolean ---\n\n args = [bv2pysmt(a, False, parse_shifts_rotations, env) for a in bv.args]\n\n if isinstance(bv, operation.BvComp): # for PropConcat\n if boolean:\n pysmt_bv = fm.EqualsOrIff(*args)\n else:\n pysmt_bv = fm.BVComp(*args)\n\n elif type(bv) == operation.BvUlt:\n pysmt_bv = fm.BVULT(*args)\n elif type(bv) == operation.BvUle:\n pysmt_bv = fm.BVULE(*args)\n elif type(bv) == operation.BvUgt:\n pysmt_bv = fm.BVUGT(*args)\n elif type(bv) == operation.BvUge:\n pysmt_bv = fm.BVUGE(*args)\n\n else:\n # -- Operations that don't support boolean arguments or boolean outputs ---\n\n if type(bv) in [operation.BvShl, operation.BvLshr]:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n if type(bv) == operation.BvShl:\n pysmt_bv = fm.BVLShl(*args)\n elif type(bv) == operation.BvLshr:\n pysmt_bv = fm.BVLShr(*args)\n else:\n x, r = bv.args\n offset = 0\n while not _is_power_of_2(x.width):\n x = operation.zero_extend(x, 1)\n r = operation.zero_extend(r, 1)\n offset += 1\n shift = bv2pysmt(type(bv)(x, r), False, parse_shifts_rotations, env)\n pysmt_bv = fm.BVExtract(shift, end=shift.bv_width() - offset - 1)\n elif type(bv) == operation.RotateLeft:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n pysmt_bv = fm.BVRol(*args)\n else:\n x, r = bv.args\n n = x.width\n rol = operation.Concat(x[n - r - 1:], x[n - 1: n - r])\n pysmt_bv = bv2pysmt(rol, False, parse_shifts_rotations, env)\n elif type(bv) == operation.RotateRight:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n pysmt_bv = fm.BVRor(*args)\n else:\n x, r = bv.args\n n = x.width\n rot = operation.Concat(x[r - 1:], x[n - 1: r])\n pysmt_bv = bv2pysmt(rot, False, parse_shifts_rotations, env)\n\n elif isinstance(bv, operation.Extract): # for PropExtract\n # pySMT Extract(bv, start, end)\n pysmt_bv = fm.BVExtract(args[0], args[2], args[1])\n elif type(bv) == operation.Concat:\n pysmt_bv = fm.BVConcat(*args)\n\n elif type(bv) == operation.BvNeg:\n pysmt_bv = fm.BVNeg(*args)\n elif type(bv) == operation.BvAdd:\n pysmt_bv = fm.BVAdd(*args)\n elif type(bv) == operation.BvSub:\n pysmt_bv = fm.BVSub(*args)\n elif type(bv) == operation.BvMul:\n pysmt_bv = fm.BVMul(*args)\n elif type(bv) == operation.BvUdiv:\n pysmt_bv = fm.BVUDiv(*args)\n elif type(bv) == operation.BvUrem:\n pysmt_bv = fm.BVURem(*args)\n\n if pysmt_bv is not None:\n if boolean:\n pysmt_bv = fm.EqualsOrIff(pysmt_bv, fm.BV(1, 1))\n else:\n raise ValueError(f\"invalid primary operation {bv.vrepr()}\")\n\n if pysmt_bv is not None:\n try:\n pysmt_bv_width = pysmt_bv.bv_width()\n except (AssertionError, TypeError):\n pysmt_bv_width = 1 # boolean type\n\n assert bv.width == pysmt_bv_width\n bv2pysmt._counter -= 1\n return pysmt_bv\n else:\n raise NotImplementedError(msg)", "def castToType(stat):\n if typeFromString(stat) == 'int':\n return int(stat)\n elif typeFromString(stat) == 'float':\n return float(stat)\n else:\n return str(stat)", "def python_type_to_sql(self, t):\n types={\n \"bool\": \"INTEGER\",\n 
\"int\": \"INTEGER\",\n \"float\": \"DOUBLE\",\n \"str\": \"TEXT\",\n \"datetime.date\": \"TEXT\"\n }\n return types[t]", "def map_data_type(cls, schematics_type):\n if not inspect.isclass(schematics_type):\n schematics_type = schematics_type.__class__\n type_map = cls._schematics_to_sqlalchemy\n sqla_type = getattr(schematics_type, 'sqlalchemy_type',\n type_map.get(schematics_type, None))\n return sqla_type", "def setType(self, ttype):\n if ttype == LINEAR_IMPLICIT:\n self.type = ttype\n elif ttype == NONLINEAR:\n self.type = ttype\n else:\n raise DREAMException(\"Solver: Unrecognized solver type: {}.\".format(ttype))", "async def infer_type_scalar_cast(track, x, t):\n await track.will_check(Number, x)\n await track.check(TypeType, t)\n new_t = await t['value']\n if new_t is ANYTHING:\n raise MyiaTypeError(f'Type to cast to must be known at compile time.')\n elif not ismyiatype(new_t, Number):\n raise MyiaTypeError(f'Cannot cast to {new_t}')\n return new_t", "def trans_type(_value, _type):\n if _type == 'int':\n return int(_value)\n if _type == 'string':\n return str(_value)\n return _value", "def recast(val, datatype):\n ret_val = val\n if datatype == 'string':\n ret_val = str(val)\n elif datatype == 'boolean':\n # AWS returns 1s and 0s for boolean for most of the cases\n if val.isdigit():\n ret_val = bool(int(val))\n # AWS returns 'TRUE,FALSE' for Oracle engine\n elif val == 'TRUE':\n ret_val = True\n elif val == 'FALSE':\n ret_val = False\n elif datatype == 'integer':\n if val.isdigit():\n ret_val = int(val)\n elif datatype == 'float':\n ret_val = float(val) if val else 0.0\n\n return ret_val", "def VimExpressionToPythonType(vim_expression):\n\n result = vim.eval( vim_expression )\n if not ( isinstance( result, str ) or isinstance( result, bytes ) ):\n return result\n\n try:\n return int( result )\n except ValueError:\n return ToUnicode( result )", "def src_to_sympy(src):\n a_ast = src_to_ast(src, translation_unit=False)\n a = ast_to_asr(a_ast)\n py_src = call_visitor(a)\n return py_src", "def conversion_by_law(self, t, m):\n def f(x):\n if isinstance(x, t):\n return f(m.__call__(x))\n elif isinstance(x, Expression):\n return x.__class__(*(f(s) for s in x.scope))\n else:\n return x\n\n return Formula(f(self.expression))", "def convert(self, formula):\n # Rewrite to avoid UF with bool args\n rformula = self._ufrewriter.walk(formula)\n res = self.walk(rformula)\n if mathsat.MSAT_ERROR_TERM(res):\n msat_msg = mathsat.msat_last_error_message(self.msat_env())\n raise InternalSolverError(msat_msg)\n if rformula != formula:\n warn(\"MathSAT convert(): UF with bool arguments have been translated\")\n return res", "def cast(\n expression: _ColumnExpressionOrLiteralArgument[Any],\n type_: _TypeEngineArgument[_T],\n) -> Cast[_T]:\n return Cast(expression, type_)", "async def infer_type_arith_unary(engine, x):\n t = await x['type']\n if not isinstance(t, (Int, Float)):\n raise MyiaTypeError(f'Expected number, not {t}')\n return t", "def return_type(self) -> ast.Type:", "def _translate_type(type_name):\n if not isinstance(type_name, str):\n raise Exception('Type name must be a string')\n type_name = _sanitize_identifier(type_name)\n\n return _ASN1_BUILTIN_TYPES.get(type_name, type_name)", "def str2type(type, string):\n if type in {'B', 'b', 'H', 'h', 'l', 'L'}:\n return int(string, 0)\n elif type in {'f', 'd'}:\n return float(string)\n raise ValueError('Type is not in accepted types. 
')", "def to_jax_type(self):\n # Currently, this function is used to decide the return type for\n # 'QuantOps.to_quantized.' The AQT implementation works by having a\n # conversion to an int dtype and then back to a fp dtype happen *within*\n # to_quantized, so that Jax backprop works correctly. Thus\n # counter-intuitively, we need this to return a fp dtype for 'aqt' since the\n # return type for 'to_quantized' overall is fp. TODO(malmaud): As part of\n # the refactor of this module, clean this up to eliminate the\n # counter-intuitive behavior.\n if self.value in ['aqt', 'fake_quant']: # pylint: disable=comparison-with-callable\n return SCALE_DTYPE\n elif self.value == 'fake_quant_with_int': # pylint: disable=comparison-with-callable\n return jnp.int8\n else:\n raise RuntimeError(f'QuantType {self.value} is unknown.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a MathSAT type into a PySMT type.
def _msat_type_to_type(self, tp): if mathsat.msat_is_bool_type(self.msat_env(), tp): return types.BOOL elif mathsat.msat_is_rational_type(self.msat_env(), tp): return types.REAL elif mathsat.msat_is_integer_type(self.msat_env(), tp): return types.INT else: check_arr, idx_type, val_type = \ mathsat.msat_is_array_type(self.msat_env(), tp) if check_arr != 0: i = self._msat_type_to_type(idx_type) e = self._msat_type_to_type(val_type) return types.ArrayType(i, e) check_bv, bv_width = mathsat.msat_is_bv_type(self.msat_env(), tp) if check_bv != 0: return types.BVType(bv_width) # It must be a function type, currently unsupported raise NotImplementedError("Function types are unsupported")
[ "def _type_to_msat(self, tp):\n if tp.is_bool_type():\n return self.boolType\n elif tp.is_real_type():\n return self.realType\n elif tp.is_int_type():\n return self.intType\n elif tp.is_function_type():\n stps = [self._type_to_msat(x) for x in tp.param_types]\n rtp = self._type_to_msat(tp.return_type)\n msat_type = mathsat.msat_get_function_type(self.msat_env(),\n stps,\n rtp)\n if mathsat.MSAT_ERROR_TYPE(msat_type):\n msat_msg = mathsat.msat_last_error_message(self.msat_env())\n raise InternalSolverError(msat_msg)\n return msat_type\n elif tp.is_array_type():\n i = self._type_to_msat(tp.index_type)\n e = self._type_to_msat(tp.elem_type)\n msat_type = mathsat.msat_get_array_type(self.msat_env(), i, e)\n if mathsat.MSAT_ERROR_TYPE(msat_type):\n msat_msg = mathsat.msat_last_error_message(self.msat_env())\n raise InternalSolverError(msat_msg)\n return msat_type\n elif tp.is_bv_type():\n return mathsat.msat_get_bv_type(self.msat_env(), tp.width)\n elif tp.is_custom_type():\n return mathsat.msat_get_simple_type(self.msat_env(), str(tp))\n else:\n raise NotImplementedError(\"Usupported type for '%s'\" % tp)", "def bv2pysmt(bv, boolean=False, parse_shifts_rotations=False, env=None):\n msg = \"unknown conversion of {} to a pySMT type\".format(type(bv).__name__)\n\n if not hasattr(bv2pysmt, \"_counter\"):\n bv2pysmt._counter = -1\n bv2pysmt._counter += 1\n\n ## debugging\n # print(f\"{' '*bv2pysmt._counter}bv2pysmt({bv}, boolean={boolean}, parse_shifts_rotations={parse_shifts_rotations})\")\n\n if env is None:\n env = environment.reset_env()\n fm = env.formula_manager\n\n # preprocessing bv\n\n if isinstance(bv, int):\n bv2pysmt._counter -= 1\n return bv\n if isinstance(bv, property.Property):\n bv = bv.val\n\n while True:\n # _get_base_op_expr/doit/BvIdentity might return a Variable or Constant\n if isinstance(bv, operation.PartialOperation):\n bv = bv._get_base_op_expr()\n elif isinstance(bv, operation.SecondaryOperation):\n bv = bv.doit(eval_sec_ops=True)\n elif isinstance(bv, operation.BvIdentity):\n bv = bv.args[0]\n else:\n break\n\n assert isinstance(bv, core.Term)\n\n pysmt_bv = None\n\n if isinstance(bv, core.Variable):\n if boolean:\n assert bv.width == 1\n pysmt_bv = fm.Symbol(bv.name, env.type_manager.BOOL())\n else:\n pysmt_bv = fm.Symbol(bv.name, env.type_manager.BVType(bv.width))\n\n elif isinstance(bv, core.Constant):\n if boolean:\n assert bv.width == 1\n pysmt_bv = fm.Bool(bool(bv))\n else:\n pysmt_bv = fm.BV(bv.val, bv.width)\n\n elif isinstance(bv, operation.Operation):\n if boolean:\n assert bv.width == 1\n\n if type(bv) in [operation.BvNot, operation.BvAnd, operation.BvOr, operation.BvXor]:\n # -- Operations that requires boolean arguments to output a boolean---\n args = [bv2pysmt(a, boolean, parse_shifts_rotations, env) for a in bv.args]\n\n if type(bv) == operation.BvNot:\n if boolean:\n pysmt_bv = fm.Not(*args)\n else:\n pysmt_bv = fm.BVNot(*args)\n elif type(bv) == operation.BvAnd:\n if boolean:\n pysmt_bv = fm.And(*args)\n else:\n pysmt_bv = fm.BVAnd(*args)\n elif type(bv) == operation.BvOr:\n if boolean:\n pysmt_bv = fm.Or(*args)\n else:\n pysmt_bv = fm.BVOr(*args)\n else:\n assert type(bv) == operation.BvXor\n if boolean:\n pysmt_bv = fm.Xor(*args)\n else:\n pysmt_bv = fm.BVXor(*args)\n\n elif type(bv) == operation.Ite:\n # fm.Ite can either output a boolean or a BV, but\n # fm.Ite always requires a Boolean type for args[0] and\n # bv2pysmt(bv.args[0], boolean=True, ...) 
might cause an error\n args = [None for _ in range(len(bv.args))]\n try:\n args[0] = bv2pysmt(bv.args[0], True, parse_shifts_rotations, env)\n except Exception as e:\n raise e\n # args[0] = bv2pysmt(bv.args[0], False, parse_shifts_rotations, env)\n # if args[0].get_type().is_bv_type():\n # args[0] = fm.Equals(args[0], fm.BV(1, 1))\n args[1:] = [bv2pysmt(a, boolean, parse_shifts_rotations, env) for a in bv.args[1:]]\n pysmt_bv = fm.Ite(*args)\n\n else:\n # -- Operations that don't require boolean arguments to output a boolean ---\n\n args = [bv2pysmt(a, False, parse_shifts_rotations, env) for a in bv.args]\n\n if isinstance(bv, operation.BvComp): # for PropConcat\n if boolean:\n pysmt_bv = fm.EqualsOrIff(*args)\n else:\n pysmt_bv = fm.BVComp(*args)\n\n elif type(bv) == operation.BvUlt:\n pysmt_bv = fm.BVULT(*args)\n elif type(bv) == operation.BvUle:\n pysmt_bv = fm.BVULE(*args)\n elif type(bv) == operation.BvUgt:\n pysmt_bv = fm.BVUGT(*args)\n elif type(bv) == operation.BvUge:\n pysmt_bv = fm.BVUGE(*args)\n\n else:\n # -- Operations that don't support boolean arguments or boolean outputs ---\n\n if type(bv) in [operation.BvShl, operation.BvLshr]:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n if type(bv) == operation.BvShl:\n pysmt_bv = fm.BVLShl(*args)\n elif type(bv) == operation.BvLshr:\n pysmt_bv = fm.BVLShr(*args)\n else:\n x, r = bv.args\n offset = 0\n while not _is_power_of_2(x.width):\n x = operation.zero_extend(x, 1)\n r = operation.zero_extend(r, 1)\n offset += 1\n shift = bv2pysmt(type(bv)(x, r), False, parse_shifts_rotations, env)\n pysmt_bv = fm.BVExtract(shift, end=shift.bv_width() - offset - 1)\n elif type(bv) == operation.RotateLeft:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n pysmt_bv = fm.BVRol(*args)\n else:\n x, r = bv.args\n n = x.width\n rol = operation.Concat(x[n - r - 1:], x[n - 1: n - r])\n pysmt_bv = bv2pysmt(rol, False, parse_shifts_rotations, env)\n elif type(bv) == operation.RotateRight:\n if not parse_shifts_rotations or _is_power_of_2(args[0].bv_width()):\n pysmt_bv = fm.BVRor(*args)\n else:\n x, r = bv.args\n n = x.width\n rot = operation.Concat(x[r - 1:], x[n - 1: r])\n pysmt_bv = bv2pysmt(rot, False, parse_shifts_rotations, env)\n\n elif isinstance(bv, operation.Extract): # for PropExtract\n # pySMT Extract(bv, start, end)\n pysmt_bv = fm.BVExtract(args[0], args[2], args[1])\n elif type(bv) == operation.Concat:\n pysmt_bv = fm.BVConcat(*args)\n\n elif type(bv) == operation.BvNeg:\n pysmt_bv = fm.BVNeg(*args)\n elif type(bv) == operation.BvAdd:\n pysmt_bv = fm.BVAdd(*args)\n elif type(bv) == operation.BvSub:\n pysmt_bv = fm.BVSub(*args)\n elif type(bv) == operation.BvMul:\n pysmt_bv = fm.BVMul(*args)\n elif type(bv) == operation.BvUdiv:\n pysmt_bv = fm.BVUDiv(*args)\n elif type(bv) == operation.BvUrem:\n pysmt_bv = fm.BVURem(*args)\n\n if pysmt_bv is not None:\n if boolean:\n pysmt_bv = fm.EqualsOrIff(pysmt_bv, fm.BV(1, 1))\n else:\n raise ValueError(f\"invalid primary operation {bv.vrepr()}\")\n\n if pysmt_bv is not None:\n try:\n pysmt_bv_width = pysmt_bv.bv_width()\n except (AssertionError, TypeError):\n pysmt_bv_width = 1 # boolean type\n\n assert bv.width == pysmt_bv_width\n bv2pysmt._counter -= 1\n return pysmt_bv\n else:\n raise NotImplementedError(msg)", "def map_data_type(cls, schematics_type):\n if not inspect.isclass(schematics_type):\n schematics_type = schematics_type.__class__\n type_map = cls._schematics_to_sqlalchemy\n sqla_type = getattr(schematics_type, 'sqlalchemy_type',\n 
type_map.get(schematics_type, None))\n return sqla_type", "def python_type_to_sql(self, t):\n types={\n \"bool\": \"INTEGER\",\n \"int\": \"INTEGER\",\n \"float\": \"DOUBLE\",\n \"str\": \"TEXT\",\n \"datetime.date\": \"TEXT\"\n }\n return types[t]", "def setType(self, ttype):\n if ttype == LINEAR_IMPLICIT:\n self.type = ttype\n elif ttype == NONLINEAR:\n self.type = ttype\n else:\n raise DREAMException(\"Solver: Unrecognized solver type: {}.\".format(ttype))", "def get_equivalent_popo_model_type(sqlalchemy_type: type) -> type:\n if sqlalchemy_type not in _SQLALCHEMY_TO_POPO_CONVERSIONS:\n raise ValueError(\"No conversion of SQLAlchemy model of type `%s` known\" % sqlalchemy_type)\n\n return _SQLALCHEMY_TO_POPO_CONVERSIONS[sqlalchemy_type]", "def trans_type(_value, _type):\n if _type == 'int':\n return int(_value)\n if _type == 'string':\n return str(_value)\n return _value", "def _translate_type(type_name):\n if not isinstance(type_name, str):\n raise Exception('Type name must be a string')\n type_name = _sanitize_identifier(type_name)\n\n return _ASN1_BUILTIN_TYPES.get(type_name, type_name)", "def castToType(stat):\n if typeFromString(stat) == 'int':\n return int(stat)\n elif typeFromString(stat) == 'float':\n return float(stat)\n else:\n return str(stat)", "def VimExpressionToPythonType(vim_expression):\n\n result = vim.eval( vim_expression )\n if not ( isinstance( result, str ) or isinstance( result, bytes ) ):\n return result\n\n try:\n return int( result )\n except ValueError:\n return ToUnicode( result )", "def type2pyFormat(cdt_type):\n return formatDic.get(cdt_type, '\"%s\"')", "def get(*py_types):\n if len(py_types) == 0:\n return TypeVar.get()\n\n if len(py_types) == 1:\n py_type = py_types[0]\n if isinstance(py_type, Type):\n return py_type\n if isinstance(py_type, list):\n return ListType.get(Type.get(*py_type))\n if isinstance(py_type, tuple):\n return ProductType.get(*py_type)\n\n if py_type == ():\n return UnitType.get()\n\n return PyType.get(py_type)\n\n return ProductType.get(*py_types)", "def return_type(self) -> ast.Type:", "def symtype(expr):\n stypes = [s.dtype for s in symlist(expr).values()]\n if len(stypes) == 0:\n return DEFAULT_SYMBOL_TYPE\n elif _checkEqualIvo(stypes):\n return stypes[0]\n else:\n raise TypeError('Cannot infer symbolic type from expression \"%s\"'\n ' with symbols [%s]' %\n (str(expr), ', '.join([str(s) + \": \" + str(s.dtype) for s in symlist(expr)])))", "def convert_spt_code_to_string_to_code(spectral_codes, decimals=1):\n if isinstance(spectral_codes, float):\n spectral_codes = [spectral_codes]\n\n spectral_types = []\n for spt in spectral_codes:\n spt_type = ''\n\n # Identify major type\n if 60 <= spt < 70:\n spt_type = 'M'\n elif 70 <= spt < 80:\n spt_type = 'L'\n elif 80 <= spt < 90:\n spt_type = 'T'\n elif 90 <= spt < 100:\n spt_type = 'Y'\n\n # Numeric part of type\n format = f'.{decimals}f'\n spt_type = f'{spt_type}{spt % 10:{format}}'\n logger.debug(f\"Converting: {spt} -> {spt_type}\")\n\n spectral_types.append(spt_type)\n\n return spectral_types", "def typeof_pyval(self, val):\n if isinstance(val, utils.INT_TYPES):\n # Ensure no autoscaling of integer type, to match the\n # typecode() function in _dispatcher.c.\n return types.int64\n\n tp = self.typingctx.resolve_data_type(val)\n if tp is None:\n tp = types.pyobject\n return tp", "def convert_to_type(type: str, val: str) -> Union[str, int, float, bytes, bool, Any]:\n if type is None or type in (\"str\", \"string\"):\n return val\n elif type in (\"int\", \"integer\"):\n return 
int(val)\n elif type in (\"float\", \"number\"):\n return float(val)\n elif type == \"bytes\":\n return val.encode(\"utf-8\")\n elif type == \"bool\":\n if isinstance(val, bool):\n return val\n return False if val.lower() in (\"false\", \"0\", \"no\") else True\n elif type == \"json\":\n if val in (\"\", None):\n return val\n if isinstance(val, str):\n return json.loads(val)\n return val\n else:\n raise ValueError(\n \"variable type can only be: bool, str, int, float, bytes or json\"\n )", "def to_type(value_type, value):\n # Convert to the specified type\n result = TypeConverter.to_nullable_type(value_type, value)\n if result != None:\n return result\n\n # Define and return default value based on type\n result_type = TypeConverter.to_type_code(value_type)\n if result_type == TypeCode.String:\n return None\n elif result_type == TypeCode.Integer:\n return 0\n elif result_type == TypeCode.Long:\n return 0\n elif result_type == TypeCode.Float:\n return 0.0\n else:\n return None", "def save_symm_pkl(cryst_ptgrp, op_type):\n\n tol = 1e-10\n if op_type == 'matrices':\n symm_ops = generate_symm_mats(cryst_ptgrp, tol)\n fstr = 'mats'\n elif op_type == 'quats':\n symm_ops = np.array(generate_symm_quats(cryst_ptgrp, tol))\n fstr = 'quats'\n\n pkl_file = 'symm_' + fstr + '_' + cryst_ptgrp + '.pkl'\n jar = open(pkl_file, 'wb')\n pickle.dump(symm_ops, jar)\n jar.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the Quantifier Eliminator using 'fm' or 'lw'.
def __init__(self, environment, logic=None, algorithm='lw'): if algorithm not in ['fm', 'lw']: raise PysmtValueError("Algorithm can be either 'fm' or 'lw'") if logic is not None and (not logic <= LRA and algorithm != "lw"): raise PysmtValueError("MathSAT quantifier elimination for LIA"\ " only works with 'lw' algorithm") QuantifierEliminator.__init__(self) IdentityDagWalker.__init__(self, env=environment) self.msat_config = mathsat.msat_create_default_config("QF_LRA") self.msat_env = MSatEnv(self.msat_config) mathsat.msat_destroy_config(self.msat_config) self.set_function(self.walk_identity, op.SYMBOL, op.REAL_CONSTANT, op.BOOL_CONSTANT, op.INT_CONSTANT) self.logic = logic self.algorithm = algorithm self.converter = MSatConverter(environment, self.msat_env)
[ "def initialize(fmp_file, lo_freq, lo_multiply=8, fm_rate=5.0):\n dt = 1.0 / float(fm_rate)\n listfreq = fmlolc.listfreq(fmp_file, lo_freq, lo_multiply)\n\n finalize()\n\n with fmlolc.SCPI(**fmlolc.INFO_XFFTS) as xffts:\n xffts('XFFTS:CMDUSEDSECTIONS 1 1 1 1')\n xffts('XFFTS:CMDSYNCTIME {0:.3E}'.format(1e6*dt))\n xffts('XFFTS:CMDBLANKTIME 5000')\n xffts('XFFTS:CONFIG')\n\n with fmlolc.SCPI(**fmlolc.INFO_SG) as sg:\n sg('FREQ:MODE LIST')\n sg('LIST:TYPE LIST')\n sg('LIST:TRIG:SOUR EXT')\n sg('LIST:DWEL {0:.3E}'.format(dt))\n sg('LIST:FREQ {0}'.format(listfreq))\n sg('TRIG:SLOP POS')", "def set_quantizers(self):\n for quantizer in self.quantizers:\n if hasattr(quantizer, \"use_ste\"):\n quantizer.use_ste = self.use_ste\n if hasattr(quantizer, \"use_variables\"):\n quantizer.use_variables = True\n if hasattr(quantizer, \"built\"):\n # If the quantizer has been built but not using tf.Variable then it\n # builds again to create tf.Variables.\n if quantizer.built and not isinstance(quantizer.qnoise_factor,\n tf.Variable):\n quantizer.build(use_variables=True)\n\n # Set the qnoise_factor to 0.0 to pretrain without quantization.\n self.set_qnoise_factor(quantizer, qnoise_factor=0.0)", "def init_lnq(cls, lnq):\n return cls(lnq, np.zeros_like(lnq), np.ones_like(lnq))", "def __init__(self, sample, quantifier, blank, standard, standard_blank=None):\n self.sample = sample\n self.quantifier = quantifier\n self.blank = blank\n self.standard = standard\n\n if not standard_blank:\n self.standard_blank = blank\n else:\n self.standard_blank = standard_blank", "def __init__(self, *args, **kwargs):\n\n # flag used for signaling the end of tuning\n self.tuning_end_trigger = False\n\n model = pm.modelcontext(kwargs.get(\"model\", None))\n initial_values = model.initial_point\n\n # flag to that variance reduction is activated - forces DEMetropolisZMLDA\n # to store quantities of interest in a register if True\n self.mlda_variance_reduction = kwargs.pop(\"mlda_variance_reduction\", False)\n if self.mlda_variance_reduction:\n # Subsampling rate of MLDA sampler one level up\n self.mlda_subsampling_rate_above = kwargs.pop(\"mlda_subsampling_rate_above\")\n self.sub_counter = 0\n self.Q_last = np.nan\n self.Q_reg = [np.nan] * self.mlda_subsampling_rate_above\n\n # call parent class __init__\n super().__init__(*args, **kwargs)\n\n # modify the delta function and point to model if VR is used\n if self.mlda_variance_reduction:\n self.model = model\n self.delta_logp_factory = self.delta_logp\n self.delta_logp = lambda q, q0: -self.delta_logp_factory(q0, q)", "def __init__(self, *args, **kwargs):\n model = pm.modelcontext(kwargs.get(\"model\", None))\n initial_values = model.initial_point\n\n # flag to that variance reduction is activated - forces MetropolisMLDA\n # to store quantities of interest in a register if True\n self.mlda_variance_reduction = kwargs.pop(\"mlda_variance_reduction\", False)\n if self.mlda_variance_reduction:\n # Subsampling rate of MLDA sampler one level up\n self.mlda_subsampling_rate_above = kwargs.pop(\"mlda_subsampling_rate_above\")\n self.sub_counter = 0\n self.Q_last = np.nan\n self.Q_reg = [np.nan] * self.mlda_subsampling_rate_above\n\n # call parent class __init__\n super().__init__(*args, **kwargs)\n\n # modify the delta function and point to model if VR is used\n if self.mlda_variance_reduction:\n self.model = model\n self.delta_logp_factory = self.delta_logp\n self.delta_logp = lambda q, q0: -self.delta_logp_factory(q0, q)", "def __init__(self, kWMultiplier=0, kWhMultiplier=0, 
ctRatioMultiplier=None, billingMultiplier=None, vtRatioMultiplier=None, demandMultiplier=None, *args, **kw_args):\n #: Meter kW (pulse) multiplier, used as a multiplier for a meter register reading to determine the actual amount of usage for which to bill a customer.\n self.kWMultiplier = kWMultiplier\n\n #: Meter kWh multiplier, used as a multiplier for a meter register reading to determine the actual amount of usage for which to bill a customer.\n self.kWhMultiplier = kWhMultiplier\n\n self.ctRatioMultiplier = ctRatioMultiplier\n\n self.billingMultiplier = billingMultiplier\n\n self.vtRatioMultiplier = vtRatioMultiplier\n\n self.demandMultiplier = demandMultiplier\n\n super(ElectricMeteringFunction, self).__init__(*args, **kw_args)", "def __init__(self, split: int, sigmas: Dict[str, float] = dict(), **kwargs) -> None:\n super().__init__(premade_coupler(split)[0], sigmas, **kwargs)", "def _init_required_calculators(self):\n self._required_calculators = None\n pass", "def __init__(self, n,m,fn,fm,f_expx,f_expy,exc_n=[],exc_m=[]):\n self.exponents=[get_list(n),get_list(m)]\n self.excluded=[get_list(exc_n),get_list(exc_m)]\n self.get_limits()\n self.get_exponents()\n self.get_dimensions()\n self.func_coeff=[fn,fm]\n self.func_exp=[f_expx,f_expy]", "def __init__(self,duration,name):\n Interval.__init__(self,duration,name)\n self.type=\"Tone\"\n #self.freq = 0 #raj\n self.freqType = \"Donno\" #raj\n self.summa = 0\n \"\"\" Tone frequency in Hz. \n Based on the current setup and scaling. Frequency range = [1000,11160] Hz\"\"\"", "def set_qnoise_factor(self, quantizer, qnoise_factor):\n\n # Updating the qnoise_factor of the quantizer.\n quantizer.update_qnoise_factor(qnoise_factor)\n # Updating the qnoise_factor of the callback.\n self.qnoise_factor = qnoise_factor", "def __init__(self, micro, freq, serial):\n self.micro = micro\n self.freq = freq\n self.serial = serial\n self.cache = {}\n self.clear_stats()", "def __init__(self, min_cut = 0.1, max_cut = 0.9):\n self._min_cut = min_cut\n self._max_cut = max_cut\n self._stopwords = set(stopwords.words('english') + list(punctuation))", "def setFlux(self): \n self.f[0] = self.q[1]\n \n self.f[1] = (self.gamma-1.0)*self.q[2] +\\\n 0.5*(3.0-self.gamma)*(self.q[1]**2)/self.q[0]\n\n \n self.f[2] = self.gamma*self.q[2]*self.q[1]/self.q[0] +\\\n 0.5*(1.0-self.gamma)*((self.q[1])**3)/(self.q[0])**2", "def infer_quant(token):\n if re.fullmatch(QUANT_PATTERN, token.shape_):\n if \".\" in token.text:\n return float(token.text)\n return int(token.text)", "def setQueryDelimiters(*args, **kwargs):\n \n pass", "def init(self, part: Part):\n FMConstrMgr.init(self, part)\n self.illegal = [d < self.lowerbound for d in self.diff]", "def initialize_filter(self, f_size, scale=1.0):\n stddev = scale / np.sqrt(np.prod(f_size))\n return np.random.normal(loc=0, scale=stddev, size=f_size)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets product name and price from the user
def input_product(): product_name = input("Enter the name:") product_price = input("Enter the price:") return product_name, product_price
[ "def input_name_and_price():\n obj_product = Product(product_name='', product_price=0)\n try:\n obj_product.product_name = str(input('Product name: '))\n obj_product.product_price = str(input('Product price: '))\n except Exception as e:\n print(e)\n return obj_product", "def input_product_details():\r\n new_product_object = Product(\"\", None) # Initialize new object\r\n new_product_object.product_name = input(\"What is the Product's Name?: \")\r\n new_product_object.product_price = input(\"What is the Product's Price?: \")\r\n return new_product_object", "def product_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_name\")", "def get_product_name_and_size(self, driver):\n # size and units\n frame = driver.find_element_by_class_name(\"priceTable\")\n details = frame.find_elements_by_xpath('.//tr')\n for i in details:\n if \"Size:\" in i.text:\n size, units = i.text.replace(\"Size: \", \"\").split()\n # name\n description_frame = driver.find_element_by_id(\"prodDescription\")\n name = description_frame.find_element_by_class_name(\"prodName\").text\n return name, size, units", "def get_products():", "def query_price(request):\n pricing_id = request.POST.get('pricing_id', 0)\n list_type = request.POST.get('list_type', '')\n pricing = get_object_or_404(DirectoryPricing, pk=pricing_id)\n price = pricing.get_price_for_user(request.user, list_type=list_type)\n return HttpResponse(simplejson.dumps({'price': price}))", "def getPrice(self, productBox):\n priceSelector = productBox.css(\"p.product-price\")\n salePrice = ''\n price = ''\n if priceSelector.css(\"del\") != []:\n price = priceSelector.css(\"del::text\").get()\n salePrice = priceSelector.css(\"span.highlight::text\").get()\n salePrice = int(''.join(filter(str.isdigit, salePrice)))\n else:\n price = priceSelector.css(\"::text\").get().strip()\n price = int(''.join(filter(str.isdigit, price))) #remove none number characters \n return price, salePrice", "def get_product_name():\n return \"SmartAlpha\"", "def search_product():\n\n total_rows = Product.select().count()\n try:\n product_id = int(input(f'Input product id (hint: between 1 and {total_rows}:) '))\n product = Product.get_by_id(product_id)\n\n print(\"\"\"\n Your search result is:\n \"\"\")\n print(f'NAME --------------|{product.product_name}')\n print(f'PRICE -------------|${(product.product_price / 100)}')\n print(f'QTY ---------------|{product.product_quantity}')\n print(f'DATE UPDATED ------|{product.date_updated}')\n except ValueError:\n print(f'Please enter a number value from 1 to {total_rows}')\n except Product.DoesNotExist:\n print(f\"\"\"The product does not exist.\n{product_id} is not within 1 to {total_rows}\"\"\")", "def get_product(product_name):\n product = get_product_info(product_name)\n if not product:\n # TODO: Move flaskparser somewhere else\n flaskparser.abort(404)\n else:\n return ProductSchema().jsonify(product)", "def get_product(self, name):\n return self._products[name]", "def get_product(self, data):\n payload = {}\n payload.update(self.generic_service)\n payload.update(self.product_service)\n\n r = requests.get(\"http://catalog.bizrate.com/services/catalog/v1/us/{0}\".format(\"product\"), params=payload)\n print(\"URL: \")\n print(r.url)\n\n print(\"RESPONSE: \")\n print(r.json())\n\n return", "def fetch_product(identifier):", "def customProductValues():\n # Gets Custom Product Value from Entry\n customproduct = customProductEntry.get()\n # Gets Custom Price Value from Entry\n customprice = customPriceEntry.get()\n\n # Checks if 
length is less or equal to 0\n if len(customproduct) <= 0 and len(customprice) <= 0:\n # Displays an error that the cart can't be empty.\n messagebox.showerror(\n message=\"Product name and/or price can not be empty.\")\n # Checks if one is more than 0 and the other is less or equal to 0\n elif len(customproduct) > 0 and len(customprice) <= 0:\n # Displays an error to the user.\n messagebox.showerror(\n message=\"Price can not be empty.\")\n # Checks if one is less or equal to 0 and the other is more than 0\n elif len(customproduct) <= 0 and len(customprice) > 0:\n # Displays an error to the user.\n messagebox.showerror(\n message=\"Product Name can not be empty.\")\n # If both values values is > 0, this will run.\n else:\n # Tries to change the data type of the Entry's\n try:\n customproduct = str(customproduct)\n customprice = float(customprice)\n\n # Calls the addProduct Function with the product and price\n addProduct(customproduct, customprice)\n # Clears both of Entrys from 0 to end.\n customPriceEntry.delete(0, 'end')\n customProductEntry.delete(0, 'end')\n # If it isn't able to change the data types, it will display an error message.\n except:\n messagebox.showerror(message=\"Price has to be a number.\")", "def get_product(self, product_name):\n if product_name in self.products:\n return self.products[product_name]\n return None", "def get_product_name(self):\n\n games_name = [] # To hold game names.\n phones_name = [] # To hold phone names.\n products = [] # To hold all category product names.\n\n conn = self.create_connection()\n cursor = conn.cursor()\n cursor.execute('SELECT name_of_game FROM ps4_tbl')\n games = cursor.fetchall()\n for each_game in games:\n games_name.append(each_game[0].lower())\n\n cursor.execute('SELECT name_of_phone FROM phone_tbl')\n phones = cursor.fetchall()\n for each_phone in phones:\n phones_name.append(each_phone[0].lower())\n\n products.extend(games_name)\n products.extend(phones_name)\n\n cursor.close()\n conn.close()\n\n return products", "def inputPurchase():\n purchase = input(\"Please input purchase variables: \")\n return purchase", "def get_price(self):\n return str(self.gui.spn_price.textFromValue(self.gui.spn_price.value()))", "def price_on_amazon(self):\n #Getting html page data from amazon url\n res = requests.get(self.url_data[1], headers=self.headers)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, 'lxml')\n #Filtering the data\n price = soup.find(\"span\", {\"id\": \"priceblock_dealprice\"})\n if price == None:\n price = soup.find(\"span\", {\"id\": \"priceblock_ourprice\"})\n if price == None:\n return ['Not Found', 0]\n product_name = soup.find(\"span\", \n {\"id\": \"productTitle\"}).text.replace('\\n', '')\n #Purifying filtered data and converting into desired format\n price = price.text\n price = price.split('.')[0]\n price = price.replace(',', '')\n current_price = ''\n if not price.isnumeric():\n price = price[1:]\n for txt in list(price):\n if txt in [str(i) for i in range(10)]:\n current_price += txt\n \n data = [f\"{product_name[:35]}...\", int(current_price)]\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Path to repository on the CI (locally).
def repo_dir(self):
[ "def get_repositories_path(self):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../packages\"))", "def repo_path(conf, repo):\n\n return os.path.join(conf[\"mirrors\"], repo_name(repo))", "def get_git_repo_path():\n return os.path.join(DEEPSPEECH_CLONE_PATH, '.git')", "def relpath(repo_path):\n repo_path = '../../' + repo_path\n repo_path = repo_path.replace('../../infra/', '../')\n repo_path = repo_path.replace('../bots/', '')\n return repo_path", "def repo_root() -> str:\n thisdir = os.path.dirname(os.path.abspath(__file__))\n root = os.path.join(thisdir, \"..\")\n if not os.path.isdir(root):\n raise FileNotFoundError(\"The Artie directory seems to have been altered in a way that I can't understand.\")\n\n return os.path.abspath(root)", "def full_repository(self):\n base = self.base_repository\n if base:\n if not base.endswith('/'):\n base += '/'\n return urlparse.urljoin(base, self.repository)\n else:\n return self.repository", "def default_build_location():\n return os.path.join(repo_root(), \"build-artifacts\")", "def _get_local_repo_base_path(self):\n return os.path.join(os.path.expanduser('~'), \".localcache\")", "def get_git_root():\n\n rpath = git.Repo('.', search_parent_directories=True).working_tree_dir\n rpath = rpath + '/'\n return rpath", "def get_repo_url():\n default_repo = 's3://gluonnlp-numpy-data'\n repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo)\n if repo_url[-1] != '/':\n repo_url = repo_url + '/'\n return repo_url", "def get_checkout_path() -> str:\n utilities_path = get_project_path()\n return os.path.dirname(utilities_path)", "def clone_url(self, repo):\n return f'git@{self.host}:{self.vendor}/{repo}.git'", "def _get_repository_path(repository=None):\n if repository:\n return '/acr/v1/{}'.format(repository)\n return '/acr/v1/_catalog'", "def feedstock_repo(fctx: FeedstockContext) -> str:\n repo = fctx.feedstock_name + \"-feedstock\"\n if repo.endswith(\".git\"):\n repo = repo[:-4]\n return repo", "def templates_repo() -> str:\n repo_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"data/templates\")\n )\n return repo_path", "def resolve_repo_path(repo_item_path):\n path = Path(os.path.join(\n os.path.dirname(__file__), \"../..\", repo_item_path)).resolve()\n return str(path)", "def config_path(self):\n if lib.git_repository_is_bare(self._repo):\n return pathjoin(self.path, 'config')\n else:\n return pathjoin(self.path, '.git', 'config')", "def find_repo(self):\n if self.root_repo:\n return self.root_repo\n\n self.find_repo_from_options() or self.find_repo_from_pwd()\n if not self.root_repo:\n raise RepoNotFound(\"groot-based git repository not found\")\n\n self.log(\"# groot repo: %s\" % (self.root_repo))\n\n return self.root_repo", "def __repositoryBaseUrl(self):\n CraftCore.debug.trace(\"VersionSystemSourceBase __repositoryBaseUrl\")\n # @todo move to SvnSource\n server = CraftCore.settings.get(\"General\", \"KDESVNSERVER\", \"svn://anonsvn.kde.org\")\n\n return server + \"/home/kde/\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes a list of words and computes all possible permutations of the words in the string separated by spaces. E.g. it receives ['a','b',..] and returns ['a b', 'b a',...]
def permute_words(n, r): return [" ".join(map(str, comb)) for comb in permutations(n, r)]
[ "def permute(letter_list):\n results = []\n re_str = '^'+''.join(map(lambda w: w+'?', sorted(letter_list)))+'$'\n for word in WORDS:\n letters = ''.join(sorted(word))\n if re.search(re_str, letters):\n results.append(word)\n return map_results(sorted(results, key=len, reverse=True))", "def permute(word, depth=2) -> set:\n mutations = set(word.permutations())\n if depth:\n new = list()\n for mutation in mutations:\n# printer(mutation)\n new += permute(mutation, depth-1)\n #new += novel\n return new\n return [word]", "def _compute_word_pairs(self, words):\n # Sort the words first so the tuples are always ordered the same\n return combinations(sorted(words), r=2)", "def Permutations(s):\n\n\tperms = [s]\n\n\tif len(s) <s= 1:\n\t\treturn perms\n\n\tfor pos, i in enumerate(s):\n\n\t\trest_of_string = s[:pos] + s[pos+1:]\n\n\t\tsub_perms = Permutations(rest_of_string)\n\n\t\tfor sub in sub_perms:\n\t\t\tif i+sub not in perms:\n\t\t\t\tperms.append(i+sub)\n\n\treturn perms", "def permutations(self) -> Iterator[Tuple[Any, ...]]:\r\n # We are returning the itertools.permutations object\r\n # because if we convert it to a list this would\r\n # take an eternity depending on the length of the string\r\n return permutations(self.string)", "def list_permutations(xs):\n return list(permutations(xs))", "def unscramble_words(scrambled_words, word_list):\n output = []\n for i in scrambled_words:\n for k in word_list:\n if len(i) > len(k):\n if anagram(i, k):\n output.append(k)\n else:\n if(anagram(k, i)):\n output.append(k)\n print(output)\n return output", "def lexicographic_permutations():\n ans = list()\n x = copy.copy(MILLIONTH)\n nums = copy.copy(NUMS)\n while nums:\n a = x // fac(len(nums) - 1)\n x = x % fac(len(nums) - 1)\n # 刚好整除 要退一位 不进位\n a = a - 1 if x == 0 else a\n ans.append(nums[a])\n nums.remove(nums[a])\n return ''.join(str(x) for x in ans)", "def get_permutations(x):\n str_x = str(x)\n return [ to_int(tuple) for tuple in itertools.permutations(str_x) ]", "def permutations(values):\n values = list(values)\n if not values:\n return just(()).map(lambda _: [])\n\n def build_permutation(swaps):\n initial = list(values)\n for i, j in swaps:\n initial[i], initial[j] = initial[j], initial[i]\n return initial\n n = len(values)\n index = integers(0, n - 1)\n return lists(tuples(index, index), max_size=n ** 2).map(build_permutation)", "def get_permutations(num_items) :\n return list(itertools.permutations(range(num_items), num_items))", "def repeatedChars(words):\n\n wFeatures = []\n for i in range(len(words)):\n rgx = re.compile(r\"(\\w)\\1{2,}\") #matches same char, of same case\n if rgx.search(words[i]):\n m = rgx.search(words[i]).group()[1:]\n feat = re.sub(m, '', words[i])\n while rgx.search(feat):\n m = rgx.search(feat).group()[1:]\n feat = re.sub(m, '', feat)\n wFeatures += (feat.lower().strip(string.punctuation)+\"_REPEATED\")\n return wFeatures", "def permutations(lst):\n\n if len(lst) <= 1:\n return [lst]\n else:\n result = []\n x = lst[0]\n xs = permutations(lst[1:])\n\n for i in xs:\n for j in range(len(lst)):\n new_i = i[:j] + [x] + i[j:]\n result.append(new_i)\n\n return result\n\n # else:\n # result = []\n # for i in range(len(lst)):\n # x = lst[i]\n # xs = lst[:i] + lst[i + 1:]\n # for p in permutations(xs):\n # result.append([x] + p)\n # return result", "def all_email_permuter(first_name, middle_name, last_name, domain_name):\n first_name = first_name.lower()\n middle_name = middle_name.lower()\n last_name = last_name.lower()\n domain_name = domain_name.lower()\n\n print(f\"Name: 
{first_name}-{middle_name}-{last_name}-{domain_name}\")\n\n all_names = [\n [first_name, first_name[0]],\n [middle_name, middle_name[0]],\n [last_name, last_name[0]]\n ]\n\n # print(\"All Name: \", all_names)\n\n punctuations = \". _ \".split()\n\n # print(\"punctuations: \", punctuations)\n # cartesian product with 2 punctuations\n cartesian_prod_A = list(product(\n all_names[0],\n all_names[1],\n all_names[2],\n punctuations,\n punctuations\n ))\n print(\"\\ncartesian_prod_A: \", cartesian_prod_A)\n\n # cartesian product with 2 punctuations\n cartesian_prod_B = list(product(\n all_names[0],\n all_names[1],\n all_names[2],\n punctuations\n ))\n\n print(\"\\ncartesian_prod_B: \", cartesian_prod_B)\n\n # cartesian product without punctuations\n cartesian_prod_C = list(product(all_names[0], all_names[1], all_names[2]))\n\n # print(\"cartesian_prod_C: \", cartesian_prod_C)\n # List comprehension method\n # combinations = [s for x in cartesian_prod_A for s in permutations(\n # x, 3) if s[0] not in punctuations if s[-1]not in punctuations]\n # print(\"combination 2: \", combinations)\n\n combinations = []\n\n for x in cartesian_prod_A:\n permutations_A_5 = permutations(x, 5)\n\n for s in permutations_A_5:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n\n permutations_A_4 = permutations(x, 4)\n\n for s in permutations_A_4:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n # print(\"xy: \", combinations)\n\n permutations_A_3 = permutations(x, 3)\n\n for s in permutations_A_3:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n # print(\"xyz: \", combinations2)\n\n print(\"combination A: \", len(combinations))\n # print(\"combination A: \", combinations)\n\n for x in cartesian_prod_B:\n permutations_A_4 = permutations(x, 4)\n\n for s in permutations_A_4:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n # print(\"xy: \", combinations)\n\n permutations_A_3 = permutations(x, 3)\n\n for s in permutations_A_3:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n # print(\"xyz: \", combinations2)\n\n print(\"combination B: \", len(combinations))\n\n\n# combinations.extend([\"\".join(s) for x in cartesian_prod_C for s in permutations(\n# x, 2) if s[0] not in punctuations if s[-1]not in punctuations])\n#\n# print(\"combination 2: \", combinations)\n\n # combinations2 = []\n for x in cartesian_prod_C:\n permutations_B_3 = permutations(x, 3)\n\n for s in permutations_B_3:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n\n permutations_B_2 = permutations(x, 2)\n\n for s in permutations_B_2:\n if s[0] not in punctuations:\n if s[-1] not in punctuations:\n combinations.append(s)\n\n print(\"after prod_c: \", len(combinations))\n\n#\n combinations = [\"\".join(s) for s in combinations]\n#\n# print(\"combination 3: \", combinations)\n#\n combinations.extend([first_name, middle_name, last_name])\n#\n print(\"total size: \", len(combinations))\n\n # # set method is used to convert any of the iterable to sequence of iterable elements with distinct elements.\n combinations = list(set(combinations))\n print(\"filtered length: \", len(combinations))\n sorted_combinations = sorted(combinations)\n\n new_comb = []\n for x in sorted_combinations:\n flag = True\n for i, j in zip(x, x[1:]):\n if i in punctuations:\n if j in punctuations:\n flag = False\n break\n\n if flag is True:\n 
new_comb.append(x)\n\n print(\"new_comb: \", len(new_comb))\n\n print(new_comb)\n\n#\n permuted_emails = [f\"{s}@{domain_name}\" for s in combinations]\n # print(\"permuted_emails: \", permuted_emails)\n\n return permuted_emails", "def apply_permutation(original_list, new_list, p):\n for i in range(0, len(p)):\n new_list[i] = original_list[p[i]]\n\n return new_list", "def group_anagrams(strs):\n str_dict = {}\n for string in strs:\n str_sorted = ''.join(sorted(string))\n if str_sorted not in str_dict:\n str_dict[str_sorted] = [string]\n else:\n str_dict[str_sorted].append(string)\n result = []\n for v in str_dict.values():\n if len(v) > 1:\n result.extend(v)\n return result", "def permute(l):\n\n perms = []\n\n if len(l) == 0:\n return []\n elif len(l) == 1:\n return [l]\n else:\n for i in l:\n i_perms = permute([j for j in l if j != i])\n for perm in i_perms:\n perms += [[i] + perm]\n return perms", "def generate_permutations(arr, pos=0):\n if pos == len(arr):\n output.append(''.join(arr))\n return\n for i in range(len(arr)):\n swap(arr, pos, i)\n generate_permutations(arr, pos + 1)\n swap(arr, pos, i)", "def character_trigrams_split_word(input_string):\n return list(map(\"\".join, zip(*[input_string[i:] for i in range(3)])))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes a set of words and filters out the ones with letters not in the anagram.
def filter_words(words): word_bag = set() for line in words: word = line.rstrip('\n') if (is_anagram(word)): word_bag.add(word) return word_bag
[ "def filterCipher(charsequence):\n return filter(lambda x: x in anagram_characters, charsequence)", "def find_matching_words(anagram, word_list):\r\n pass", "def unscramble_words(scrambled_words, word_list):\n output = []\n for i in scrambled_words:\n for k in word_list:\n if len(i) > len(k):\n if anagram(i, k):\n output.append(k)\n else:\n if(anagram(k, i)):\n output.append(k)\n print(output)\n return output", "def keepWord(words, letter):\n return [word for word in words if letter in word]", "def test_remove_unusable_words(self):\n dictionary = {3715217: ['sett', 'test'], 451: ['me'], 131387: ['pls']}\n test_dict = anagram_generator.remove_unusable_words(dictionary, list('test'))\n self.assertDictEqual({3715217: ['sett', 'test']}, test_dict)", "def find_anagrams(words):\n anagrams = {}\n\n for word in words:\n anagrams.setdefault(alphabetize(word), [word])\n if word not in anagrams[alphabetize(word)]:\n anagrams[alphabetize(word)].append(word)\n\n return anagrams", "def filter_letters(letter_strings):\n # There is probably a cute one liner, but this is easy to follow and\n # probably same speed\n unique_letters = set()\n if isinstance(letter_strings, str):\n letter_strings = [letter_strings]\n for string in letter_strings:\n if string: # Catch possible None values\n for letter in string:\n unique_letters.add(letter)\n try:\n retval = ''.join(sorted(unique_letters))\n except:\n reval = ''\n return retval", "def check_anagram(word):\r\n pass", "def removeWord(words, letter):\n return [word for word in words if letter not in word]", "def is_anagram(word1, word2):\n \n word1_list = [i for i in word1.lower() if i != \" \"]\n word2_list = [j for j in word2.lower() if j != \" \"]\n \n word1_list.sort()\n word2_list.sort()\n \n return word1_list == word2_list\n pass", "def find_anagrams_from_phrase(phrase_dict, words):\n sub_anagrams = []\n\n for word in words:\n word_dict = Counter(word.lower())\n if phrase_dict & word_dict == word_dict:\n sub_anagrams.append(word)\n return sub_anagrams", "def anagram(main_str, str_list):\n return [_str for _str in str_list if str_list and Counter(_str) == Counter(main_str)]", "def filter_word(sequence, filter_word_list):\n if isinstance(sequence[0], str):\n return [x for x in sequence if x not in filter_word_list]\n return [[x for x in i if x not in filter_word_list] for i in sequence]", "def is_isogram(word):\n letters = set()\n for letter in word.lower():\n if letter in letters:\n return False\n letters.add(letter)\n\n return True", "def set_words(self):\n words = possible_words(self.letters)\n self.word_set = {word for word in words if self.letters[0] in word}", "def get_allowed_letters(self, word, index):\n words = self.search(word)\n return set([w[0][index] for w in words])", "def test_filter_anagrams(self):\n\n anagrams = collect_anagrams(self.raw_data, min_word_length=4)\n filtered = list(filter_anagrams(anagrams, match_word_length=True))\n\n self.assertEqual(len(filtered), 2)\n\n anagrams = collect_anagrams(self.raw_data, min_word_length=4)\n filtered = list(filter_anagrams(anagrams))\n\n self.assertEqual(len(filtered), 5)", "def avoids(word, forbidden):\n # Feels like there should be a more efficient way to do this using\n # set intersection, but I'll just check the word character by character\n for letter in forbidden:\n if word.find(letter)!=-1:\n return False\n return True", "def get_distinct_letters(data):\n dist_letters = []\n for word in data.word_lst:\n for letter in word:\n if letter not in dist_letters:\n dist_letters.append(letter)\n for 
letter in data.result:\n if letter not in dist_letters:\n dist_letters.append(letter)\n return dist_letters" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
builds phrases of a certain length using word_candids_map and checks if the phrase is one of the phrases we are looking for.
def search_for_phrases(word_candids_map, phrase_len, word_len_map, start_time): candid_comb_len = phrase_len - 1 phrase = [""] * phrase_len # take one word at a time and build phrases with it and different # combination of its candidates for word, candids in word_candids_map.items(): candid_combos = combinations(candids, candid_comb_len) phrase[0] = word for combo in candid_combos: # build up the phrase and calculate its length phrase_length = word_len_map[word]+candid_comb_len for i, w in enumerate(combo): phrase[i+1] = w phrase_length += word_len_map[w] if(phrase_length == ANAGRAM_LENGTH+(candid_comb_len-2)): # check if the phrase built can be an anagram if(is_anagram("".join(phrase))): # look at all the different arrangement of words in phrase phrase_permuts = permute_words(phrase, phrase_len) for maybe_the_one in phrase_permuts: check_hash(maybe_the_one, start_time) # let the caller know when all the phrases are found if(targets_found == HASHES_LEN): return True # tell the caller that there are still phrases to find return False
[ "def match_phrases(post, common_words, phrase_length, phrase_map):\n result = set()\n trimmed_words = post.words_removed(common_words, True)\n for i in range(len(trimmed_words) - (phrase_length - 1)):\n phrase = tuple(trimmed_words[i:i+phrase_length])\n try:\n result.add(phrase_map[phrase])\n except KeyError:\n pass\n return result", "def get_word_list(wordmap, ids, word_length, clue):\n\n result_list = []\n clue = clue.split(' ')\n for (id, rank) in ids:\n word = wordmap[id][0]\n if len(word) == word_length and \\\n word.lower() == word \\\n and all(char not in word for char in ['.', ',', ':', '?', '/', '!']) \\\n and word not in clue :\n \n result_list.append(Answer(wordmap[id][0], rank))\n return result_list", "def findmatches(phrase_list, outlst, dw, subst_map={}, outstr=\"\"):\n if len(phrase_list) == 0:\n outlst.append(outstr[:-1])\n # print outstr\n return\n for word in phrase_list[0][1]: # iterate over possible mappings of first word\n word = word.upper()\n newmap = validword(word, phrase_list[0][0], subst_map) # uses deepcopy\n if newmap != {}:\n # only go deeper if we're onto something promising\n findmatches(phrase_list[1:], outlst, dw,\n newmap, outstr + word + \" \")", "def find_anagrams_from_phrase(phrase_dict, words):\n sub_anagrams = []\n\n for word in words:\n word_dict = Counter(word.lower())\n if phrase_dict & word_dict == word_dict:\n sub_anagrams.append(word)\n return sub_anagrams", "def find_matching_words(anagram, word_list):\r\n pass", "def wordphrases(self, t):\n count = 0\n words = t.split(\" \")\n new_words = []\n # First handling the case where the text is just one word :\n # cannot generate any bigram.\n if len(words) == 1:\n new_words = words\n # Then regular cases :\n else:\n j = 0\n while j < (len(words) - 1): # = for each word in the sentence\n big = (\n words[j],\n words[j + 1],\n ) # getting the (j-th, j+1-th)words\n # writing the corresponding bigram :\n bigrams = self.parsing_char_.join(big)\n # If the bigram is enough frequent to be gathered :\n if bigrams in self.phrasewords_:\n # Then add the bigram as a new word in 'new_sent_sent'\n new_words.append(\"_\".join(big))\n count = count + 1 # Count the number of gathered\n # bigrams\n # Directly go to the j+2-th word in order to avoid\n # repeating the j+1-th word\n j = j + 2\n # If the bigram is not frequent enough :\n else:\n if j == (len(words) - 2):\n new_words.append(words[j])\n new_words.append(words[j + 1])\n j = j + 2\n # Add j-th word\n else:\n new_words.append(words[j])\n # Go to j+1-th word\n j = j + 1\n\n return \" \".join(new_words)", "def build_the_phrase(self, files_map):\n raise NotImplementedError()", "def hard_words(word_list):\n dict_max = 0\n for word in word_list:\n if len(word) > dict_max:\n dict_max = len(word)\n\n length = random.randint(8, dict_max)\n constraints = init_constraint(length)\n return ''.join(constraints)", "def search_phrase(self, index): \r\n\r\n \r\n # Boolean AND of all phrase terms\r\n\r\n answer = None # Stores set of docid\r\n \r\n for term in self.text:\r\n \r\n if index[0].has_key(term):\r\n\r\n if answer == None: # first term \r\n answer = set(index[0][term].keys())\r\n \r\n else:\r\n # Perform set intersection of sets of docid for each term\r\n answer = answer.intersection(set(index[0][term].keys()))\r\n\r\n else:\r\n answer = set() # Term has not been found. 
answer is now empty set.\r\n \r\n # checking the positions of the phrase terms \r\n \r\n t1 = self.text[0] # first term in the phrase\r\n final = set()\r\n\r\n for docid in answer:\r\n t1_positions = index[0][t1][docid][0] # set\r\n for pos in t1_positions: \r\n \r\n flag = 0\r\n\r\n for i in range(1, len(self.text)): \r\n t = self.text[i]\r\n ti_positions = index[0][t][docid][0] # set of positions for ith term in the phrase in that docid \r\n if pos + i not in ti_positions:\r\n flag = 1\r\n break \r\n \r\n if flag == 0: # indicates an occurence of the phrase in that docid\r\n break\r\n\r\n if flag == 0:\r\n final.add(docid) # add the docid that has an occurence of the phrase to the results\r\n \r\n return final", "def check_spelling(checked_word, dist, word_list):\n alphabet = set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', \n 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'])\n candidates = set([])\n \n for word in word_list:\n smtrx = seq.build_scoring_matrix(alphabet, 2, 1, 0)\n amtrx = seq.compute_alignment_matrix(checked_word, word, smtrx)\n score = seq.compute_global_alignment(checked_word, word, smtrx, amtrx)[0]\n if len(checked_word) + len(word) - score <= dist:\n candidates.add(word)\n \n return candidates", "def check_spelling(checked_word, dist, word_list):\n\n # Initialize variables for this function\n _words_found = set()\n _len_checked = len(checked_word)\n _counter = 0\n\n # Generate the scoring matrix needed to compare the word\n _str_alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n _alphabet = set(_str_alphabet)\n scoring_matrix = sa.build_scoring_matrix(_alphabet, 2, 1, 0)\n\n # Iterate through the word list to find the words within distance\n for _word in word_list:\n _counter += 1\n _alignment_matrix = sa.compute_alignment_matrix(checked_word, _word, scoring_matrix)\n if ( _len_checked + len(_word) -\n _alignment_matrix[_len_checked][len(_word)] ) <= dist:\n _words_found.add(_word)\n if _counter % 1000 == 0:\n print \"..........\", _counter, \"words processed.\"\n\n return _words_found", "def _compile_vocabulary(self, phrases):\n text = \" \".join([(\"<s> %s </s>\" % phrase) for phrase in phrases])\n self._logger.debug('Compiling languagemodel...')\n vocabulary = self._compile_languagemodel(text, self.languagemodel_file)\n self._logger.debug('Starting dictionary...')\n self._compile_dictionary(vocabulary, self.dictionary_file)", "def match_words(post, common_words, freq_gt, word_map):\n trimmed_words = post.words_removed(common_words, True)\n return {word_map[w] for w in trimmed_words if w in word_map}", "def check_spelling(checked_word, dist, word_list):\r\n \r\n len_checked = len(checked_word)\r\n alphabet = ''.join(chr(i) for i in range(ord('a'), ord('z')+1))\r\n scoring_matrix = build_scoring_matrix(alphabet, 2, 1, 0)\r\n ans = set([])\r\n \r\n for word in word_list:\r\n global_ali_mx = compute_alignment_matrix(checked_word, word, scoring_matrix, True)\r\n score = compute_global_alignment(checked_word, word, scoring_matrix, global_ali_mx)\r\n \r\n if len_checked + len(word) - score[0] <= dist:\r\n ans.add(word)\r\n \r\n return ans", "def matches_phrases(self, phrases):\n return (self.compiled_revision == self.phrases_to_revision(phrases))", "def blooms_suggestion(in_string):\n create_words = ['design', 'assembl', 'construct', 'conjectur', 'develop',\n 'formulat', 'author', 'investigat', 'creat', 'adapt', 'plan',\n 'produc', 'buil', 'solv', 'compos', 'think', 'thought' 'theoriz', 'modif',\n 'improv']\n evaluate_words = ['apprais', 
'argu', 'defend', 'judg', 'select', 'support',\n 'valu', 'critiqu', 'weigh', 'evaluat', 'assess', 'compar', 'conclud',\n 'debat', 'decid', 'measur', 'opinion', 'prov', 'support', 'test', \n 'validat', 'interpret']\n analyze_words = ['differentiat', 'organiz', 'relat', 'compar', 'contrast',\n 'distinguish', 'examin', 'experiment', 'question', 'test',\n 'analyz', 'arrang', 'breakdown', 'categoriz', 'differen',\n 'dissect', 'inspect', 'research', 'highlight', 'find', 'question']\n apply_words = ['execut', 'implement', 'solv', 'use', 'using' \n 'interpret', 'operat', 'schedul', 'sketch', 'appl',\n 'act', 'administer', 'build', 'choos', 'connect', 'construct', 'develop',\n 'teach', 'plan', 'employ', 'demonstrat', 'show', 'analysis']\n understand_words = ['describ', 'explain', 'identif', 'locat', 'recogniz', 'report', \n 'select', 'translat', 'understand', 'ask', 'cit', 'classif', \n 'compar', 'contrast', 'discuss', 'rephrase', 'infer', 'summariz', \n 'purpos', 'show', 'demonstrat', 'express', 'example','exemplif', 'comprehend']\n remember_words = ['defin', 'duplicat', 'list', 'memoriz', 'repeat', 'stat',\n 'remember', 'copy', 'recogniz', 'tell', 'retell', 'reproduc',\n 'recit', 'read', 'knowledge']\n score_dict = {\n 'Evaluation' : 0,\n 'Synthesis' : 0,\n 'Analysis' : 0,\n 'Application' : 0,\n 'Comprehension' : 0,\n 'Knowledge' : 0,\n }\n\n low_string = in_string.lower()\n\n score_dict[\"Evaluation\"] = count_level_score(evaluate_words,low_string)\n score_dict[\"Synthesis\"] = count_level_score(create_words,low_string)\n score_dict[\"Analysis\"] = count_level_score(analyze_words,low_string)\n score_dict['Application'] = count_level_score(apply_words,low_string)\n score_dict['Comprehension'] = count_level_score(understand_words,low_string)\n score_dict[\"Knowledge\"] = count_level_score(remember_words,low_string)\n suggestion = max(score_dict, key=score_dict.get)\n \n if score_dict[suggestion] == 0:\n suggestion = 'none'\n\n return(suggestion)", "def _spellcheck(filename, dictionaries=['.dict4spell.txt'], newdict=None,\n remove_multiplicity=False, strip_file='.strip'):\n\n try:\n f = open(filename, 'r')\n except IOError:\n print '\\nfile %s does not exist!' 
% filename\n _abort()\n\n verbose = 1 if option('debug') else 0\n\n text = f.read()\n f.close()\n\n # Remove inline verbatim and !bc and !bt blocks\n text2 = re.sub(r'`.+?`', '`....`', text) # remove inline verbatim\n code = re.compile(r'^!bc(.*?)\\n(.*?)^!ec *\\n', re.DOTALL|re.MULTILINE)\n text2 = code.sub('', text2)\n tex = re.compile(r'^!bt\\n(.*?)^!et *\\n', re.DOTALL|re.MULTILINE)\n text2 = tex.sub('', text2)\n\n # First check for double words\n\n pattern = r\"\\b([\\w'\\-]+)(\\s+\\1)+\\b\"\n found = False\n offset = 30 # no of chars before and after double word to be printed\n start = 0\n while start < len(text2)-1:\n m = re.search(pattern, text2[start:])\n if m:\n # Words only\n word = m.group(0)\n try:\n [float(w) for w in word.split()]\n is_word = False\n except ValueError:\n # Drop words with underscore, ...\n #drop = ['_', '--',\n is_word = '_' not in word\n\n if is_word:\n print \"\\ndouble words detected in %s (see inside [...]):\\n------------------------\" % filename\n print \"%s[%s]%s\\n------------------------\" % \\\n (text2[max(0,start+m.start()-offset):start+m.start()],\n word,\n text2[start+m.end():min(start+m.end()+offset,\n len(text2)-1)])\n found = True\n start += m.end()\n else:\n break\n if found:\n pass\n #print '\\nAbort because of double words.'\n #sys.exit(1)\n\n # Continue with spell checking\n\n if os.path.isfile(strip_file):\n execfile(strip_file)\n else:\n environments = []\n replacements = []\n common_typos = []\n # Add standard definitions (above)\n environments += _environments\n replacements += _replacements\n common_typos += _common_typos\n\n # Add standard latex definitions when spellchecking latex\n if os.path.splitext(filename)[1] == '.tex':\n # Make sure to do latex first (\\label{} before label{})\n environments = _latex_environments + environments\n replacements = _latex_replacements + replacements\n\n\n _grep_common_typos(text, filename, common_typos)\n\n text = _strip_environments(text, environments, verbose)\n #print 'Text after environment strip:\\n', text\n\n text = _do_regex_replacements(text, replacements, verbose)\n #print 'Text after regex replacements:\\n', text\n\n # Write modified text to scratch file and run ispell\n scratchfile = 'tmp_stripped_%s' % filename\n f = open(scratchfile, 'w')\n text = text.replace(' ', ' ').replace('\\n\\n', '\\n')\n f.write(text)\n f.close()\n personal_dictionaries = []\n p_opt = '' # personal dictionary specification for ispell\n for dictionary in dictionaries:\n if os.path.isfile(dictionary):\n p_opt += \" -p`pwd`/%s\" % dictionary\n f = open(dictionary, 'r')\n personal_dictionaries += f.readlines()\n f.close()\n else:\n print 'Dictionary file %s does not exist.' 
% dictionary\n\n personal_dictionaries = list(sets.Set(personal_dictionaries))\n misspellings = 'tmp_misspelled_' + filename + '~'\n cmd = 'cat %s | ispell -l -t -d american %s > %s' % \\\n (scratchfile, p_opt, misspellings)\n #cmd = 'cat %s | aspell -t -d american list %s > %s'\n system(cmd)\n\n # Load misspellings, remove duplicates\n f = open(misspellings, 'r')\n words = f.readlines()\n f.close()\n words2 = list(sets.Set(words)) # remove multiple words\n if len(words2) > 0: # do we have misspellings?\n print '%d misspellings in %s' % (len(words2), filename)\n if remove_multiplicity:\n f = open(misspellings, 'w')\n f.write(words2)\n f.close()\n else:\n os.remove(misspellings)\n\n # Make convenient updates of personal dictionaries\n if newdict is not None:\n accepted_words = words2 + personal_dictionaries\n if os.path.isfile(newdict):\n f = open(newdict, 'r')\n newdict_words = f.readlines()\n f.close()\n newdict_add = words2 + newdict_words\n newdict_add = sorted(list(sets.Set(newdict_add)))\n union = accepted_words + newdict_words\n union = sorted(list(sets.Set(union)))\n #print '%s %d: %d misspellings (%d from personal dicts) -> %d' % (newdict, len(newdict_words), len(words2), len(personal_dictionaries), len(union))\n else:\n union = accepted_words\n newdict_add = words2\n # union is the potentially new personal dictionary\n #\n f = open(newdict, 'w')\n f.writelines(newdict_add)\n f.close()\n f = open('new_dictionary.txt~', 'w')\n f.writelines(union)\n f.close()\n #if len(newdict_add) > 0:\n # print '%s: %d, %s: %d items' % (newdict, len(newdict_add), 'new_dictionary.txt~', len(union))", "def includes_phrase(self, text):\n\n phrase = self.phrase.lower()\n phrase_words = phrase.split(' ')\n\n # remove punctuation\n text = [' ' if c in string.punctuation else c for c in text.lower()]\n text_words = [word for word in ''.join(text).split(' ') if len(word)]\n\n if len(phrase_words) == 1:\n return phrase in text_words\n\n # work through multiple words\n try:\n start_w_index = text_words.index(phrase_words[0])\n phrase_word_count = 1\n index = start_w_index + phrase_word_count\n status = False\n\n # as long as other words follow\n while index < len(text_words):\n if phrase_words[phrase_word_count] == text_words[index]:\n phrase_word_count += 1\n else: # word is not in phrase\n break\n if phrase_word_count == len(phrase_words): # all words\n status = True\n break\n index += 1\n return status\n except ValueError: # first phrase word not in text\n return False", "def get_jokes(number, flag=1):\n\n nouns = [\"автомобиль\", \"лес\", \"огонь\", \"город\", \"дом\"]\n adverbs = [\"сегодня\", \"вчера\", \"завтра\", \"позавчера\", \"ночью\"]\n adjectives = [\"веселый\", \"яркий\", \"зеленый\", \"утопичный\", \"мягкий\"]\n\n size = len(nouns)\n if number > size and not flag:\n return print(f\"Impossible to create phrases with non-repeated words. \"\n f\"Please, chose number less than {size + 1}.\")\n\n for i in range(number):\n idx_noun, idx_adv, idx_adj = r.randrange(size)\n\n noun = nouns[idx_noun]\n adverb = adverbs[idx_adv]\n adjective = adjectives[idx_adj]\n\n print(f'{noun} {adverb} {adjective}')\n\n if not flag and size:\n # replace selected element with the last element of the list\n # reduce size by one\n nouns[idx_noun] = nouns[size - 1]\n adverbs[idx_adv] = adverbs[size - 1]\n adjectives[idx_adj] = adjectives[size - 1]\n size -= 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Place elements at starting thing locations.
def place_starts(self): # Create a list of things that the grid generation starts at. start_things = [] for thing_type in self.config.start_thing_types: start_things.extend(self.map_data.get_thing_list(thing_type)) # Add the initial things as initial elements to the navigation grid. for thing in start_things: pos = Vector3() pos.x = thing.x pos.y = thing.y pos.z = self.map_data.get_floor_z(pos.x, pos.y) collision, _ = self.collider.check_position(pos, self.config.player_radius, self.config.player_height) if collision == True: print 'Thing at {} has no room to spawn, ignoring.'.format(pos) continue self.add_walkable_element(pos) # Add teleporter destinations as starting elements. for teleporter in self.map_data.teleporters: if teleporter.kind == Teleporter.TELEPORTER_THING: dest = Vector3() dest.x = teleporter.dest.x dest.y = teleporter.dest.y else: dest = Vector3() dest.x, dest.y = self.map_data.get_line_center(teleporter.dest_line) dest.z = self.map_data.get_floor_z(dest.x, dest.y) collision, _ = self.collider.check_position(dest, self.config.player_radius, self.config.player_height) if collision == True: print 'Teleporter destination at {} has no room to spawn, ignoring.'.format(dest) continue self.add_walkable_element(dest) print 'Added {} starting elements.'.format(len(start_things))
[ "def place_grid(grid, final_grid, start_x):\n for x in range(grid.width):\n for y in range(grid.height):\n cell = grid.get_cell(x, y)\n final_grid.set_cell(start_x + x, y, cell)", "def setup_locators(self):\n for i in range(0, 9):\n locator = cmd.spaceLocator(n=self.locators[i], a=True)[0]\n self.locators.append(locator)\n cmd.xform(locator, t=self.initlocPos[i], ws=True)\n cmd.select(clear=True)", "def adjustNorthElements():\n mxd = arcpy.mapping.MapDocument(\"CURRENT\")\n insetLst = arcpy.mapping.ListDataFrames(mxd)[1:]\n northArrowLst = arcpy.mapping.ListLayoutElements(mxd, wildcard=\"*north*\")\n\n # Reset inset data frames.\n for index, df in enumerate(insetLst):\n northArrowLst[index].elementHeight = 0.5666\n northArrowLst[index].elementWidth = 0.272\n northArrowLst[index].elementPositionY = df.elementPositionY + df.elementHeight - (northArrowLst[index].elementHeight / 2) - 0.25\n northArrowLst[index].elementPositionX = df.elementPositionX + df.elementWidth - (northArrowLst[index].elementWidth / 2) - 0.25", "def startingLocations():\n\tfor i in range(len(boot.locations)):\n\t\tname = list(boot.locations)[i]\n\t\tname = data.Location(boot.locations[list(boot.locations)[i]][0],boot.locations[list(boot.locations)[i]][1],boot.locations[list(boot.locations)[i]][2],boot.locations[list(boot.locations)[i]][3],boot.locations[list(boot.locations)[i]][4])\n\t\tlocations.append(name)", "def grow(self):\n self.parts.insert(0, self.head_position().next(self.direction))", "def insert_blanks(self, insert_indices):\n if self.coordinates is None or self.singular:\n return\n self.coordinates = np.insert(self.coordinates, insert_indices, -1,\n axis=1)", "def place_agent(self):\n if self.initial_position is None:\n indx = np.random.randint(0, len(self.possible_locations))\n self.position = self.possible_locations[indx]\n else:\n self.position = np.copy(self.initial_position)", "def position_at_beginning(self, bblk):\n check_is_basic_block(bblk)\n # Avoids using \"blk.instructions\", which will fetch all the\n # instructions into a list. 
Don't try this at home, though.\n inst_ptr = _core.LLVMGetFirstInstruction(bblk.ptr)\n if inst_ptr:\n # Issue #10: inst_ptr can be None if b/b has no insts.\n inst = _make_value(inst_ptr)\n self.position_before(inst)", "def calc_initial_position(self, words):\n D = np.array([word.distance for word in words])\n positions = mathematics.mds(D)\n logger.debug('initial positions by MDS are following:')\n for (i, position) in enumerate(positions):\n logger.debug('%s, %s, %s' % (words[i].surface, position['x'], position['y']))\n words[i].x = position['x']\n words[i].y = position['y']\n return words", "def reinsert(self):\n vec1 = self.get_random_keys(1)\n nr_1 = self._get_random_nr(vec1[0])\n self.move_elem(vec1[0], vec1[0], nr_1)", "def insert_before_element(self, item, element):\n if item is not None and element is not None:\n element.insert_before(item)\n else:\n raise IndexError", "def _set_all_lines_to_initial_positions(self) -> Paragraph:\n self.lines[1] = [None] * len(self.lines[0])\n for line_no in range(len(self.lines[0])):\n self[line_no].move_to(\n self.get_center() + self.lines_initial_positions[line_no],\n )\n return self", "def insert_before(self, *nodes):\n self.parent_node.insert(self.index(), *nodes)", "def moveToFirst(self):\n pass", "def create_locators(self):\n mc.spaceLocator(n='cn_headroot_jnt_L')\n mc.spaceLocator(n='cn_low_jaw_jnt_L')\n mc.move(0, 2, 0)\n mc.spaceLocator(n='cn_low_jaw_tip_jnt_L')\n mc.move(0, 4, 0)", "def place_spawn_items(width, height, cell_size, xmin, ymin, punishement=True):\n #Still the original function\n #Initialize\n item_start = 20;\n num_items = 4\n output=[]\n items = []\n locations=[]\n punishements=[]\n \"\"\"\n #Place spawn at random\n spawn_i = random.randint(0, width-1)\n spawn_j = random.randint(0, height-1) \n locations.append((spawn_i,spawn_j))\"\"\"\n #Place spawn not randomly\n spawn_i = 0\n spawn_j = height//2\n \n #Generate output\n spawn = (xmin + spawn_i*cell_size + cell_size/2, ymin + spawn_j*cell_size + cell_size/2)\n \n #this chooses which items \n item_tids = [2018, 2019, 2012, 2013]\n colors = ['g','r', 'b', 'c']\n\n #let's place the items\n for i in range(num_items):\n item_i = random.randint(0, width-1)\n #item_j = random.randint(0, height-1)\n item_j = random.randint(0, 1)*(height-1)\n \n #This avoid items superposition\n while (item_i, item_j) in locations:\n item_i = random.randint(0, width-1)\n item_j = random.randint(0, 1)*(height-1) \n locations.append((item_i, item_j))\n \n #add in the wad output\n item_x = xmin + item_i*cell_size + cell_size/2\n item_y = ymin + item_j*cell_size + cell_size/2\n output += create_object(item_x, item_y, item_tids[i], item_start + i)\n \n #add the item\n items.append((item_x, item_y, colors[i]))\n \n #let's place the punishement items\n for i in range(width):\n #place in upper combs\n for j in range(height//2):\n if not (0,j) in locations:\n idx=100*len(punishements)+2-j%((height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n #place in lower combs\n for j in range(h//2+1,height):\n if not (width, j) in locations:\n idx=100*len(punishements)+(j-height//2)%((height-height//2)/2)\n item_x = xmin + i*cell_size + cell_size/2\n item_y = ymin + j*cell_size + cell_size/2\n tid=2013\n output += create_object(item_x, item_y, tid, idx) # unvisible=True)\n items.append((item_x, item_y, colors[2]))\n \n return items, output, 
spawn", "def position_atoms(self):\n atoms = []\n for i in range(self._atoms):\n x = random.randint(1,8)\n y = random.randint(1,8)\n atoms.append([x,y])\n return atoms", "def __initialise_grid_position(self):\n\n self.grid_position = self.start_position", "def add_unit(self):\n if len(self.body) != 0: # Check if just the head or not\n # Create the new element on the last\n index = len(self.body) - 1\n pos_x = self.body[index][0]\n pos_y = self.body[index][1]\n\n self.body.append([pos_x, pos_y])\n else:\n # Set the segment outside the screen, it will be redraw\n # at the correct place after the mouvement by the if bloc\n self.body.append([1000, 1000])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove elements in the elements_prune set from the element list.
def remove_pruned_elements(self): # Filter prune elements from the element list. self.elements = filter(lambda element: element not in self.element_prune, self.elements) # Remove pruned elements from the element hash table. for element in self.element_prune: element_hash = element.pos.x + (element.pos.y * self.size.x) elements = self.element_hash.get(element_hash) if elements is None: return del elements[element.pos.z] if len(elements) == 0: del self.element_hash[element_hash] # Remove the now invalid element connections. for element in self.elements: for direction in Element.DIR_RANGE: if element.elements[direction] in self.element_prune: element.elements[direction] = None self.element_prune.clear()
[ "def subtract_list_from_other_via_set_diff(lista, elems_to_remove):\n s1 = set(lista)\n s2 = set(elems_to_remove)\n remains_in_s1 = s1 - s2\n lista = list(remains_in_s1)\n return lista", "def filter_out_elements(self, eles):\n\n if isinstance(eles, list):\n for ele in eles:\n self.dynamic_dataset.drop(\n self.dynamic_dataset.loc[self.dynamic_dataset['{} Loading'.format(ele)] > 0].index,\n inplace=True\n )\n else:\n self.dynamic_dataset.drop(columns=['{} Loading'.format(eles)], inplace=True)\n\n self.shuffle_dynamic_dataset()", "def remove_subsets(self, strict=False):\n # A dictionary with the type of each element as the key, and the element itself as the value\n typed_list = {}\n for element in self.models:\n if type(element) in typed_list.keys():\n typed_list[type(element)].append(element)\n else:\n typed_list[type(element)] = [element]\n new_models = []\n for _, elements in six.iteritems(typed_list):\n i = 0\n length = len(elements)\n to_remove = []\n # Iterate through the list of elements and if any subsets are found, add the\n # indices to a list of values to remove\n while i < length:\n j = 0\n while j < length:\n if i != j and elements[i].is_subset(elements[j]) and j not in to_remove:\n if strict and elements[i] == elements[j]:\n # Do not remove the element if it is not a strict subset depending on the value of strict\n pass\n else:\n to_remove.append(i)\n j += 1\n i += 1\n\n # Append any values that are not in the list of objects to remove\n i = 0\n while i < length:\n if i not in to_remove:\n new_models.append(elements[i])\n i += 1\n self.models = new_models", "def reddit_list_filter(self, roms_to_keep):\n for game in roms_to_keep:\n if not self.is_relevant(game):\n roms_to_keep.remove(game)\n\n return roms_to_keep", "def _get_elements_to_keep(self):\n return self._get_elements(self.xpaths_to_keep)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.transformer.layer[layer].attention.prune_heads(heads)", "def clean_isolated(\n self, obstructions: Tuple[GriddedPerm, ...], gp: GriddedPerm\n ) -> Set[GriddedPerm]:\n cleaned_obs: Set[GriddedPerm] = set()\n for ob in obstructions:\n cells_to_remove: Set[Cell] = set()\n for factor in ob.factors():\n if self._griddedperm_implied_by_requirement(factor, (gp,)):\n cells_to_remove.update(factor.pos)\n if cells_to_remove:\n cleaned_obs.add(ob.remove_cells(cells_to_remove))\n return cleaned_obs", "def _get_elements_to_discard(self):\n return self._get_elements(self.xpaths_to_discard)", "def remove_useless_nodes(self):\n if isinstance(self.elements, dict):\n useful_node_ids = np.unique(np.concatenate([\n np.ravel(v.data) for v in self.elements.values()]))\n else:\n useful_node_ids = np.unique(self.elements.data)\n original_sorted_indices = np.argsort(self.nodes.ids)\n original_node_ids = self.nodes.ids[original_sorted_indices]\n if len(original_node_ids) == len(useful_node_ids):\n if np.all(useful_node_ids == original_node_ids):\n return\n else:\n raise ValueError('Node IDs are inconsistent with elements')\n print('Nodes not used in elements found. 
Removing.')\n\n filter_useful_nodes = np.ones(len(original_node_ids), dtype=bool)\n original_node_index = 0\n useful_node_index = 0\n while useful_node_index < len(useful_node_ids):\n if original_node_ids[original_node_index] != useful_node_ids[\n useful_node_index]:\n filter_useful_nodes[original_node_index] = False\n original_node_index += 1\n continue\n\n original_node_index += 1\n useful_node_index += 1\n filter_useful_nodes[original_node_index:] = False\n useful_indices = original_sorted_indices[filter_useful_nodes]\n\n # Overwrite data\n self.nodes = FEMAttribute(\n self.nodes.name, self.nodes.ids[useful_indices],\n self.nodes.data[useful_indices])\n for key, value in self.nodal_data.items():\n self.nodal_data[key] = FEMAttribute(\n value.name, self.nodes.ids, value.data[useful_indices])\n return", "def purge(plugin_set):\n for ck, cv in BY_REQUIREMENTS.iteritems():\n for pk, pv in cv.iteritems():\n for p in pv:\n if p in plugin_set:\n plugin_set |= pk\n for ck, cv in BY_REQUIREMENTS.iteritems():\n for pk, pv in cv.iteritems():\n to_delete = set()\n for p in pv:\n if p not in plugin_set:\n to_delete.add(p)\n for p in to_delete:\n pv.remove(p)", "def remove_all(el, lst):\n\n while el in lst:\n lst.remove(el)", "def removeSets() :\n\tcleanSet = ['Blocking_Set', 'Proxy_Set', 'Render_Set', 'Anim_Set']\n\tsetGrps = mc.ls(type = 'objectSet')\n\n\tfor eachSet in setGrps : \n\t\tif eachSet in cleanSet : \n\t\t\tmc.delete(eachSet)", "def prune():\n prune_stacks()\n prune_repos()\n prune_batch_jobs()\n prune_images()", "def listRemoveElements(data,sel):\r\n for element in sel:\r\n for i in range(len(data)):\r\n if element == data[i]:\r\n data.pop(i)\r\n break;", "def remove_unused_edges(self, all_nodes, prevs):\n processed = set()\n all_nodes_local = all_nodes.copy()\n for act_node in all_nodes_local:\n if act_node in processed or not act_node:\n continue\n processed.add(act_node)\n if act_node.true_branch and (act_node.true_branch == act_node.false_branch):\n self.delete_node(act_node, True, all_nodes, prevs)\n continue\n if act_node.num_true or act_node.num_false:\n if act_node.num_true == 0:\n self.delete_node(act_node, False, all_nodes, prevs)\n continue\n if act_node.num_false == 0:\n self.delete_node(act_node, True, all_nodes, prevs)\n continue\n return self", "def drop_overly_pruned(self, prune_iter: int) -> Tuple[Tuple[nn.Module, str], ...]:\n # exclude param(layer)s to prevent 100% sparsity\n exclude_param_index: Set[int] = set()\n while len(exclude_param_index) != len(self.params_to_prune):\n pruner_cpy = copy.deepcopy(self)\n params_to_prune = pruner_cpy.update_params_to_prune(exclude_param_index)\n\n # try pruning\n pruner_cpy.prune_target_ratio(prune_iter, params_to_prune)\n if pruner_cpy.new_allzero_params(exclude_param_index):\n continue\n else:\n break\n\n # nothing to prune -> early stop\n if len(exclude_param_index) == len(self.params_to_prune):\n self.early_stop()\n\n # safely prunes\n return self.update_params_to_prune(exclude_param_index)", "def remove_duplicates(self):\n seen = set()\n self.nodes = [x for x in self.nodes if x not in seen and not seen.add(x)]", "def prune(self):\n self.branches = [b for b in self.branches if not self.contr(b)]", "def getSizePlueOneItemSet(Klist):\n candidate = list()\n for e in Klist:\n for f in Klist:\n a = e.union(f)\n if len(a) == len(e)+1:\n candidate.append(a)\n #print(candidate)\n #print(len(candidate))\n newlist = []\n for i in candidate:\n if i not in newlist:\n newlist.append(i)\n candidate = newlist\n #print(candidate)\n \"\"\" 
here is the normal pruning process \"\"\"\n newlist = []\n for e in candidate:\n counter = 0\n for f in globOriginalList:\n if(f.issuperset(e)):\n counter = counter+ 1\n if((counter/float(globNumberOfTransactions)) >= globMinSup):\n newlist.append(e)\n #print(len(candidate))\n return newlist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an element at the x,y,z coordinates, or None if no element exists at those coordinates.
def get_element_xyz(self, x, y, z): element_hash = x + (y * self.size.x) elements = self.element_hash.get(element_hash) if elements is not None: return elements.get(z) return None
[ "def getElementByCoordinates(x, y, z, dim=-1, strict=False):", "def get_object(self, x, y, z):\r\n for block in self._blocks:\r\n if (x, y, z) == block.location():\r\n return block\r\n if (x, y, z) == self._drone.location():\r\n return self._drone\r\n return None", "def getElementByCoordinates(x, y, z, dim=-1, strict=False):\n api_elementTag_ = c_size_t()\n api_elementType_ = c_int()\n api_nodeTags_, api_nodeTags_n_ = POINTER(c_size_t)(), c_size_t()\n api_u_ = c_double()\n api_v_ = c_double()\n api_w_ = c_double()\n ierr = c_int()\n lib.gmshModelMeshGetElementByCoordinates(\n c_double(x),\n c_double(y),\n c_double(z),\n byref(api_elementTag_),\n byref(api_elementType_),\n byref(api_nodeTags_), byref(api_nodeTags_n_),\n byref(api_u_),\n byref(api_v_),\n byref(api_w_),\n c_int(dim),\n c_int(bool(strict)),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshGetElementByCoordinates returned non-zero error code: \",\n ierr.value)\n return (\n api_elementTag_.value,\n api_elementType_.value,\n _ovectorsize(api_nodeTags_, api_nodeTags_n_.value),\n api_u_.value,\n api_v_.value,\n api_w_.value)", "def getLocalCoordinatesInElement(elementTag, x, y, z):", "def get_entity_at(self, x, y):\n for entity in self.entities:\n if entity.x == x and entity.y == y:\n return entity\n return None", "def get_element(coord, grid):\n row = coord[0]\n col = coord[1]\n return grid[row][col]", "def getLocalCoordinatesInElement(elementTag, x, y, z):\n api_u_ = c_double()\n api_v_ = c_double()\n api_w_ = c_double()\n ierr = c_int()\n lib.gmshModelMeshGetLocalCoordinatesInElement(\n c_size_t(elementTag),\n c_double(x),\n c_double(y),\n c_double(z),\n byref(api_u_),\n byref(api_v_),\n byref(api_w_),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshGetLocalCoordinatesInElement returned non-zero error code: \",\n ierr.value)\n return (\n api_u_.value,\n api_v_.value,\n api_w_.value)", "def element_at_or_default(self, n):\n try:\n return self.element_at(n)\n except NoElementsError:\n return None", "def get_root_element(self, position):\n if not self.covers_tile(position):\n return None\n x, y = position\n instance = self._element_grid[x][y]\n try:\n xx, yy = instance\n except (ValueError, TypeError):\n return instance\n return self._element_grid[x - xx][y - yy]", "def get_point(self, x: int, y: int) -> None:\n return self.grid[y][x]", "def lookup_tile(self, coords):\n if not self.tile_coords_valid:\n return None\n index = self.tile_coords_to_array_index(coords)\n return self.tiles[index.y, index.x]", "def element(**args):\n if len(args) != 1:\n raise TypeError(\"This routine accepts a single argument\")\n\n if args.get(\"symbol\"):\n symbol = args[\"symbol\"]\n for e in elements.all():\n if e.symbol() == symbol:\n return e\n return None\n elif args.get(\"atomic_number\"):\n atnum = args[\"atomic_number\"]\n for e in elements.all():\n if e.atomicNumber() == atnum:\n return e\n return None", "def _get(elements: Sequence[T], index: Optional[int]) -> Optional[T]:\n return None if index is None else elements[index]", "def locate_cell(self, x):\n x = np.asarray(x, dtype=np.float)\n cell = self._c_locator(self.coordinates._ctypes,\n x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))\n if cell == -1:\n return None\n else:\n return cell", "def getgeo(self, x: float, y: float, z: float) -> tuple[float, float, float]:\n logger.debug(\"input x,y(%s, %s)\", x, y)\n x -= self.refxyz[0]\n y = -(y - self.refxyz[1])\n if z is None:\n z = self.refxyz[2]\n else:\n z -= self.refxyz[2]\n px = 
self.refproj[0] + self.pixels2meters(x)\n py = self.refproj[1] + self.pixels2meters(y)\n lon, lat = self.to_geo.transform(px, py)\n alt = self.refgeo[2] + self.pixels2meters(z)\n logger.debug(\"result lon,lat,alt(%s, %s, %s)\", lon, lat, alt)\n return lat, lon, alt", "def get_objs(self,x,y):\n if self.inworldv(x,y):\n return self.objs[x][y]\n return None", "def xyValue(self, x, y):\n try:\n return self.board_array[x][y]\n except IndexError as e:\n print(\"IndexError in xyValue: {}\".format(str(e)))\n return None", "def point(x=0.,y=0.,z=0.):\n return Formex([[[x,y,z]]])", "def get_index(self, x):\n if x in self.d:\n return self.d[x]\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of elements at the 2d coordinates, or None if no list exists at those coordinates.
def get_element_list(self, pos2):
    element_hash = pos2.x + (pos2.y * self.size.x)
    return self.element_hash.get(element_hash)
[ "def get_objs(self,x,y):\n if self.inworldv(x,y):\n return self.objs[x][y]\n return None", "def points2D(self) -> tuple[Point2D, ...]:\n return tuple(map(Point2D, self._xs, self._ys))", "def get_coordinates(w, target):\n \n returned_list = []\n \n for i in range(len(w)):\n \n for j in range(len(w[i])):\n \n if w[i][j] == target:\n returned_list.append((i, j))\n \n return returned_list", "def get_objs_at_coordinates(self, coordinates):\n result = [\n item\n for item, item_coords in self.itemcoordinates.items()\n if item_coords == coordinates and item is not None\n ]\n return list(result)", "def returnGridAsArrayOfCoordinates(cls):", "def get_coordinates(self):\n resource_list = self.get_resource()\n coordinates = namedtuple('coordinates', ['latitude', 'longitude'])\n try:\n return [coordinates(*resource['point']['coordinates'])\n for resource in resource_list]\n except (KeyError, TypeError):\n try:\n if isinstance(resource_list, dict):\n resource_list = [resource_list]\n return [coordinates(resource['Point']['Latitude'],\n resource['Point']['Longitude'])\n for resource in resource_list]\n except (KeyError, ValueError) as exc:\n print(exc)", "def pix_coords(\n points:list,\n window:pygs.Window,\n pcsys:dict = pcsys\n ) -> list:\n return [pix_coord(point, window, pcsys) for point in points]", "def get_point_list(data):\n try:\n ls_pair = data.split(' ')\n except AttributeError:\n lnd_points = data.xpath(\"(.//@points)[1]\")\n s_points = lnd_points[0]\n ls_pair = s_points.split(' ')\n l_xy = list()\n for s_pair in ls_pair: # s_pair = 'x,y'\n (sx, sy) = s_pair.split(',')\n l_xy.append((int(sx), int(sy)))\n return l_xy", "def get_coordinates(self, variable: VariableDefinition) -> List[VariableDefinition]:\n coordinate_names = variable.get_coordinate_names()\n return [self.coords.get(coord_name) for coord_name in coordinate_names]", "def get_entities_at(self, x, y):\n result = []\n for entity in self.entities:\n if entity.x == x and entity.y == y:\n result.append(entity)\n return result", "def getFlattenCoords (self, skipOptional = True):\n return [d.coord for d in self.root.flatten (skipOptional)]", "def cartesian_to_list(cartesian_coordinate):\n x, y = cartesian_coordinate\n\n return y, x", "def coord_y(self) -> List[float]:\n if len(self.__points) == 0:\n return []\n if len(self.__points[0]) > 1:\n return [p[1] for p in self.points]", "def get_point_list(self, name):\r\n if name in self.point_lists:\r\n return self.point_lists[name]\r\n \r\n return None", "def getArticulationPoints(self) -> List[java.awt.geom.Point2D]:\n ...", "def points(self):\n return [self.point1, self.point2]", "def getElementByCoordinates(x, y, z, dim=-1, strict=False):", "def _extract_svg_coordinates_helper_function_(paths, number_of_samples=30):\n path_coordinates = []\n x_coord = []\n y_coord = []\n\n for idx in paths:\n for jdy in idx:\n for j in range(number_of_samples):\n path_coordinates.append(jdy.point(j / (number_of_samples - 1)))\n\n for k in range(len(path_coordinates)):\n xi = path_coordinates[k].real\n yi = path_coordinates[k].imag\n\n x_coord.append(xi)\n y_coord.append(yi)\n\n return list(zip(np.asarray(x_coord), np.asarray(y_coord)))", "def get_element(coord, grid):\n row = coord[0]\n col = coord[1]\n return grid[row][col]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maps a 2d map position to an element position.
def map_to_element(self, pos2):
    return ((pos2.x / self.element_size) + 1, (pos2.y / self.element_size) + 1)
[ "def element_to_map(self, pos2):\n \n return ((pos2.x * self.element_size) - (self.element_size / 2), (pos2.y * self.element_size) - (self.element_size / 2))", "def position_map(context, map):\n ob = context.active_object\n scn = context.scene\n nodes = map['mat'].node_tree.nodes\n #\n #Nodes Creation\n tex_coord = nodes.new(\"ShaderNodeTexCoord\")\n mapping = nodes.new(\"ShaderNodeMapping\")\n mapping.rotation[1] = 1.5708 #Radian rotation of 90 degrees in Y\n mapping.translation[0] = abs(min_vertex(ob.data, 'z'))\n gr_tex = nodes.new(\"ShaderNodeTexGradient\")\n #\n #Nodes linking\n links = map['mat'].node_tree.links\n links.new(tex_coord.outputs[3], mapping.inputs[0])\n links.new(mapping.outputs[0], gr_tex.inputs[0])\n links.new(gr_tex.outputs[0], map['output'].inputs[0])\n #\n #Baking\n map['mat'].node_tree.nodes.active = map['image_node']\n enable_color_bake_settings()\n bpy.ops.object.bake(type='DIFFUSE')\n return map['image']", "def cell_mapping(self,cell) :\n\n j = np.floor(cell/self.param.n_x)\n i = cell - j*self.param.n_x\n\n return i,j", "def translate_to_tile(self, tile_x, pos_x, tile_y, pos_y):\n x = int(tile_x) * DISPLAY_SIZE['x'] + pos_x\n y = int(tile_y) * DISPLAY_SIZE['y'] + pos_y\n return x, y", "def mkmapcoords(self, pts):\n return(mk_mapcoords(pts, self.vol_verts, self.element, self.dim))", "def __pos__(self):\n return _almathswig.Position2D___pos__(self)", "def __getitem__(self, pos):\n return tile.MapTile(self, pos)", "def pix2map(geoTransform, i, j):\n geoTransform = correct_geoTransform(geoTransform)\n if type(i) in (np.ma.core.MaskedArray, np.ndarray):\n are_two_arrays_equal(i, j)\n else: # if only a float is given\n i,j = correct_floating_parameter(i), correct_floating_parameter(j)\n\n x = geoTransform[0] + \\\n np.multiply(geoTransform[1], j) + np.multiply(geoTransform[2], i)\n\n y = geoTransform[3] + \\\n np.multiply(geoTransform[4], j) + np.multiply(geoTransform[5], i)\n\n # # offset the center of the pixel\n # x += geoTransform[1] / 2.0\n # y += geoTransform[5] / 2.0\n return x, y", "def map_coord(self, geom):\n coord = {\"skycoord\": self.radec}\n\n cols = {k.upper(): v for k, v in self.table.columns.items()}\n\n for axis in geom.axes:\n try:\n col = cols[axis.name.upper()]\n coord[axis.name] = u.Quantity(col).to(axis.unit)\n except KeyError:\n raise KeyError(f\"Column not found in event list: {axis.name!r}\")\n\n return MapCoord.create(coord)", "def map_to_surface(self, coords):\r\n return ((coords[0] - self.map_left) * self.sprite_size, \r\n (coords[1] - self.map_top) * self.sprite_size)", "def position2DFromTransform(pT):\n return _almathswig.position2DFromTransform(pT)", "def position2DFromTransformInPlace(pT, pPos):\n return _almathswig.position2DFromTransformInPlace(pT, pPos)", "def map_coord(self, coord):\n\n # if we arenty in bounds return -1\n if (not self.in_bounds(coord.y, coord.x)):\n return -1\n\n # else we can return the map value\n return self.map[coord.y][coord.x]", "def pixel_to_map(geotransform, coordinates):\n coordinates_map = np.empty(coordinates.shape)\n coordinates_map[..., 0] = (geotransform[0] +\n geotransform[1] * coordinates[..., 0] +\n geotransform[2] * coordinates[..., 1])\n coordinates_map[..., 1] = (geotransform[3] +\n geotransform[4] * coordinates[..., 0] +\n geotransform[5] * coordinates[..., 1])\n return (coordinates_map)", "def getLocalCoordinatesInElement(elementTag, x, y, z):", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, 
(self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def map(self, val=None, attr=\"position\", box=None, src=None): ###\n if val == None and box != None:\n val = getattr(box, attr)\n if val == None:\n return None\n if src == None:\n src = self.internal\n if src == None:\n src = self.__class__(rect=(0, 0, 1, 1))\n if attr in [\"size\"]:\n val = (\n self.map(val[0], \"width\", src=src),\n self.map(val[1], \"height\", src=src),\n )\n if attr in [\"position\"]:\n val = (self.map(val[0], \"x\", src=src), self.map(val[1], \"y\", src=src))\n if attr in [\"rect\"]:\n val = (\n self.map(val[0], \"left\", src=src),\n self.map(val[1], \"bottom\", src=src),\n self.map(val[2], \"right\", src=src),\n self.map(val[3], \"top\", src=src),\n )\n if attr in [\"left\", \"right\", \"x\"]:\n val = self.left + self.map(float(val) - src.left, \"width\", src=src)\n if attr in [\"bottom\", \"top\", \"y\"]:\n val = self.bottom + self.map(float(val) - src.bottom, \"height\", src=src)\n if attr in [\"width\"]:\n val = float(val) * self.width / src.width\n if attr in [\"height\"]:\n val = float(val) * self.height / src.height\n if box != None:\n setattr(box, attr, val)\n if attr in [\"position\"]:\n val = Point(val)\n if attr in [\"size\"]:\n val = Size(val)\n return val", "def maptransform(mmap, record):\n lons = [item[0] for item in record['geometry']['coordinates'][0]]\n lats = [item[1] for item in record['geometry']['coordinates'][0]]\n record['geometry']['coordinates'] = [[(x, y)\n for (x, y) in zip(mmap(lons, lats)[0], mmap(lons, lats)[1])]]\n return record", "def img2map(geoTransform, pixel):\r\n\r\n if len(geoTransform) != 6:\r\n raise Exception('Need 6 parameters for the geoTransform variable')\r\n\r\n if len(pixel) != 2:\r\n raise Exception('Need 2 dimensions for the pixel variable')\r\n\r\n if type(pixel[0]) == numpy.ndarray:\r\n mapx = []\r\n mapy = []\r\n for i in range(len(pixel[0])):\r\n mapx.append(pixel[1][i] * geoTransform[1] + geoTransform[0])\r\n mapy.append(geoTransform[3] - (pixel[0][i] * (numpy.abs(geoTransform[5]))))\r\n\r\n mapx = numpy.array(mapx)\r\n mapy = numpy.array(mapy)\r\n else:\r\n mapx = pixel[1] * geoTransform[1] + geoTransform[0]\r\n mapy = geoTransform[3] - (pixel[0] * (numpy.abs(geoTransform[5])))\r\n\r\n return (mapx,mapy)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maps a 2d element position to the map position of that element's center.
def element_to_map(self, pos2):
    return ((pos2.x * self.element_size) - (self.element_size / 2),
            (pos2.y * self.element_size) - (self.element_size / 2))
[ "def xy2center(self, x, y):\n x = x - 10.97 / 2\n y = y - 23.78 / 2\n return x, y", "def map_to_element(self, pos2):\n \n return ((pos2.x / self.element_size) + 1, (pos2.y / self.element_size) + 1)", "def mkmapcoords(self, pts):\n return(mk_mapcoords(pts, self.vol_verts, self.element, self.dim))", "def getCenter(self):\n (left, top), (right, bottom) = self.getCoords()\n x = left + (right - left) / 2\n y = top + (bottom - top) / 2\n return x, y", "def tile_centre(z, x, y):\n\n from tilequeue.tile import num2deg\n\n lat, lon = num2deg(x + 0.5, y + 0.5, z)\n return (lon, lat)", "def translate_to_cell_center(self):\n if self.cell is None:\n raise NameError(\"cell not defined\")\n else:\n self.translate_to_zero()\n cell_center = (self.cell[0] + self.cell[1] + self.cell[2]) / 2\n self.translate(cell_center)", "def get_center(self):\n\t\thx = self.h[0]\n\t\thy = self.h[1]\n\t\thz = self.h[2]\n\n\t\treturn sum([self.xyz0, [hx/2, hy/2, hz/2]], axis=0)", "def center(self):\n self.simple_center_pos = {}\n self.update()", "def position_map(context, map):\n ob = context.active_object\n scn = context.scene\n nodes = map['mat'].node_tree.nodes\n #\n #Nodes Creation\n tex_coord = nodes.new(\"ShaderNodeTexCoord\")\n mapping = nodes.new(\"ShaderNodeMapping\")\n mapping.rotation[1] = 1.5708 #Radian rotation of 90 degrees in Y\n mapping.translation[0] = abs(min_vertex(ob.data, 'z'))\n gr_tex = nodes.new(\"ShaderNodeTexGradient\")\n #\n #Nodes linking\n links = map['mat'].node_tree.links\n links.new(tex_coord.outputs[3], mapping.inputs[0])\n links.new(mapping.outputs[0], gr_tex.inputs[0])\n links.new(gr_tex.outputs[0], map['output'].inputs[0])\n #\n #Baking\n map['mat'].node_tree.nodes.active = map['image_node']\n enable_color_bake_settings()\n bpy.ops.object.bake(type='DIFFUSE')\n return map['image']", "def centerInCell(self):\n x, y = self.pos\n x = int(x) + 0.5\n y = int(y) + 0.5\n self.pos = (x,y)", "def map_coord(self, geom):\n coord = {\"skycoord\": self.radec}\n\n cols = {k.upper(): v for k, v in self.table.columns.items()}\n\n for axis in geom.axes:\n try:\n col = cols[axis.name.upper()]\n coord[axis.name] = u.Quantity(col).to(axis.unit)\n except KeyError:\n raise KeyError(f\"Column not found in event list: {axis.name!r}\")\n\n return MapCoord.create(coord)", "def pix2map(geoTransform, i, j):\n geoTransform = correct_geoTransform(geoTransform)\n if type(i) in (np.ma.core.MaskedArray, np.ndarray):\n are_two_arrays_equal(i, j)\n else: # if only a float is given\n i,j = correct_floating_parameter(i), correct_floating_parameter(j)\n\n x = geoTransform[0] + \\\n np.multiply(geoTransform[1], j) + np.multiply(geoTransform[2], i)\n\n y = geoTransform[3] + \\\n np.multiply(geoTransform[4], j) + np.multiply(geoTransform[5], i)\n\n # # offset the center of the pixel\n # x += geoTransform[1] / 2.0\n # y += geoTransform[5] / 2.0\n return x, y", "def get_center(self):\n x, y = self.pos\n ox, oy = self.origin\n w, h = self.size\n return (x - ox + w / 2, y - oy + h / 2)", "def getCenter(self) -> \"SbVec2d\":\n return _coin.SbBox2d_getCenter(self)", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def map_to_surface(self, coords):\r\n return ((coords[0] - self.map_left) * self.sprite_size, \r\n (coords[1] - self.map_top) * self.sprite_size)", "def compute_position_center_of_mass(self):\n return self.position_collection[..., 0].copy()", "def get_center(self):\n\t\treturn sum([self.xyz0, self.get_diagonal/2], 
axis=0)", "def get_object_position_on_grid(self, distance):\n #now we know the spatial resolution per grid spacing\n dx = distance[0] / self._dx\n dy = distance[1] / self._dy\n dz = distance[2] / self._dz\n #get the center loction\n cx = int(self.x_dim / 2)\n cy = int(self.y_dim / 2)\n cz = int(self.z_dim / 2)\n #return the x, y, z indicies to add at\n return cx + dx, cy + dy, cz + dz" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets an element's properties (but not flags) from sector flags.
def set_element_properties(self, sector_index, element):
    sector = self.map_data.sectors[sector_index]

    # Set sector damage flag.
    if sector.damage > 0:
        if sector.damage <= 5:
            element.flags |= Element.FLAG_DAMAGE_LOW
        elif sector.damage <= 10:
            element.flags |= Element.FLAG_DAMAGE_MEDIUM
        elif sector.damage >= 20:
            element.flags |= Element.FLAG_DAMAGE_HIGH
[ "def set_flags(obj, flag_field, flags):\n\n for flag, value in flags:\n if value & flag_field:\n obj.__dict__[flag] = True\n else:\n obj.__dict__[flag] = False", "def _set_attr_reg(self):\n tmos_v = self._meta_data['bigip']._meta_data['tmos_version']\n attributes = self._meta_data['attribute_registry']\n v12kind = 'tm:asm:policies:blocking-settings:blocking-' \\\n 'settingcollectionstate'\n v11kind = 'tm:asm:policies:blocking-settings'\n if LooseVersion(tmos_v) < LooseVersion('12.0.0'):\n attributes[v11kind] = Blocking_Settings\n else:\n attributes[v12kind] = Blocking_Settings", "def set_flags(self,flags):\n _ldns.ldns_key_set_flags(self,flags)\n #parameters: ldns_key *,uint16_t,\n #retvals: ", "def set(self, **kwargs):\n if kwargs:\n try:\n for attribute, value in kwargs.items():\n if attribute in self.properties:\n self._cached_properties(attribute).set(value)\n else:\n raise AttributeError(attribute)\n #\n # Only try to write the attributes\n # back if the object exists.\n #\n if self.ole_object.Path_.Path:\n self.ole_object.Put_()\n except pywintypes.com_error:\n handle_com_error()", "def _set_flag(self, flag):\r\n\r\n self.conflags |= flag", "def setFlags(self, flags: 'uint32_t const') -> \"void\":\n return _coin.SoGLImage_setFlags(self, flags)", "def test_full_update_element_property_set_property(self):\n pass", "def setAttrMapping(axis=\"string\", clutch=\"string\", selection=bool, offset=float, absolute=bool, relative=bool, attribute=\"string\", scale=float, device=\"string\"):\n pass", "def setFluidAttr(reset=bool, floatRandom=float, zIndex=int, clear=bool, addValue=bool, yvalue=bool, lowerFace=bool, vectorRandom=float, yIndex=int, xIndex=int, xvalue=bool, floatValue=float, vectorValue=float, attribute=\"string\", zvalue=bool):\n pass", "def set_flags(self, key, mask):\n if mask == 0:\n if key in self.flags:\n del self.flags[key]\n return\n self.flags[key] = mask", "def setifflags(ifname, flags):\n s_ioctl(SIOCSIFFLAGS, ifreq_ifr_flags(ifname, flags))", "def set_compound_attr(obj, value, *namesandindices):\n currentattr = obj\n for e in namesandindices[:-1]:\n currentattr = _getattr_from_compound_element(currentattr, e)\n setattr(currentattr, namesandindices[-1], value)", "def setAltMaterialFlags( self, flags ):\r\n\r\n\t\tkeepDisp = []\r\n\t\tkeepOpac = []\r\n\t\tkeepBump = []\r\n\r\n\t\tfrom cross3d.constants import MaterialOverrideOptions\r\n\t\tfor flag in flags:\r\n\t\t\tkeepDisp.append( (flag & MaterialOverrideOptions.KeepDisplacement) != 0 )\r\n\t\t\tkeepOpac.append( (flag & MaterialOverrideOptions.KeepOpacity) != 0 )\r\n\t\t\tkeepBump.append( (flag & MaterialOverrideOptions.KeepBump) != 0)\r\n\r\n\t\tdata = self.metaData()\r\n\t\tdata.setValue( 'keepDisplacement', \tkeepDisp )\r\n\t\tdata.setValue( 'keepOpacity',\t \tkeepOpac )\r\n\t\tdata.setValue( 'keepBump',\t\t\tkeepBump )\r\n\t\tself._altMtlFlagsCache = flags\r\n\t\treturn True", "def setFlags(self, opt, upDict):\n d = self.desc[opt]\n if not d.has_key('flags'):\n return False\n d['flags'].update(upDict)\n self.sete(opt, d)\n return True", "def setParticleAttr(randomVector=float, object=\"string\", relative=bool, floatValue=float, randomFloat=float, attribute=\"string\", vectorValue=float):\n pass", "def set(self, prop, val):\n if prop == 'num_released':\n raise AttributeError(\"cannot set attribute\")\n\n # we don't want to add an attribute that doesn't already exist\n # first check to see that the attribute exists, then change it else\n # raise error\n if hasattr(self.release, prop):\n setattr(self.release, prop, 
val)\n elif hasattr(self.element_type, prop):\n setattr(self.element_type, prop, val)\n else:\n for init in self.element_type.initializers.values():\n if hasattr(init, prop):\n setattr(init, prop, val)\n break\n else:\n raise AttributeError('{0} attribute does not exist '\n 'in element_type '\n 'or release object'.format(prop))", "def do_set(self,name):\n attr,value=name.split()\n if attr in [\"is_{}\".format(flag) for flag in (\"locked\",\"hidden\",\"silent\",\"tunnel\")]:\n attr=attr[3:]\n if value.lower() in [\"true\",\"yes\",\"1\"]:\n value=True\n elif value.lower() in [\"false\",\"no\",\"0\"]:\n value=False\n else:\n print(\"Invalid value: {}\".format(value))\n return\n if self.in_program:\n setattr(self.vessel,attr,value)\n else:\n setattr(self.vessel.parent,attr,value)\n return\n print(\"Invalid attribute: {}\".format(attr))", "def _set_attrib_for_all(self, params, attrib, value):\n global progress_data\n codes = self._get_parent_codes(params)\n codes.append(self._get_precise_code(params))\n with lock:\n params[attrib] = value\n with lock:\n for precise_code in codes:\n progress_data[precise_code][attrib] = value", "def setModeAttributes(self) -> None:\n d = self.attributesDict\n aList = (\n ('default', 'null'),\n ('digit_re', ''),\n ('escape', ''), # New in Leo 4.4.2.\n ('highlight_digits', True),\n ('ignore_case', True),\n ('no_word_sep', ''),\n )\n for key, default in aList:\n val = d.get(key, default)\n if val in ('true', 'True'):\n val = True\n if val in ('false', 'False'):\n val = False\n setattr(self, key, val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
count elements and attributes used based on DTD
def count_elements(node, stats):
    # test that we are an element and not a comment nor processing instruction
    if isinstance(node, (etree._Comment, etree._ProcessingInstruction)):
        return

    # key the hash on the local name of the element
    key = node.xpath('local-name(.)')

    # get stats counter for the current element name, or set the default
    element_stats = stats.setdefault(key,   # <-- element name is dict key
        [0,                                 # <-- count for this element  [0]    corpus
         [Counter(),                        # <-- parent elements         [1][0] corpus
          Counter(),                        # <-- child elements          [1][1] dtd
          Counter(),                        # <-- child attributes        [1][2] dtd
          Counter()]]                       # <-- PCDATA                  [1][3] corpus
        )                                   # array address-^--^
    # some counters look for everything possible based on dtd
    # some counters only observe the input corpus

    ## update the stats data structure
    # increment the count for this element
    stats[key][0] = element_stats[0] + 1    # count for this element

    # note the parent element
    parent = node.xpath('local-name(..)')
    stats[key][1][0][parent] += 1           # Counter() [0]

    # count child elements and attributes
    # look up what elements and attributes we might see, based on the DTD
    for element in allowed_elements(key):   # Counter() [1]
        stats[key][1][1][element] += len(node.xpath(''.join(['*[local-name()="', element, '"]'])))
    for attribute in allowed_attributes(key):  # Counter() [2]
        stats[key][1][2][attribute] += len(node.xpath(''.join(['@*[local-name()="', attribute, '"]'])))

    # note if there is text()
    pcdata = "PCDATA" if node.xpath('boolean(./text())') else "no text()"
    stats[key][1][3][pcdata] += 1           # Counter() [3]

    ## done counting this element node, loop through child nodes
    for desc in list(node):
        # recursive call
        count_elements(desc, stats)
[ "def get_html_attributes(html):\n attributes = set()\n for element in html.iter():\n for attribute in element.keys():\n element_attribute = ' '.join([element.tag, attribute])\n attributes.add(element_attribute)\n return Counter(attributes)", "def numeroElements(self):\n count=0\n for c in self._components:\n count+=1\n return count", "def test_portals_id_designs_nk_tags_count_get(self):\n pass", "def get_count(self, xml_tag):\n string_lib.remove_space(xml_tag)\n try:\n data = self.soup.find_all(xml_tag)\n return len(data)\n\n except Exception as e:\n print(TAG + \" | Failed to find tag : {} in XML \".format(xml_tag))\n return 0", "def get_count_elements_addres_of_registration(self):\n elements_addres_of_registration = self.driver.find_elements(*self.FOR_COUNT_ELEMENTS_ADDRES_OF_REGISTRATION)\n return len(elements_addres_of_registration)", "def sort_by_numattrs(node):\n return len(node.attrs)", "def number_of_attributes(self):\n attributes = self._GetAttributes()\n return len(attributes)", "def elemSize(self): # real signature unknown; restored from __doc__\n pass", "def count_multiple_nodes(self, elements=['token', 'lemma', 'tag', 'sentence']):\n nodes = self.list_multiple_nodes(elements)\n result = {}\n for key, value in nodes.items():\n result[key] = len(value)\n return result", "def cumulative_pos_attributes(self):\n count = 0\n for ing in self.ingredients:\n for att in ing.attributes:\n if att:\n count += 1\n return count", "def num_attr(self, object_id):\n (_elemType, _numElem, _nodesPerElem, numAttr) = self.__ex_get_block('EX_ELEM_BLOCK', object_id)\n return numAttr.value", "def n_elements(self):\n return self._header.count", "def count_instance_attributes(listInst):\n cAttr = None\n for inst in listInst:\n cLen = len(inst.listAttrs)\n if cAttr is None:\n cAttr = cLen\n elif cLen != cAttr:\n return None\n return cAttr", "def universe_size(data):\n N = 0\n for doc in data: \n n=0\n for term in doc:\n count = doc[term]\n n += count\n N += n\n return N", "def font_specs(xml_data):\n xml_font_specs=xml_data.findall('page[@number=\"1\"]/fontspec[@id][@size]')\n return [fs.attrib for fs in xml_font_specs]", "def __getElementCounts(self, oeMol):\n eD = {}\n if len(eD) == 0:\n # calculate from current oeMol\n eD = {}\n for atom in oeMol.GetAtoms():\n atNo = atom.GetAtomicNum()\n if atNo not in eD:\n eD[atNo] = 1\n else:\n eD[atNo] += 1\n return eD", "def test_attribute_count(self):\n comment = CommentFactory.create()\n comment_dict = model_to_dict(comment)\n self.assertEqual(len(comment_dict.keys()), 6)", "def __len__(self):\n\t\ttotal=0\n\t\tfor ns in self.namespace:\n\t\t\tif ns is None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\ttotal=total+len(ns)\n\t\treturn total", "def xml_info(node):\n tags = set()\n attrib = set()\n def scan(node):\n tags.add(node.tag)\n attrib.update(node.attrib)\n for child in node:\n scan(child)\n scan(node)\n print('Tags:', ' '.join(sorted(tags)))\n print('Attributes:', ' '.join(sorted(attrib)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read XML version of the DTD to figure out allowed attributes
def allowed_attributes(key):
    return DTD.xpath(''.join(["/dtd/attlist[@name='", key, "']/attribute/@name"]))
[ "def get_valid_attributes(self, buf, pos):\n node = buf.xml_node_at_pos(pos)\n if node is None: return None\n #print \"get_valid_attributes NODE %s:%s xmlns[%s] %r\"%(tree.prefix(node),node.localName,node.ns,node.tag)\n already_supplied = node.attrib.keys()\n handlerclass = buf.xml_tree_handler(node)\n attrs = handlerclass.attrs(buf.xml_tree, node)\n if not attrs:\n return None\n attrs = [name for name in attrs if name not in already_supplied]\n attrs.sort()\n return attrs", "def xmlread(filename):\n\n tree = ET.parse(filename)\n root = tree.getroot()\n value_node_spacecraft = None\n value_node_passdir = None\n node_component = root.find('component')\n for node in node_component:\n if node.get('name') == 'mission': # mission=S1 or spacecraftname=Sentinel-1\n value_node_spacecraft = node.find('value')\n\n node_bursts = node_component.find('component')\n for node in node_bursts:\n if node.get('name') in ['burst1', 'burst2', 'burst3']:\n for property in node:\n if property.get('name') == 'passdirection':\n value_node_passdir = property.find('value')\n\n if value_node_passdir.text == 'DESCENDING':\n passdirection = 'Desc'\n else:\n passdirection = 'Asc'\n\n attrdict = {'missionname': value_node_spacecraft.text, 'passdirection': passdirection}\n\n return attrdict", "def parse_attributes(self):\n\n byte_offset = self._first_attr\n attr_count = 0\n\n # while still within the entry and the number assigned attributes\n while (byte_offset + 16 < self._used_size and attr_count < self._next_attrID):\n print (\n \"Parsing next attribute: ((byte_offset=(%d+16) < used_size=%d) and (attr_count=%d < next_attribute=%d)\"\n % (byte_offset, self._used_size, attr_count, self._next_attrID))\n\n # read in first/next attribute\n attr_size = unpack(\"<L\", self._entry[byte_offset + 4:byte_offset + 8])[0]\n attr = self._entry[byte_offset:byte_offset + attr_size]\n\n attr_type, nr_flag = self.parse_attr_header(attr, attr_size)\n\n # if it is a resident attribute\n if nr_flag == 0:\n content_size, content_offset = self.parse_resident_attr(attr)\n\n # if type is 16, parse std_info\n if attr_type == 16:\n standard_info = STD_INFO(attr, content_size, content_offset)\n standard_info.parse()\n\n # if type is 48, parse file_name\n elif attr_type == 48:\n file_name = FILE_NAME(attr, content_size, content_offset)\n file_name.parse()\n\n # if it is a non-resident attribute\n elif nr_flag == 1:\n self.parse_nonresident_attr(attr, attr_type)\n\n # set up for next attribute\n byte_offset += attr_size\n attr_count += 1\n print (\"\")\n # end while loop", "def xml_info(node):\n tags = set()\n attrib = set()\n def scan(node):\n tags.add(node.tag)\n attrib.update(node.attrib)\n for child in node:\n scan(child)\n scan(node)\n print('Tags:', ' '.join(sorted(tags)))\n print('Attributes:', ' '.join(sorted(attrib)))", "def _parse_attributes(self):\n if \"identifier\" in self.xml.attrib:\n self.identifier = self.xml.attrib[\"identifier\"]\n else:\n raise ValueError(\"'identifier' is a required attribute for an <output> tag.\")\n if \"folder\" in self.xml.attrib:\n self.folder = self.xml.attrib[\"folder\"]\n if \"file\" in self.xml.attrib:\n self.filename = self.xml.attrib[\"file\"]\n if \"template\" in self.xml.attrib:\n self.template = self.xml.attrib[\"template\"]\n if \"mode\" in self.xml.attrib:\n self.mode = self.xml.attrib[\"mode\"]\n if \"value\" in self.xml.attrib:\n self.value = self.xml.attrib[\"value\"]\n if \"tolerance\" in self.xml.attrib:\n self.tolerance = float(self.xml.attrib[\"tolerance\"])\n else:\n self.tolerance = 
1.\n if \"actolerance\" in self.xml.attrib:\n self.actolerance = float(self.xml.attrib[\"actolerance\"])\n else:\n self.actolerance = 1.\n\n if \"position\" in self.xml.attrib:\n self.position = self.xml.attrib[\"position\"]\n if \"autoclass\" in self.xml.attrib:\n self.autoclass = self.xml.attrib[\"autoclass\"].lower()==\"true\"", "def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm", "def supportedAttrs(self, path=None):\n if not path: path = self.homedir\n d = self.do_getattrdict(path, [FATTR4_SUPPORTED_ATTRS])\n return d[FATTR4_SUPPORTED_ATTRS]", "def read_xml_file(self, xml_fn):\n pass", "def Attributes(self) -> _n_5_t_17:", "def get_attrib(fxn):\n unclaimed[fxn] = 'encode_getattrib'\n return fxn", "def get_ea_attributes():\n reattrib = None\n try:\n reattrib = requests.get(PAYLOAD['url'] + \"extensibleattributedef?\",\n auth=(PAYLOAD['username'],\n PAYLOAD['password']),\n verify=False)\n reattrib.raise_for_status()\n except requests.exceptions.ConnectionError as eaerrt:\n print(\"Can't reach IPAM! Check your VPN or Local access\", eaerrt)\n exit()\n except requests.exceptions.HTTPError as eahrrt:\n print('Check your credentials!', eahrrt)\n exit()\n\n rutfeattrib = reattrib.content.decode('utf-8')\n rjsoneattrib = json.loads(rutfeattrib)\n eattl = []\n for att in rjsoneattrib:\n for key, value in att.items():\n if key == 'name':\n eattl.append(value)\n return eattl", "def readXML(filename):\n with open(filename) as fd:\n legal_doc_xml = fd.read()\n return legal_doc_xml", "def _read_structure_attributes(f):\n\n line = ''\n variogram_info = {}\n while \"end structure\" not in line:\n line = f.readline()\n if line == '':\n raise Exception(\"EOF while reading structure\")\n line = line.strip().lower().split()\n if line[0].startswith('#'):\n continue\n if line[0] == \"nugget\":\n nugget = float(line[1])\n elif line[0] == \"transform\":\n transform = line[1]\n elif line[0] == \"numvariogram\":\n numvariograms = int(line[1])\n elif line[0] == \"variogram\":\n variogram_info[line[1]] = float(line[2])\n elif line[0] == \"end\":\n break\n elif line[0] == \"mean\":\n warning.warn(\"'mean' attribute not supported, skipping\",PyemuWarningF)\n else:\n raise Exception(\"unrecognized line in structure definition:{0}\".\\\n format(line[0]))\n assert numvariograms == len(variogram_info)\n return nugget,transform,variogram_info", "def attributes(self):\n if self.element is not None:\n if isinstance(self.element, DocElement):\n return self.element.attributes\n else:\n return self.element.attrib\n else:\n return {}", "def getAttrs(self):\n\t\treturn self._attributes", "def getXMLTree( self ):\n \n try:\n self.tree = ET.parse(self.cdlfilename)\n except Exception, inst:\n print \"Unexpected error opening %s: %s\" % (self.cdlfilename, inst)\n return\n \n doc = self.tree.getroot()\n\n #do something bad to get the namespace (should really be handling these separately for when the asc cdl spec updates).\n try:\n self.ASCCDLNS = str(doc.tag)[str(doc.tag).index(\"{\"):str(doc.tag).index(\"}\")+1]\n except ValueError:\n nuke.tprint(\"badly formatted xml, no namespace. 
Attempting to continue without namespace. Unlikely to work.\")\n self.ASCCDLNS = \"\"\n \n return", "def test_get_attributes(self):\r\n\r\n _values = (0, None, '', '', '')\r\n\r\n self.assertEqual(self.DUT.get_attributes(), _values)", "def strip_attrs(self):\n for tag in self.root.findAll(True):\n tag.attrs = [(attr, val) for attr, val in tag.attrs\n if attr in self.settings['valid_attrs']]", "def getRequiredAttrs(self):\n required = []\n for type_uri, attribute in self.requested_attributes.items():\n if attribute.required:\n required.append(type_uri)\n\n return required" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read and resize volume
def process_scan(path):
    volume = read_nifti_file(path)
    volume = normalize(volume)
    volume = resize_volume(volume)
    return volume
[ "def process_scan(path):\r\n # Read scan\r\n volume = read_nifti_file(path)\r\n # Normalize\r\n # volume = normalize(volume)\r\n # Resize width, height and depth\r\n volume = resize_volume(volume)\r\n return volume", "def resize(self, size):\r\n self.instance.resize_volume(size)\r\n self.size = size", "def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )", "def resize_volume(self, size):\r\n curr_size = self.volume.size\r\n if size <= curr_size:\r\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\r\n \"than the current volume size of '%s'.\" % curr_size)\r\n body = {\"volume\": {\"size\": size}}\r\n self.manager.action(self, \"resize\", body=body)", "def extend_volume(self, connection_properties):\n # The StorPool client (storpool_block service) running on this host\n # should have picked up the change already, so it is enough to query\n # the actual disk device to see if its size is correct.\n #\n volume_id = connection_properties.get('volume', None)\n if volume_id is None:\n raise exception.BrickException(\n 'Invalid StorPool connection data, no volume ID specified.')\n\n # Get the expected (new) size from the StorPool API\n volume = self._attach.volumeName(volume_id)\n LOG.debug('Querying the StorPool API for the size of %(vol)s',\n {'vol': volume})\n vdata = self._attach.api().volumeList(volume)[0]\n LOG.debug('Got size %(size)d', {'size': vdata.size})\n\n # Wait for the StorPool client to update the size of the local device\n path = '/dev/storpool/' + volume\n for _ in range(10):\n size = utils.get_device_size(self, path)\n LOG.debug('Got local size %(size)d', {'size': size})\n if size == vdata.size:\n return size\n time.sleep(0.1)\n else:\n size = utils.get_device_size(self, path)\n LOG.debug('Last attempt: local size %(size)d', {'size': size})\n return size", "def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )", "def volume(ctx, *args, **kwargs):", "def read_volume_fast(self, threshold):\n v = self.return_header_values()\n nx, ny, nz = v['nx'], v['ny'], v['nz']\n mx, my, mz = v['mx'], v['my'], v['mz']\n mxstart, mystart, mzstart= v['mxstart'], v['mystart'], v['mzstart']\n xlen, ylen, zlen = v['xlen'], v['ylen'], v['zlen']\n self.extx = nx\n self.exty = ny \n self.extz = nz \n self.widthx = xlen/mx\n self.widthy = ylen/my\n self.widthz = zlen/mz\n self.width = max(self.widthx, self.widthy , self.widthz)\n self.gridx = mxstart * self.widthx\n self.gridy = mystart * self.widthy\n self.gridz = mzstart * self.widthz\n #Format independent part starts here\n nvox = self.extx * self.exty * self.extz\n self.map_phi = zeros(nvox)\n #self.protein_phi = zeros(nvox)\n extxy = self.extx * self.exty\t\t\n\t\t\n densities = reshape(self.densities, nvox)\n counts = where(self.densities>threshold)\n for count in counts[0]:\n indv = 
int(count)\n indz = indv /extxy\n indv = indv - indz * extxy\n indy = indv / self.extx\n indx = indv - indy * self.extx\n x = float(indx)*self.widthx+self.gridx\n y = float(indy)*self.widthy+self.gridy\n z = float(indz)*self.widthz+self.gridz\n #Here there will be the changes for the coordinates and density values\n density_value = self.densities[count]\n self.density_coords[(float(x), float(y), float(z))] = density_value\n #Needed for neighboor search I guess there should be interaface between EMmap and AtomicStrcucture class\n #self.neighbor_list.append(Neighbor([float(x), float(y), float(z)]))\n self.dotvalues.append(density_value)\n #self.map_phi[count]+=float(density_value)\n count = count +1\n #print x,y,z, density_value\n #self.density_coords_bkp = deepcopy(self.density_coords)\n return self.density_coords", "def _to_volume(self, element, name=None):\r\n volId = findtext(element=element, xpath='volumeId',\r\n namespace=NAMESPACE)\r\n size = findtext(element=element, xpath='size', namespace=NAMESPACE)\r\n\r\n # Get our tags\r\n tags = self._get_resource_tags(element)\r\n\r\n # If name was not passed into the method then\r\n # fall back then use the volume id\r\n name = name if name else tags.get('Name', volId)\r\n\r\n # Get our extra dictionary\r\n extra = self._get_extra_dict(\r\n element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])\r\n\r\n return StorageVolume(id=volId,\r\n name=name,\r\n size=int(size),\r\n driver=self,\r\n extra=extra)", "def volume_size_async(self, volume_name, new_size=None):\n return self.request( \"volume-size-async\", {\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-status': [ basestring, False ],\n 'result-error-code': [ int, False ],\n 'volume-size': [ basestring, False ],\n } )", "def read_raw(self):\n\n return self.read_volume(\"/volumes/raw\")", "def volume(self):\n\n volume = self.cache.volume()\n self.close()\n return volume", "def volume(vol):\n ReceiverManager().set_volume(vol)\n return jsonify(volume = vol, status = \"Ok\")", "def _extend_volume(self, name, new_size):\n LOG.debug('_extend__volume name: %s', name)\n params = {}\n params['volsize'] = ix_utils.get_bytes_from_gb(new_size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s/id/%s') % (\n FreeNASServer.REST_API_VOLUME,\n urllib.parse.quote_plus(\n self.configuration.ixsystems_dataset_path + '/' + name))\n ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,\n request_urn, jparams)\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while extending volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def read_volume( mdserver_name, fields ):\n global conf\n\n ctl_root = VOLUME_CTL_ROOT( conf, {'NAME': mdserver_name} )\n conf_path = VOLUME_CONF_PATH( ctl_root )\n\n try:\n vol_conf = read_config( conf_path, fields )\n except Exception, e:\n raise MDMethodFailed( \"read_volume\", \"could not read config, exception = '%s'\" % e )\n\n ret = {}\n for f in fields:\n ret[f] = vol_conf[f]\n \n return ret", "def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % 
(FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def test_extend_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n\n new_size = self._driver._size_bytes(self.TEST_NEWSIZE)\n self._driver._create_file(self.TEST_VOLPATH, new_size)\n\n self.mox.ReplayAll()\n\n self._driver.extend_volume(self.TEST_VOLUME, self.TEST_NEWSIZE)", "def manage_existing_get_size(self, volume, existing_ref):\n volume_info = self._validate_manage_existing_ref(existing_ref)\n size = self._round_bytes_to_gib(volume_info['size'])\n\n return size", "def readVTKVolume(name):\n try:\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(name)\n reader.Update()\n print(\"Input volume:\", name)\n vol = reader.GetOutput()\n reader = None\n return vol\n except BaseException:\n print(\"VTK volume reader failed\")\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(\n exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout\n )\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the point group msg published either from ROS2 or Unity and convert it to a dictionary so that it can be saved as a .yaml file
def _pointGroupsCallback(self, msg : PointGroups) -> None:
    self.pointGroupsDict = []
    for group in msg.groups:
        tmp_dict = [{'map_pos' : [tmp for tmp in group.map_pos],
                     'group_id' : group.group_id,
                     'map_origin' : [group.map_origin[0], group.map_origin[1]],
                     'map_dims' : [group.map_dims[0], group.map_dims[1]],
                     'map_resol' : group.map_resolution,
                     'assoc_fl' : group.associated_file}]
        self.pointGroupsDict.append(tmp_dict)
    self.pointGroupsReceived = True
[ "def _bot_group_proto_to_tuple(msg, trusted_dimensions):\n dimensions = {unicode(k): set() for k in trusted_dimensions}\n for dim_kv_pair in msg.dimensions:\n # In validated config 'dim_kv_pair' is always 'key:value', but be cautious.\n parts = unicode(dim_kv_pair).split(':', 1)\n if len(parts) != 2:\n logging.error('Invalid dimension in bots.cfg - \"%s\"', dim_kv_pair)\n continue\n k, v = parts[0], parts[1]\n dimensions.setdefault(k, set()).add(v)\n\n auth_cfg = msg.auth or bots_pb2.BotAuth()\n\n content = ''\n if msg.bot_config_script:\n rev, content = config.get_self_config(\n 'scripts/' + msg.bot_config_script,\n store_last_good=True)\n if not rev or not content:\n # The entry is invalid. It points to a non existing file. It could be\n # because of a typo in the file name. An empty file is an invalid file,\n # log an error to alert the admins.\n logging.error(\n 'Configuration referenced non existing bot_config file %r\\n%s',\n msg.bot_config_script, msg)\n return _make_bot_group_config(\n require_luci_machine_token=auth_cfg.require_luci_machine_token,\n require_service_account=auth_cfg.require_service_account,\n ip_whitelist=auth_cfg.ip_whitelist,\n owners=tuple(msg.owners),\n dimensions={k: sorted(v) for k, v in dimensions.iteritems()},\n bot_config_script=msg.bot_config_script or '',\n bot_config_script_content=content or '',\n system_service_account=msg.system_service_account or '')", "def parse_msg(msg):\n subject = msg.get(\"Subject\")\n return {\n \"subject\": subject,\n \"sender\": msg.get(\"Sender\"),\n \"date\": msg.get(\"Date\"),\n \"size\": len(bytes(msg)),\n }", "def _get_group(self):\n\n import re\n\n pattern = re.compile(\"Group\\sinfo:\\n(.*?)(?=^-)\", re.M|re.DOTALL)\n pattern = re.compile(\"Group\\sinfo:\\n(.*?)(?=(^Profile\\skey|^$))\", re.M|re.DOTALL)\n info = pattern.findall(self.msg)\n\n # No group info: return None\n if len(info) == 0: return None\n info = info[0][0]\n\n # Decoding group information\n res = msggroupinfo(info)\n\n return(res)", "def unpack_point_msg(msg, stamped=False):\n if stamped:\n p = msg.point\n else:\n p = msg\n\n return p.x, p.y, p.z", "def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n clocks_per_click=msg.clocks_per_click,\n notated_32nd_notes_per_beat=msg.notated_32nd_notes_per_beat)\n else:\n raise TimeSignatureException('not 4/4')\n elif msg.type == 'note_on':\n data = _note_on_update(data, msg)\n elif msg.type == 'note_off':\n data = _note_off_update(data, msg)\n\n return data", "def unpackage(data):\n unpackaged_data = {}\n received_proto = ansible_pb2.DawnData()\n received_proto.ParseFromString(data)\n unpackaged_data[\"student_code_status\"] = [received_proto.student_code_status, time.time()]\n all_gamepad_dict = {}\n for gamepad in received_proto.gamepads:\n gamepad_dict = {}\n gamepad_dict[\"axes\"] = dict(enumerate(gamepad.axes))\n gamepad_dict[\"buttons\"] = dict(enumerate(gamepad.buttons))\n all_gamepad_dict[gamepad.index] = gamepad_dict\n unpackaged_data[\"gamepads\"] = [all_gamepad_dict, time.time()]\n return unpackaged_data", "def parse_msg(self, body):\n result = {}\n doc = minidom.parseString(body)\n root = doc.getElementsByTagName(\"xml\")[0]\n for item in root.childNodes:\n if item.firstChild:\n value = item.firstChild.nodeValue\n else:\n value = item.nodeValue\n result[item.nodeName] = value\n return result", 
"def _convert_data_to_dict(self, data: Any) -> Tuple[str, dict]:\n topic_attribute_name = self.get_topic_attribute_name(data.private_revCode)\n _, topic_name = topic_attribute_name.split(\"_\", maxsplit=1)\n\n data_stream = copy.deepcopy(\n self._template_manager_message[topic_attribute_name]\n )\n data_vars = data.get_vars()\n\n for topic_attribute in data_vars:\n data_stream[topic_attribute][\"value\"] = data_vars[topic_attribute]\n\n payload = (\n data_stream\n if topic_attribute_name in self.periodic_data\n else [\n data_stream,\n ]\n )\n\n data_as_dict = dict(\n csc=self.remote.salinfo.name,\n salindex=self.remote.salinfo.index,\n data=dict(\n [\n (topic_name, payload),\n ],\n ),\n )\n return topic_attribute_name, data_as_dict", "def params(self):\n for entry in self.raw['entries']:\n if entry['type'] == 'message':\n return entry['data']\n\n return {}", "def unpack_transform_msg(msg, stamped=False):\n if stamped:\n t = msg.transform.translation\n r = msg.transform.rotation\n else:\n t = msg.translation\n r = msg.rotation\n\n return (t.x, t.y, t.z), (r.w, r.x, r.y, r.z)", "def ros_to_pose_tuple(msg):\n p = [msg.position.x, msg.position.y, msg.position.z]\n q = [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w]\n return (p,q)", "def test_should_generate_message_dict(self):\n correct_messages = {\n 'LAND': {'FAIJWJSOOFAMAU', 'dskajd'},\n 'ICE': {'STHSTSTVSASOS'},\n 'FIRE': {'JXGOOMUTOO'}\n }\n\n result_messages = read_messages_from_file(\n self.__CORRECT_FORMAT_FILE_PATH)\n\n self.assertDictEqual(correct_messages, result_messages)", "def extract_details(group: MeetupObject) -> dict:\n return {\n 'name': group.name,\n 'category': group.category['name'],\n 'created': from_epoch(group.created),\n 'city': group.city,\n 'state': group.state,\n 'country': group.country,\n 'description': group.description,\n 'url': group.link,\n 'organizer': group.organizer['name'],\n 'members': group.members,\n 'member_title': group.who\n }", "def _parse_merge_message(self, msg):\n gate_info = []\n for line in msg.split('\\n'):\n parts = re.split('\\s+', line)\n if parts[0] == '-':\n gate = {}\n gate['name'] = parts[1]\n gate['url'] = parts[2]\n gate['result'] = parts[4]\n gate_info.append(gate)\n return gate_info", "def _parse_sns_message(self, sns_message):\n splitted_list = sns_message.split(PATTERN_LINESPLITTER)\n # Workaround for when the last parameter is not terminated with\n # the same separator pattern, then a closing quote might remain.\n if splitted_list[-1] != '' and splitted_list[-1][-1] == '\\'':\n # Cut the last character from the last item\n splitted_list[-1] = splitted_list[-1][:-1]\n result_dict = {}\n for line_item in splitted_list:\n line_item = line_item.strip()\n if PATTERN_KEYSPLITTER not in line_item:\n # Unparseable line, do not parse\n continue\n key, value = line_item.split(PATTERN_KEYSPLITTER, 1)\n result_dict[key] = self._cast_type(value)\n return result_dict", "def pose_to_ros(body_pose, msg):\n pose_tuple_to_ros(body_pose.pose, msg)", "def unpack_pose_msg(msg, stamped=False):\n if stamped:\n p = msg.pose.position\n o = msg.pose.orientation\n else:\n p = msg.position\n o = msg.orientation\n\n return (p.x, p.y, p.z), (o.w, o.x, o.y, o.z)", "def _parse_line(self, line):\n return {'raw_message': line}", "def parse_msg_xml(self):\n try:\n xml_recv = element_tree.fromstring(self.xml)\n wechatimg_id = xml_recv.find(TO_USER_NAME).text\n fromuser_id = xml_recv.find(FROM_USER_NAME).text\n content = xml_recv.find(CONTENT).text\n create_time = 
xml_recv.find(CREATE_TIME).text\n message_id = xml_recv.find(MSG_ID).text\n message_type = xml_recv.find(MSG_TYPE).text\n except:\n logging.error(\"Error in parshing incoming XML: \" + self.xml)\n\n self.xlm_content_dict = dict([('wechatimg_id', wechatimg_id), ('fromuser_id', fromuser_id),\n ('create_time', create_time), ('message_id', message_id),\n ('message_type', message_type), ('content', content)])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the value of each trainable with SGD.
def sgd_update(trainables, learning_rate=1e-2):
    # Performs SGD
    #
    # Loop over the trainables
    for t in trainables:
        # Change the trainable's value by subtracting the learning rate
        # multiplied by the partial of the cost with respect to this
        # trainable.
        partial = t.gradients[t]
        t.value -= learning_rate * partial
[ "def sgd_update(trainables, learning_rate=1e-2):\n for node in trainables:\n node.value -= learning_rate * node.gradients[node]", "def sgd(self):\n import math\n for i, j, r in self.samples:\n # Computer prediction and error\n prediction = self.get_rating(i, j)\n e = (r - prediction)\n #print(e)\n # Update biases\n self.b_u[i] += self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] += self.alpha * (e - self.beta * self.b_i[j])\n\n # Update user and item latent feature matrices\n #self.P[i, :] += self.alpha * (e * self.Q[j, :] - self.beta * self.P[i,:])\n #self.Q[j, :] += self.alpha * (e * self.P[i, :] - self.beta * self.Q[j,:])\n\n P_i = np.copy(self.P[i,:])\n for k in range(self.K):\n self.P[i,k] += self.alpha * (e * self.Q[j,k] - self.beta * self.P[i,k])\n self.Q[j,k] += self.alpha * (e * P_i[k] - self.beta * self.Q[j,k])", "def __batch_train(self):\n self.old_model = np.copy(self.model)\n for index, theta in enumerate(self.old_model):\n\n gradient = 0\n for train_example, target in izip(self.X_train, self.Y_train):\n model_at_example = np.dot(train_example, self.old_model[:-1]) + self.old_model[self.num_features]\n # non bias input\n if index < self.num_features:\n gradient += ((target - model_at_example) * train_example[index])\n else:\n gradient += (target - model_at_example)\n\n theta = theta + gradient * self.alpha\n self.model[index][0] = theta\n print self.model", "def update(self,lr):\n self.sample_minibatch(lr)\n # Calculate gradients at current point\n dlogbeta = lr.dlogpost(self)\n lr.grad_sample[self.iter-1,:] = dlogbeta\n\n # Update parameters using SGD\n eta = np.random.normal( scale = self.epsilon )\n lr.beta += self.epsilon / 2 * dlogbeta + eta", "def _gradient_update(self):\n # sample minibatch\n captions, image_features, urls = sample_coco_minibatch(self.data, self.batch_size, split='train')\n # compute loss and gradient\n loss, gradients = self.model.loss(image_features, captions)\n self.loss_history.append(loss)\n # parameter update\n for para_name, param in self.model.params.items():\n dparam = gradients[para_name]\n next_param, params = self.update_method(param, dparam, self.update_params_all[para_name])\n self.model.params[para_name] = next_param\n self.update_params_all[para_name] = params", "def updateFitnessSet(self):\r\n\r\n accuracySum=0.0\r\n accuracies = []\r\n\r\n #First, calculate the accuracies of the classifier and the accuracy sums\r\n i = 0\r\n for cl in self.clSet:\r\n accuracies.append(cl.getAccuracy())\r\n accuracySum = accuracySum + accuracies[i]*cl.getNumerosity()\r\n i = i + 1\r\n\r\n #Next, update the fitness accordingly\r\n for i in range(self.getSize()):\r\n self.clSet[i].updateFitness(accuracySum, accuracies[i])", "def update_data(self):\n\n for idx, key in enumerate(self.runner.input_data.dtype.names):\n self.variables[key].value = self.Xtrain[:, [idx]]\n for idx, key in enumerate(self.runner.output_data.dtype.names):\n # TODO: Multi Output\n self.variables[key].value = self.ytrain", "def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):\n \n if test_data: \n n_test = len(test_data)\n \n n = len(training_data)\n \n for j in xrange(epochs):\n random.shuffle(training_data)\n \n# mini_batches = [\n # training_data[k:k+mini_batch_size] for k in xrange(0, n, mini_batch_size)]\n \n mini_batches = training_data[:mini_batch_size]\n \n self.backPropagate(mini_batches, eta)\n \n if test_data:\n# print \"Epoch {0}: {1} / {2}\".format(j, self.evaluate(test_data), n_test)\n print \"Epoch {0}: cost is {1}\".format(j, 
self.evaluate(test_data))\n \n else:\n print \"Epoch {0} complete\".format(j)", "def update_weights(self):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for k, optimizer in self.optimizers.items():\n self.models[k].before_update()\n optimizer.step()\n self.models[k].inc_step()", "def _update_trainable_params(self):\n self._trainable_params = list(self._par_info)", "def update(self):\n if self.enabled:\n for avg, weight in zip(self.averages, self.weights):\n self._update_single(avg, weight)\n self.num_updates += 1", "def _update_trainable_params(self):\n self._trainable_params = set(self._par_info)", "def update_learningrate(self, val):\n\n print \"> Training rate is update to: {0}\".format(val)\n self.trainer = BackpropTrainer(self.net, self.ds_training, learningrate=val)", "def _update_G(self):\n for module in self.modules:\n g = self.computeG(self.m_g[module], module, self.batch_averaged)\n if self.steps == 0:\n self._init_G(g, module)\n update_running_avg(g, self.m_G[module], self.factor_decay)", "def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)", "def update(self, batch_size):\n self.weights = self.weights_update.update_params(self.weights, self.change_weights / batch_size) # update weights\n\n if self.use_bias:\n self.bias = self.bias_update.update_params(self.bias, self.change_bias / batch_size) # update biases", "def trainFM_parallel_sgd(\n sc,\n train,\n val=None,\n weights=None,\n iterations=50,\n iter_sgd=5,\n alpha=0.01,\n regParam=0.01,\n factorLength=4,\n verbose=False,\n savingFilename=None,\n evalTraining=None,\n mode='reg',\n loss='mse'):\n\n # split the data in train and validation sets if evalTraining or verbose\n if val: val.persist(StorageLevel.MEMORY_ONLY_SER)\n train.persist(StorageLevel.MEMORY_ONLY_SER)\n\n # glom() allows to treat a partition as an array rather as a single row at\n # time\n train_Y = train.map(lambda row: row.label).glom()\n train_X = train.map(lambda row: row.features).glom()\n train_XY = train_X.zip(train_Y).persist(StorageLevel.MEMORY_ONLY_SER)\n # train_XY = train_X.zip(train_Y).cache()\n\n # Initialize weight vectors\n nrFeat = len(train_XY.first()[0][0])\n if weights is not None:\n w = weights[0]\n bias = weights[1]\n assert(w.shape[1] == factorLength)\n print(w.shape)\n print(nrFeat)\n if w.shape[0] < nrFeat:\n w2 = np.random.ranf((nrFeat - w.shape[0], factorLength))\n w2 = w2 / np.sqrt((w2 * w2).sum()) \n bias2 = np.random.ranf(nrFeat - w.shape[0])\n bias2 = bias2 / np.sqrt((bias2 * bias2).sum())\n\n w = np.concatenate((w, w2), axis=0)\n tmp = bias[-1]\n bias = np.append(bias[:-1], bias2)\n bias = np.append(bias, tmp)\n\n else:\n np.random.seed(int(time.time()))\n w = np.random.ranf((nrFeat, factorLength))\n bias = np.random.ranf(nrFeat + 1)\n w = w / np.sqrt((w * w).sum())\n bias = bias / np.sqrt((bias * bias).sum())\n\n if evalTraining:\n evalTraining.evaluate(w, bias)\n if val:\n evalValidation = evaluation(val, mode, loss)\n evalValidation.modulo = evalTraining.modulo\n evalValidation.evaluate(w, bias)\n else:\n evalValidation = None\n\n if verbose:\n print('iter \\ttime \\ttrain_loss \\tval_loss')\n # compute original logloss (0 iteration)\n if evalValidation:\n print('%d \\t%d \\t%5f \\t%5f' %\n (0, 0, evalTraining.loss[-1], evalValidation.loss[-1]))\n elif evalTraining:\n print('%d \\t%d \\t%5f ' %\n (0, 0, evalTraining.loss[-1]))\n start = time.time()\n\n for i in range(iterations):\n wb 
= sc.broadcast(w)\n biasb = sc.broadcast(bias)\n weights = train_XY.map(\n lambda X_y: sgd_subset(\n X_y[0],\n X_y[1],\n wb.value,\n biasb.value,\n iter_sgd,\n alpha,\n regParam,\n loss))\n\n weights = weights.collect() \n wsub = np.array([x[0] for x in weights]) \n biassub = np.array([x[1] for x in weights]) \n w = wsub.mean(axis = 0)\n bias = biassub.mean(axis = 0)\n\n # evaluate and store the evaluation figures each 'evalTraining.modulo'\n # iteration\n if evalTraining and i % evalTraining.modulo == 0:\n evalTraining.evaluate(w, bias)\n if evalValidation:\n evalValidation.evaluate(w, bias)\n if verbose:\n\n if i % evalTraining.modulo == 0:\n if evalValidation:\n print('%d \\t%d \\t%5f \\t%5f' % (i + 1, time.time() - \\\n start, evalTraining.loss[-1], evalValidation.loss[-1]))\n else:\n print('%d \\t%d \\t%5f ' %(i + 1, time.time() - \\\n start, evalTraining.loss[-1]))\n if savingFilename:\n saveModel((w, bias), savingFilename + '_iteration_' + str(i + 1))\n\n train_XY.unpersist()\n\n return w, bias", "def __stochastic_train(self, learning_rate_delta=False):\n for train_example, target in izip(self.X_train, self.Y_train):\n\n last_gradient_update, cache = 0, 1\n self.old_model = np.copy(self.model)\n model_at_example = np.dot(train_example, self.old_model[:-1]) + self.old_model[self.num_features]\n for index, theta in enumerate(self.old_model):\n # non bias input\n if index < self.num_features:\n gradient = ((target - model_at_example) * train_example[index])\n else:\n gradient = (target - model_at_example)\n\n theta = theta + (gradient * (self.alpha / np.sqrt(cache + 1e-08)))\n last_gradient_update += (gradient * gradient)\n self.model[index][0] = theta\n cache = last_gradient_update\n\n # update the cache to be as in Adagrad updated to\n print self.model", "def update_sparsity(self):\n self._t += 1\n if self._t == self._t0:\n self._st = self._si\n elif self._t > self._t0:\n self._st = self._sf + (self._si - self._sf)*((1 - ((self._t - self._t0)/(self._n * self._delta)))**3)\n self._sparse_vec.append((self._t, self._st))\n self._update_masks()", "def update(self, examples):\n\n batch_X, batch_y = get_prodigy_x_y(examples, self.cat2bin)\n\n if len(batch_X) != 0:\n # Update if the \n self.training_X = self.training_X + batch_X\n self.training_y = self.training_y + batch_y\n\n # Refit with collated old training data with new\n self.vectorizer = TfidfVectorizer(\n analyzer='word',\n token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 2)\n )\n train_X_vect = self.vectorizer.fit_transform(self.training_X)\n \n self.model = LogisticRegression(max_iter=1000)\n self.model = self.model.fit(train_X_vect, self.training_y)\n\n new_y_pred = self.model.predict(train_X_vect)\n test_y_pred = self.model.predict(self.vectorizer.transform(self.test_X))\n\n train_f1 = f1_score(self.training_y, new_y_pred, average='weighted')\n self.test_f1 = f1_score(self.test_y, test_y_pred, average='weighted')\n print(f\"Training F1: {round(train_f1, 3)}\")\n print(f\"Test F1: {round(self.test_f1, 3)}\")\n print(\"Train classification report:\")\n print(classification_report(self.training_y, new_y_pred))\n print(\"Test classification report:\")\n print(classification_report(self.test_y, test_y_pred))\n print(\"Test confusion:\")\n print(confusion_matrix(self.test_y, test_y_pred))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GET Request for article recommendation
def article(title): try: idx = INDEX[title.lower()] if not isinstance(idx, np.int64): idx = np.int64(list(idx)[0]) # Get 5 recommendations based on similar post rec = list(ARTICLE_MODEL.recommend_from_interactions([idx], k=5)) # Get post_id of recommended posts post_id = [rec[x]['post_id'] for x, i in enumerate(rec)] # Return the top 5 most similar posts return jsonify(articles=list(INDEX2.loc[post_id])) except KeyError: return jsonify(articles="We Have No Recommendations For You!")
[ "def get(self):\n app.logger.info(\"Request for recommendations list\")\n\n recommendations = Recommendation.all()\n\n results = [recommendation.serialize() for recommendation in recommendations]\n return results, status.HTTP_200_OK", "def test_get_recommendations(self):\n taste_dive_api = TasteDiveApi()\n parsed_response = taste_dive_api.get_recommendations(\"inception\")\n print(json.dumps(parsed_response))\n self.assertTrue('Similar' in parsed_response)\n self.assertTrue('Info' in parsed_response.get('Similar'))\n self.assertTrue('Results' in parsed_response.get('Similar'))", "def list(self, request):\n recommendation = Recommendation.objects.all()\n user = self.request.query_params.get('user', None)\n\n if user is not None:\n customer = Customer.objects.get(user=request.auth.user)\n recommendation = recommendation.filter(customer=customer)\n\n serializer = RecommendationSerializer(\n recommendation, many=True, context={'request': request})\n return Response(serializer.data)", "def test_get_article_average(self):\n url = reverse('ratings', kwargs={\"article_id\": 1})\n response = self.client.get(url,\n HTTP_AUTHORIZATION=self.joel_auth_header1,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def recommendations(self, tv_id, page=1):\n return self._request_obj(\n self._urls[\"recommendations\"] % tv_id,\n params=\"page=%s\" % page,\n key=\"results\"\n )", "def articleRequest(group, index, id = None):", "def test_recommend():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_recommend_simple(recommender)", "def recommendations():\n song_title = request.values['song_title']\n suggestions = recommended_songs(str(song_title),\n features_df,\n knn_spotify,\n data_path)\n return render_template('recommendations.html',\n song_title=song_title,\n suggestions=suggestions)", "def test_get_rate(self):\n self.base_post_rate()\n response = self.client.get(self.rate_article_url, format='json')\n assert response.status_code == 200\n assert response.data[\"user\"] == \"asheuh\"\n assert response.data[\"rate\"] == 5\n assert response.data[\"comment\"] == \"I like this article\"", "def test_get_empty_recommendations(self):\n taste_dive_api = TasteDiveApi()\n parsed_response = taste_dive_api.get_recommendations(\"tochen\")\n self.assertTrue('Similar' in parsed_response)\n self.assertTrue('Info' in parsed_response.get('Similar'))\n self.assertTrue('Results' in parsed_response.get('Similar'))\n\n self.assertEqual(1, len(parsed_response.get('Similar').get('Info')))\n self.assertEqual(0, len(parsed_response.get('Similar').get('Results')))", "def recommend(n_clicks, num_recs, upperlimit, lowerlimit, input_box):\n\n context = clean_text(input_box)\n print(upperlimit, num_recs, n_clicks)\n if context != '':\n if lowerlimit:\n hd2vrecommendations = hd2v_wvindvout_recommend(context, hd2vreducedmodel) \n bm25recommendations = solr_recommend(context, 'mag_en_cs_50_all')\n citedbm25_recommendations = solr_cited_recommend(context, 'mag_en_cs_50_cited_all')\n if not hd2vrecommendations or not bm25recommendations or not citedbm25_recommendations:\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('No recommendations returned.'),\n ])\n hybrid_recommendations = hybrid_recommend(hd2vrecommendations, bm25recommendations, citedbm25_recommendations)\n # magid, title, year, citations, abstract\n if upperlimit:\n all_recommendations = get_paper_details(hybrid_recommendations)\n reduced_recommendations = [recomm for recomm in all_recommendations if 
recomm[3]<=500]\n reduced_recommendations = get_topn(reduced_recommendations, num_recs)\n else:\n reduced_recommendations = get_paper_details(get_topn(hybrid_recommendations, num_recs))\n #recommended_titles = [details[1] for details in get_paper_details(reduced_recommendations)]\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('Recommendations:'),\n html.Ol([html.Li(html.A(recomm[1], \n href='https://academic.microsoft.com/paper/{}'.format(recomm[0]),\n title=' Year: {}\\nAbstract:{}'\\\n .format(recomm[2], recomm[4]))\n ) \n for recomm in reduced_recommendations])\n ])\n else:\n hd2vrecommendations = hd2v_wvindvout_recommend(context, hd2vmodel)\n bm25recommendations = solr_recommend(context, 'mag_en_cs_all')\n citedbm25_recommendations = solr_cited_recommend(context, 'mag_en_cs_cited_all')\n if not hd2vrecommendations or not bm25recommendations or not citedbm25_recommendations:\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('No recommendations returned.'),\n ])\n hybrid_recommendations = hybrid_recommend(hd2vrecommendations, bm25recommendations, citedbm25_recommendations)\n # magid, title, year, citations, abstract\n if upperlimit:\n all_recommendations = get_paper_details(hybrid_recommendations)\n reduced_recommendations = [recomm for recomm in all_recommendations if recomm[3]<=500]\n reduced_recommendations = get_topn(reduced_recommendations, num_recs)\n else:\n #print(hybrid_recommendations)\n reduced_recommendations = get_paper_details(get_topn(hybrid_recommendations, num_recs))\n #recommended_titles = [details[1] for details in get_paper_details(reduced_recommendations)]\n return html.Div([\n html.Br(),\n html.Br(),\n html.H2('Recommendations:'),\n html.Ol([html.Li(html.A(recomm[1], \n href='https://academic.microsoft.com/paper/{}'.format(recomm[0]),\n title=' Year: {}\\nAbstract:{}'\\\n .format(recomm[2], recomm[4]))\n ) \n for recomm in reduced_recommendations])\n ])", "def get(self, no):\n article = get_a_article(no)\n if not article:\n api.abort(404)\n else:\n return article", "def retrieve_review(claim):\n # print(claim)\n params = {\"query\": claim, \"key\": API_KEY}\n if claim:\n r = requests.get(url=URL, params=params)\n data = r.json()\n if data == {}:\n print(\"No review found\")\n return \"No review found\"\n reviews = []\n for review in data[\"claims\"]:\n reviews.append(review[\"claimReview\"][0][\"textualRating\"])\n # print(reviews)\n return reviews\n else:\n print(\"There is no claim entered\")\n return \"There is no claim entered\"", "def get(self, product_id, recommendation_product_id):\n app.logger.info(\"Request for relationship between product ids: %s %s\", product_id, recommendation_product_id)\n recommendation = Recommendation.find(product_id, recommendation_product_id)\n if not recommendation or not recommendation.relationship:\n abort(status.HTTP_404_NOT_FOUND, \"Recommendation for product id {} and {} was not found.\".format(product_id, recommendation_product_id))\n return recommendation.serialize(), status.HTTP_200_OK", "def article_body(request, query, format=None):\n\tif request.method == 'GET':\n\t\tarticles = Article.objects.filter(body__icontains=query)\n\t\tserializer = ArticleSerializer(articles, many=True)\n\t\treturn Response(serializer.data)", "def get_retrieved_knowledge(self, message):", "def article_author(request, query, format=None):\n\tif request.method == 'GET':\n\t\tarticles = Article.objects.filter(author__icontains=query)\n\t\tserializer = ArticleSerializer(articles, many=True)\n\t\treturn Response(serializer.data)", 
"def get_alternative_recommendation(self, user):\n raise NotImplementedError()", "def get_filtered_article(search_query):\n check_connection()\n spinner.start()\n response = requests.get(\n url + \"/articles/search?{}\".format(search_query))\n spinner.stop()\n spinner.clear()\n\n if response.status_code == 400:\n spinner.warn(\"The article requested was not found 😬\")\n click.echo(\"Status code: {}\".format(response.status_code))\n elif response.status_code == 200:\n spinner.succeed(\"Article found 🤓\")\n click.echo(\"Status code: {}\".format(response.status_code))\n article = json_formatter(response.text)\n click.echo(article)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to scan one DAC and save the frames
def scanDac(self, dac, start, stop, step=1, tacq=1, nacq=1, filename=None, format='i16', binary=False, sparsexy=False, append=False, tp=False, pulseHeigh=0.2, pulseCount=100): # Check the range of the scan maxdac = self.DACs.max(dac) if start < 0: raise PmError('invalid DAC start value %d for ' % start + dac) if stop > maxdac: raise PmError('invalid DAC stop value %d for ' % stop + dac) if filename != None: (dirname, filename) = os.path.split(filename) (filename, ext) = os.path.splitext(filename) #Genereate steps and save the previous value dacs = np.arange(start, stop, step) meancount = np.zeros(dacs.shape) * np.nan dacbkp = self.DACs[dac] # if saveLog==True: # #Check if file exists and raise error if we have set overWrite to false # if os.path.isfile(os.path.join(dirname, filename)+'_log.txt') and overWrite==False: # raise IOError('File already exists set overWrite=True or use a different filename') # try: # log=open(os.path.join(dirname, filename)+'_log.txt', 'w') # except IOError: # os.makedirs(path) # log=open(os.path.join(dirname, filename)+'_log.txt', 'w') for i in np.arange(dacs.size): self.DACs[dac] = dacs[i] if tp: self.performTestPulseAcq(pulseHeight=pulseHeigh, pulseCount=pulseCount) else: self.performFrameAqc(nacq, tacq) if filename != None: if append == False: self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % 0 + ext, 0, format, binary, sparsexy, append) elif append == True: self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, append) data = self.getFrame(format=format) for j in np.arange(1, nacq): if filename != None: self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % j + ext, 0, format, binary, sparsexy, append) data = data + self.getFrame(format=format) meancount[i] = np.mean(data[0:10, 0:10]) time.sleep(0.01) print('%d ' % dacs[i]), #force plot update self.DACs[dac] = dacbkp plt.figure() plt.plot(dacs, meancount) return dacs, meancount
[ "def thlScan(self, dac, start, stop,\n step=1,\n tacq=1,\n nacq=1,\n filename=None,\n format='i16',\n binary=False,\n sparsexy=False,\n append=False,\n useTestPulses=False,\n saveLog=False,\n overWrite=False,\n pulseHeight=0.2,\n pulseCount=100,\n spacing=2):\n\n # Check the range of the scan\n maxdac = self.DACs.max(dac)\n if start < 0:\n raise PmError('invalid DAC start value %d for ' % start + dac)\n if stop > maxdac:\n raise PmError('invalid DAC stop value %d for ' % stop + dac)\n if filename != None:\n (dirname, filename) = os.path.split(filename)\n (filename, ext) = os.path.splitext(filename)\n\n #Genereate steps and save the previous value\n dacs = np.arange(start, stop, step)\n meancount = np.zeros(dacs.shape) * np.nan\n dacbkp = self.DACs[dac]\n\n if saveLog == True:\n #Check if file exists and raise error if we have set overWrite to false\n if os.path.isfile(os.path.join(dirname, filename) + '_log.txt') and overWrite == False:\n raise IOError('File already exists set overWrite=True or use a different filename')\n try:\n log = open(os.path.join(dirname, filename) + '_log.txt', 'w')\n except IOError:\n os.makedirs(path)\n log = open(os.path.join(dirname, filename) + '_log.txt', 'w')\n\n #List with values to write to the log\n thlCodeList = []\n thlAnalogList = []\n fbkAnalogList = []\n frameSumList = []\n\n #SCAN\n for i in np.arange(dacs.size):\n #Set DAC\n self.DACs[dac] = dacs[i]\n\n if useTestPulses:\n self.performTestPulseAcq(pulseHeight=pulseHeight, pulseCount=pulseCount, spacing=spacing)\n else:\n self.performFrameAqc(nacq, tacq)\n if filename != None:\n if append == False:\n self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % 0 + ext, 0, format,\n binary, sparsexy, append)\n elif append == True:\n if i == 0:\n self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, False)\n else:\n self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, append)\n\n data = self.getFrame(format=format)\n\n # for j in np.arange(1, nacq):\n # if filename!=None:\n # self.saveFrame(dirname+'/'+filename+'_'+ dac+'%i_' %dacs[i] +'%4i' %j + ext, 0, format, binary, sparsexy, append)\n # data = data + self.getFrame(format=format)\n\n meancount[i] = np.mean(data)\n\n time.sleep(0.1)\n\n #Measure THL and FBK\n thl = self.DACs.analog('THL', senseCount=3)\n fbk = self.DACs.analog('FBK', senseCount=3)\n\n thlCodeList.append(dacs[i])\n thlAnalogList.append(thl)\n fbkAnalogList.append(fbk)\n frameSumList.append(data.sum())\n\n print('%d ' % dacs[i]), #force plot update\n self.DACs[dac] = dacbkp\n\n #plt.figure()\n #plt.plot(dacs, meancount)\n\n a, b, c = linreg(thlCodeList, thlAnalogList)\n fbk_m = sum(fbkAnalogList) / len(fbkAnalogList)\n\n if saveLog == True:\n log.write('Scan Log for ' + self.info.chipboardID + '\\n')\n if self.info.mpxType == 1:\n log.write('Chip type: Medipix2\\n')\n elif self.info.mpxType == 2:\n log.write('Chip type: Medipix2MXR\\n')\n elif self.info.mpxType == 3:\n log.write('Chip type: Timepix\\n')\n elif self.info.mpxType == 4:\n log.write('Chip type: Medipix3\\n')\n else:\n log.write('Chip type: Unknown\\n')\n log.write('Time: ' + strftime(\"%a, %d %b %Y %H:%M:%S\", localtime()) + '\\n')\n log.write('THL Fit: ' + str(a) + '*x+' + str(b) + '\\n')\n log.write('FBK Mean: ' + str(fbk_m) + '\\n')\n log.write(\n 'THL'.ljust(8) + 'THL (Analog)'.ljust(15) + 'FBK (Analog)'.ljust(15) + 'THL-FBK'.ljust(15) + 'Sum\\n')\n for i in range(len(thlCodeList)):\n th = a * thlCodeList[i] + b - fbk_m\n field_width = 10\n thl_s = 
\"%5.*f\" % (field_width, thlAnalogList[i])\n fbk_s = \"%5.*f\" % (field_width, fbkAnalogList[i])\n eff_s = \"%5.*f\" % (field_width, th)\n frame_s = \"%5.*i\" % (field_width, frameSumList[i])\n log.write(\n str(thlCodeList[i]).ljust(8) + thl_s.ljust(15) + fbk_s.ljust(15) + eff_s.ljust(15) + frame_s.ljust(\n 15) + '\\n')\n log.close()\n return dacs, meancount", "def detectDACs(self, timeout=1.0):\n def callback(src, data):\n board = int(src[-2:], 16)\n build = dac.DAC.readback2BuildNumber(data)\n devName = '{} DAC {}'.format(self.name, board)\n args = (devName, self, self.directEthernetServer, self.port, board,\n build)\n return (devName, args)\n macs = [dac.DAC.macFor(board) for board in range(256)]\n return self._doDetection(macs, dac.DAC.regPing(),\n dac.DAC.READBACK_LEN, callback)", "def _scan_file(self):\n\n try:\n ret = self._cd.scan_file(self._path)\n\n if self._email_message.__getitem__(self._x_virus) != YES and ret == None:\n self._virus = False\n elif ret != None:\n self._virus = True\n self._viruses.append(ret[self._path])\n\n #except pyclamd.ScanError, err:\n # log(\"%s [ClamAV] Error: %s\" % (self._message_id, err), STD_ERR)\n # raise BreakScanning()\n except Exception, err:\n if self._email_message.__getitem__(self._x_virus) != YES:\n self._virus = None\n log(\"%s [ClamAV] Unexpected error: %s\" % (self._message_id, err), STD_ERR)\n raise BreakScanning()", "def scan():\n _rpc.request('AudioLibrary.Scan')", "def find_msd(direct, frames):\n x = pd.DataFrame()\n for i in range(frames):\n #reading the x coordinates from the txt files\n s = pd.read_csv(direct + \"/images/fronts/\"+str(i)+\".png_sx.txt\", sep = \" \")\n s.columns = [0,1]\n x[i] = s[0]\n #computes the MSD of the dataframe with the x coordinates\n msd = an.MSD(x)\n #saving it\n msd.to_csv(direct + \"/msd.txt\", header = None, index = False,sep=' ')\n print(colors.green|\"msd saved in files 'msd.txt'\")\n\n return msd", "def export_scan_data():\n\n if (len(data.i_dat_all_combined) < 1):\n error = showerror('Error', 'No data in memory')\n return\n #Save current directory (should be at script level)\n original_directory = os.getcwd()\n #Make new directoy to work in:\n working_dir = \"exportedScans\"\n if os.path.exists(working_dir) != True:\n os.mkdir(working_dir)\n try:\n listLen = len(data.i_dat_all_combined)\n os.chdir(os.path.abspath(working_dir))\n print \"Saving: \", listLen, \" scans\"\n for i in range(listLen):\n with open(\"combined\" + str(i).zfill(4) + \".txt\", 'w') as FILE:\n for value in data.i_dat_all_combined[i]:\n FILE.write('%s \\n' % value)\n finally:\n os.chdir(original_directory)\n info = showinfo('Notice', str(listLen) +' scans saved to:\\n ../' + working_dir)", "def do_save_continuous(self, datas):\n try:\n det_name = datas['name']\n if self.logger_type == 'h5saver':\n det_group = self.data_logger.get_group_by_title(self.data_logger.raw_group, det_name)\n time_array = self.data_logger.get_node(det_group, 'Logger_time_axis')\n time_array.append(np.array([datas['acq_time_s']]))\n\n data_types = ['data0D', 'data1D']\n if self.data_logger.settings.child(('save_2D')).value():\n data_types.extend(['data2D', 'dataND'])\n\n for data_type in data_types:\n if data_type in datas.keys() and len(datas[data_type]) != 0:\n if not self.data_logger.is_node_in_group(det_group, data_type):\n data_group = self.data_logger.add_data_group(det_group, data_type,\n metadata=dict(type='scan'))\n else:\n data_group = self.data_logger.get_node(det_group, utils.capitalize(data_type))\n for ind_channel, channel in 
enumerate(datas[data_type]):\n channel_group = self.data_logger.get_group_by_title(data_group, channel)\n if channel_group is None:\n channel_group = self.data_logger.add_CH_group(data_group, title=channel)\n data_array = self.data_logger.add_data(channel_group, datas[data_type][channel],\n scan_type='scan1D', enlargeable=True)\n else:\n data_array = self.data_logger.get_node(channel_group, 'Data')\n if data_type == 'data0D':\n data_array.append(np.array([datas[data_type][channel]['data']]))\n else:\n data_array.append(datas[data_type][channel]['data'])\n self.data_logger.h5_file.flush()\n\n elif self.logger_type == 'dblogger':\n self.data_logger.add_datas(datas)\n\n self.data_logger.settings.child(('N_saved')).setValue(\n self.data_logger.settings.child(('N_saved')).value() + 1)\n\n except Exception as e:\n logger.exception(str(e))", "def save_data(self):\n self.directory = str(QtWidgets.QFileDialog.getExistingDirectory(self, \"Select Directory\", self.directory))\n filename = 'scan_data.dat'\n file = os.path.join(self.directory, filename)\n\n self.experiment.save_scan_data(file)", "def prepare_by_scan(self, scan_cmd):\n name_used = {} #名稱在哪些 src_id 中使用 # ->[src_id,...] \n lines = []\n #show all record\n for idx, row in self.df_cfg.iterrows():\n if row['enabled']!=1:\n continue\n ds_name = row['ds_name']\n id_type = row['id_type']\n col_id = row['col_id']\n col_name = row['col_name']\n col_key = row['col_key']\n src_id = row['src_id']\n #print(\"----- DS_NAME:%s -----\" %(ds_name))\n df = self.load_ds(ds_name)\n #print(\"key=%s\" %(col_key))\n if not pd.isnull(col_key):\n #if col_key !=\"\":\n keys = col_key.split(\",\")\n df.sort_values(by=keys)\n \n \n for idx2, row2 in df.iterrows():\n format_str = \"%s@%s,%s\"\n value = row2[col_name]\n col2_id = row2[col_id]\n \n if id_type=='wikidata':\n wid=self.wd_url_to_wid(col2_id)\n id_str = format_str %(src_id,wid,value)\n fid = \"%s@%s\" %(src_id,wid)\n \n else:\n id_str = format_str %(src_id,col2_id,value)\n fid = \"%s@%s\" %(src_id,col2_id)\n #print(id_str) \n lines.append(id_str)\n if value in name_used:\n if not fid in name_used[value] :\n name_used[value].append(fid)\n else:\n name_used[value] = [ fid ]\n \n \n self.name_used = name_used\n with open(\"output/idact_fid_name.csv\", \"w\") as outfile:\n outfile.write(\"fid,name\\n\") \n outfile.write(\"\\n\".join(lines))", "def write_database(ad, database_name=None, input_name=None):\n if input_name is None:\n input_name = ad.filename\n\n basename = os.path.basename(input_name)\n basename, filetype = os.path.splitext(basename)\n\n for ext in ad:\n record_name = '{}_{:0.3d}'.format(basename, ext.id)\n db = at.SpectralDatabase(binary_table=ext.WAVECAL,\n record_name=record_name)\n db.write_to_disk(database_name=database_name)\n return", "def calling_card(filename = 'hello.jpg'):\n\n\n\t# filename = input(\"Please enter an image filename to convert to a scanned copy : \")\n\n\tcopy, blurred_image = map_input(filename)\n\tprint(blurred_image[0], len(blurred_image))\n\toutput = eagle_eye(blurred_image)\n\tprint(output[0], len(output))\n\tscanned_copy = fuuin(output, copy)\n\tprint(scanned_copy[0], len(scanned_copy))\n\n\n\t#save it to a directory called 'Scanned'\n\tif 'Scanned' not in os.listdir():\n\t\tos.mkdir('Scanned')\n\n\tsaved_path = 'Scanned/'+str(filename.split('.')[0])+str('_scanned.jpg') #path of scanned image\n\n\tcv2.imwrite(saved_path, scanned_copy)#write image to the Scanned folder\n\n\tprint('Image has been scanned and saved to directory \"Scanned\"\\n')\n\n\t#return the path of 
the saved image\n\n\treturn saved_path", "def get_scans(self, patient_id):\n images = self._df[self._df[\"Patient ID\"] == patient_id]\n return images[\"Scan ID\"].unique()", "def dac_init(self, c, chan, signed=False):\n cmd = dac.DAC.getCommand({'A': 2, 'B': 3}, chan)\n dev = self.selectedDAC(c)\n pkt = ([0x0024, 0x0004, 0x1603, 0x0500] if signed else\n [0x0026, 0x0006, 0x1603, 0x0500])\n yield dev.runSerial(cmd, pkt)\n returnValue(signed)", "def save_movie( gsd_file,output_file,resolution,file_save=False,down_sample=1):\n path_tracer = fresnel.tracer.Path(device,resolution[0],resolution[1])\n\n f = gsd.fl.GSDFile(gsd_file, 'rb')\n t = gsd.hoomd.HOOMDTrajectory(f)\n\n a = render_sphere_frame(frame=t[0],path_tracer=path_tracer);\n\n if tuple(map(int, (PIL.__version__.split(\".\")))) < (3,4,0):\n print(\"Warning! Movie display output requires pillow 3.4.0 or newer.\")\n print(\"Older versions of pillow may only display the first frame.\")\n\n im0 = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB').convert(\"P\", palette=PIL.Image.ADAPTIVE);\n ims = [];\n points = numpy.linspace(1,len(t)-1,(len(t)-1)/down_sample);\n print(points)\n for point in points:\n f = t[int(numpy.floor(point))];\n a = render_sphere_frame(frame=f,path_tracer=path_tracer);\n im = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB')\n im_p = im.quantize(palette=im0);\n ims.append(im_p)\n if file_save:\n if not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file),exist_ok=True);\n\n im0.save(output_file, 'gif', save_all=True, append_images=ims, duration=1500, loop=0)\n\n return (f)", "def save_xrd_marccd(name, t=10, ext=None, prefix='13MARCCD4:', timeout=60.0):\n start_time = systime()\n\n # save shutter mode, disable shutter for now\n shutter_mode = caget(prefix+'cam1:ShutterMode')\n\n\n # NOTE: Need to start acquisition with the shutter\n # having been closed for awhile\n # using the SSA H Width as shutter we want\n # NOTE: Need to start acquisition with the shutter\n # having been closed for awhile\n # using the SSA H Width as shutter we want\n\n caput(prefix+'cam1:ShutterControl', 0)\n close_ccd_shutter()\n\n caput(prefix+'cam1:FrameType', 0)\n caput(prefix+'cam1:ImageMode', 0)\n caput(prefix+'cam1:AutoSave', 1)\n caput(prefix+'cam1:AutoIncrement', 1)\n caput(prefix+'cam1:FileName', name)\n if ext is not None:\n caput(prefix+'cam1:FileNumber', ext)\n #endif\n caput(prefix+'cam1:AcquireTime', t)\n\n sleep(0.1)\n\n # expose\n caput(prefix+'cam1:Acquire', 1)\n sleep(1.0 + max(1.0, t))\n t0 = systime()\n print('Wait for Acquire ... ')\n while ((1 == caget(prefix+'cam1:Acquire')) and\n (clock()-t0 < timeout)):\n sleep(0.25)\n #endwhile\n\n fname = caget(prefix+'cam1:FullFileName_RBV', as_string=True)\n print('Acquire Done! 
%.3f sec' % (systime()-start_time))\n print('Wrote %s' % fname)\n sleep(1.0)\n caput(prefix+'cam1:ShutterControl', 1)", "def get_barcode(dev = \"/dev/hidraw0\"):\n hiddev = open(dev, \"rb\")\n \n barcode = ''\n\n continue_looping = True\n\n k = 0\n\n while continue_looping:\n report = hiddev.read(8)\n\n # print \"k value: \", k\n k += 1\n\n for i in report:\n j = ord(i)\n # # print j\n if j == 0:\n # print \"j = \", j\n continue\n\n if j == 0x1E:\n barcode += '1'\n # print \"j = \", j\n continue\n elif j == 0x1F:\n barcode += '2'\n # print \"j = \", j\n continue\n elif j == 0x20:\n barcode += '3'\n # print \"j = \", j\n continue\n elif j == 0x21:\n barcode += '4'\n # print \"j = \", j\n continue\n elif j == 0x22:\n barcode += '5'\n # print \"j = \", j\n continue\n elif j == 0x23:\n barcode += '6'\n # print \"j = \", j\n continue\n elif j == 0x24:\n barcode += '7'\n # print \"j = \", j\n continue\n elif j == 0x25:\n barcode += '8'\n # print \"j = \", j\n continue\n elif j == 0x26:\n barcode += '9'\n # print \"j = \", j\n continue\n elif j == 0x27:\n barcode += '0'\n # print \"j = \", j\n continue\n elif j == 0x28:\n # print \"j = \", j\n # print barcode\n hiddev.close()\n continue_looping = False\n break\n else:\n pass\n # print \"+++ Melon melon melon +++\"\n # print \"j = \", j\n # hiddev.close()\n # continue_looping = False\n # break\n\n return barcode", "def get_vad(source_path, subject):\n # subject_dir = re.findall(r\"[\\w']+\", source_path)[1]\n fname = re.split(\"[/.]+\", source_path)[1]\n audio, sr = read_wave(source_path)\n vad = webrtcvad.Vad(1)\n frames = frame_generator(30, audio, sr)\n frames = list(frames)\n segments = vad_collector(sr, 30, 300, vad, frames)\n\n vad_dir = 'vad/'\n full_path = vad_dir + subject + '/'\n\n if not os.path.exists(vad_dir):\n os.makedirs(vad_dir)\n\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n\n\n # for i, segment in enumerate(segments):\n # dest_path = '%s-%002d.wav' % (full_path, i)\n # print(' Writing %s' % (dest_path,))\n # write_wave(dest_path, segment, sr)\n\n # Expect only generated 1 VAD every voice record\n file_path = full_path + '%s.wav' % fname\n print(' Writing %s' % file_path)\n write_wave(file_path, next(segments, None), sr)\n\n print('extract vad is success!')", "def scan1D(sd, outputdir=None, step=-2., max_wait_time=.75, scanrange=300):\n if sd.verbose:\n print('### sensing dot scan')\n minstrument = sd.minstrument\n if sd.index is not None:\n minstrument = [sd.index]\n gg = sd.gg\n sdval = sd.sdval\n gates = sd.station.gates\n\n for ii in [0, 2]:\n gates.set(gg[ii], sdval[ii])\n\n startval = sdval[1] + scanrange\n endval = sdval[1] - scanrange\n wait_time = 0.8\n try:\n wait_time = sd.station.gate_settle(gg[1])\n except BaseException:\n pass\n wait_time = np.minimum(wait_time, max_wait_time)\n\n scanjob1 = qtt.measurements.scans.scanjob_t()\n scanjob1['sweepdata'] = dict(\n {'param': gg[1], 'start': startval, 'end': endval, 'step': step, 'wait_time': wait_time})\n scanjob1['wait_time_startscan'] = .2 + 3 * wait_time\n scanjob1['minstrument'] = minstrument\n scanjob1['compensateGates'] = []\n scanjob1['gate_values_corners'] = [[]]\n\n if sd.verbose:\n print('sensingdot_t: scan1D: gate %s, wait_time %.3f' % (sd.gg[1], wait_time))\n alldata = qtt.measurements.scans.scan1D(sd.station, scanjob=scanjob1, verbose=sd.verbose)\n return alldata", "def main(arg_dict):\n if arg_dict['use_dummy'] is True:\n import dummy_sweeppy as sweeppy\n else:\n import sweeppy\n\n exporter = ScanExporter()\n\n index = 0\n for 
base_angle_scalar in range(0, 13):\n dummy_samples = [sweeppy.Sample(angle=1000 * 30 * n, distance=1000, signal_strength=199)\n for n in range(12)]\n dummy_scan = sweeppy.Scan(samples=dummy_samples)\n\n exporter.export_2D_scan(\n dummy_scan,\n index,\n 30,\n 90,\n False)\n index = index + 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to scan the THL DAC and save the frames
def thlScan(self, dac, start, stop, step=1, tacq=1, nacq=1, filename=None, format='i16', binary=False, sparsexy=False, append=False, useTestPulses=False, saveLog=False, overWrite=False, pulseHeight=0.2, pulseCount=100, spacing=2): # Check the range of the scan maxdac = self.DACs.max(dac) if start < 0: raise PmError('invalid DAC start value %d for ' % start + dac) if stop > maxdac: raise PmError('invalid DAC stop value %d for ' % stop + dac) if filename != None: (dirname, filename) = os.path.split(filename) (filename, ext) = os.path.splitext(filename) #Genereate steps and save the previous value dacs = np.arange(start, stop, step) meancount = np.zeros(dacs.shape) * np.nan dacbkp = self.DACs[dac] if saveLog == True: #Check if file exists and raise error if we have set overWrite to false if os.path.isfile(os.path.join(dirname, filename) + '_log.txt') and overWrite == False: raise IOError('File already exists set overWrite=True or use a different filename') try: log = open(os.path.join(dirname, filename) + '_log.txt', 'w') except IOError: os.makedirs(path) log = open(os.path.join(dirname, filename) + '_log.txt', 'w') #List with values to write to the log thlCodeList = [] thlAnalogList = [] fbkAnalogList = [] frameSumList = [] #SCAN for i in np.arange(dacs.size): #Set DAC self.DACs[dac] = dacs[i] if useTestPulses: self.performTestPulseAcq(pulseHeight=pulseHeight, pulseCount=pulseCount, spacing=spacing) else: self.performFrameAqc(nacq, tacq) if filename != None: if append == False: self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % 0 + ext, 0, format, binary, sparsexy, append) elif append == True: if i == 0: self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, False) else: self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, append) data = self.getFrame(format=format) # for j in np.arange(1, nacq): # if filename!=None: # self.saveFrame(dirname+'/'+filename+'_'+ dac+'%i_' %dacs[i] +'%4i' %j + ext, 0, format, binary, sparsexy, append) # data = data + self.getFrame(format=format) meancount[i] = np.mean(data) time.sleep(0.1) #Measure THL and FBK thl = self.DACs.analog('THL', senseCount=3) fbk = self.DACs.analog('FBK', senseCount=3) thlCodeList.append(dacs[i]) thlAnalogList.append(thl) fbkAnalogList.append(fbk) frameSumList.append(data.sum()) print('%d ' % dacs[i]), #force plot update self.DACs[dac] = dacbkp #plt.figure() #plt.plot(dacs, meancount) a, b, c = linreg(thlCodeList, thlAnalogList) fbk_m = sum(fbkAnalogList) / len(fbkAnalogList) if saveLog == True: log.write('Scan Log for ' + self.info.chipboardID + '\n') if self.info.mpxType == 1: log.write('Chip type: Medipix2\n') elif self.info.mpxType == 2: log.write('Chip type: Medipix2MXR\n') elif self.info.mpxType == 3: log.write('Chip type: Timepix\n') elif self.info.mpxType == 4: log.write('Chip type: Medipix3\n') else: log.write('Chip type: Unknown\n') log.write('Time: ' + strftime("%a, %d %b %Y %H:%M:%S", localtime()) + '\n') log.write('THL Fit: ' + str(a) + '*x+' + str(b) + '\n') log.write('FBK Mean: ' + str(fbk_m) + '\n') log.write( 'THL'.ljust(8) + 'THL (Analog)'.ljust(15) + 'FBK (Analog)'.ljust(15) + 'THL-FBK'.ljust(15) + 'Sum\n') for i in range(len(thlCodeList)): th = a * thlCodeList[i] + b - fbk_m field_width = 10 thl_s = "%5.*f" % (field_width, thlAnalogList[i]) fbk_s = "%5.*f" % (field_width, fbkAnalogList[i]) eff_s = "%5.*f" % (field_width, th) frame_s = "%5.*i" % (field_width, frameSumList[i]) log.write( str(thlCodeList[i]).ljust(8) + thl_s.ljust(15) + 
fbk_s.ljust(15) + eff_s.ljust(15) + frame_s.ljust( 15) + '\n') log.close() return dacs, meancount
[ "def scanDac(self, dac, start, stop, step=1, tacq=1, nacq=1, filename=None, format='i16', binary=False,\n sparsexy=False, append=False, tp=False, pulseHeigh=0.2, pulseCount=100):\n\n # Check the range of the scan\n maxdac = self.DACs.max(dac)\n if start < 0:\n raise PmError('invalid DAC start value %d for ' % start + dac)\n if stop > maxdac:\n raise PmError('invalid DAC stop value %d for ' % stop + dac)\n if filename != None:\n (dirname, filename) = os.path.split(filename)\n (filename, ext) = os.path.splitext(filename)\n\n #Genereate steps and save the previous value\n dacs = np.arange(start, stop, step)\n meancount = np.zeros(dacs.shape) * np.nan\n dacbkp = self.DACs[dac]\n\n # if saveLog==True:\n # #Check if file exists and raise error if we have set overWrite to false\n # if os.path.isfile(os.path.join(dirname, filename)+'_log.txt') and overWrite==False:\n # raise IOError('File already exists set overWrite=True or use a different filename')\n # try:\n # log=open(os.path.join(dirname, filename)+'_log.txt', 'w')\n # except IOError:\n # os.makedirs(path)\n # log=open(os.path.join(dirname, filename)+'_log.txt', 'w')\n\n for i in np.arange(dacs.size):\n self.DACs[dac] = dacs[i]\n if tp:\n self.performTestPulseAcq(pulseHeight=pulseHeigh, pulseCount=pulseCount)\n else:\n self.performFrameAqc(nacq, tacq)\n if filename != None:\n if append == False:\n self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % 0 + ext, 0, format,\n binary, sparsexy, append)\n elif append == True:\n self.saveFrame(dirname + '/' + filename + ext, 0, format, binary, sparsexy, append)\n data = self.getFrame(format=format)\n for j in np.arange(1, nacq):\n if filename != None:\n self.saveFrame(dirname + '/' + filename + '_' + dac + '%i_' % dacs[i] + '%4i' % j + ext, 0, format,\n binary, sparsexy, append)\n data = data + self.getFrame(format=format)\n\n meancount[i] = np.mean(data[0:10, 0:10])\n time.sleep(0.01)\n print('%d ' % dacs[i]), #force plot update\n self.DACs[dac] = dacbkp\n plt.figure()\n plt.plot(dacs, meancount)\n return dacs, meancount", "def export_scan_data():\n\n if (len(data.i_dat_all_combined) < 1):\n error = showerror('Error', 'No data in memory')\n return\n #Save current directory (should be at script level)\n original_directory = os.getcwd()\n #Make new directoy to work in:\n working_dir = \"exportedScans\"\n if os.path.exists(working_dir) != True:\n os.mkdir(working_dir)\n try:\n listLen = len(data.i_dat_all_combined)\n os.chdir(os.path.abspath(working_dir))\n print \"Saving: \", listLen, \" scans\"\n for i in range(listLen):\n with open(\"combined\" + str(i).zfill(4) + \".txt\", 'w') as FILE:\n for value in data.i_dat_all_combined[i]:\n FILE.write('%s \\n' % value)\n finally:\n os.chdir(original_directory)\n info = showinfo('Notice', str(listLen) +' scans saved to:\\n ../' + working_dir)", "def detectDACs(self, timeout=1.0):\n def callback(src, data):\n board = int(src[-2:], 16)\n build = dac.DAC.readback2BuildNumber(data)\n devName = '{} DAC {}'.format(self.name, board)\n args = (devName, self, self.directEthernetServer, self.port, board,\n build)\n return (devName, args)\n macs = [dac.DAC.macFor(board) for board in range(256)]\n return self._doDetection(macs, dac.DAC.regPing(),\n dac.DAC.READBACK_LEN, callback)", "def save_movie( gsd_file,output_file,resolution,file_save=False,down_sample=1):\n path_tracer = fresnel.tracer.Path(device,resolution[0],resolution[1])\n\n f = gsd.fl.GSDFile(gsd_file, 'rb')\n t = gsd.hoomd.HOOMDTrajectory(f)\n\n a = 
render_sphere_frame(frame=t[0],path_tracer=path_tracer);\n\n if tuple(map(int, (PIL.__version__.split(\".\")))) < (3,4,0):\n print(\"Warning! Movie display output requires pillow 3.4.0 or newer.\")\n print(\"Older versions of pillow may only display the first frame.\")\n\n im0 = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB').convert(\"P\", palette=PIL.Image.ADAPTIVE);\n ims = [];\n points = numpy.linspace(1,len(t)-1,(len(t)-1)/down_sample);\n print(points)\n for point in points:\n f = t[int(numpy.floor(point))];\n a = render_sphere_frame(frame=f,path_tracer=path_tracer);\n im = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB')\n im_p = im.quantize(palette=im0);\n ims.append(im_p)\n if file_save:\n if not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file),exist_ok=True);\n\n im0.save(output_file, 'gif', save_all=True, append_images=ims, duration=1500, loop=0)\n\n return (f)", "def find_msd(direct, frames):\n x = pd.DataFrame()\n for i in range(frames):\n #reading the x coordinates from the txt files\n s = pd.read_csv(direct + \"/images/fronts/\"+str(i)+\".png_sx.txt\", sep = \" \")\n s.columns = [0,1]\n x[i] = s[0]\n #computes the MSD of the dataframe with the x coordinates\n msd = an.MSD(x)\n #saving it\n msd.to_csv(direct + \"/msd.txt\", header = None, index = False,sep=' ')\n print(colors.green|\"msd saved in files 'msd.txt'\")\n\n return msd", "def do_save_continuous(self, datas):\n try:\n det_name = datas['name']\n if self.logger_type == 'h5saver':\n det_group = self.data_logger.get_group_by_title(self.data_logger.raw_group, det_name)\n time_array = self.data_logger.get_node(det_group, 'Logger_time_axis')\n time_array.append(np.array([datas['acq_time_s']]))\n\n data_types = ['data0D', 'data1D']\n if self.data_logger.settings.child(('save_2D')).value():\n data_types.extend(['data2D', 'dataND'])\n\n for data_type in data_types:\n if data_type in datas.keys() and len(datas[data_type]) != 0:\n if not self.data_logger.is_node_in_group(det_group, data_type):\n data_group = self.data_logger.add_data_group(det_group, data_type,\n metadata=dict(type='scan'))\n else:\n data_group = self.data_logger.get_node(det_group, utils.capitalize(data_type))\n for ind_channel, channel in enumerate(datas[data_type]):\n channel_group = self.data_logger.get_group_by_title(data_group, channel)\n if channel_group is None:\n channel_group = self.data_logger.add_CH_group(data_group, title=channel)\n data_array = self.data_logger.add_data(channel_group, datas[data_type][channel],\n scan_type='scan1D', enlargeable=True)\n else:\n data_array = self.data_logger.get_node(channel_group, 'Data')\n if data_type == 'data0D':\n data_array.append(np.array([datas[data_type][channel]['data']]))\n else:\n data_array.append(datas[data_type][channel]['data'])\n self.data_logger.h5_file.flush()\n\n elif self.logger_type == 'dblogger':\n self.data_logger.add_datas(datas)\n\n self.data_logger.settings.child(('N_saved')).setValue(\n self.data_logger.settings.child(('N_saved')).value() + 1)\n\n except Exception as e:\n logger.exception(str(e))", "def analyze_file(datafile, sacc_threshold=0.9):\n\n\t# Read in datafile. Exit is file not found\n\tif not os.path.isfile(datafile):\n\t\traise IOError(\"File not found. Please specify a valid file location\")\n\n\t# There's two way the surface data could have been exported\n\t# 1. During recording by a modification of the pupil software by Daniel, resuling in a .npy file\n\t# (this is less and less likely as pupil versions no. increase)\n\t# 2. 
By the pupil player, resulting in a .csv file\n\t# Both ways have different locations at which the data file is placed and assume a different folder structure\n\tif os.path.splitext(datafile)[1] == \".npy\": # Assume its the surface export data implemented by Daniel\n\t\t# Load data into numpy series and create pandas dataframe\n\t\teye_data = np.load(datafile)\n\n\t\t# TEMPORARY FIX. In some recording made by the newer pupil capture software\n\t\t# The first column containing the frame indices is missing...\n\t\t# In the future, we should only support the offical pupil player output (e.g. the csv mode below)\n\t\t# and drop support for our custom .npy format as this becomes way too unpredictable\n\t\ttry:\n\t\t\tdata = pd.DataFrame(eye_data, columns=[\"frame_index\",\"surface_label\",\"x\",\"y\",\"timestamp\"])\n\t\texcept:\n\t\t\tdata = pd.DataFrame(eye_data, columns=[\"surface_label\",\"x\",\"y\",\"timestamp\"])\n\t\t\tdata.loc[:,\"frame_index\"] = data.index.values\n\t\t# Parse some extra information from the file path. This sadly isn't included in the datafile itself,\n\t\t# so it is absolutely *crucial* to adhere to the pupil folder structure when analyzing data!\n\t\ttrial_folder = os.path.split(datafile)[0]\n\telif os.path.splitext(datafile)[1] == \".csv\": # Assume it's the official pupil software's surface data export\n\t\tdata = pd.read_csv(datafile, sep=\"\\t\")\n\t\tif not len(data.index):\n\t\t\tprint(\"\\n{0} appears to be empty\".format(datafile))\n\t\t\treturn None\n\t\tdata = data.query(\"on_srf == True\")\n\t\tdata = data.rename(columns={\"world_frame_idx\":\"frame_index\", \"world_timestamp\":\"timestamp\", \"x_norm\":\"x\", \"y_norm\":\"y\"})\n\t\t# Go up one folder as pupil exports data to \"metrics_xxx\" subfolder\n\t\ttrial_folder = os.path.split(os.path.split(datafile)[0])[0]\n\telse:\n\t\traise IOError(\"File '{0}' has incorrect format or extension\".format(datafile))\n\n\t# If dataframe is empty, issue a warning message and simply return None\n\tif not len(data.index):\n\t\tprint(\"\\n{0} has no samples on surface\".format(datafile))\n\t\treturn None\n\n\t(folder, trial_no) = os.path.split(trial_folder)\n\tparticipant = os.path.split(folder)[1]\n\n\ttry:\n\t\tdata.loc[:,\"subject_file\"] = participant\n\t\tdata.loc[:,\"trial_no\"] = int(trial_no)\n\texcept Exception as e:\n\t\tprint(\"\\nError reading participant file {}, trial {}: {} (empty data file or no fixations on surface?)\".format(participant, trial_no, e))\n\t\treturn None\n\n\t# Parse the subject nr from the folder name\n\ttry:\n\t\tsubject_nr = int(re.findall(r'\\d+',participant)[0])\n\t\tdata.loc[:,\"subject_nr\"] = subject_nr\n\texcept:\n\t\tprint(\"\\nFolder {0}, trial {1}: Could not parse participant number from folder name\".format(participant,trial_no))\n\n\t# It is nice to set this information as the first two columns, so reindex the dataframe by\n\t# respecifying the order of columns\n\tdata = data.reindex(columns=[\"subject_file\",\"subject_nr\",\"trial_no\",\"surface_label\",\"frame_index\", \"x\",\"y\",\"timestamp\"])\n\tdata.x = data.x.astype(float)\n\tdata.y = data.y.astype(float)\n\n\t# Timestamps might be read as strings. 
Convert to floats\n\tdata.loc[:,\"timestamp\"] = data[\"timestamp\"].astype(float)\n\n\t# Subract eye data to calculate difference scores from it\n\txy = data[[\"x\",\"y\"]].astype(float).T\n\tvel_data = np.diff(xy)\n\n\t# Insert 0 for the first diff values of the array (for which no scores can be calculated)\n\tvel_data = np.insert(vel_data, 0, 0.0, axis=1)\n\n\t# Add difference scores to array (for now)\n\tdata[\"x_diff\"] = pd.Series(vel_data[0,:].T, index=data.index)\n\tdata[\"y_diff\"] = pd.Series(vel_data[1,:].T, index=data.index)\n\n\t# Calculate speed above which to cound x,y shift as saccade\n\tx_min_sacc_speed = calculate_min_distance(vel_data[0],sacc_threshold)\n\ty_min_sacc_speed = calculate_min_distance(vel_data[1],sacc_threshold)\n\n\t# Get rows at which saccades took place. Increment fixation index value after each saccade\n\tsaccade_rows = (np.abs(vel_data[0]) > x_min_sacc_speed) | (np.abs(vel_data[1]) > y_min_sacc_speed)\n\t# Get indices of saccade rows. Add 0 to beginning to take in start of first fixation too\n\tsaccades_idx = data[saccade_rows].index.insert(0,data.index[0])\n\n\t# Store True at rows at which saccade is assumed to have taken place in column \"saccade\"\n\tdata[\"saccade\"] = saccade_rows\n\n\t# Create a new series containing the fixation index values, to be added to the data array\n\t# The indices of the series correspond to the indices of the saccades in the data array\n\t# That is: saccade 1 demarkates the end of fixation 1, sacc 2 for fix 2, etc.\n\tfixation_indices = pd.Series(data=np.arange(1,len(saccades_idx)+1), index=saccades_idx)\n\tdata[\"fixation_index\"] = fixation_indices\n\n\t# At row at which no fixation index was entered the value by default is NaN. Use the\n\t# handy function bfill and ffill to fill these values with the next occurring saccade index value\n\tdata[\"fixation_index\"].ffill(inplace=True)\n\n\treturn data", "def _scan_file(self):\n\n try:\n ret = self._cd.scan_file(self._path)\n\n if self._email_message.__getitem__(self._x_virus) != YES and ret == None:\n self._virus = False\n elif ret != None:\n self._virus = True\n self._viruses.append(ret[self._path])\n\n #except pyclamd.ScanError, err:\n # log(\"%s [ClamAV] Error: %s\" % (self._message_id, err), STD_ERR)\n # raise BreakScanning()\n except Exception, err:\n if self._email_message.__getitem__(self._x_virus) != YES:\n self._virus = None\n log(\"%s [ClamAV] Unexpected error: %s\" % (self._message_id, err), STD_ERR)\n raise BreakScanning()", "def save_data(self):\n self.directory = str(QtWidgets.QFileDialog.getExistingDirectory(self, \"Select Directory\", self.directory))\n filename = 'scan_data.dat'\n file = os.path.join(self.directory, filename)\n\n self.experiment.save_scan_data(file)", "def detectLED(file, myDir, startY=400, endY=439, startX=354, endX=400, startTime=0):\n\n\t# print the file being processed\n\tprint(file)\n\t# create output directory\n\tos.makedirs(myDir, exist_ok=True)\n\n\t# extract the animal id/file id from the file path\n\taid=os.path.splitext(file)[0]\n\taid=aid.split('/')\n\taid=aid[-1]\n\n\t# load the video file to be working with\n\tvcap = cv2.VideoCapture(file)\n\tfps=int(vcap.get(5)) # frame rate of video acquisition \n\tvcap.set(1,fps*startTime*60) # set the first frame of video to work on\n\n\t# initialize variable to store the mean intensity of each frame\n\tmeanFrame=[]\n\twhile True:\n\t\t# read the frame\n\t\tret, frame = vcap.read()\n\t\t# height, width, layers = frame.shape\n\t\tframe=frame[startY:endY, startX:endX] #cropping criteria 
\n\n\t\t## to show and display the frames \n\t\t# cv2.imshow('Frame',frame)\n\t\t# if cv2.waitKey(25) & 0xFF == ord('q'):\n\t # break\n\n\t # mean frame\n\t\tmeanFrame.append(np.mean(frame))\n\n\t\t## if restriction apply \n\t\tif len(meanFrame) >= vcap.get(7): # limit the analysis on the first 300 frames of video\n\t\t\tbreak\n\t\t\tvcap.release()\n\n\tmeanFrame -= np.mean(meanFrame)\n\t# distance is calculated based on at least 40 second between shock\n\tpeaks, _ = find_peaks(meanFrame, distance=40*fps, height=10) \n\t# remove peaks detected before 2min\n\tpeaks=peaks[peaks>=2*60*fps]\n\n\t## generate graphical output\n\t# plt.plot(meanFrame)\n\t# plt.plot(peaks, meanFrame[peaks], \"x\")\n\t# plt.show(block=False)\n\t# plt.pause(1)\n\t# plt.savefig(myDir+'/'+aid+\".png\")\n\t# plt.close()\n\n\n\tnp.savetxt(myDir+'/'+aid+'.txt', peaks, delimiter=',') # X is an array\n\n\t# return peaks\n\tprint(peaks)", "def prepare_by_scan(self, scan_cmd):\n name_used = {} #名稱在哪些 src_id 中使用 # ->[src_id,...] \n lines = []\n #show all record\n for idx, row in self.df_cfg.iterrows():\n if row['enabled']!=1:\n continue\n ds_name = row['ds_name']\n id_type = row['id_type']\n col_id = row['col_id']\n col_name = row['col_name']\n col_key = row['col_key']\n src_id = row['src_id']\n #print(\"----- DS_NAME:%s -----\" %(ds_name))\n df = self.load_ds(ds_name)\n #print(\"key=%s\" %(col_key))\n if not pd.isnull(col_key):\n #if col_key !=\"\":\n keys = col_key.split(\",\")\n df.sort_values(by=keys)\n \n \n for idx2, row2 in df.iterrows():\n format_str = \"%s@%s,%s\"\n value = row2[col_name]\n col2_id = row2[col_id]\n \n if id_type=='wikidata':\n wid=self.wd_url_to_wid(col2_id)\n id_str = format_str %(src_id,wid,value)\n fid = \"%s@%s\" %(src_id,wid)\n \n else:\n id_str = format_str %(src_id,col2_id,value)\n fid = \"%s@%s\" %(src_id,col2_id)\n #print(id_str) \n lines.append(id_str)\n if value in name_used:\n if not fid in name_used[value] :\n name_used[value].append(fid)\n else:\n name_used[value] = [ fid ]\n \n \n self.name_used = name_used\n with open(\"output/idact_fid_name.csv\", \"w\") as outfile:\n outfile.write(\"fid,name\\n\") \n outfile.write(\"\\n\".join(lines))", "def test_step_scan():\n print(\"Starting fly scan test 2\")\n uid, = RE(nano_xrf(-5, 5, 1, 0, 2, 1, 0.1, shutter=False))\n print(\"Fly scan complete\")\n print(\"Reading scan from databroker: 'primary' stream\")\n db[uid].table(fill=True)\n print(\"Reading scan from databroker: 'stream0' stream\")\n db[uid].table(stream_name=\"stream0\", fill=True)\n print(\"Loading data using PyXRF\")\n from pyxrf.api import make_hdf\n dir_name = \"/tmp/acceptance_tests\"\n os.makedirs(dir_name, exist_ok=True)\n make_hdf(uid, wd=dir_name, create_each_det=True, file_overwrite_existing=True)\n print(\"Test is complete\")", "def scalogram(filename, savename):\n\n #signal reading\n (rate,signal) = wav.read(filename)\n\n #ignore other bands for primary treatment\n if signal.shape[1] > 1:\n signal = signal[:,0]\n\n #clip the signal\n max_energy = max(energy)\n start_frame = 0\n for k in range(len(energy)):\n if energy[k] >= max_energy*0.01:\n start_frame = k\n break\n\n end_frame = start_frame\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.001:\n end_frame = k\n break\n\n if(end_frame == start_frame):\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.01:\n end_frame = k\n break\n\n samples_per_frame = rate * 0.01\n signal = signal[start_frame*samples_per_frame:end_frame*samples_per_frame]\n\n\n wavelet=DOG4\n maxscale=10\n 
notes=100\n scaling='log'#\"log\" #or \"linear\"\n plotpower2d=True\n\n Ns=1024\n #limits of analysis\n Nlo=0 \n Nhi=Ns\n\n # Wavelet transform\n cw=wavelet(signal,maxscale,notes,scaling=scaling)\n scales=cw.getscales() \n cwt=cw.getdata()\n # power spectrum\n pwr=cw.getpower()\n scalespec=np.sum(pwr,axis=1)/scales # calculate scale spectrum\n # scales\n y=cw.fourierwl*scales\n x=np.arange(Nlo*1.0,Nhi*1.0,1.0)\n \n #mpl.tight_layout()\n mpl.axis('off')\n fig=mpl.figure(1)\n\n # 2-d coefficient plot\n plotcwt=np.clip(np.fabs(cwt.real), 0., 1000.)\n if plotpower2d: plotcwt=pwr\n im=mpl.imshow(plotcwt,cmap=mpl.cm.jet,extent=[x[0],x[-1],y[-1],y[0]],aspect='auto')\n mpl.ylim(y[0],y[-1])\n theposition=mpl.gca().get_position()\n\n mpl.tight_layout()\n mpl.savefig(savename)", "def DumpExtractor(filename,frames,atomNumber):\n fileDump=open(filename) #dump file for info extraction\n linesDump=fileDump.readlines()\n\n if(linesDump[-1]!=\"</OpenMD>\\n\"):\n print(\"Error: Incomplete file\")\n sys.exit();\n processP=\"Wait\"\n processC=\"Wait\"\n\n\n #information storage matrix\n #posiiton and velocity storage\n\n x=num.zeros((frames,atomNumber))\n y=num.zeros((frames,atomNumber))\n z=num.zeros((frames,atomNumber))\n vx=num.zeros((frames,atomNumber))\n vy=num.zeros((frames,atomNumber))\n vz=num.zeros((frames,atomNumber))\n q=num.zeros(4)\n j=num.zeros(3)\n\n #charge and velocity storage matrix\n c=num.zeros((frames,atomNumber))\n cv=num.zeros((frames,atomNumber))\n ex=num.zeros((frames,atomNumber))\n ey=num.zeros((frames,atomNumber))\n ez=num.zeros((frames,atomNumber))\n efieldConverter=1.0/23.0609 # converts kcal mol^-1 to V/A\n #frame count initilization\n fCount=0\n index=0 #index for the atoms\n for line in linesDump:\n linesSplit=str.split(line)\n length=len(linesSplit)\n\n if(length!=0 and linesSplit[0]==\"<StuntDoubles>\" and processP==\"Wait\"):\n processP=\"Start\"\n continue;\n\n elif(length!=0 and linesSplit[0]==\"</StuntDoubles>\" and processP==\"Start\"):\n processP=\"Wait\"\n fCount=fCount+1\n index=0\n continue;\n\n elif(fCount>=frames):\n break;\n\n else:\n processP=processP;\n\n\n\n if (processP==\"Start\"):\n x[fCount][int(linesSplit[0])]=float(linesSplit[2])\n y[fCount][int(linesSplit[0])]=float(linesSplit[3])\n z[fCount][int(linesSplit[0])]=float(linesSplit[4])\n vx[fCount][int(linesSplit[0])]=float(linesSplit[5])\n vy[fCount][int(linesSplit[0])]=float(linesSplit[6])\n vz[fCount][int(linesSplit[0])]=float(linesSplit[7])\n\n\n\n position=[x,y,z]\n velocity=[vx,vy,vz]\n \n\n\n infoDict={\"position\":position,\"velocity\":velocity}\n return infoDict", "def read_and_display_data(hat, num_channels):\n total_samples_read = 0\n read_request_size = READ_ALL_AVAILABLE\n \n basepath = '/home/pi/Documents/Measurement_Computing' \n mypath = basepath + '/Scanning_log_files'\n\n # When doing a continuous scan, the timeout value will be ignored in the\n # call to a_in_scan_read because we will be requesting that all available\n # samples (up to the default buffer size) be returned.\n timeout = 5.0\n\n # Read all of the available samples (up to the size of the read_buffer which\n # is specified by the user_buffer_size). 
Since the read_request_size is set\n # to -1 (READ_ALL_AVAILABLE), this function returns immediately with\n # whatever samples are available (up to user_buffer_size) and the timeout\n # parameter is ignored.\n #=============================================================================\n # file switch: w = Write to a file\n # file switch: w+ = Write to a file, if it doesn't exist create it\n # file switch: a = Append to a file\n # file switch: a+ = Append to a file, if is doesn't exist create it.\n # file switch: x = will create a file, returns an error if the file exist\n \n\n # If the scan starts, create a file name based upon current date and time.\n # Retrieve the Current Working Directory and generate the full path \n # to where to write the collected data as a .csv file. Open the file \n # begin writing the data to the file. When done, close the file.\n \n try:\n if os.path.exists(basepath):\n if not (os.path.exists(mypath)):\n os.mkdir(mypath)\n else:\n os.mkdir(basepath)\n os.chdir(basepath)\n os.mkdir(mypath)\n except OSError as exc:\n raise\n \n os.chdir(mypath)\n fileDateTime = datetime.strftime(datetime.now(), \"%Y_%B_%d_%H%M%S\")\n fileDateTime = mypath + \"/\" + fileDateTime + \".csv\"\n csvfile = open(fileDateTime, \"w+\")\n csvwriter = csv.writer(csvfile) \n \n while total_samples_read < 200000:\n read_result = hat.a_in_scan_read(read_request_size, timeout)\n\n # Check for an overrun error\n if read_result.hardware_overrun:\n print('\\n\\nHardware overrun\\n')\n break\n elif read_result.buffer_overrun:\n print('\\n\\nBuffer overrun\\n')\n break\n\n samples_read_per_channel = int(len(read_result.data) / num_channels)\n total_samples_read += samples_read_per_channel\n \n totalSamples = len(read_result.data) \n #print(\"\\r MyTotalSamples = %d\\n\" % totalSamples)\n\n # Display the last sample for each channel.\n #print('\\r{:12}'.format(samples_read_per_channel),\n # ' {:12} '.format(total_samples_read), end='')\n\n if samples_read_per_channel > 0:\n index = samples_read_per_channel * num_channels - num_channels\n print(\"\\r Index = %d\\n\" % index)\n \n new_index = 0\n myArray=[] #create an empty array\n for i in range(0, totalSamples, num_channels):\n myArray.append([]) #add a row to the array (COLUMN)\n for j in range(num_channels):\n #print('{:10.5f}'.format(read_result.data[j]), 'V ',\n # end='')\n \t#append a num_channels of data to the array (ROW)\n myArray[new_index].append(read_result.data[i + j]) \n new_index+=1\n #print(\"\\r\")\n\n csvwriter.writerows(myArray) #Write the array to file\n csvfile.flush\n\n #sleep(0.01)\n\n print('\\n')\n csvfile.close()\n return(fileDateTime)", "def dwrite_restart13(iur):\n# write out restart diagnostic file for electrostatic code\n s1.dwrite_restart1(iur)\n\n# write out electron current density diagnostic parameter\n s1.i1[0] = in1.ntje; s1.i1.tofile(iur)\n# write out record location\n if (in1.ntje > 0):\n s1.i1[0] = in1.njerec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.njerec > 0):\n it = mdiag1.fnrecl(in1.fjename)\n s1.i1[0] = it; s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.fjename)\n s1.fname.tofile(iur)\n\n# write out radiative vector potential diagnostic parameter\n s1.i1[0] = in1.ntar; s1.i1.tofile(iur)\n# write out record location\n if (in1.ntar > 0):\n s1.i1[0] = in1.narrec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.narrec > 0):\n it = mdiag1.fnrecl(in1.farname)\n s1.i1[0] = it; 
s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.farname)\n s1.fname.tofile(iur)\n# write out current density\n it = numpy.size(cue,1)\n s1.i1[0] = it; s1.i1.tofile(iur)\n cue.tofile(iur)\n# write out spectrum flag\n if ((in1.ndar==2) or (in1.ndar==3)):\n s1.i1[0] = itar; s1.i1.tofile(iur)\n# write out spectrum sizes and data\n if (itar > 0):\n s1.i4[0] = numpy.size(vpksr,0)\n s1.i4[1] = numpy.size(vpksr,1)\n s1.i4[2] = numpy.size(vpksr,2)\n s1.i4[3] = numpy.size(vpksr,3)\n s1.i4.tofile(iur)\n vpksr.tofile(iur)\n else:\n it = 0\n s1.i1[0] = it; s1.i1.tofile(iur)\n\n# write out vector potential diagnostic parameter\n s1.i1[0] = in1.nta; s1.i1.tofile(iur)\n# write out record location\n if (in1.nta > 0):\n s1.i1[0] = in1.narec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.narec > 0):\n it = mdiag1.fnrecl(in1.faname)\n s1.i1[0] = it; s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.faname)\n s1.fname.tofile(iur)\n# write out spectrum flag\n if ((in1.nda==2) or (in1.nda==3)):\n s1.i1[0] = ita; s1.i1.tofile(iur)\n# write out spectrum sizes and data\n if (ita > 0):\n s1.i4[0] = numpy.size(vpks,0); s1.i4[1] = numpy.size(vpks,1)\n s1.i4[2] = numpy.size(vpks,2); s1.i4[3] = numpy.size(vpks,3)\n s1.i4.tofile(iur)\n vpks.tofile(iur)\n else:\n it = 0\n s1.i1[0] = it; s1.i1.tofile(iur)\n\n# write out transverse efield diagnostic parameter\n s1.i1[0] = in1.ntet; s1.i1.tofile(iur)\n# write out record location\n if (in1.ntet > 0):\n s1.i1[0] = in1.netrec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.netrec > 0):\n it = mdiag1.fnrecl(in1.fetname)\n s1.i1[0] = it; s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.fetname)\n s1.fname.tofile(iur)\n# write out spectrum flag\n if ((in1.ndet==2) or (in1.ndet==3)):\n s1.i1[0] = itet; s1.i1.tofile(iur)\n# write out spectrum sizes and data\n if (itet > 0):\n s1.i4[0] = numpy.size(vpkset,0)\n s1.i4[1] = numpy.size(vpkset,1)\n s1.i4[2] = numpy.size(vpkset,2)\n s1.i4[3] = numpy.size(vpkset,3)\n s1.i4.tofile(iur)\n vpkset.tofile(iur)\n else:\n it = 0\n s1.i1[0] = it; s1.i1.tofile(iur)\n\n# write out magnetic field diagnostic diagnostic parameter\n s1.i1[0] = in1.ntb; s1.i1.tofile(iur)\n# write out record location\n if (in1.ntb > 0):\n s1.i1[0] = in1.nbrec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.nbrec > 0):\n it = mdiag1.fnrecl(in1.fbname)\n s1.i1[0] = it; s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.fbname)\n s1.fname.tofile(iur)\n\n# write out ion current density diagnostic parameter\n if (in1.movion==1):\n s1.i1[0] = in1.ntji; s1.i1.tofile(iur)\n# write out record location\n if (in1.ntji > 0):\n s1.i1[0] = in1.njirec; s1.i1.tofile(iur)\n# write out record length (zero if error) and file name (if no error)\n if (in1.njirec > 0):\n it = mdiag1.fnrecl(in1.fjiname)\n s1.i1[0] = it; s1.i1.tofile(iur)\n if (it > 0):\n s1.fname[:] = ''.join(in1.fjiname)\n s1.fname.tofile(iur)\n# write out spectrum flag\n if ((in1.ndji==2) or (in1.ndji==3)):\n s1.i1[0] = itji; s1.i1.tofile(iur)\n# write out spectrum sizes and data\n if (itji > 0):\n s1.i4[0] = numpy.size(vpksji,0)\n s1.i4[1] = numpy.size(vpksji,1)\n s1.i4[2] = numpy.size(vpksji,2)\n s1.i4[3] = numpy.size(vpksji,3)\n s1.i4.tofile(iur)\n vpksji.tofile(iur)\n else:\n it = 0\n s1.i1[0] = it; s1.i1.tofile(iur)", "def injest_auxil(self):\n\n # find best attitude file available (uat > pat > sat)\n #attexts = 
[\"uat.fits.gz\", \"pat.fits.gz\", \"sat.fits.gz\"]\n attexts = [\"pat.fits.gz\", \"sat.fits.gz\"]\n\n for attext in attexts:\n attfile = glob.glob(os.path.join(self.path,'raw/auxil/sw*' + attext))\n if len(attfile):\n self.attfile = attfile[0]\n break\n \n if not self.attfile:\n print(\"No attitude file not found in auxil files.\")\n\n hdfile = glob.glob(os.path.join(self.path,'raw/xrt/hk/sw*hd.hk.gz'))\n\n if len(hdfile):\n self.hdfile=hdfile[0]\n else:\n print(\"HD file not found in auxil files.\")\n\n event_file = glob.glob(os.path.join(self.path,'raw/xrt/event/sw' + self.obsid + \\\n 'x' + self.mode + '??po_cl.evt.gz'))[0]\n fits = pyfits.open(event_file)\n date_obs = fits[0].header['DATE-OBS']\n\n date_obs_split = date_obs.strip().strip('\\'').split(\"T\")\n\n self.alignfile = ftools.quzcif('ALIGNMENT','NOW','-', instrument='SC')[0][0]\n self.teldeffile = ftools.quzcif('TELDEF',date_obs_split[0],date_obs_split[1])[0][0]", "def capture_tomogram_flyscan(self, start_angle, end_angle,\n num_projections, ccd_readout=0.270,\n extra_projections=0):\n # Calculate angle parameters\n delta = (end_angle - start_angle) / (num_projections)\n total_time = num_projections * (self.exposure_time + ccd_readout)\n slew_speed = (end_angle - start_angle) / total_time\n # Set values for fly scan parameters\n self.Fly_ScanControl = \"Custom\"\n self.Fly_ScanDelta = delta\n self.Fly_StartPos = start_angle\n self.Fly_EndPos = end_angle\n self.Fly_SlewSpeed = slew_speed\n # Pause to let the values update\n time.sleep(3)\n # Update the value for the number of projections from instrument\n extra_projections = self.HDF1_NumCapture_RBV - num_projections\n log.debug('Acquiring %d extra projections (flat/dark)', extra_projections)\n calc_num_proj = math.ceil(self.Fly_Calc_Projections)\n if calc_num_proj is not None:\n num_projections = calc_num_proj\n log.debug('Fly scan resetting num_projections to %d (%d)',\n num_projections, extra_projections)\n # Logging\n # Prepare the instrument for scanning\n self.Reset_Theta = 1\n self.Cam1_TriggerMode = 'Overlapped'\n self.Cam1_NumImages = num_projections\n self.HDF1_NumCapture = num_projections + extra_projections\n self.Cam1_ImageMode = self.IMAGE_MODE_MULTIPLE\n self.Cam1_Acquire = self.DETECTOR_ACQUIRE\n self.wait_pv('Cam1_Status', self.DETECTOR_WAITING)\n # Execute the fly scan\n theta = []\n self.Cam1_FrameType = self.FRAME_DATA\n self.Fly_Taxi = 1\n self.wait_pv('Fly_Taxi', 0)\n self.Fly_Run = 1\n self.wait_pv('Fly_Run', 0, timeout=-1)\n # Clean up\n self.wait_pv('Cam1_Status', self.DETECTOR_IDLE)\n time.sleep(0.25)\n self.Proc_Theta = 1\n self.Fly_ScanControl = \"Standard\"\n # Retrieve the actual theta array to return\n pv_name = getattr(type(self), 'Theta_Array').pv_name(txm=self)\n theta = self.pv_get(pv_name, count=int(num_projections))\n if theta is None:\n # No theta array was retrieved, so calculate the angles instead\n warnings.warn(\"Could not retrieve actual angles, \"\n \"storing predicted values instead.\")\n theta = np.linspace(start_angle, end_angle, num=num_projections)\n return theta", "def snapshot(self):\n ts = datetime.datetime.now() # grab the current timestamp\n filename = \"{}.png\".format(ts.strftime(\n \"%Y-%m-%d_%H-%M-%S\")) # construct filename\n\n ok, frame = self.cap.read()\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n\n # save image as jpeg file\n image.save('exports/snapshots/' + filename, \"PNG\")\n print(\"[INFO] saved {}\".format(filename))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
search the 3 different modules for a similar title and return a list sorted by similarity
def get_similar_titles(title: str) -> list: light_novel_results = BakaUpdates.get_similar_titles(title) visual_novel_results = VisualNovelDatabase.get_similar_titles(title) anime_results = MyAnimeList.get_similar_titles(title) results = [] passed_titles = [] for result_list in (light_novel_results, visual_novel_results, anime_results): for result in result_list: if result['title'] in passed_titles: results[passed_titles.index(result['title'])]['links'].append(result['link']) else: results.append({ 'title': result['title'], 'links': [result['link']], 'similarity': result['similarity'] }) passed_titles.append(result['title']) results.sort(key=lambda item: item['similarity'], reverse=True) return results
[ "def find_items(self, title: str) -> list:\n title_list = []\n for library_item in self._item_list:\n title_list.append(library_item.get_title())\n results = difflib.get_close_matches(title, title_list, cutoff=0.5)\n return results", "def find_movies_by_title(self, title):\n\n articles = ['A ', 'An ', 'The ']\n if self.creative:\n title = title.lower()\n indexes = []\n titleWords = title.split(\" \")\n if title == '':\n return indexes\n for i, j in enumerate(self.titles):\n movie = j[0].lower()\n for form in self.generate_all_forms(title):\n if self.standalone_in(movie, form):\n indexes.append(i)\n break\n return indexes\n else:\n indexes = []\n new_title = title\n for article in articles: \n if article in title and title.index(article) == 0:\n if '(' in title:\n split_title = title.split('(')\n split_title[0] = split_title[0][len(article):len(split_title[0])-1] + ', ' + article\n new_title = '('.join(split_title)\n else:\n new_title = title[len(article):len(title)] + ', ' + article[:len(article)-1]\n for i, t in enumerate(self.titles):\n possible_title = t[0]\n if new_title == possible_title or title == possible_title:\n indexes.append(i)\n else: \n if '(' in possible_title:\n possible_title = possible_title[:possible_title.index('(')-1]\n if new_title == possible_title or title == possible_title:\n indexes.append(i)\n return indexes", "def searchTitlesByName(self, setName, IDList, searchTerm):\n outList = []\n for title in self.allTitles[setName]:\n if title['Const'] not in IDList:\n continue\n name = title['Title']\n if searchTerm.lower() in name.lower():\n outList.append(title['Const'])\n return outList", "async def get_best_results(self, query: str) -> List[Tuple[float, str]]:\n results = []\n for genre in self.genres:\n ratios = [difflib.SequenceMatcher(None, query, genre).ratio()]\n for word in REGEX_NON_ALPHABET.split(genre):\n ratios.append(difflib.SequenceMatcher(None, query, word).ratio())\n results.append((round(max(ratios), 2), genre))\n return sorted((item for item in results if item[0] >= 0.60), reverse=True)[:4]", "def _search_tokens(self, song_name):\n logger.debug(\"Searching [{}] in the cache at [{}]\".format(song_name, self.dir))\n song_name = remove_stopwords(remove_multiple_spaces(song_name).lower())\n song_name = remove_punct(song_name)\n tokens1 = song_name.split()\n cached_songs = self.list_mp3()\n\n res = []\n for song in cached_songs:\n name = os.path.splitext(song)[0].lower()\n title = name\n name = remove_stopwords(name)\n name = remove_punct(name)\n name = remove_multiple_spaces(name)\n tokens2 = name.split()\n match = check_keywords(tokens1, tokens2)\n if match:\n dist = compute_jaccard(tokens1, tokens2)\n res.append((song_name, song, title, dist))\n return sorted(res, key=lambda x: x[-1], reverse=True)", "def _most_similar(self, *args, **kwargs):\n topn = kwargs.get(\"topn\", 10)\n # Query for extra, since we filter some bad ones out\n kwargs[\"topn\"] = topn + 20\n words = self._model.most_similar(*args, **kwargs)\n words = [(w.lower(), n) for w, n in words]\n\n exclude_substrings = True\n if exclude_substrings:\n input_words = kwargs[\"positive\"]\n words = [\n (w.lower(), round(n, 3))\n for w, n in words\n if not (\n any(c not in ascii_letters for c in w) or\n any(w in i_w for i_w in input_words) or\n any(i_w in w for i_w in input_words) or\n any(editdistance.eval(w, i_w) <= 3 for i_w in input_words)\n )\n ]\n return words", "def recommend_group(similarities):\n group_rec = np.argsort(-similarities)[:,0]\n \n return group_rec", "def 
get_recommendations(sorted_matches):\n # put whole method in loop from 0 to len(sorted_matches)\n # continue until we have found some recommendations\n # (instead of just looking at top match)\n if len(sorted_matches) > 0:\n top_match = sorted_matches[0]\n top_match_songs = top_match[1]\n top_match_song_set = set(top_match_songs)\n # get the most common genre for top match user's songs\n genre_lists = [song.genres for song in top_match_songs]\n genres = list(itertools.chain(*genre_lists))\n genre_counts = Counter(genres)\n most_common_genre = genre_counts.most_common(1)[0][0]\n # just get the user field of a matching song instead of making db call\n top_match_user = top_match_songs[0].user\n # get all the Songs from Artists which have the most common genre\n # that also belong to the top match user\n most_common_genre_songs = Song.query.filter(Song.artist.has(\n Artist.genres.any(Genre.name == most_common_genre))).filter(\n Song.user == top_match_user).all()\n recommendations = []\n # if any songs in most_common_genre_songs are not in top matching\n # songs, add them to the recommended songs\n most_common_genre_song_set = set(most_common_genre_songs)\n recommend_set = most_common_genre_song_set - top_match_song_set\n recommendation_list = list(recommend_set)\n recommendations += recommendation_list\n if len(recommendations > 0):\n # sort by popularity, then return\n recommendations.sort(key=lambda x: x.popularity, reverse=True)\n return recommendations\n return []", "def _get_similar_words(self, word: str) -> list:\n\n number_similar_words = self.number_similar_words\n distance = self.distance\n\n found_words = self.tree.find(word, distance)\n\n arr = [[self._get_total_cost(it[1]), it[1]] for it in found_words]\n if arr:\n arr = sorted(arr)[:number_similar_words]\n return [it[1] for it in arr]\n else:\n return None", "def by_title(self, title, strict=True):\n f = {True: operator.eq, False: operator.contains}\n func = f[strict]\n result = [m for m in self._mods if func(m['title'], title)]\n if len(result) == 1:\n return result[0]\n return result", "def get_similarities(tags):\n simtags3 = {}\n for i in tags:\n prodtags3 = list(product([i,''], tags))\n for j in prodtags3:\n seqtags3 = SequenceMatcher(None, j[0].lower(), j[1].lower())\n if seqtags3.ratio() != 0.0 and seqtags3.ratio() >= SIMILAR and seqtags3.ratio() != 1.0:\n if j[0] not in simtags3 and j[0] not in simtags3.values():\n simtags3[j[0]] = j[1]\n return simtags3", "def searchSimilar(fullfilename):\n ix = open_dir(indexDir)\n\n with ix.searcher() as searcher:\n filename = os.path.basename(fullfilename)\n \n docnum = searcher.document_number(path=unicode(fullfilename, 'utf-8'))\n if docnum is None:\n print \"That document has not been indexed\"\n else:\n r = searcher.more_like(docnum, 'content', numterms=20)\n if len(r) > 1:\n header = \"Similar files to '\" + filename.replace(\".md\", \"\") + \"'\"\n print \"\\n\" + header + \"\\n\" + \"-\"*len(header) + \"\\n\"\n for hit in r:\n print hit['title'].replace(\".md\",\"\")\n print \" score: \" + str(hit.score) + \"\\n\"\n\n print \"keywords: \" + \", \".join(zip(*r.key_terms('content'))[0])", "def getWordSuggestionsV1(word, fileName, n, commonPercent, topN):\n \n \n wordlist=getWordsOfSimLength(word,fileName,n)#gives a list of words with almost similar length\n \n winners=getWordsWithCommonPercent(word, wordlist,commonPercent)#words with commonletters from the list provided\n \n similarityDictionary=getSimilarityDict(word,winners)#gives the words that meets the similarity criteria\n 
return getBestWords(similarityDictionary, topN)#returns the tobN best words", "def get_all_matching_models(cars=cars, grep='trail'):\n resultado = []\n for key in cars.keys():\n for model in cars[key]:\n print(model)\n if re.search(grep, model, flags=re.IGNORECASE):\n resultado.append(model)\n print('match')\n print('resultado: ' + str(resultado))\n else:\n print('no match')\n resultado.sort()\n return resultado", "def fuzzy_songs(self, query):\r\n\r\n query = query.upper()\r\n\r\n matched_song_titles = difflib.get_close_matches(\r\n query, self.song_titles)\r\n matched_song_artists = difflib.get_close_matches(\r\n query, self.song_artists)\r\n\r\n # if query is beautifully matched, then forget about everything else\r\n strict_priority_title = [x for x in matched_song_titles if x == query]\r\n strict_priority_artists = [\r\n x for x in matched_song_artists if x == query]\r\n\r\n if strict_priority_title:\r\n matched_song_titles = strict_priority_title\r\n if strict_priority_artists:\r\n matched_song_artists = strict_priority_artists\r\n\r\n matched_songs_bytitle = [\r\n song for song in self.songs if song.title in matched_song_titles]\r\n matched_songs_byartist = [\r\n song for song in self.songs if song.artist in matched_song_artists]\r\n\r\n matches = list(set(matched_songs_bytitle + matched_songs_byartist))\r\n\r\n return matches", "def get_movies_by_title(str):\r\n\r\n sort_by = \"popularity.desc\"\r\n\r\n url = f\"https://api.themoviedb.org/3/search/movie?api_key={TMDB_API_KEY}&language=en-US&sort_by={sort_by}&page=1&include_adult=false&query='{str}'\"\r\n\r\n response = requests.get(url)\r\n\r\n search_results = json.loads(response.text)\r\n\r\n recommendations = dict()\r\n count = 1\r\n \r\n for index, title in enumerate(search_results['results']):\r\n recommendations[(index + 1)] = title\r\n\r\n return recommendations", "def recommend_title_rel(self, uid):\n user = self.users[uid]\n click_record = user.click_record\n rec_list = list()\n for click_iid in click_record:\n for iid, item in self.items.items():\n if iid != click_iid:\n click_item = self.items[click_iid]\n rel = self.get_relevance(click_item.title, item.title)\n rec_list.append((iid, rel))\n rec_list = sorted(rec_list, key=lambda x: x[1], reverse=True)\n return rec_list", "def search_match_1(snip_fgp1):\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"SELECT song_id FROM songs\")\n uniq_id = cur.fetchall()\n uniq_id = reduce(np.append, uniq_id)\n \n tolerance = 10**(-3) # this is the default tolerance level, tuned\n \n matching_cnt = []\n window_num = []\n \n for song_id in uniq_id:\n distance = retriv_fgp1(int(song_id), snip_fgp1)\n matching_cnt.append(np.sum(distance<=tolerance))\n window_num.append(len(distance))\n \n # This is the new criterion: must have more than 10% similarity of a song\n # in the database - considered different lengths of songs\n similarity_lst = list(map(lambda i,j: i/j > 0.1, matching_cnt, window_num))\n matched_idx = [i for i,val in enumerate(similarity_lst) if val==True]\n matched_sid = [uniq_id[i] for i in matched_idx]\n \n if matched_sid == []:\n sm_logger.info('Oops, we try hard but find nothing...')\n return None\n else:\n possible_lst = []\n for i in matched_sid:\n possible_lst.append(retriv_name(int(i)))\n sm_logger.info('Found some songs matched the snippet!')\n return possible_lst", "def _fuzzy_match(movie_title:str) -> str:\n with open('resources/final_movies.csv', newline='') 
as movies_list:\n reader = csv.DictReader(movies_list)\n movies = [movie['Movie_Titles'].strip() for movie in reader]\n match, confidence = process.extract(movie_title, movies, limit=1, scorer=fuzz.token_sort_ratio)[0]\n \n if confidence >= 70:\n movie_title = match\n \n \n movies_list.close()\n \n return movie_title" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search the 3 different modules for an alternative title of the given title and return a dictionary split into the different languages
def get_alternative_titles(title: str = '') -> dict: light_novel_results = BakaUpdates.get_alternative_titles(title=title) visual_novel_results = VisualNovelDatabase.get_alternative_titles(title=title) anime_results = MyAnimeList.get_alternative_titles(title=title) alternative_titles = {} for result_list in (light_novel_results, visual_novel_results, anime_results): for language in result_list: if not result_list[language]: continue for title in result_list[language]: if language not in alternative_titles: alternative_titles[language] = [title] continue if title not in alternative_titles[language]: alternative_titles[language].append(title) return alternative_titles
[ "def get_languages(self, title: str) -> dict:\n logging.info(\"Searching wikipedia for languages for article with title '{}'\".format(title))\n url: str = \"/w/api.php\"\n http_params: dict = {\n \"action\": \"query\",\n \"titles\": title.replace(\" \", \"%20\"),\n \"prop\": \"langlinks\",\n \"format\": \"json\",\n \"llprop\": \"url\"\n }\n url_with_params: str = helpers.add_http_parameters(url, http_params)\n\n http_client = self._connect_http_client()\n http_client.request(\"GET\", url_with_params)\n response: bytes = http_client.getresponse().read()\n http_client.close()\n\n return json.loads(response)", "def choose_implied_language(title_1, title_2, title_list, user_input, region_data):\r\n\r\n implied_languages = []\r\n\r\n for region in user_input.user_region_order:\r\n if region_data.implied_language[region] != '':\r\n if region_data.implied_language[region] not in implied_languages:\r\n implied_languages.append(region_data.implied_language[region])\r\n\r\n if (\r\n '[bios]' not in title_1.full_name_lower\r\n and '[bios]' not in title_2.full_name_lower):\r\n if (\r\n title_1.languages != ''\r\n and title_2.languages != ''\r\n and title_1.title_languages != ''\r\n and title_2.title_languages != ''\r\n and implied_languages != []):\r\n for implied_language in implied_languages:\r\n if(\r\n bool(re.search(implied_language, title_1.languages)) == True\r\n and bool(re.search(implied_language, title_2.languages)) == False):\r\n if title_2 in title_list: title_list.remove(title_2)\r\n break\r\n elif(\r\n bool(re.search(implied_language, title_2.languages)) == True\r\n and bool(re.search(implied_language, title_1.languages)) == False):\r\n if title_1 in title_list: title_list.remove(title_1)\r\n break\r\n elif (\r\n # If one title has languages, but the other doesn't, take the one that\r\n # supports the highest priority implied language and has the most languages\r\n title_1.languages != ''\r\n and title_2.languages != ''\r\n and title_1.languages != title_2.languages\r\n and (\r\n title_1.title_languages == '' or\r\n title_2.title_languages == '')):\r\n\r\n for region in user_input.user_region_order:\r\n if region_data.implied_language[region] != '':\r\n if (\r\n bool(re.search(region_data.implied_language[region], title_1.languages)) == True\r\n and bool(re.search(region_data.implied_language[region], title_2.languages)) == False):\r\n if title_2 in title_list: title_list.remove(title_2)\r\n break\r\n elif (\r\n bool(re.search(region_data.implied_language[region], title_2.languages)) == True\r\n and bool(re.search(region_data.implied_language[region], title_1.languages)) == False):\r\n if title_1 in title_list: title_list.remove(title_1)\r\n break\r\n\r\n # Accommodate if the user has a submitted a region order that doesn't\r\n # include all regions\r\n if (\r\n title_1 in title_list\r\n and title_2 in title_list):\r\n for region in region_data.all:\r\n if region_data.implied_language[region] != '':\r\n if (\r\n bool(re.search(region_data.implied_language[region], title_1.languages)) == True\r\n and bool(re.search(region_data.implied_language[region], title_2.languages)) == False):\r\n if title_2 in title_list: title_list.remove(title_2)\r\n break\r\n elif (\r\n bool(re.search(region_data.implied_language[region], title_2.languages)) == True\r\n and bool(re.search(region_data.implied_language[region], title_1.languages)) == False):\r\n if title_1 in title_list: title_list.remove(title_1)\r\n break", "def extract_from_wiktionary(input, output, language=\"en\", accents=(\"US\", \"USA\", 
\"GA\", \"GenAm\", None), enpr=False):\n title_regex = re.compile(\"<title>(.*)</title>\")\n ipa_regex_1 = re.compile(\"{{{{IPA[|]/([^|]+)/[|]lang={lang}}}}}\".format(lang=language))\n ipa_regex_2 = re.compile(\"{{{{IPA[|]lang={lang}[|]/([^|]+)/}}}}\".format(lang=language))\n enpr_regex = re.compile(\"{{enPR[|]([^}]+)}}\")\n accent_regex = re.compile(\"{{{{a(ccent)?[|]([^}}]+[|])?({accents})[|}}]\".format(accents=\"|\".join(a for a in make_iterable(accents) if a)))\n any_accent_regex = re.compile(\"{{a(ccent)?[|]\")\n match = ValueBox()\n with open(input, \"r\", encoding=\"utf-8\") as i:\n with open(output, \"w\", encoding=\"utf-8\") as o:\n for line in i:\n if match << re.search(title_regex, line):\n title = match().group(1)\n elif (enpr and match << re.search(enpr_regex, line)) or (not enpr and match << (re.search(ipa_regex_1, line) or re.search(ipa_regex_2, line))):\n if accents and not re.search(accent_regex, line) and (None not in accents or re.search(any_accent_regex, line)):\n continue\n elif \":\" in title:\n continue\n for pronunciation in match().group(1).split(\"|\" if enpr else \", \"):\n print(\"{}\\t{}\".format(title, pronunciation), file=o)", "def by_title(self, title, strict=True):\n f = {True: operator.eq, False: operator.contains}\n func = f[strict]\n result = [m for m in self._mods if func(m['title'], title)]\n if len(result) == 1:\n return result[0]\n return result", "def decode_title(elem):\n title = elem.attrib[TITLE ]\n params = title.split(\";\")\n dict = {}\n for param in params:\n chunks = param.strip().split(\" \", 1)\n key = chunks[0]\n value_list = chunks[1].split(\" \")\n Tesseract.add_title_list_to_dict(key, value_list, dict)\n return dict", "def get_article_language_info(self, title: str, language: str) -> tuple:\n logging.info(\"Searching wikipedia for '{}' language for article with title '{}'\".format(language, title))\n url: str = \"/w/api.php\"\n http_params: dict = {\n \"action\": \"query\",\n \"titles\": title.replace(\" \", \"%20\"),\n \"prop\": \"langlinks\",\n \"format\": \"json\",\n \"llprop\": \"url|*\",\n \"lllang\": language\n }\n url_with_params: str = helpers.add_http_parameters(url, http_params)\n\n http_client = self._connect_http_client()\n http_client.request(\"GET\", url_with_params)\n response: bytes = http_client.getresponse().read()\n http_client.close()\n\n return WikipediaClient._get_language_info_from_json(json.loads(response), language)", "def parse_title(title=''):\n data = filter(bool, re.split(u':|:', title, 1))\n if title == '':\n return {}\n elif len(data) == 2:\n return {'etype': data[0], 'etitle': data[1]}\n elif len(data) == 1:\n return {'etitle': data[0]}\n else:\n return {'etitle': data[0]}", "def title_search (self, title):\n meta = None\n timing = 0.0\n message = None\n t0 = time.time()\n try:\n query = \"query.bibliographic={}\".format(urllib.parse.quote(title))\n url = self._get_api_url(query)\n\n response = requests.get(url).text\n json_response = json.loads(response)\n\n items = json_response[\"message\"][\"items\"]\n first_item = items[0] if len(items) > 0 else {}\n titles = first_item.get(\"title\", []) \n result_title = titles[0] if len(titles) > 0 else None\n\n if self.title_match(title, result_title):\n raw_meta = first_item\n meta = dict()\n if 'title' in raw_meta:\n meta['title'] = raw_meta[\"title\"]\n else:\n meta['title'] = None\n \n if 'DOI' in raw_meta:\n meta['doi'] = raw_meta[\"DOI\"]\n else:\n meta['doi'] = None\n \n if 'container-title' in raw_meta:\n meta['journal'] = 
raw_meta[\"container-title\"][0]\n else:\n meta['journal'] = None\n \n if 'ISSN' in raw_meta:\n meta['issn'] = raw_meta[\"ISSN\"][0]\n else:\n meta['issn'] = None\n\n if \"published-print\" in raw_meta:\n meta['year'] = raw_meta[\"published-print\"]['date-parts'][0][0] \n else:\n meta['year'] = None\n \n if 'author' in raw_meta:\n meta['authors'] = raw_meta[\"author\"]\n else:\n meta['authors'] = None\n \n if 'URL' in raw_meta:\n meta['url'] = raw_meta[\"URL\"]\n else:\n meta['url'] = None\n # meta = raw_meta\n if self.parent.logger:\n self.parent.logger.debug(meta)\n except: \n print(traceback.format_exc())\n meta = None\n message = f\"ERROR: {title}\"\n print(message) \n \n timing = self._mark_elapsed_time(t0)\n return _ScholInfraResponse_Crossref(self, meta, timing, message)", "def find_movies_by_title(self, title):\n\n articles = ['A ', 'An ', 'The ']\n if self.creative:\n title = title.lower()\n indexes = []\n titleWords = title.split(\" \")\n if title == '':\n return indexes\n for i, j in enumerate(self.titles):\n movie = j[0].lower()\n for form in self.generate_all_forms(title):\n if self.standalone_in(movie, form):\n indexes.append(i)\n break\n return indexes\n else:\n indexes = []\n new_title = title\n for article in articles: \n if article in title and title.index(article) == 0:\n if '(' in title:\n split_title = title.split('(')\n split_title[0] = split_title[0][len(article):len(split_title[0])-1] + ', ' + article\n new_title = '('.join(split_title)\n else:\n new_title = title[len(article):len(title)] + ', ' + article[:len(article)-1]\n for i, t in enumerate(self.titles):\n possible_title = t[0]\n if new_title == possible_title or title == possible_title:\n indexes.append(i)\n else: \n if '(' in possible_title:\n possible_title = possible_title[:possible_title.index('(')-1]\n if new_title == possible_title or title == possible_title:\n indexes.append(i)\n return indexes", "def _title_substitutions(title):\n title = str(title)\n with open(xbmc.translatePath('special://userdata/addon_data/script.remote_downloader/title_regex_substitutions.txt'), 'r') as f:\n for line in f.readlines():\n if line.strip() and not line.startswith('#'):\n title = eval(line.strip())\n\n return title", "def get_keyword_by_title(title, default=None):", "def load_wiktionary(configuration, verbose=0):\n\n df = pandas.read_csv(configuration['wiktionary_translations_path'],\n sep='\\t', usecols=['ID', 'Concept_ID', 'Concept', 'Languoid', 'Language_name', 'Form'])\n\n\n if verbose:\n print()\n print('number of available languages', len(set(df.Language_name)))\n print('language that have Dutch in the name')\n for language in set(df.Language_name):\n if 'Dutch' in language:\n print(language)\n print('we use: Dutch; Flemish')\n\n df = df[df.Language_name == 'Dutch; Flemish']\n\n english_lemmas = []\n english_definitions = []\n\n for index, row in df.iterrows():\n concept = row['Concept']\n lemma, *definitions = concept.split('/')\n english_lemmas.append(lemma)\n english_definitions.append('/'.join(definitions))\n\n df['English_lemma'] = english_lemmas\n\n dutch2english = defaultdict(set)\n english2dutch = defaultdict(set)\n\n for index, row in df.iterrows():\n english_lemma = row['English_lemma']\n dutch_lemma = row['Form']\n dutch2english[dutch_lemma].add(english_lemma)\n english2dutch[english_lemma].add(dutch_lemma)\n\n if verbose:\n print(f'Dutch lemmas with English translations: {len(dutch2english)}')\n print(f'English lemmas with Dutch translations: {len(english2dutch)}')\n\n return dutch2english, 
english2dutch", "def custom_replace_title(title):\n for utf8s, latin1 in (((\"–\", \"—\", \"―\", \"‒\", \"‐\", \"‑\", \"⁃\"), \"-\"),\n ((\"‘\", \"’\", \"‚\", \"›\", \"‹\", \"′\", \"‵\", \"ʹ\", \"’\"), \"'\"),\n ((\"“\", \"”\", \"„\", \"»\", \"«\", \"″\", \"‶\", \"ʺ\"), '\"'),\n ((\"…\", \"...\"))):\n regex = r\"(\"\n for utf8 in utf8s[:-1]:\n regex += rf\"{utf8}|\"\n regex += rf\"{utf8s[-1]})\"\n title = re.sub(regex, latin1, title)\n # Medley Song 1/Medley Song 2\n title = title.replace(\" / \", \"/\")\n # Rock'n'Roll etc.\n title = re.sub(r\"(\\S+)( |'| ')(n|N)( |'|' )(\\S+)\", r\"\\1'n'\\5\", title)\n\n # Capitalise each word\n for char in (\" \", \"-\", \"(\", '\"', \"/\"):\n matches = re.finditer(rf\"\\{char}([A-Za-z]*)\", title)\n for match in matches:\n title = title.replace(match.group(0),\n f\"{char}{match.group(1).capitalize()}\")\n # but write these lowercase\n for keyword in (\"In\", \"Of\", \"The\", \"To\", \"And\", \"At\", \"A\", \"An\"):\n title = re.sub(rf\"([^.:-] ){keyword}( |$)\", rf\"\\1{keyword.lower()}\\2\",\n title)\n\n # Pt./Pts.\n matches = re.finditer(r\"P(ar)?t(s?)\\.? ([A-Za-z0-9]*)\"\n r\"( ?(-|&|and) ?([A-Za-z0-9]*))?\", title)\n for match in matches:\n replacement = f\"Pt{match.group(2)}. {get_number(match.group(3))}\"\n if match.group(4) is not None:\n if match.group(5) == \"-\":\n replacement += \"-\"\n else:\n replacement += \" & \"\n replacement += get_number(match.group(6))\n title = title.replace(match.group(0), replacement)\n\n return title", "def titleSearch():\n\n query = \"%\" + request.args.get(\"q\") + \"%\"\n anime = db.execute(\n \"SELECT title, title_english FROM anime WHERE title LIKE ? OR title_english LIKE ? ORDER BY title LIMIT 15\", query, query)\n return jsonify(anime)", "def get_similar_titles(title: str) -> list:\n light_novel_results = BakaUpdates.get_similar_titles(title)\n visual_novel_results = VisualNovelDatabase.get_similar_titles(title)\n anime_results = MyAnimeList.get_similar_titles(title)\n\n results = []\n passed_titles = []\n\n for result_list in (light_novel_results, visual_novel_results, anime_results):\n for result in result_list:\n if result['title'] in passed_titles:\n results[passed_titles.index(result['title'])]['links'].append(result['link'])\n else:\n results.append({\n 'title': result['title'],\n 'links': [result['link']],\n 'similarity': result['similarity']\n })\n passed_titles.append(result['title'])\n\n results.sort(key=lambda item: item['similarity'], reverse=True)\n return results", "def from_one_line_title(cls, title):\n title_to_instance = {\n \"A\": cls.DAILY_A,\n \"B\": cls.DAILY_B,\n \"C\": cls.DAILY_C,\n \"Manufacturer Series\": cls.MANUFACTURERS,\n \"Nations Cup\": cls.NATIONS,\n }\n return title_to_instance[title]", "def _search_offline_dictionary(text: str) -> str:\n offline_dict = get_dictionary()\n translated_odia_text = offline_dict.get(text.lower())\n return translated_odia_text", "def find_items(self, title: str) -> list:\n title_list = []\n for library_item in self._item_list:\n title_list.append(library_item.get_title())\n results = difflib.get_close_matches(title, title_list, cutoff=0.5)\n return results", "def get_published_languages(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the model was migrated correctly.
def _check_model_validity(self, original_model, migrated_model): self.assertEqual( migrated_model.committer_id, original_model.committer_id) self.assertEqual( migrated_model.commit_type, original_model.commit_type) self.assertEqual( migrated_model.commit_message, original_model.commit_message) self.assertEqual( migrated_model.commit_cmds, original_model.commit_cmds) self.assertEqual( migrated_model.last_updated, original_model.last_updated)
[ "def should_migrate() -> bool:\n\n # Check if there is database to migrate.\n if not QUANDL_DATABASE_PATH.is_file():\n print(f'No existing database {QUANDL_DATABASE_PATH} to migrate.')\n\n # Delete info json if it exists; something went wrong with previous migration.\n if INFO_PATH.is_file():\n INFO_PATH.unlink()\n return False\n\n # Check for existing info json file.\n if INFO_PATH.is_file():\n\n # Try to open and decode the json.\n try:\n with open(INFO_PATH) as conf_file:\n info = json.loads(conf_file.read())\n\n except JSONDecodeError as e:\n print(f'{INFO_PATH} is corrupted.')\n INFO_PATH.unlink()\n return True\n\n # Decoding json succeeded.\n else:\n\n # Check that entries have correct keys.\n for entry in info:\n EXPECTED_KEYS = {'date',\n 'last_refresh_date',\n 'size',\n 'num_symbols',\n 'num_days',\n 'version',\n 'type'}\n if set(entry.keys()) != EXPECTED_KEYS:\n print(f'{INFO_PATH} is corrupted. Bad keys.')\n INFO_PATH.unlink()\n return True\n\n # Check for existing entries.\n if len(info) > 0:\n print(f'Already migrated. {INFO_PATH} has {len(info)} entries.')\n return False\n\n return True", "def check_valid_model(self, model):\n if not model:\n QMessageBox.information(None, u\"Ενημέρωση!\", u\"Παρακαλώ κάντε αναζήτηση δεδομένων\")\n return\n if model.rowCount() == 0:\n QMessageBox.information(None, u\"Ενημέρωση!\", u\"Δεν βρέθηκαν δεδομένα προς εξαγωγή\")\n return\n return True", "def allow_migrate(self, db, model):\n\t\tif model._meta.app_label not in DATABASES:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn db == model._meta.app_label", "def test_lowland_is_migratable(self, lowland):\n assert lowland.is_migratable is True", "def valid_model_component(self):\n return True", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == self.ANALYTICS_DB_ALIAS:\n return self.is_analytics_model(model_name)\n elif self.is_analytics_model(model_name):\n # If we are working on a analytics model, and the db is not\n # analytics_quiz_db, return false\n return False\n return None", "def check_model_constraints(self):\n # TODO: adapterize, utilitize or do anythin' or leave me as is.", "def test_highland_is_migratable(self, highland):\n assert highland.is_migratable is True", "def success(self, migration):", "def _check_model_params(self):", "def test_desert_is_migratable(self, desert):\n assert desert.is_migratable is True", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == APIRouter.ANALYTICS_DB_ALIAS:\n return self.is_analytics_model(model_name)\n elif self.is_analytics_model(model_name):\n # If we are working on a analytics model, and the db is not\n # analytics_db, return false\n return False\n return None", "def _validate_model(self, session, obj):\n pass", "def is_model(self, model):\n if model is None:\n self.dump()\n raise Exception(\"Model is None\")\n return model_path(model) == model_path(self.model)", "def is_valid(self, rollback=True):\n is_valid = super().is_valid()\n\n if not is_valid and rollback:\n self._meta.session.rollback()\n\n return is_valid", "def environment_needs_upgrade(self, db):\n if self.env.config.get('trac', 'database').startswith('postgres'):\n found_version = self._check_schema_version(db)\n if not found_version:\n self.log.debug(\"Initial schema needed for businessintelligence plugin for views\")\n return True\n else:\n if found_version < self._schema_version:\n self.log.debug(\"Upgrade schema from %d to %d needed for businessintelligence plugin for view table\",\n found_version,\n self._schema_version)\n 
return True\n return False", "def validate_update(self):\n if self.validate_child_relation():\n if self.validate_transaction():\n return True", "def _db_checkup(self):\n self.validate_table(self._db_data_table)", "def _checkModelConfig(self):\n if (self.modelConfig.__eq__('')):\n print('Debe cargar primero el archivo de configuración')\n self.statusBar().showMessage('Debe cargar primero el archivo de configuración')\n return False\n else:\n return True #true porque no esta vacio" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine a secondary evolution line with this one.
def combine_evo_lines(self, second_evo_first_step: EvolutionStep): if self.first.pokemon_name != second_evo_first_step.pokemon_name: raise Exception("UNEQUAL PARENTS") else: self._combine_evo_lines([self.first.pokemon_name], second_evo_first_step.next)
[ "def _combine_evo_lines(self, path: list, secondary: list):\n for pkmn in secondary:\n if not EvolutionLine.is_part_of_evo_line(self.first, pkmn.pokemon_name):\n self._insert(path.copy(), pkmn.pokemon_name, pkmn.ndex, pkmn.evo_stage)\n new_path = path.copy()\n new_path.append(pkmn.pokemon_name)\n self._combine_evo_lines(new_path, pkmn.next)", "def wedge(self, other):\n from .format_utilities import is_atomic\n if not isinstance(other, FreeModuleAltForm):\n raise TypeError(\"the second argument for the exterior product \" +\n \"must be an alternating form\")\n if other._tensor_rank == 0:\n return other*self\n if self._tensor_rank == 0:\n return self*other\n fmodule = self._fmodule\n basis = self.common_basis(other)\n if basis is None:\n raise ValueError(\"no common basis for the exterior product\")\n rank_r = self._tensor_rank + other._tensor_rank\n cmp_s = self._components[basis]\n cmp_o = other._components[basis]\n cmp_r = CompFullyAntiSym(fmodule._ring, basis, rank_r,\n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind_s, val_s in cmp_s._comp.iteritems():\n for ind_o, val_o in cmp_o._comp.iteritems():\n ind_r = ind_s + ind_o\n if len(ind_r) == len(set(ind_r)): # all indices are different\n cmp_r[[ind_r]] += val_s * val_o\n result = fmodule.alternating_form(rank_r)\n result._components[basis] = cmp_r\n if self._name is not None and other._name is not None:\n sname = self._name\n oname = other._name\n if not is_atomic(sname):\n sname = '(' + sname + ')'\n if not is_atomic(oname):\n oname = '(' + oname + ')'\n result._name = sname + '/\\\\' + oname\n if self._latex_name is not None and other._latex_name is not None:\n slname = self._latex_name\n olname = other._latex_name\n if not is_atomic(slname):\n slname = '(' + slname + ')'\n if not is_atomic(olname):\n olname = '(' + olname + ')'\n result._latex_name = slname + r'\\wedge ' + olname\n return result", "def append(self, other, joinType=\"line\"):\n segs1 = self.asSegments()\n segs2 = other.asSegments()\n if len(segs1) < 1:\n self.activeRepresentation = SegmentRepresentation(self, segs2)\n return\n if len(segs2) < 1:\n self.activeRepresentation = SegmentRepresentation(self, segs1)\n return\n\n # Which way around should they go?\n dist1 = segs1[-1].end.distanceFrom(segs2[0].start)\n dist2 = segs1[-1].end.distanceFrom(segs2[-1].end)\n if dist2 > 2 * dist1:\n segs2 = list(reversed([ x.reversed() for x in segs2]))\n\n # Add a line between if they don't match up\n if segs1[-1].end != segs2[0].start:\n segs1.append(Line(segs1[-1].end,segs2[0].start))\n\n # XXX Check for discontinuities and harmonize if needed\n\n segs1.extend(segs2)\n self.activeRepresentation = SegmentRepresentation(self, segs1)\n return self", "def combine(self, other, inplace=False):\n if not isinstance(other, PhotCurve):\n raise ValueError(\"Can only combine two curves\")\n if self.is_lam:\n if other.is_nu:\n other.in_lam(reinterpolate=True)\n elif self.is_nu:\n if other.is_lam:\n other.in_nu(reinterpolate=True)\n allx = np.unique(np.concatenate([other.x, self.x]))\n othery = other.interpolate(allx)\n selfy = self.interpolate(allx)\n finaly = othery * selfy\n if self.header is not None:\n newhd = self.header.deepcopy()\n if other.header is not None:\n newhd.add_card_value('comment', 'combined with {}'.\n other.header.short_description())\n else:\n newhd.add_card_value('comment', 'combined with a non '\n 'descript phocurve')\n if inplace:\n self.x = allx\n self.y = finaly\n self.nb = self.y.shape[0]\n self.interpolate_set = 
self.set_interpolation(\n self.interpolate_method,\n extrapolate=self.extrapolate,\n positive=self.positive)\n self.header = newhd\n return self\n else:\n return PhotCurve(x=allx * self.x_si_unit, y=finaly,\n header=newhd)", "def getSecondLine(self):\n\t\tpass", "def fused_with(self, other):\n seg = LineSegment(\n a=self.a,\n b=other.b,\n width=self.width,\n color=self.color,\n start_slant=self.start_slant,\n end_slant=other.end_slant,\n )\n seg.a_left = self.a_left\n seg.a_right = self.a_right\n seg.b_left = other.b_left\n seg.b_right = other.b_right\n return seg", "def redraw_secondary_y(self, x, y, index = 0, **kwarg):\r\n sub_plot = self.sub_plots(index)\r\n lines = sub_plot.y2_axis.plot(x, y, style, **kwarg)\r\n #show it\r\n self.canvas.draw()\r\n return lines", "def __add__(self, other: 'Wavefunction') -> 'Wavefunction':\n out = copy.deepcopy(self)\n out.ax_plus_y(1.0, other)\n return out", "def combine(self, other_wpc):\n for idx in range(len(other_wpc._line_point_clouds)):\n pc = other_wpc._line_point_clouds[idx]\n line = other_wpc._fitted_3d_lines[idx]\n self._line_point_clouds.append(pc)\n self._fitted_3d_lines.append(line)\n if self._color_inliers:\n if line.shape[0] == 0:\n self._c.append(np.broadcast_to(np.array([255, 0, 0]), (pc.shape[0], 3)))\n else:\n error = self.fitter.get_error(pc, line)\n colors = np.where(np.expand_dims(error < self._line_inlier_thresh, 1), np.array([[255, 255, 255]]), np.array([[255, 0, 0]]))\n colors = np.vstack([colors, np.broadcast_to(255, (4, 3))])\n self._c.append(colors)\n\n iterations = 0\n print(\"[WPC DEBUG] Iter: {}, Num Point Clouds: {}\".format(iterations, len(self._line_point_clouds)))\n while self.simplify():\n iterations += 1\n print(\"[WPC DEBUG] Iter: {}, Num Point Clouds: {}\".format(iterations, len(self._line_point_clouds)))\n print(\"[WPC DEBUG] Combining took {} iterations\".format(iterations))", "def createAltLine(self,line):\n # Parse line\n name, reactLine, prodLine, rLabelLine, pLabelLine, reversible, sym = self.parseLine(line)\n reacts = [x.strip() for x in reactLine.split('+')]\n prods = [x.strip() for x in prodLine.split('+')]\n rLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), rLabelLine.split('+'))\n pLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), pLabelLine.split('+')) \n \n # If there is a symmetric molecule\n if sym:\n ## Duplicate metabolites\n newReacts = []\n for react in reacts:\n newReacts.append(react)\n newReacts.append(react)\n newProds = []\n for prod in prods:\n newProds.append(prod)\n newProds.append(prod)\n newReacts = AtomTransition.convert2PS(newReacts)\n newProds = AtomTransition.convert2PS(newProds)\n \n ## Take care of labeling\n # Find out all carbon letters (= inputKeys)\n inputKeys=[]\n for lab in rLabel:\n if type(lab) == list:\n for comp in lab:\n inputKeys.extend([x for x in comp])\n else:\n inputKeys.extend([x for x in lab])\n inputKeys = sorted(set(inputKeys))\n \n # Find equivalent for each of the old carbon letters \n all_letters = string.ascii_letters # All possible letters\n avoid = ''.join(inputKeys) # These are taken letters, to be avoided in choosing new equivalents\n keyDict = {}\n for key in inputKeys:\n keyDict[key] = [x for x in all_letters if x not in avoid][0]\n avoid = avoid + keyDict[key]\n \n # Add new labeling for new metabolites \n newRlabel = []\n for lab in rLabel:\n if type(lab) is list: # Only works for two alternative labelings !!!\n newRlabel.append(lab[0])\n newRlabel.append(''.join([keyDict[x] for x in lab[1]]))\n 
else:\n # Creating new labeling for new metabolites\n newRlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab])\n newRlabel.append(newLab)\n newPlabel = []\n for lab in pLabel:\n if type(lab) is list:\n newPlabel.append(lab[0])\n newPlabel.append(''.join([keyDict[x] for x in lab[1]]))\n else:\n # Creating new labeling for new metabolites\n newPlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab]) \n newPlabel.append(newLab)\n else:\n # Convert reactants and products to pseudo metabolits (e.g. glu_L_c --> glu_L_c__ps1) if needed\n newReacts = AtomTransition.convert2PS(reacts)\n newProds = AtomTransition.convert2PS(prods)\n \n newRlabel = rLabel\n newPlabel = pLabel\n \n ## Join all into new line\n rxnSymb = ' <==> ' if reversible else ' --> '\n altLine = name \n altLine += ' \\t '+' + '.join(newReacts)+rxnSymb+' + '.join(newProds)\n altLine += ' \\t '+' + '.join(newRlabel)+' : '+' + '.join(newPlabel)\n \n return altLine", "def _extend_edge(self, panel_name, edge_influence, value, multiplicative=True):\n if isinstance(value, list):\n raise ValueError(\"Multiple scaling factors are not supported\")\n\n verts_ids, verts_coords, target_line, _ = self._meta_edge(panel_name, edge_influence)\n\n # calc extention pivot\n if edge_influence['direction'] == 'end':\n fixed = verts_coords[0] # start is fixed\n elif edge_influence['direction'] == 'start':\n fixed = verts_coords[-1] # end is fixed\n elif edge_influence['direction'] == 'both':\n fixed = (verts_coords[0] + verts_coords[-1]) / 2\n else:\n raise RuntimeError('Unknown edge extention direction {}'.format(edge_influence['direction']))\n\n # move verts \n # * along target line that sits on fixed point (correct sign & distance along the line)\n verts_projection = np.empty(verts_coords.shape)\n for i in range(verts_coords.shape[0]):\n verts_projection[i] = (verts_coords[i] - fixed).dot(target_line) * target_line\n\n if multiplicative:\n # * to match the scaled projection (correct point of application -- initial vertex position)\n new_verts = verts_coords - (1 - value) * verts_projection\n else:\n # * to match the added projection: \n # still need projection to make sure the extention derection is corect relative to fixed point\n \n # normalize first\n for i in range(verts_coords.shape[0]):\n norm = np.linalg.norm(verts_projection[i])\n if not np.isclose(norm, 0):\n verts_projection[i] /= norm\n\n # zero projections were not normalized -- they will zero-out the effect\n new_verts = verts_coords + value * verts_projection\n\n # update in the initial structure\n panel = self.pattern['panels'][panel_name]\n for ni, idx in enumerate(verts_ids):\n panel['vertices'][idx] = new_verts[ni].tolist()", "def __model_add_line (self, pos1, pos2, line):\n i1 = 0 # index for pos1\n i2 = 0 # index for pos2\n i2_prev = 0 # index for pos2 in previous pos1\n # [pos1-self.peaksize,pos1+self.peaksize]\n # region\n i1_max = len(pos1)\n i2_max = len(pos2)\n last_p2 = -1\n flag_find_overlap = False\n \n while i1<i1_max and i2<i2_max:\n p1 = pos1[i1]\n p2 = pos2[i2]\n if p1-self.peaksize > p2: # move pos2\n i2 += 1\n elif p1+self.peaksize < p2: # move pos1\n i1 += 1 \n i2 = i2_prev # search minus peaks from previous index\n flag_find_overlap = False\n else: # overlap!\n if not flag_find_overlap:\n flag_find_overlap = True\n i2_prev = i2 # only the first index is recorded\n # project\n for i in range(p2-p1+self.peaksize-self.tsize/2,p2-p1+self.peaksize+self.tsize/2):\n if i>=0 and i<len(line):\n line[i]+=1\n i2+=1\n return line", "def line_style_2():\n\n 
return(\"| | |\")", "def get_extrapoled_line(self, p1, p2):\n dist = p1.distance(p2)\n EXTRAPOL_RATIO = constants.field_width * 2 / dist\n a = p1\n b = geo.Point(p1.x + EXTRAPOL_RATIO * (p2.x - p1.x), p1.y + EXTRAPOL_RATIO * (p2.y - p1.y))\n return geo.LineString([a, b])", "def append_edge(self, edge):", "def addEdgeToDoc(self):\n super().addEdgeToDoc()\n \n for blk in self.lNode:\n assert blk.type.name in [\"row\", \"sepH\"], blk.type.name\n \n if blk.type.name == \"row\":\n ndBaseline = blk.node.xpath(\".//pc:Baseline\", namespaces=self.dNS)[0]\n o = self.shaper_fun(ndBaseline)\n MultiPageXml.setPoints(ndBaseline, list(o.coords))\n \n return", "def __sub__(self, other: 'Wavefunction') -> 'Wavefunction':\n out = copy.deepcopy(self)\n out.ax_plus_y(-1.0, other)\n return out", "def add_line(line_one, line_two):\n for i in range(len(line_one)):\n line_one[i] += line_two[i]", "def add_joining_lines(input_lines, output_lines, max_distance):\n \n print \"Starting add_joining_lines\"\n \n points = create_temp_filename(\"AJL_Points.shp\")\n\n if os.path.isfile(output_lines):\n remove_shapefile(output_lines)\n\n arcpy.CreateFeatureclass_management(os.path.dirname(output_lines), os.path.basename(output_lines), \"POLYLINE\")\n\n # Convert the lines to points at each end\n arcpy.FeatureVerticesToPoints_management(input_lines,points,\"BOTH_ENDS\")\n\n # Generate the near table\n arcpy.Near_analysis(points, points, max_distance, True, True)\n\n # Get the SearchCursor to allow us to iterate over the points\n points_rows = arcpy.SearchCursor(points)\n\n # Also get an InsertCursor to allow us to add to the lines\n output_rows = arcpy.InsertCursor(output_lines)\n\n # Get the shape field name\n points_shape_name = arcpy.Describe(points).shapeFieldName\n\n # For each row (that is, each line in the input dataset)\n for row in points_rows:\n # Get the nearest point found\n new_x = row.getValue(\"NEAR_X\")\n new_y = row.getValue(\"NEAR_Y\")\n\n # If one or other of them is not valid then continue to next iteration\n if new_x == -1 or new_y == -1:\n continue\n\n # Get the FID of the line that this point was originally part of\n orig_fid = row.getValue(\"ORIG_FID\")\n\n # Get the details of the input lines file (OID field name and the Shape field name)\n desc = arcpy.Describe(input_lines)\n oid_field = desc.OIDFieldName\n line_shape_name = desc.shapeFieldName\n\n # Construct the SQL WHERE clause to select the original line corresponding to the selected\n # points\n where_clause = arcpy.AddFieldDelimiters(input_lines, oid_field) + \" = \" + str(orig_fid)\n orig_rows = arcpy.SearchCursor(input_lines, where_clause)\n\n # Get the X and Y values of the current point\n part = row.getValue(points_shape_name).getPart()\n current_x = part.X\n current_y = part.Y\n\n # There will only be one record returned from the SQL statement above\n # so just get it (don't bother with a loop)\n line_row = orig_rows.next()\n\n # Get the first and last points of the selected line\n geom = line_row.Shape\n firstp = geom.firstPoint\n lastp = geom.lastPoint\n\n # Check to see if we are trying to connect one end of a line with the other end\n # If so, continue to the next loop iteration (that is, the next point)\n # NB: Two if statements are required as it needs to be checked both ways around\n if (current_x == firstp.X and current_y == firstp.Y) and (new_x == lastp.X and new_y == lastp.Y):\n continue\n elif (current_x == lastp.X and current_y == lastp.Y) and (new_x == firstp.X and new_y == firstp.Y):\n continue\n\n # If we've got to here 
then we want to create the line (we'll have 'continue'd before this\n # if there was a problem with the line. So...\n \n # Add the points to a new array of points for the line\n lineArray = arcpy.Array()\n\n first = arcpy.Point(X=current_x, Y=current_y)\n last = arcpy.Point(X=new_x, Y=new_y)\n\n lineArray.add(first)\n lineArray.add(last) \n\n # Insert the new line into the dataset\n feat = output_rows.newRow()\n feat.shape = lineArray\n output_rows.insertRow(feat)\n\n del orig_rows\n del line_row\n\n\n # Clean up\n del output_rows\n del points_rows\n del row\n\n gc.collect()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine a secondary evolution line with this one recursively
def _combine_evo_lines(self, path: list, secondary: list): for pkmn in secondary: if not EvolutionLine.is_part_of_evo_line(self.first, pkmn.pokemon_name): self._insert(path.copy(), pkmn.pokemon_name, pkmn.ndex, pkmn.evo_stage) new_path = path.copy() new_path.append(pkmn.pokemon_name) self._combine_evo_lines(new_path, pkmn.next)
[ "def combine_evo_lines(self, second_evo_first_step: EvolutionStep):\n if self.first.pokemon_name != second_evo_first_step.pokemon_name:\n raise Exception(\"UNEQUAL PARENTS\")\n else:\n self._combine_evo_lines([self.first.pokemon_name], second_evo_first_step.next)", "def wedge(self, other):\n from .format_utilities import is_atomic\n if not isinstance(other, FreeModuleAltForm):\n raise TypeError(\"the second argument for the exterior product \" +\n \"must be an alternating form\")\n if other._tensor_rank == 0:\n return other*self\n if self._tensor_rank == 0:\n return self*other\n fmodule = self._fmodule\n basis = self.common_basis(other)\n if basis is None:\n raise ValueError(\"no common basis for the exterior product\")\n rank_r = self._tensor_rank + other._tensor_rank\n cmp_s = self._components[basis]\n cmp_o = other._components[basis]\n cmp_r = CompFullyAntiSym(fmodule._ring, basis, rank_r,\n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind_s, val_s in cmp_s._comp.iteritems():\n for ind_o, val_o in cmp_o._comp.iteritems():\n ind_r = ind_s + ind_o\n if len(ind_r) == len(set(ind_r)): # all indices are different\n cmp_r[[ind_r]] += val_s * val_o\n result = fmodule.alternating_form(rank_r)\n result._components[basis] = cmp_r\n if self._name is not None and other._name is not None:\n sname = self._name\n oname = other._name\n if not is_atomic(sname):\n sname = '(' + sname + ')'\n if not is_atomic(oname):\n oname = '(' + oname + ')'\n result._name = sname + '/\\\\' + oname\n if self._latex_name is not None and other._latex_name is not None:\n slname = self._latex_name\n olname = other._latex_name\n if not is_atomic(slname):\n slname = '(' + slname + ')'\n if not is_atomic(olname):\n olname = '(' + olname + ')'\n result._latex_name = slname + r'\\wedge ' + olname\n return result", "def merge(line):\n # make sure the grid is more than one element\n # else return the original gird\n if len(line) < 2:\n return line\n slide_line = slide(line)\n for i_dummy in range(len(slide_line) - 1):\n try:\n if slide_line[i_dummy] == 0:\n continue\n # if adjacent tiles are equal, combine\n # them and zero out the further of the two\n if slide_line[i_dummy] == slide_line[i_dummy+1]:\n slide_line[i_dummy] *= 2\n slide_line[i_dummy+1] = 0\n continue\n except IndexError:\n continue\n # trim zeroes from the list and return the value \n return slide(slide_line)", "def createAltLine(self,line):\n # Parse line\n name, reactLine, prodLine, rLabelLine, pLabelLine, reversible, sym = self.parseLine(line)\n reacts = [x.strip() for x in reactLine.split('+')]\n prods = [x.strip() for x in prodLine.split('+')]\n rLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), rLabelLine.split('+'))\n pLabel = map(lambda x: utils.symsplit(x.strip()) if ';' in x else x.strip(), pLabelLine.split('+')) \n \n # If there is a symmetric molecule\n if sym:\n ## Duplicate metabolites\n newReacts = []\n for react in reacts:\n newReacts.append(react)\n newReacts.append(react)\n newProds = []\n for prod in prods:\n newProds.append(prod)\n newProds.append(prod)\n newReacts = AtomTransition.convert2PS(newReacts)\n newProds = AtomTransition.convert2PS(newProds)\n \n ## Take care of labeling\n # Find out all carbon letters (= inputKeys)\n inputKeys=[]\n for lab in rLabel:\n if type(lab) == list:\n for comp in lab:\n inputKeys.extend([x for x in comp])\n else:\n inputKeys.extend([x for x in lab])\n inputKeys = sorted(set(inputKeys))\n \n # Find equivalent for each of the old carbon letters \n 
all_letters = string.ascii_letters # All possible letters\n avoid = ''.join(inputKeys) # These are taken letters, to be avoided in choosing new equivalents\n keyDict = {}\n for key in inputKeys:\n keyDict[key] = [x for x in all_letters if x not in avoid][0]\n avoid = avoid + keyDict[key]\n \n # Add new labeling for new metabolites \n newRlabel = []\n for lab in rLabel:\n if type(lab) is list: # Only works for two alternative labelings !!!\n newRlabel.append(lab[0])\n newRlabel.append(''.join([keyDict[x] for x in lab[1]]))\n else:\n # Creating new labeling for new metabolites\n newRlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab])\n newRlabel.append(newLab)\n newPlabel = []\n for lab in pLabel:\n if type(lab) is list:\n newPlabel.append(lab[0])\n newPlabel.append(''.join([keyDict[x] for x in lab[1]]))\n else:\n # Creating new labeling for new metabolites\n newPlabel.append(lab)\n newLab = ''.join([keyDict[x] for x in lab]) \n newPlabel.append(newLab)\n else:\n # Convert reactants and products to pseudo metabolits (e.g. glu_L_c --> glu_L_c__ps1) if needed\n newReacts = AtomTransition.convert2PS(reacts)\n newProds = AtomTransition.convert2PS(prods)\n \n newRlabel = rLabel\n newPlabel = pLabel\n \n ## Join all into new line\n rxnSymb = ' <==> ' if reversible else ' --> '\n altLine = name \n altLine += ' \\t '+' + '.join(newReacts)+rxnSymb+' + '.join(newProds)\n altLine += ' \\t '+' + '.join(newRlabel)+' : '+' + '.join(newPlabel)\n \n return altLine", "def getEdgeMergeThr(self) -> retval:\n ...", "def evolve(self, stage):", "def _extend_edge(self, panel_name, edge_influence, value, multiplicative=True):\n if isinstance(value, list):\n raise ValueError(\"Multiple scaling factors are not supported\")\n\n verts_ids, verts_coords, target_line, _ = self._meta_edge(panel_name, edge_influence)\n\n # calc extention pivot\n if edge_influence['direction'] == 'end':\n fixed = verts_coords[0] # start is fixed\n elif edge_influence['direction'] == 'start':\n fixed = verts_coords[-1] # end is fixed\n elif edge_influence['direction'] == 'both':\n fixed = (verts_coords[0] + verts_coords[-1]) / 2\n else:\n raise RuntimeError('Unknown edge extention direction {}'.format(edge_influence['direction']))\n\n # move verts \n # * along target line that sits on fixed point (correct sign & distance along the line)\n verts_projection = np.empty(verts_coords.shape)\n for i in range(verts_coords.shape[0]):\n verts_projection[i] = (verts_coords[i] - fixed).dot(target_line) * target_line\n\n if multiplicative:\n # * to match the scaled projection (correct point of application -- initial vertex position)\n new_verts = verts_coords - (1 - value) * verts_projection\n else:\n # * to match the added projection: \n # still need projection to make sure the extention derection is corect relative to fixed point\n \n # normalize first\n for i in range(verts_coords.shape[0]):\n norm = np.linalg.norm(verts_projection[i])\n if not np.isclose(norm, 0):\n verts_projection[i] /= norm\n\n # zero projections were not normalized -- they will zero-out the effect\n new_verts = verts_coords + value * verts_projection\n\n # update in the initial structure\n panel = self.pattern['panels'][panel_name]\n for ni, idx in enumerate(verts_ids):\n panel['vertices'][idx] = new_verts[ni].tolist()", "def addEdgeToDoc(self):\n super().addEdgeToDoc()\n \n for blk in self.lNode:\n assert blk.type.name in [\"row\", \"sepH\"], blk.type.name\n \n if blk.type.name == \"row\":\n ndBaseline = blk.node.xpath(\".//pc:Baseline\", namespaces=self.dNS)[0]\n o 
= self.shaper_fun(ndBaseline)\n MultiPageXml.setPoints(ndBaseline, list(o.coords))\n \n return", "def append_edge(self, edge):", "def return_streamline(self):\n decision1 = 0\n decision2 = 0\n decision = 0\n streamline = self.seed\n track_point = self.seed\n node_onetrack = []\n decision1 = 1\n decision2 = 1\n if len(self.graph.shape) == 1:\n index_c = 0\n node_onetrack = self.seed\n if len(self.graph.shape) != 1:\n norm2 = norm(self.graph-self.seed,axis=1,ord=2)\n if norm2.min() < self.resolution:\n index_c = np.argmin(norm2)\n node_onetrack = self.graph[index_c]\n else:\n index_c = self.graph.shape[0]\n self.graph = np.vstack((self.graph,self.seed))\n self.value = np.append(self.value,0.0)\n #node_onetrack = seed\n \"\"\"Alter\n \"\"\"\n if len(self.exp_graph_alter.shape) == 1:\n norm_alter = norm(self.exp_graph_alter-self.seed)\n if norm_alter.min() < self.resolution:\n index_alter = np.argmin(norm_alter)\n else:\n index_alter = self.exp_graph_alter.shape[0]\n self.exp_graph_alter = np.vstack((self.exp_graph_alter,self.seed))\n self.exp_value_alter = np.append(self.exp_value_alter,0.0)\n self.exp_direc_alter = np.vstack((self.exp_direc_alter,np.array([0,0,0])))\n if len(self.exp_graph_alter.shape) != 1:\n norm_alter = norm(self.exp_graph_alter-self.seed,axis=1,ord=2)\n if norm_alter.min() < self.resolution:\n index_alter = np.argmin(norm_alter)\n node_onetrack_alter = self.exp_graph_alter[index_alter]\n else:\n index_alter = self.exp_graph_alter.shape[0]\n self.exp_graph_alter = np.vstack((self.exp_graph_alter,self.seed))\n self.exp_value_alter = np.append(self.exp_value_alter,0.0)\n self.exp_direc_alter = np.vstack((self.exp_direc_alter,np.array([0,0,0])))\n\n seed_onetrack = Seed(self.seed, index_c)\n seed_onetrack.track1 = np.append(seed_onetrack.track1, index_c)\n \"\"\"Alter\n \"\"\"\n seed_onetrack_alter = Seed(self.seed, index_alter)\n seed_onetrack_alter.track1 = np.append(seed_onetrack_alter.track1, index_alter)\n \"\"\"\n if len(self.graph.shape) == 1:\n seed_onetrack.nodes1 = self.graph\n else:\n seed_onetrack.nodes1 = self.graph[index_c]\n \"\"\"\n\n def itp(track_point):\n t0 = int(np.round(track_point[0]))\n t1 = int(np.round(track_point[1]))\n t2 = int(np.round(track_point[2]))\n return t0, t1, t2\n\n t0_init,t1_init,t2_init = itp(track_point)\n \"\"\"\n if self.direc == True:\n dir_old = -self.direction_getter[t0, t1, t2, 0,:]\n if self.direc == False:\n dir_old = self.direction_getter[t0, t1, t2, 0,:]\n \"\"\"\n \"\"\"First initial start direction\n \"\"\"\n for kk in range(2):\n if kk%2 == 0:\n dir_old = self.direction_getter[t0_init, t1_init, t2_init,0,:]#,int(kk/2) ,:]\n else:\n dir_old = -self.direction_getter[t0_init, t1_init, t2_init,0,:]# int(np.floor(kk/2)),:]\n t0 = t0_init\n t1 = t1_init\n t2 = t2_init\n while(self.tissue_classifier[t0,t1,t2] != 0 ):\n decision1 = 0\n decision2 = 0\n value_single = -500\n t0, t1, t2 = itp(track_point)\n dir_sub = self.direction_getter[t0, t1, t2, 0,:]\n #dir_final = self.direction_getter[t0,t1,t2,0,:]\n if dir_sub.all() == False:\n t0, t1, t2 = self.check_direction(t0,t1,t2)\n if t0 == -1 and t1 == -1 and t2 == -1:\n break\n \"\"\"First direction\n \"\"\"\n for i in range(5):\n dir_sub = self.direction_getter[t0, t1, t2, i,:]\n if dir_sub.all() == True:\n if np.dot(dir_old,dir_sub)<self.angles:\n #dir_sub = -dir_sub\n continue\n value_single_test = self.find_track_point(dir_sub, track_point)\n #if value_single_test < self.reward_negative/25:\n # continue\n decision1 = 1\n if value_single_test > value_single:\n index_inside = 
i\n value_single = value_single_test\n dir_final = dir_sub\n \"\"\"Alter\n\n value_single_test_alter = self.find_track_point_general(dir_sub, track_point)\n if value_single_test_alter > 0:\n if value_single_test < 0:\n continue\n index_inside = i\n value_single = value_single_test\n dir_final = dir_sub\n \"\"\"\n \"\"\"\n second direction\n \"\"\"\n for i in range(5):\n dir_sub = -self.direction_getter[t0, t1, t2, i,:]\n if dir_sub.all() == True:\n if np.dot(dir_old,dir_sub)<self.angles:\n #dir_sub = -dir_sub\n continue\n value_single_test = self.find_track_point(dir_sub, track_point)\n #if value_single_test < self.reward_negative/25:\n # continue\n decision2 = 1\n if value_single_test > value_single:\n index_inside = i\n value_single = value_single_test\n dir_final = dir_sub\n \"\"\"Alter\n\n value_single_test_alter = self.find_track_point_general(dir_sub, track_point)\n if value_single_test_alter > value_single:\n if value_single_test < 0:\n continue\n index_inside = i\n value_single = value_single_test_alter\n dir_final = dir_sub\n \"\"\"\n\n dir_learned = self.find_track_point_general(track_point)\n if np.any(dir_learned):\n if np.dot(dir_final,dir_learned) > self.angles:\n #print(\"im in corporating dir\")\n dir_final = (0.3*dir_learned+0.3*dir_old+0.7*dir_final)/norm(0.3*dir_learned+0.3*dir_old+0.7*dir_final)\n\n\n if decision1 == 0 and decision2 == 0:\n break\n\n #dir_old = dir_final\n #track_point = track_point + self.step_size * dir_final\n \"\"\"Adding computing direction\n \"\"\"\n if len(self.exp_graph_alter.shape) == 1:\n norm_final = norm(self.exp_graph_alter-track_point)\n else:\n norm_final = norm(self.exp_graph_alter-track_point,axis=1,ord=2)\n if norm_final.min() < self.resolution_exp:\n \"\"\"\n if np.sum(norm_final < self.resolution) < self.exp_range:\n index_tt = np.argmin(norm_final)\n node_near = self.exp_graph_alter[index_tt]\n dir_tt = self.exp_direc_alter[index_tt]\n if not norm(node_near-track_point) == 0:\n dir_t = (node_near-track_point)/norm(node_near-track_point)\n if np.dot(dir_old,dir_t)>self.angles:\n #print(\"im here inference\")\n if np.dot(dir_old,dir_tt)<0:\n dir_final = (0.2*dir_old+0.2*dir_final+dir_t-0.1*dir_tt)/norm(0.2*dir_old+0.2*dir_final+dir_t-0.1*dir_tt)\n else:\n dir_final = (0.2*dir_old+0.2*dir_final+dir_t+0.1*dir_tt)/norm(0.2*dir_old+0.2*dir_final+dir_t+0.1*dir_tt)\n \"\"\"\n if np.sum(norm_final < self.resolution) > self.exp_range or np.sum(norm_final < self.resolution) == self.exp_range:\n #print(\"im here\")\n index_tt = np.argmin(norm_final)\n #index_tt = np.where(norm_final<self.resolution)\n node_near = self.exp_graph_alter[index_tt]\n dir_t = self.exp_direc_alter[index_tt]\n #dir_t = np.sum(self.exp_direc_alter[index_tt],axis=0)/norm(self.exp_direc_alter[index_tt],axis=0)\n if np.any(dir_t) and np.dot(dir_old,dir_t)>self.angles:\n print(\"im here\")\n dir_final = (0.3*dir_old+dir_final+0.5*dir_t)/norm(0.3*dir_old+dir_final+0.5*dir_t)\n if np.any(dir_t) and np.dot(dir_old,dir_t)<0:\n print(\"im here\")\n dir_final = (0.3*dir_old+dir_final-0.5*dir_t)/norm(0.3*dir_old+dir_final-0.5*dir_t)\n \"\"\"\n if not np.any(dir_t):\n index_tt = np.argmin(norm_final)\n node_near = self.exp_graph_alter[index_tt]\n dir_tt = self.exp_direc_alter[index_tt]\n if not norm(node_near-track_point) == 0:\n dir_t = (node_near-track_point)/norm(node_near-track_point)\n if np.dot(dir_old,dir_t)>self.angles:\n #print(\"im here inference\")\n if np.dot(dir_old,dir_tt)<0:\n dir_final = 
(0.2*dir_old+0.2*dir_final+dir_t-0.1*dir_tt)/norm(0.2*dir_old+0.2*dir_final+dir_t-0.1*dir_tt)\n else:\n dir_final = (0.2*dir_old+0.2*dir_final+dir_t+0.1*dir_tt)/norm(0.2*dir_old+0.2*dir_final+dir_t+0.1*dir_tt)\n\n \"\"\"\n dir_old = dir_final\n track_point = track_point + self.step_size * dir_final\n\n if len(self.graph.shape) == 1:\n norm2 = norm(self.graph-track_point)\n else:\n norm2 = norm(self.graph-track_point,axis=1,ord=2)\n \"\"\"Alter\n \"\"\"\n if len(self.exp_graph_alter.shape) == 1:\n norm_alter = norm(self.exp_graph_alter-track_point)\n else:\n norm_alter = norm(self.exp_graph_alter-track_point,axis=1,ord=2)\n\n if norm2.min() < self.resolution:\n index_t = np.argmin(norm2)\n if not np.any(seed_onetrack.track1 == index_t):\n seed_onetrack.track1 = np.append(seed_onetrack.track1,index_t)\n if len(self.graph.shape) == 1:\n seed_onetrack.nodes1 = np.vstack((seed_onetrack.nodes1, self.graph))\n else:\n seed_onetrack.nodes1 = np.vstack((seed_onetrack.nodes1, self.graph[int(index_t)]))\n else:\n if len(self.graph.shape) == 1:\n index_t = 1\n else:\n index_t = self.graph.shape[0]\n self.graph = np.vstack((self.graph,track_point))\n self.value = np.append(self.value,0.0)\n seed_onetrack.track1 = np.append(seed_onetrack.track1, index_t)\n if len(self.graph.shape) == 1:\n seed_onetrack.nodes1 = np.vstack((seed_onetrack.nodes1, self.graph))\n else:\n seed_onetrack.nodes1 = np.vstack((seed_onetrack.nodes1, self.graph[int(index_t)]))\n\n \"\"\"Alter\n \"\"\"\n if norm_alter.min() < self.resolution:\n index_alter = np.argmin(norm_alter)\n if not np.any(seed_onetrack_alter.track1 == index_alter):\n seed_onetrack_alter.track1 = np.append(seed_onetrack_alter.track1,index_alter)\n else:\n if len(self.exp_graph_alter.shape) == 1:\n index_alter = 1\n else:\n index_alter = self.exp_graph_alter.shape[0]\n self.exp_direc_alter = np.vstack((self.exp_direc_alter,dir_final))\n self.exp_graph_alter = np.vstack((self.exp_graph_alter,track_point))\n self.exp_value_alter = np.append(self.exp_value_alter,0.0)\n seed_onetrack_alter.track1 = np.append(seed_onetrack_alter.track1, index_alter)\n streamline = np.vstack((streamline,track_point))\n t0, t1, t2 = itp(track_point)\n\n if t0 > self.shape0 or t0 == self.shape0:\n t0 = self.shape0 - 1\n if t1 > self.shape1 or t1 == self.shape1:\n t1 = self.shape1 - 1\n if t2 > self.shape2 or t2 == self.shape2:\n t2 = self.shape2 - 1\n\n #dir_sub = self.direction_getter[t0, t1, t2, 0,:]\n #if dir_sub.all() == False:\n # t0, t1, t2 = self.check_direction(t0,t1,t2)\n \"\"\"\n if len(seed_onetrack.nodes1.shape) == 1:\n norm3_track1 = norm(seed_onetrack.nodes1 - self.goal_point)\n else:\n norm3_track1 = norm(seed_onetrack.nodes1 - self.goal_point,axis=1,ord=2)\n \"\"\"\n if len(streamline.shape) == 1:\n norm3_track1 = norm(streamline - self.goal_points)\n else:\n for i in range(streamline.shape[0]):\n norm3_track1 = norm(streamline[i] - self.goal_points,axis=1,ord=2)\n if norm3_track1.min()<self.goal_radius:\n self.positive=True\n #self.streamlines.append(streamline)\n decision = 1\n break\n if decision == 0:\n self.positive=False\n #if seed_onetrack.track1.shape[0] > self.maxlen:\n if streamline.shape[0] > self.maxlen:\n self.positive = False\n if self.positive == True:\n self.streamlines.append(streamline)\n self.td_learning_general(seed_onetrack_alter.track1)\n self.exp_graph_general = self.exp_graph_alter\n self.exp_value_general = self.exp_value_alter\n self.exp_direc_general = self.exp_direc_alter\n self.indexs = np.append(self.indexs,self.index)\n else:\n 
self.exp_graph_alter = self.exp_graph_general\n self.exp_value_alter = self.exp_value_general\n self.exp_direc_alter = self.exp_direc_general\n self.td_learning(seed_onetrack.track1)\n return streamline, seed_onetrack", "def blend_co(parent1,parent2,max_points=25,alpha=0.01): \n \n \n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1\n \n #print('indexes:', idx1, idx2, idx3) \n \n gamma = (1. + 2. * alpha) * random.random() - alpha # generating a random gamma\n \n x1 = offspring1[idx1][idx2][idx3] # saving the value of point 1\n x2 = offspring2[idx1][idx2][idx3] # saving the value of point 2\n \n #print('x1:',x1)\n #print('x2:',x2)\n \n point1 = (1. - gamma) * x1 + gamma * x2 # new value for point 1\n point2 = gamma * x1 + (1. - gamma) * x2 # new value for point 2\n \n #print('point1:', point1)\n #print('point2:', point2)\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n #print('\\n')\n \n return offspring1, offspring2", "def initialSecond(self):\n\n # ************************************************************\n # ***** CHANNEL INITIAL SPLIT UP IN SECOND CHANNEL************\n # ************************************************************\n if option['SplitRouting']:\n\n ChanMan2 = (self.var.ChanMan / self.var.CalChanMan) * loadmap('CalChanMan2')\n AlpTermChan2 = (ChanMan2 / (np.sqrt(self.var.ChanGrad))) ** self.var.Beta\n ChannelAlpha2C = AlpTermChan2 * (self.var.ChanWettedPerimeterAlpha ** self.var.AlpPow)\n self.var.InvChannelAlpha2 = 1/ChannelAlpha2C\n self.var.ChannelAlpha2 = decompress(ChannelAlpha2C)\n # calculating second Alpha for second (virtual) channel\n\n if not(option['InitLisflood']):\n\n self.var.QLimit = loadmap('AvgDis') * loadmap('QSplitMult')\n self.var.M3Limit = self.var.ChannelAlphaC * self.var.ChanLengthC * (self.var.QLimit ** self.var.Beta)\n # lower discharge limit for second line of routing\n # set to mutiple of average discharge (map from prerun)\n # QSplitMult =2 is around 90 to 95% of Q\n self.var.QLimit = self.var.QLimit / self.var.NoRoutSteps\n\n self.var.Chan2M3Start = ChannelAlpha2C * self.var.ChanLengthC * (self.var.QLimit ** self.var.Beta)\n # virtual amount of water in the channel through second line\n\n self.var.Chan2QStart = self.var.QLimit - compressArray(\n upstream(self.var.LddKinematic, decompress(self.var.QLimit)))\n\n # because kinematic routing with a low amount of discharge leads to long travel time:\n # Starting Q for second line is set to a higher value\n\n self.var.Chan2M3Kin = self.var.CrossSection2Area * self.var.ChanLengthC + self.var.Chan2M3Start\n\n self.var.ChanM3Kin = self.var.ChanM3 - self.var.Chan2M3Kin + self.var.Chan2M3Start", "def polyMergeEdge(secondEdge=int, mergeMode=int, nodeState=int, constructionHistory=bool, firstEdge=int, caching=bool, name=\"string\"):\n pass", "def combine_lines(olds, newline):\n final = newline\n for old in olds:\n final = combine_two(final, old)\n return final", "def infer_2d_links(self): # DwaqAggregator version\n\n if self.exch_to_2d_link is None:\n self.infer_2d_elements() \n poi0=self.pointers-1\n\n # map 0-based exchange index to 0-based link index, limited\n # to horizontal exchangse\n exch_to_2d_link=np.zeros(self.n_exch_x+self.n_exch_y,[('link','i4'),\n 
('sgn','i4')])\n exch_to_2d_link['link']=-1\n\n # track some info about links\n links=[] # elt_from,elt_to\n mapped=dict() # (src_2d, dest_2d) => link idx\n\n # two boundary exchanges, can't be distinguished based on the internal segment.\n # but we can see which unaggregated exchanges/links map to them. \n # at this point, is there ever a time that we don't want to keep these separate?\n # I think we always want to keep them separate, the crux is how to keep track of\n # who is who between layers. And that is where we can use the mapping from the\n # unaggregated hydro, where the external id, instead of setting it to -1 and\n # distinguishing only on aggregated internal segment, we can now refine that\n # and label it based on ... maybe the smallest internal segment for boundary\n # exchanges which map to this aggregated exchange?\n\n self.hydro_in.infer_2d_links()\n # some of the code below can't deal with multiple subdomains\n assert self.exch_local.shape[0]==1\n\n # and special to aggregated code, also build up a mapping of unaggregated\n # links to aggregated links. And since we're forcing this code to deal with\n # only a single, global unaggregated domain, this mapping is just global to agg.\n # Maybe this should move out to a more general purpose location??\n link_global_to_agg=np.zeros(self.hydro_in.n_2d_links,'i4')-1\n\n # build hash table to accelerate the lookup below\n agg_exch_to_locals=defaultdict(list)\n # iterate over the local exchanges, here just proc 0\n for unagg_exch,agg in enumerate(self.exch_local['agg'][0]):\n agg_exch_to_locals[ agg ].append(unagg_exch)\n \n for exch_i,(a,b,_,_) in enumerate(poi0[:self.n_exch_x+self.n_exch_y]):\n # probably have to speed this up with some hashing\n # my_unagg_exchs=np.nonzero(self.exch_local['agg'][0,:]==exch_i)[0]\n my_unagg_exchs=np.array(agg_exch_to_locals[exch_i])\n \n # this is [ (link, sgn), ... ]\n my_unagg_links=self.hydro_in.exch_to_2d_link[my_unagg_exchs]\n if a>=0:\n a2d=self.seg_to_2d_element[a]\n else:\n # assuming this only works for global domains\n # we *could* have multiple unaggregated boundary exchanges mapping\n # onto this single aggregated boundary exchange. or not.\n # what do we know about how the collection of unagg links will be\n # consistent across layers? ... hmmmph\n # unsure.. but will use the smallest unaggregated link as a label\n # to make this aggregated link distinction\n a2d=-1 - my_unagg_links['link'].min()\n\n assert b>=0 # too lazy, and this shouldn't happen. \n b2d=self.seg_to_2d_element[b]\n\n k='not yet set'\n if (b2d,a2d) in mapped:\n k=(b2d,a2d) \n exch_to_2d_link['link'][exch_i] = mapped[k]\n exch_to_2d_link['sgn'][exch_i]=-1\n else:\n k=(a2d,b2d)\n if k not in mapped:\n mapped[k]=len(links)\n # does anyone use the values in links[:,0] ??\n links.append( [a2d,b2d] )\n\n exch_to_2d_link['link'][exch_i] = mapped[k]\n exch_to_2d_link['sgn'][exch_i]=1\n # record this mapping for later use. There is some duplicated\n # effort here, since in most cases we'll get the same answer for each\n # of the exchanges in this one link. But it's possible that some\n # exchanges exist at only certain elevations, or something? for now\n # duplicate effort in exchange for being sure that all of the links\n # get set.\n # actually, getting some cases where this gets overwritten with\n # different values. Shouldn't happen!\n prev_values=link_global_to_agg[my_unagg_links['link']]\n # expect that these are either already set, or uninitialized. 
but if\n # set to a different link, then we have problems.\n prev_is_okay= (prev_values==mapped[k]) | (prev_values==-1)\n assert np.all(prev_is_okay)\n link_global_to_agg[my_unagg_links['link']]=mapped[k]\n\n self.link_global_to_agg=link_global_to_agg\n links=np.array(links)\n n_2d_links=len(links)\n\n ##\n\n # Bit of a sanity warning on multiple boundary exchanges involving the\n # same segment - this would indicate that there should be multiple 2D\n # links into that segment, but this generic code doesn't have a robust\n # way to deal with that.\n if 1:\n # get 172 of these now. sounds roughly correct.\n # ~50 in the ocean, 113 or 117 sources, and a handful of\n # others (false_*) which take up multiple links for\n # a single source.\n\n # indexes of which links are boundary\n bc_links=np.nonzero( links[:,0] < 0 )[0]\n\n for bc_link in bc_links:\n # index of which exchanges map to this link\n exchs=np.nonzero( exch_to_2d_link['link']==bc_link )[0]\n # link id, sgn for each of those exchanges\n ab=exch_to_2d_link[exchs]\n # find the internal segments for each of those exchanges\n segs=np.zeros(len(ab),'i4')\n sel0=exch_to_2d_link['sgn'][exchs]>0 # regular order\n segs[sel0]=poi0[exchs,1]\n if np.any(~sel0):\n # including checking for weirdness\n self.log.warning(\"Some exchanges had to be flipped when flattening to 2D links\")\n segs[~sel0]=poi0[exchs,0]\n # And finally, are there any duplicates into the same segment? i.e. a segment\n # which has multiple boundary exchanges which we have failed to distinguish (since\n # in this generic implementation we have little info for distinguishing them).\n # note that in the case of suntans output, this is possible, but if it has been\n # mapped from multiple domains to a global domain, those exchanges have probably\n # already been combined.\n if len(np.unique(segs)) < len(segs):\n self.log.warning(\"In flattening exchanges to links, link %d has ambiguous multiple exchanges for the same segment\"%bc_link)\n\n ##\n self.exch_to_2d_link=exch_to_2d_link\n self.links=links\n self.n_2d_links=n_2d_links", "def __call__(self, params, return_traj=False):\n trajectory = [params]\n \n ### YOUR CODE HERE\n\n # TODO: write code here, should call gd_step\n\n cost = InnerObjective(self.x, self.y)\n\n new_params = params\n\n for i in range(self.num_steps):\n new_params = gd_step(cost, new_params, self.inner_lrate)\n trajectory += [new_params]\n\n final_cost = cost(new_params)\n\n ### END CODE\n \n if return_traj:\n return final_cost, trajectory\n else:\n return final_cost", "def add_edge_to_dependences(curr):\n global GRAPH, priorities, NODE_OPS, def_line, ROOT, REV_GRAPH, \\\n OP_CHILDREN_NUM, OP_PARENTS_NUM\n curr_data = curr.ir_data\n line = curr.line_num\n used = get_used(curr.opcode)\n added = set()\n # max number of defined variables is 1 for all our purposes\n for j in used:\n\n other_line = def_line[curr_data[j + 1]]\n if other_line in added:\n continue\n other_ir_obj = NODE_OPS[other_line]\n # if verbose:\n # print(\"curr line num: %d\" % curr.line_num)\n # print(\"other line num: %d\" % other_ir_obj.line_num)\n # if verbose and curr.opcode == 0:\n # print(\n # \"edge added: (%d,%d)\" % (curr.line_num, other_ir_obj.line_num))\n REV_GRAPH[other_line][curr.line_num] = True\n GRAPH[curr.line_num][other_line] = True\n # Add a directed edge between these two\n\n OP_PARENTS_NUM[other_ir_obj.line_num] += 1\n # increment its parents count\n\n OP_CHILDREN_NUM[curr.line_num] += 1\n # increment its children count\n added.add(other_line)", "def 
append_intra_layers(self):\n for i in self.layers + self.links:\n\n if hasattr(i, 'layers'):\n for layer in i.layers:\n if layer not in self.layers:\n self.layers.append(layer)\n\n if hasattr(i, 'links'):\n for link in i.links:\n if link not in self.links:\n self.links.append(link)", "def line_walk_edges(self,n1=None,n2=None,v1=None,v2=None,\n include_tangent=False,\n include_coincident=True):\n # this is a bit dicey in terms of numerical robustness - \n # face_in_direction is liable to give bad results when multiple faces are\n # indistinguishable (like a colinear set of points with many degenerate faces\n # basically on top of each other).\n\n # How can this be made more robust?\n # When the query line exactly goes through one or more vertex stuff starts\n # going nuts.\n # So is it possible to handle this more intelligently?\n # there are 3 possibilities for intersecting edges:\n # (1) intersect only at an end point, i.e. endpoint lies on query line\n # (2) intersect in interior of edge - one end point on one side, other endpoint\n # on the other side of the query line\n # (3) edge is coincident with query line\n\n\n # so for a first cut - make sure that we aren't just directly connected:\n if (n2 is not None) and (n1 is not None) and (n2 in self.delaunay_neighbors(n1)):\n return []\n\n if v1 is None:\n v1 = self.vh[n1]\n if v2 is None:\n v2 = self.vh[n2]\n\n # Get the points from the vertices, not self.points, because in some cases\n # (adjust_move_node) we may be probing\n p1 = np.array([ v1.point().x(), v1.point().y()] )\n p2 = np.array([ v2.point().x(), v2.point().y()] )\n\n # print \"Walking the line: \",p1,p2\n\n vec = p2 - p1\n unit_vec = vec / norm(vec)\n\n pnt = p1 \n\n # NB: this can be None - though not sure whether the context can\n # ensure that it never would be.\n f1 = self.face_in_direction(v1,vec)\n f2 = self.face_in_direction(v2,-vec)\n\n # do the search:\n f_trav = f1\n edges = []\n while 1:\n # print \"line_walk_edges: traversing face:\"\n # print [f_trav.vertex(i).point() for i in [0,1,2]]\n\n # Stop condition: we're in a face containing the final vertex\n # check the vertices directly, rather than the face\n still_close = 0\n for i in range(3):\n if f_trav.vertex(i) == v2:\n return edges\n\n if not still_close:\n # Check to see if this vertex is beyond the vertex of interest\n vertex_i_pnt = np.array( [f_trav.vertex(i).point().x(),f_trav.vertex(i).point().y()] )\n if norm(vec) > np.dot( vertex_i_pnt - p1, unit_vec):\n still_close = 1\n\n if not still_close:\n # We didn't find any vertices of this face that were as close to where we started\n # as the destination was, so we must have passed it.\n print(\"BAILING: n1=%s n2=%s v1=%s v2=%s\"%(n1,n2,v1,v2))\n raise Exception(\"Yikes - line_walk_edges exposed its numerical issues. We traversed too far.\")\n return edges\n\n edge,new_face = self.next_face(f_trav,pnt,vec)\n\n edges.append(edge)\n\n f_trav = new_face\n return edges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields tuples of (name, ndex)
def yield_all_name_ndex(self):
    yield from self._yield_all_name_ndex(self.first)
[ "def _yield_all_name_ndex(self, current_node):\n\n current_node = current_node\n\n yield current_node.pokemon_name, current_node.ndex\n\n for next_evolution in current_node.next:\n yield from self._yield_all_name_ndex(next_evolution)", "def test_namedtuple_tuple(self):\n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg((7, 9), (18, 4), (6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg((0, 0), (0, 0), (0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg((8, 10), (19, 5), (7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg((24, 30), (57, 15), (21, 36)))", "def generate_names(args):\n index = 0\n for _, decl in args:\n index += 1\n yield decl.name or 'arg{0}'.format(index)", "def iter_named_tuples( df):\n Row = namedtuple('Item', df.columns)\n for row in df.itertuples():\n yield Row(*row[1:])", "def test_tuple_namedtuple(self):\n with mn.model() as m:\n Baz = mn.variable('Baz', \n (self.site(7, 9), self.site(18, 4), self.site(6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n (self.site(0, 0), self.site(0, 0), self.site(0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n (self.site(8, 10), self.site(19, 5), self.site(7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n (self.site(24, 30), self.site(57, 15), self.site(21, 36)))", "def iter_named_tuples(df):\n Row = namedtuple('Item', df.columns)\n for row in df.itertuples():\n yield Row(*row[1:])", "def getNodeWildIter(self, name, *usage):\n for nid in self._getNodeWildIter(name,*usage):\n yield TreeNode(nid,self)", "def __iter__(self):\n for doc in self.documents:\n yield namedtuple('TaggedDocument',doc.keys())(**doc)", "def yield_vnfd_vnfr_pairs(self, proxy, nsr=None):\n def get_vnfd(vnfd_id):\n xpath = (\n \"/rw-project:project[rw-project:name='default']/\" +\n \"vnfd-catalog/vnfd[id={}]\".format(quoted_key(vnfd_id)))\n return proxy(RwProjectVnfdYang).get(xpath)\n\n vnfr = (\n \"/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr\")\n vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)\n for vnfr in vnfrs.vnfr:\n\n if nsr:\n const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]\n if vnfr.id not in const_vnfr_ids:\n continue\n\n vnfd = get_vnfd(vnfr.vnfd.id)\n yield vnfd, vnfr", "def variableNameGenerator() -> Iterator[str]:\n\n def f():\n temp_idx = -1\n while True:\n temp_idx += 1\n yield f\"t_{temp_idx}\"\n\n return iter(f())", "def iter_ngrams(tokens, n = 2):\n history = []\n for token in tokens:\n history.append(token)\n if len(history) == n:\n ngram = tuple(history)\n history.pop(0)\n yield ngram", "def test_iterNonTips(self):\n tree = self.TreeRoot\n self.assertEqual([i.Name for i in tree.iterNontips()], list('bcf'))", "def get_fields(self):\n for field_index in xrange(self.num_fields):\n yield dex_field(self, field_index)", "def iteritems(self):\n for name, job in self._regex_extractors(): yield name, job", "def _named_items(self, xpath, select=None):\n if select and not isinstance(select, (list, tuple)):\n select = [select]\n for item in self.tree.findall(xpath):\n try:\n name = item.attrib[\"name\"]\n if select:\n if name in select:\n yield name, item\n else:\n yield name, item\n except KeyError:\n pass", "def iteritems(self):\n for seq in self:\n yield seq.identifier, seq", "def __iter__(self) -> Generator[tuple[str, str, Types], None, None]:\n for cluster, namespaces in self._inv.items():\n for namespace, types in namespaces.items():\n yield cluster, 
namespace, types", "def antenna_iter(ms_name, columns, **kwargs):\n logger.debug(\"Creating antenna iterable\")\n\n taql_where = kwargs.get(\"taql_where\", \"\")\n table_schema = kwargs.get(\"table_schema\", None)\n chunks = kwargs.get(\"chunks\", 5000)\n\n # Shall be prepended to the later antenna selection\n if taql_where:\n taql_where += \" && \"\n\n outp = []\n # ant_names = vu.get_antennas(ms_name).values.tolist()\n n_ants = vu.get_antennas(ms_name).size\n n_spws = vu.get_frequencies(ms_name).row.size\n\n for d in range(n_spws):\n for a in range(n_ants):\n\n logger.debug(f\"Spw: {d}, antenna: {a}\")\n\n sel_str = taql_where + \\\n f\"ANTENNA1=={a} || ANTENNA2=={a} && DATA_DESC_ID=={d}\"\n\n # do not group to capture all the data\n sub = xm.xds_from_ms(ms_name,\n taql_where=sel_str,\n table_schema=table_schema,\n chunks=chunks, columns=columns,\n group_cols=[])[0]\n\n # Add the selection attributes\n sub.attrs = dict(ANTENNA=a, DATA_DESC_ID=d)\n\n outp.append(sub)\n\n logger.debug(\"Done\")\n return outp", "def description(ntuples, binaryTuples):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursive function to yield tuples of (name, ndex)
def _yield_all_name_ndex(self, current_node):
    current_node = current_node
    yield current_node.pokemon_name, current_node.ndex
    for next_evolution in current_node.next:
        yield from self._yield_all_name_ndex(next_evolution)
[ "def yield_all_name_ndex(self):\n yield from self._yield_all_name_ndex(self.first)", "def generate_names(args):\n index = 0\n for _, decl in args:\n index += 1\n yield decl.name or 'arg{0}'.format(index)", "def getNodeWildIter(self, name, *usage):\n for nid in self._getNodeWildIter(name,*usage):\n yield TreeNode(nid,self)", "def recgen(n):\n if n<=1:\n return ['()']\n l1 = recgen(n-1)\n l2=[]\n for par in l1:\n p1='()'+par\n p2='('+par+')'\n p3=par+'()'\n l2.append(p1)\n l2.append(p2)\n if p1 != p3:\n l2.append(p3)\n return l2", "def deal_with_adj_taxa_with_same_names(itd):\n id_to_parent = itd.to_par\n id_to_children = itd.to_children\n id_to_rank = itd.to_rank\n id_to_name = itd.to_name\n names_to_ids = itd.name_to_ids\n synonyms = itd.synonyms\n repeated_names = itd.repeated_names\n suppressed_ids = {}\n renamed_ids = set()\n for name in repeated_names:\n ids_with_this_name = names_to_ids[name]\n assert isinstance(ids_with_this_name, list) and len(ids_with_this_name) > 1\n adj_same_named_ids = []\n for i in ids_with_this_name:\n par_id = id_to_parent.get(i)\n if par_id and par_id in ids_with_this_name:\n insert_loc = len(adj_same_named_ids)\n for ind, el in enumerate(adj_same_named_ids):\n if el[1] == par_id:\n insert_loc = ind\n break\n adj_same_named_ids.insert(insert_loc, (par_id, i))\n for par_id, child_id in adj_same_named_ids:\n pr = id_to_rank.get(par_id)\n if pr and pr.lower() == 'genus':\n # Change the child's name\n cr = id_to_rank.get(child_id, '')\n nn = '{} {} {}'.format(name, cr, name)\n assert nn not in names_to_ids # could happen, but ugh...\n names_to_ids[nn] = child_id\n id_to_name[child_id] = nn\n try:\n ids_with_this_name.remove(child_id)\n except:\n pass\n renamed_ids.add(child_id)\n else:\n # suppress the child\n suppressed_ids[child_id] = par_id\n c_list = id_to_children.get(child_id, [])\n pc_list = id_to_children.get(par_id)\n pc_list.remove(child_id)\n pc_list.extend(c_list)\n for gc in c_list:\n id_to_parent[gc] = par_id\n for syn_el in synonyms.get(child_id, []):\n synonyms.setdefault(par_id, []).append(syn_el)\n itd.details_log['ids_suppressed_because_same_name_as_par'] = suppressed_ids\n ril = list(renamed_ids)\n ril.sort()\n itd.details_log['names_decorated_because_same_name_as_par'] = ril", "def test_namedtuple_tuple(self):\n with mn.model() as m:\n Baz = mn.variable('Baz', \n self.drg((7, 9), (18, 4), (6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n self.drg((0, 0), (0, 0), (0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n self.drg((8, 10), (19, 5), (7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n self.drg((24, 30), (57, 15), (21, 36)))", "def get_pair_children(self):", "def get_ngrams(text: Iterable[str], n: int) -> Generator[tuple[str, ...], None, None]:\n if n <= 0:\n return None\n\n if n == 1:\n # (word,) is a tuple with just one element, without the comma it'd be a string\n yield from ((word,) for word in text)\n return None\n\n try:\n iterator = iter(text)\n previous = [next(iterator) for _ in range(n - 1)]\n except StopIteration:\n # If the text is shorter than the number of words per shingle, yield nothing\n return None\n\n for word in iterator:\n previous.append(word)\n yield tuple(previous)\n del previous[0]", "def _get_name(self, n):\n self.index += 1\n return n.declname or 'arg{0}'.format(self.index)", "def get_deepest_match(self, name):\n\n depth = len(name)\n if depth > self.max_depth:\n depth = self.max_depth\n for i in xrange(-depth, 0):\n n = dns.name.Name(name[i:])\n if 
self.has_key(n):\n return (n, self[n])\n v = self[dns.name.empty]\n return (dns.name.empty, v)", "def gen_addrs(addr):\n i = addr.find(\"X\")\n if i == -1:\n yield int(addr, 2)\n return\n a = list(addr)\n a[i] = \"0\"\n for x in gen_addrs(\"\".join(a)):\n yield x\n b = list(addr)\n b[i] = \"1\"\n for x in gen_addrs(\"\".join(b)):\n yield x", "def iter_ngrams(tokens, n = 2):\n history = []\n for token in tokens:\n history.append(token)\n if len(history) == n:\n ngram = tuple(history)\n history.pop(0)\n yield ngram", "def _named_items(self, xpath, select=None):\n if select and not isinstance(select, (list, tuple)):\n select = [select]\n for item in self.tree.findall(xpath):\n try:\n name = item.attrib[\"name\"]\n if select:\n if name in select:\n yield name, item\n else:\n yield name, item\n except KeyError:\n pass", "def test_tuple_namedtuple(self):\n with mn.model() as m:\n Baz = mn.variable('Baz', \n (self.site(7, 9), self.site(18, 4), self.site(6, 11)))\n Corge = mn.stock('Corge',\n mn.foreach(mn.foreach(lambda x: x+1)), ('Baz',),\n (self.site(0, 0), self.site(0, 0), self.site(0, 0)))\n m.step()\n self.assertEqual(\n Corge[''], \n (self.site(8, 10), self.site(19, 5), self.site(7, 12)))\n m.step(2)\n self.assertEqual(\n Corge[''], \n (self.site(24, 30), self.site(57, 15), self.site(21, 36)))", "def gen_alternates_recurse(pkgdesc):\n assert(isinstance(pkgdesc, list))\n if len(pkgdesc) <= 1:\n yield ''.join(pkgdesc)\n else:\n prefix = pkgdesc[0]\n alternates = pkgdesc[1]\n pkgdesc = pkgdesc[2:]\n for alt in alternates:\n for x in gen_alternates_recurse(pkgdesc):\n yield prefix + alt + x", "def horn(n):\n if n == 0:\n yield 'o', ()\n else:\n for k in range(0, n):\n for f, l in horn(k):\n for g, r in horn(n - 1 - k):\n yield g, ((f, l),) + r", "def genenames_from10x(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[-1]:])\n \n return genesymbol#,ensemblid", "def _clean_names(self, names):\n for n in names:\n definition = n.parent\n if isinstance(definition, (tree.Function, tree.Class, tree.Module)):\n yield self._evaluator.wrap(definition).name\n else:\n yield n", "def variableNameGenerator() -> Iterator[str]:\n\n def f():\n temp_idx = -1\n while True:\n temp_idx += 1\n yield f\"t_{temp_idx}\"\n\n return iter(f())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of feet in the given number of miles.
def miles_to_feet(miles):
    return miles * 5280
[ "def inches(feets):\n return 12*feets", "def height_US_to_centimeters(feet, inches):\r\n total_inches = (feet * inches_per_foot) + inches # Total inches\r\n centimeters = total_inches * centimeters_per_inch\r\n return centimeters", "def numTiles(wRoom,lRoom,sTile):\n sizeRoom = wRoom * lRoom\n return math.ceil(sizeRoom/(sTile*sTile))", "def convert_inches_to_feet(inches):\r\n\r\n feet = inches / 12\r\n return feet", "def miles_km(miles):\n if not isinstance(miles, int) or isinstance(miles, float):\n raise ValueError(\"Cannot convert \\\"{}\\\" of type: \\\"{}\\\" to miles.\".format(miles, type(miles)))\n return 1.60934 * miles", "def _get_num_units(self, value):\n large_units = value/self._scale\n return int(large_units)", "def inchesToMeters(inches):\n return(inches * .0254)", "def convert_inches_to_feet(num_to_convert):\n return c(num_to_convert / 12) + \" feet\"", "def n_quantiles(self):\n return self.n_quantiles_", "def get_dozens (total_eggs):\n dozens_ordered = total_eggs /12\n dozens_ordered = math.ceil(dozens_ordered) #So that the amount of dozens cover all the eggs\n print(\"Dozens required: {}\".format(dozens_ordered))", "def toFeet(value, unit):\n newValue = 0.0\n\n if unit == Units.INCHES:\n newValue = value / 12\n elif unit == Units.FEET:\n newValue = value\n elif unit == Units.CENTIMETERS:\n newValue = value / 30.48\n elif unit == Units.METERS:\n newValue = value * 3.281\n else:\n newValue = -1\n\n return newValue", "def miles_to_km(miles):\n\n km = miles * 1.609\n\n return km", "def number_of_ways_for_the_frog_to_jump_n_feet(n):\n global total\n if n == 0:\n return 1\n if n < 0:\n return 0\n\n return number_of_ways_for_the_frog_to_jump_n_feet(n - 1) + number_of_ways_for_the_frog_to_jump_n_feet(n - 2)", "def _calculate_cell_size(avenues, streets):\n cell_width = constants.MAX_WIDTH_PIXELS / float(avenues + 2)\n cell_height = constants.MAX_HEIGHT_PIXELS / float(streets + 1)\n smallest = min(cell_width, cell_height)\n\n # find highest power of two less than smallest\n cell_size = 16\n while cell_size * 2 < smallest:\n cell_size *= 2\n\n # special case to allow 24-pixel cell size\n if cell_size == 16 and 24 < smallest:\n cell_size = 24\n\n return cell_size", "def get_total_miles(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.quest.start_date\n return self.user.profile.get_total_miles(start_date=start_date, \n end_date=end_date)", "def convert_feet_to_inches(num_to_convert):\n return c(num_to_convert * 12) + \" inches\"", "def milesOnFillup(graph, f, prevFillup):\n val = graph.value\n try:\n # prefer odometer since it should have less per-reading error\n miles = (int(val(f, GAS['odometer'])) -\n int(val(prevFillup, GAS['odometer'])))\n if miles < 1 or miles > 600:\n raise ValueError(miles)\n return miles\n except (ValueError, TypeError), e:\n return val(f, GAS['tripMeter'])", "def num_chunks(self, chunk_size):\n Nfn = lambda i: math.ceil((self.maxpt[i] - self.minpt[i]) / chunk_size[i])\n return reduce(operator.mul, map(Nfn, range(len(self.minpt))))", "def n_cells(self):\n return int(np.prod(self._n))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan the target CIDR address using Nmap. The "-A" flag is used by default, and the raw XML output is saved to a file.
def nmap_scan(outDir, address, flags="-A"):
    nmap = {}
    nmap = nm.PortScanner()
    nmap.scan(address, arguments=flags)
    with open(outDir + "nmap.xml", 'w') as outFile:
        outFile.write(nmap.get_nmap_last_output())
    return nmap
[ "def scan_net(sub_net):\n sub_net = str(sub_net)\n list_host = []\n str_nmap = subprocess.run([\"nmap\", \"-sP\", sub_net],capture_output=True)\n str_nmap = str_nmap.stdout.decode(\"utf-8\")\n arr_host = str_nmap.split(\"Nmap scan report for\")\n del arr_host[0]\n active_hosts = map(filter_address, arr_host)\n for host in active_hosts: \n list_host.append(host)\n return list_host", "def scan(self, option):\n proc = NmapProcess(self.net, option)\n ret = proc.run()\n if ret == 0:\n try:\n parsed = NmapParser.parse(proc.stdout)\n output = self._extract_data(parsed)\n return (True, output)\n except NmapParserException:\n return (False, None)\n else:\n return (False, None)", "def Scan(self):\n ntop = nmapParser.NmapParserFunk(self.target)\n ntop.openPorts()\n np = nmapParser.NmapParserFunk(self.target)\n np.allOpenPorts()\n ftpPorts = list(sorted(set(merge(np.ftp_ports, ntop.ftp_ports))))\n smtpPorts = list(sorted(set(merge(ntop.smtp_ports, np.smtp_ports))))\n nfsPorts = list(sorted(set(merge(ntop.nfs_ports, np.nfs_ports))))\n rpcPorts = list(sorted(set(merge(ntop.rpc_ports, np.rpc_ports))))\n telnetPorts = list(sorted(set(merge(ntop.telnet_ports, np.telnet_ports))))\n sipPorts = list(sorted(set(merge(ntop.sip_ports, np.sip_ports))))\n vncPorts = list(sorted(set(merge(ntop.vnc_ports, np.vnc_ports))))\n cupsPorts = list(sorted(set(merge(ntop.cups_ports, np.cups_ports))))\n javaRmiPorts = list(sorted(set(merge(ntop.java_rmi_ports, np.java_rmi_ports))))\n mssqlPorts = list(sorted(set(merge(ntop.mssql_ports, np.mssql_ports))))\n mysqlPorts = list(sorted(set(merge(ntop.mysql_ports, np.mysql_ports))))\n cassandraPorts = list(sorted(set(merge(ntop.cassandra_ports, np.cassandra_ports))))\n mongoPorts = list(sorted(set(merge(ntop.mongo_ports, np.mongo_ports))))\n pop3Ports = list(sorted(set(merge(ntop.pop3_ports, np.pop3_ports))))\n kerberosPorts = list(sorted(set(merge(ntop.kerberos_ports, np.kerberos_ports))))\n fingerPorts = list(sorted(set(merge(ntop.finger_ports, np.finger_ports))))\n tcpPorts = list(sorted(set(merge(ntop.tcp_ports, np.tcp_ports))))\n string_tcp_ports = \",\".join(map(str, tcpPorts))\n unp = nmapParser.NmapParserFunk(self.target)\n unp.openUdpPorts()\n snmpPorts = unp.snmp_ports\n ikePorts = unp.ike_ports\n c = config_parser.CommandParser(f\"{os.getcwd()}/config/config.yaml\", self.target)\n unsorted_commands = []\n unsorted_commands.append(c.getCmd(\"nmap\", \"nmapVulners\", openTcpPorts=string_tcp_ports))\n if len(snmpPorts) != 0:\n if not os.path.exists(c.getPath(\"snmp\", \"snmpDir\")):\n os.makedirs(c.getPath(\"snmp\", \"snmpDir\"))\n unsorted_commands.append(c.getCmd(\"snmp\", \"snmpwalk\"))\n unsorted_commands.append(c.getCmd(\"snmp\", \"snmpCheck\"))\n unsorted_commands.append(c.getCmd(\"snmp\", \"onesixtyone\"))\n if len(ikePorts) != 0:\n unsorted_commands.append(c.getCmd(\"ike\", \"ikescan\"))\n unsorted_commands.append(c.getCmd(\"ike\", \"ikescan4500\"))\n unsorted_commands.append(c.getCmd(\"ike\", \"nmapIke\"))\n if len(ftpPorts) != 0:\n string_ftp_ports = \",\".join(map(str, ftpPorts))\n unsorted_commands.append(c.getCmd(\"ftp\", \"nmapFtp\", ftpPorts=string_ftp_ports))\n if len(fingerPorts) != 0:\n if not os.path.exists(c.getPath(\"finger\", \"fingerDir\")):\n os.makedirs(c.getPath(\"finger\", \"fingerDir\"))\n for p in fingerPorts:\n unsorted_commands.append(c.getCmd(\"finger\", \"fingerUserEnum\", p=p))\n if len(smtpPorts) != 0:\n if not os.path.exists(c.getPath(\"smtp\", \"smtpDir\")):\n os.makedirs(c.getPath(\"smtp\", \"smtpDir\"))\n for p in smtpPorts:\n 
unsorted_commands.append(c.getCmd(\"smtp\", \"smtpUserEnum\", p=p))\n if len(nfsPorts) != 0:\n if not os.path.exists(c.getPath(\"nfs\", \"nfsDir\")):\n os.makedirs(c.getPath(\"nfs\", \"nfsDir\"))\n string_nfs_ports = \",\".join(map(str, nfsPorts))\n unsorted_commands.append(c.getCmd(\"nfs\", \"nmapNfs\", nfsPorts=string_nfs_ports))\n unsorted_commands.append(c.getCmd(\"nfs\", \"showmount\"))\n if len(rpcPorts) != 0:\n if not os.path.exists(c.getPath(\"rpc\", \"rpcDir\")):\n os.makedirs(c.getPath(\"rpc\", \"rpcDir\"))\n if not os.path.exists(c.getPath(\"smb\", \"smbScan\")):\n unsorted_commands.append(c.getCmd(\"rpc\", \"enum4linuxRpc\"))\n if which(\"impacket-rpcdump\"):\n unsorted_commands.append(c.getCmd(\"rpc\", \"rpcdump\"))\n if len(cupsPorts) != 0:\n string_cups_ports = \",\".join(map(str, cupsPorts))\n unsorted_commands.append(c.getCmd(\"cups\", \"nmapCups\", cupsPorts=string_cups_ports))\n if len(javaRmiPorts) != 0:\n string_java_rmi_ports = \",\".join(map(str, javaRmiPorts))\n unsorted_commands.append(c.getCmd(\"java\", \"javaRmiDump\", javarmiPorts=string_java_rmi_ports))\n unsorted_commands.append(c.getCmd(\"java\", \"javaRmiVulns\", javarmiPorts=string_java_rmi_ports))\n if len(sipPorts) != 0:\n if not os.path.exists(c.getPath(\"sip\", \"sipDir\")):\n os.makedirs(c.getPath(\"sip\", \"sipDir\"))\n string_sip_ports = \",\".join(map(str, sipPorts))\n unsorted_commands.append(c.getCmd(\"sip\", \"nmapSip\", sipPorts=string_sip_ports))\n unsorted_commands.append(c.getCmd(\"sip\", \"svwar\"))\n if len(vncPorts) != 0:\n string_vnc_ports = \",\".join(map(str, vncPorts))\n unsorted_commands.append(c.getCmd(\"vnc\", \"nmapVnc\", vncPorts=string_vnc_ports))\n if len(telnetPorts) != 0:\n string_telnet_ports = \",\".join(map(str, telnetPorts))\n unsorted_commands.append(c.getCmd(\"telnet\", \"nmapTelnet\", telnetPorts=string_telnet_ports))\n if len(cassandraPorts) != 0:\n string_cassandra_ports = \",\".join(map(str, cassandraPorts))\n unsorted_commands.append(c.getCmd(\"cassandra\", \"nmapCassandra\", cassandraPorts=string_cassandra_ports))\n if len(mssqlPorts) != 0:\n string_mssql_ports = \",\".join(map(str, mssqlPorts))\n unsorted_commands.append(c.getCmd(\"mssql\", \"nmapMssql\", mssqlPorts=string_mssql_ports, mssqlPort=mssqlPorts[0]))\n if len(mysqlPorts) != 0:\n string_mysql_ports = \",\".join(map(str, mysqlPorts))\n unsorted_commands.append(c.getCmd(\"mysql\", \"nmapMysql\", mysqlPorts=string_mysql_ports))\n if len(mongoPorts) != 0:\n string_mongo_ports = \",\".join(map(str, mongoPorts))\n unsorted_commands.append(c.getCmd(\"mongodb\", \"nmapMongo\", mongoPorts=string_mongo_ports))\n if len(pop3Ports) != 0:\n string_pop3_ports = \",\".join(map(str, pop3Ports))\n unsorted_commands.append(c.getCmd(\"pop3\", \"nmapPop3\", popPorts=string_pop3_ports))\n if len(kerberosPorts) != 0:\n string_kerberos_ports = \",\".join(map(str, kerberosPorts))\n unsorted_commands.append(c.getCmd(\"kerberos\", \"nmapKerberos\", kerberosPorts=string_kerberos_ports))\n\n set_sorted_cmds = sorted(set(unsorted_commands))\n cmds_to_run = []\n for i in set_sorted_cmds:\n cmds_to_run.append(i)\n self.processes = tuple(cmds_to_run)", "def ndvi_map(red_img, nir_img):\n global CMAP\n #calculate NDVI values pixel-wise and scale to 0-255\n #####ndvi = ne.evaluate(\"(nir_img - red_img)/(nir_img+red_img)\")\n #min_ndvi = np.min(ndvi)\n #idx = ne.evaluate(\"((ndvi - min_ndvi)*128)\").astype('uint8')\n #####idx = ne.evaluate(\"((ndvi + 1)*128)\").astype('uint8')\n #idx = (((nir_img - red_img) / (nir_img+red_img) + 
1)*128).astype('uint8')\n idx = ne.evaluate(\"(((nir_img - red_img) / (nir_img+red_img) + 1)*128)\").astype('uint8')\n\n return CMAP[idx], 0#int(np.mean(np.nan_to_num(ndvi))*1000)/1000", "def ndvi_map2(red_img, nir_img):\n global CMAP\n #calculate NDVI values pixel-wise and scale to 0-255\n ndvi = ne.evaluate(\"(nir_img - red_img)/(nir_img+red_img)\")\n #min_ndvi = np.min(ndvi)\n #idx = ne.evaluate(\"((ndvi - min_ndvi)*128)\").astype('uint8')\n idx = ne.evaluate(\"((ndvi + 1)*128)\").astype('uint8')\n #idx = (((nir_img - red_img) / (nir_img+red_img) + 1)*128).astype('uint8')\n #idx = ne.evaluate(\"(((nir_img - red_img) / (nir_img+red_img) + 1)*128)\").astype('uint8')\n\n return CMAP[idx], int(np.mean(np.nan_to_num(ndvi)[ndvi>0])*1000)/1000", "def main():\n from Remap import Remap\n from IPScanner import IPScanner\n print(\"Testing Remap\\n\")\n source_ip = \"192.168.1.111\"\n net_addr = \"192.168.1.0\"\n remapper = Remap(source_ip,net_addr)\n print(remapper.get_available_hosts())\n pkt = rdpcap(\"testFiles/test00.pcapng\")[1]\n remapper.remap(pkt)", "def find_with_ip():\n state_filter = \" nud \" + \" nud \".join(HOME_STATES.values()).lower()\n cmd = f\"ip neigh show {state_filter}\".split()\n neighbours = subprocess.run(cmd, shell=False, capture_output=True, text=True)\n neighbours_ip = [_.split()[0] for _ in neighbours.stdout.splitlines()]\n return neighbours_ip", "def connect_nmap(domain):\n nmap = nmap3.Nmap()\n sub_nmap = nmap.nmap_dns_brute_script(domain)\n return sub_nmap", "def scan(self, subnets):\n Node.idGenerator = 0\n dal = DAL.DAL()\n engine = ReasoningEngine.ReasoningEngine()\n nodes = []\n rules = []\n vulens = []\n nm = nmap.PortScanner()\n logging.info('Start Scanning Network Details ...'.format())\n # nm.scan('192.168.1.0/24', '0-65535', arguments='-sS -A -sV')\n for subnet in subnets:\n ip = subnet.ipRange\n try:\n nm.scan(ip, '0-65535', arguments='-sS -A -sV')\n except:\n e = ''\n destination = []\n external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')\n destination.append(external_ip)\n inet = Node.Node(\"Internet\", \"Internet\", \"Attacker's location\", [], 'inet')\n nodes.append(inet)\n for host in nm.all_hosts():\n name = nm[host]['hostnames'][0]['name']\n ip = nm[host]['addresses']['ipv4']\n if name == '':\n name = ip\n destination.append(name)\n for host in nm.all_hosts():\n # print('----------------------------------------------------')\n # print('Host : %s ' % nm[host]['hostnames'][0]['name'])\n # print('IP : %s ' % nm[host]['addresses']['ipv4'])\n # print('State : %s' % nm[host]['status']['state'])\n name = nm[host]['hostnames'][0]['name']\n ip = nm[host]['addresses']['ipv4']\n if name == '':\n name = ip\n label = ''\n desc = ''\n try:\n if nm[host]['osmatch']:\n for os in nm[host]['osmatch']:\n #print('Name : %s' % os['name'])\n label = os['name']\n desc = os['osclass'][0]['type']\n except:\n # print(\"No osmatch\")\n e = ''\n lport = []\n try:\n protocols.append(nm[host]['tcp'])\n except:\n # print('No TCP')\n e = ''\n try:\n protocols.append(nm[host]['udp'])\n except:\n # print('No UDP')\n e = ''\n vuls = []\n ports = []\n protocols = ['tcp', 'udp']\n for protocol in protocols:\n lport = list(nm[host]['tcp'].keys())\n sorted(lport)\n for port in lport:\n if nm[host]['tcp'][port]['state'] == 'open':\n state = nm[host]['tcp'][port]['state']\n name = nm[host]['tcp'][port]['name']\n version = nm[host]['tcp'][port]['version']\n cve = dal.getVulnerabilityByName(name, version)\n if cve is not None:\n cve_id = 
cve['cve']['CVE_data_meta']['ID']\n cve_desc = cve['cve']['description']['description_data'][0]['value']\n impact = cve['impact']['baseMetricV2']['cvssV2']['availabilityImpact']\n cve_av = cve['impact']['baseMetricV2']['cvssV2']['accessVector']\n auth = cve['impact']['baseMetricV2']['cvssV2']['authentication']\n priv = ''\n try:\n priv = cve['impact']['baseMetricV3']['cvssV3']['privilegesRequired']\n except:\n # print(\"No cvss3\")\n e = ''\n if priv != '':\n auth = ''\n cve_requires = engine.getPrivPost(cve_desc, cve_av, auth, priv)\n cve_provides = engine.getPrivPre(cve_desc, impact, cve_requires)\n tempVul = Vulnerability(cve_id, '', cve_desc, cve_requires, cve_provides, cve_av)\n vuls.append(tempVul)\n vulens.append(tempVul)\n ports.append(port)\n # print('port : %s\\tstate : %s\\tname: %s\\tversion : %s' % (port, state, name, version))\n rule = {'source': [name], 'destination': destination, 'ports': ports}\n rules.append(rule)\n node = Node.Node(name, label, desc, vuls, name)\n nodes.append(node)\n # print('----------------------------------------------------')\n # print(nm.csv())\n logging.info('Finished Scanning Network Details ...'.format())\n reachability = Reachability.Reachability(rules)\n network = {'nodes': nodes, 'vulnerabilities': vulens, 'reachabilities': [], 'subnets': subnets}\n network['reachabilities'].append(reachability)\n return network", "def _ip_addr_mapping(self, stix_data, full_block_size):\n\n cidr_parts = stix_data.get('value', '').split('/')\n cidr_suffix = cidr_parts[1] if len(cidr_parts) > 1 else str(full_block_size)\n if cidr_suffix == str(full_block_size):\n return {\n 'type': 'Address',\n 'ip': '@.value',\n 'confidence': '@.confidence',\n }\n return {\n 'confidence': '@.confidence',\n 'type': 'CIDR',\n 'block': '@.value',\n }", "def add_wildcard_ip(network: str, store_hosts: bool = True):\n\n def get_nmap_xml_hosts():\n \"\"\" Call Nmap and return all XML host elements as ElementTree nodes\"\"\"\n nonlocal nmap_call, file\n devnull_fd = open(os.devnull)\n subprocess.call(nmap_call.split(\" \"), stdout=devnull_fd, stderr=subprocess.STDOUT)\n nm_xml_tree = ET.parse(file.name)\n nmaprun_elem = nm_xml_tree.getroot()\n devnull_fd.close()\n return nmaprun_elem.findall(\"host\")\n\n # if network expression has already been parsed before\n if network in PARSED_NETWORK_EXPRS:\n if len(PARSED_NETWORK_EXPRS[network]) > 1: # hosts are already stored\n return True\n if not store_hosts:\n return True\n\n hosts = []\n host_ranges = []\n prev_ip = None\n\n # call Nmap and parse its host output\n with tempfile.NamedTemporaryFile() as file:\n # first try to parse as IPv4 address\n nmap_call = \"nmap -n -sL -oX %s %s\" % (file.name, network)\n host_elems = get_nmap_xml_hosts()\n\n if not host_elems: # nmap could not parse IPv4 network expression\n # try to parse as IPv6 network expression\n nmap_call += \" -6\"\n host_elems = get_nmap_xml_hosts()\n if not host_elems:\n return False\n\n for host_elem in host_elems:\n ip = host_elem.find(\"address\").attrib[\"addr\"]\n if not host_ranges:\n host_ranges.append([ip, ip])\n elif prev_ip is not None:\n # if network expression does not yield continuous block of IPs\n if ip_str_to_int(ip) != (ip_str_to_int(prev_ip) + 1):\n host_ranges[-1][1] = prev_ip\n host_ranges.append([ip, ip])\n\n if store_hosts:\n hosts.append(ip)\n prev_ip = ip\n\n if host_ranges:\n host_ranges[-1][1] = prev_ip # close last IP range\n if store_hosts:\n PARSED_NETWORK_EXPRS[network] = (hosts, host_ranges)\n else:\n PARSED_NETWORK_EXPRS[network] = 
(host_ranges)\n return True", "def test_ip_addresses_read(self):\n pass", "def fetch_nat_nopat_address(self, device, **kwargs):\n all_entry_list = self._common_get_processing(device=device, cmd_keyword=\"nat-nopat-address\", kwargs=kwargs)\n device.log(message=\"{} return value:\\n{}\".format(self.tool.get_current_function_name(), self.tool.pprint(all_entry_list)))\n return all_entry_list", "def netmask_to_cidr(m_netmask):\n return(sum([ bin(int(bits)).count(\"1\") for bits in m_netmask.split(\".\") ]))", "def autoscan_address(address):\n\n try:\n network = Network.from_ip(address)\n except IndexError:\n raise NoQueueError(\n \"Address {0} doesn't belong to any configured \"\n \"network.\".format(address),\n )\n if not network.queue:\n raise NoQueueError(\n \"The network {0} has no discovery queue.\".format(network),\n )\n queue_name = network.queue.name\n queue = django_rq.get_queue(queue_name)\n queue.enqueue_call(\n func=_autoscan_group,\n args=([address],),\n timeout=60,\n result_ttl=0,\n )", "def findNatsubNetwork():\n ipsubnet = \"192.168.\"\n i = 10\n while True:\n cmdstatus, cmdoutput = commands.getstatusoutput(\"/sbin/ifconfig -a | /bin/grep -w inet | /bin/awk -F' ' '{print $2}' | grep '%s%s' \" % (ipsubnet.replace('.', '\\.'), str(i) + '\\.'))\n if cmdstatus:\n break\n else:\n i += 2\n return [ipsubnet + str(i) + sub for sub in [\".1\", \".2\", \".254\" ]]", "def _lookup_rid(self):\n for node in self.nodes_dict.values():\n node_ip = node[\"top_label\"]\n if node_ip in self.ip_lookup_data:\n node.update(\n {\n k: v\n for k, v in self.ip_lookup_data[node_ip].items()\n if k in [\"top_label\", \"bottom_label\", \"label\"]\n }\n )", "def getAllroutes(asn, add_query_params=''):\n #ipaddress.IPv4Network, ipaddress.IPv6Network\n results = ASNOrigin.lookup(asn, add_query_params=add_query_params)\n return [ipaddress.ip_network(_net['cidr']) for _net in results['nets']]", "def ipn(ra,dec,start,end):\n c = SkyCoord(ra,dec,unit='deg')\n window = [Time(start, format=\"jd\"), Time(end, format=\"jd\")]\n\n print(\"CONDUCTING SEARCH OF IPN CATALOG\")\n\n # Make sure that each day is represented\n window_grid = Time(\n np.arange(window[0].jd, window[-1].jd+1, 0.5), format='jd')\n searchstr = [get_searchstr(t) for t in window_grid]\n searchstr = np.unique(np.array(searchstr))\n\n # Pull out the relevant lines\n fpath = \"http://www.ssl.berkeley.edu/ipn3/masterli.txt\"\n fname = fpath.split('/')[-1]\n if os.path.exists(fname) is False:\n subprocess.call(['wget', 'http://www.ssl.berkeley.edu/ipn3/masterli.txt'])\n lines = np.array(open(fname, \"r\").readlines())\n header = lines[np.array([' DOY TIME ' in l for l in lines])][0]\n keep = np.array([l[7:16] in searchstr for l in lines])\n\n # Now, check each one to see if the time is correct\n final_set = []\n for l in lines[keep]:\n filtered_l = [i for i in l.split(\" \") if i]\n burst_dd = filtered_l[0].split('.')[1]\n burst_mm = str(strptime(filtered_l[1], '%b').tm_mon).zfill(2)\n burst_yy = str(filtered_l[2]).zfill(2)\n burst_time = filtered_l[4]\n burst_datetime = Time(\n '20%s-%s-%sT%s' %(burst_yy,burst_mm,burst_dd,burst_time), \n format='isot')\n if np.logical_and(\n burst_datetime >= window[0], burst_datetime <= window[-1]):\n final_set.append(l)\n\n print(\"There are %s bursts in the %s-day window\" %(\n len(final_set), window[-1]-window[0]))\n\n # Check which spacecraft observed these bursts\n for l in final_set:\n det_by = np.array(\n [header[i.start():i.end()] for i in re.finditer('YES', l)])\n print(det_by)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiates a NASNet model. Optionally loads weights pretrained on ImageNet. This model is available for TensorFlow only, and can only be used with inputs following the TensorFlow data format `(width, height, channels)`. You should set `image_data_format='channels_last'` in your Keras config located at ~/.keras/keras.json. Note that the default input image size for this model is 331x331. Arguments
def NASNet_large_do(net_type, include_top=True, do_rate=0.3, weights='imagenet', input_tensor=None, input_shape=None, total_training_steps=None, penultimate_filters=4032, num_blocks=6, stem_block_filters=96, skip_reduction=True, filter_multiplier=2, pooling=None, classes=1000): if not (weights in {'imagenet', None} or os.path.exists(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as imagenet with `include_top`' ' as true, `classes` should be 1000') if K.backend() != 'tensorflow': raise RuntimeError('The Xception model is only available with ' 'the TensorFlow backend.') if K.image_data_format() != 'channels_last': warnings.warn('The NASNet model is only available for the ' 'input data format "channels_last" ' '(width, height, channels). ' 'However your settings specify the default ' 'data format "channels_first" (channels, width, height). ' 'You should set `image_data_format="channels_last"` in your Keras ' 'config located at ~/.keras/keras.json. ' 'The model being returned right now will expect inputs ' 'to follow the "channels_last" data format.') K.set_image_data_format('channels_last') old_data_format = 'channels_first' else: old_data_format = None # Determine proper input shape input_shape = _obtain_input_shape(input_shape, default_size=331, min_size=32, data_format=K.image_data_format(), require_flatten=False, weights=None) # weights=None to prevent input channels equality check if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor if penultimate_filters % (24 * (filter_multiplier ** 2)) != 0: raise ValueError( 'For NASNet-A models, the `penultimate_filters` must be a multiple ' 'of 24 * (`filter_multiplier` ** 2). 
Current value: %d' % penultimate_filters) filters = penultimate_filters // 24 x = Conv2D(stem_block_filters, (3, 3), strides=(2, 2), padding="same", use_bias=False, name='stem_conv1', kernel_initializer='he_normal')(img_input) x = BatchNormalization(momentum=0.9997, epsilon=1e-3, name='stem_bn1')(x) #conv1 if net_type == NetType.mc_do: x = Dropout(do_rate, name='dropout')(x, training=True) elif net_type == NetType.mc_df: x = Dropout(do_rate, noise_shape=(x.shape[0], 1, 1, x.shape[-1]), name='dropfilter')(x, training=True) total_num_cells = 4 + 3 * num_blocks cell_counter = 0 p = None x, p = _reduction_a_cell_do(x, p, filters // (filter_multiplier ** 2), net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='stem_1') cell_counter += 1 #conv2 x, p = _reduction_a_cell_do(x, p, filters // filter_multiplier, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='stem_2') cell_counter += 1 for i in range(num_blocks): x, p = _normal_a_cell_do(x, p, filters, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='%d' % (i)) cell_counter += 1 #conv3 x, p0 = _reduction_a_cell_do(x, p, filters * filter_multiplier, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='reduce_%d' % (num_blocks)) cell_counter += 1 p = p0 if not skip_reduction else p for i in range(num_blocks): x, p = _normal_a_cell_do(x, p, filters * filter_multiplier, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='%d' % (num_blocks + i + 1)) cell_counter += 1 #conv4 x, p0 = _reduction_a_cell_do(x, p, filters * filter_multiplier ** 2, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='reduce_%d' % (2 * num_blocks)) cell_counter += 1 p = p0 if not skip_reduction else p for i in range(num_blocks): x, p = _normal_a_cell_do(x, p, filters * filter_multiplier ** 2, net_type=net_type, cell_num=cell_counter, total_num_cells=total_num_cells, total_training_steps=total_training_steps, do_rate=do_rate, block_id='%d' % (2 * num_blocks + i + 1)) cell_counter += 1 #conv5 x = Activation('relu')(x) if include_top: x = GlobalAveragePooling2D()(x) x = Dense(classes, activation='sigmoid', name='predictions')(x) else: if pooling == 'avg': x = GlobalAveragePooling2D()(x) elif pooling == 'max': x = GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input # Create model. 
model = Model(inputs, x, name='NASNet') # Create donor model if input_shape[-1] > 3 and weights is not None: donor_input_shape = (*input_shape[:-1], 3) donor_model = get_donor_model(include_top, input_tensor=None, input_shape=donor_input_shape, penultimate_filters=penultimate_filters, num_blocks=num_blocks, stem_block_filters=stem_block_filters, skip_reduction=skip_reduction, pooling=pooling, classes=classes) # load weights if weights is not None and input_shape[-1] > 3: if weights == 'imagenet': if include_top: print('Loading pretrained ImageNet weights, include top for NASNet backbone') weights_path = get_file('nasnet_large.h5', TF_NASNET_LARGE_WEIGHT_PATH, cache_subdir='models', file_hash='11577c9a518f0070763c2b964a382f17') else: print('Loading pretrained ImageNet weights, exclude top for NASNet backbone') weights_path = get_file('nasnet_large_no_top.h5', TF_NASNET_LARGE_WEIGHT_PATH_NO_TOP, cache_subdir='models', file_hash='d81d89dc07e6e56530c4e77faddd61b5') else: print('Parameter "pretrained_weights" is expected to be "imagenet". However you can pass path to weights ' 'if you are sure about what you are doing!') if os.path.exists(weights): weights_path = weights else: print('Parameter "pretrained_weights" is expected to be "imagenet" or path to weights. Considered to ' f'be path, but it doesn\'t exist: {weights}') if input_shape[-1] > 3: print( f'Copying pretrained weights to model with {input_shape[-1]} input channels for NASNet backbone') donor_model.load_weights(weights_path) donor_model_layers_weights = [d_l for d_l in donor_model.layers if len(d_l.weights) > 0] j = 0 already_copied_layers = [] for i, l in enumerate([l for l in model.layers if len(l.weights) > 0]): if j >= len(donor_model_layers_weights): break while j in already_copied_layers: j += 1 d_l = donor_model_layers_weights[j] # dropout in target model - skip to next layer if 'dropout' in l.name and 'dropout' not in d_l.name or \ 'droppath' in l.name and 'droppath' not in d_l.name or \ 'dropfilter' in l.name and 'dropfilter' not in d_l.name: continue # first weighted layer in target model - adding weights for uncovered channels if i == 0: new_w = tf.tile(d_l.weights[0], (1, 1, 2, 1))[:, :, :input_shape[-1], :] l.weights[0].assign(new_w) j += 1 # layers names are identical - copy weights elif l.name == d_l.name: for (w, d_w) in zip(l.weights, d_l.weights): w.assign(d_w) j += 1 # layer order is broken - search for the matching donor layer and copy weights else: for k in range(j+1, len(donor_model_layers_weights)): d_l_next = donor_model_layers_weights[k] if l.name == d_l_next.name: for (w, d_n_w) in zip(l.weights, d_l_next.weights): w.assign(d_n_w) already_copied_layers.append(k) break if k == len(donor_model_layers_weights) -1: raise ValueError assert j == len(donor_model_layers_weights) del donor_model else: model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) else: print('No pretrained weights passed') if old_data_format: K.set_image_data_format(old_data_format) return model
[ "def build_model(classes, height, width):\n print(\"> Building Keras neural network...\")\n network_model = model.simple_3(classes=classes, height=height, width=width)\n return network_model", "def init_model_scratch(args):\n img_size = args.img_size\n channels = args.channels\n num_class = args.num_class\n inputs = Input(shape=(img_size, img_size, channels), name='input')\n conv1 = Conv2D(16, (3,3), padding='same', activation='relu', name='conv1')(inputs)\n pool1 = MaxPooling2D(name='pool1')(conv1)\n conv2 = Conv2D(32, (3,3), padding='same', activation='relu', name='conv2')(pool1)\n pool2 = MaxPooling2D(name='pool2')(conv2)\n conv3 = Conv2D(64, (3,3), padding='same', activation='relu', name='conv3')(pool2)\n pool3 = MaxPooling2D(name='pool3')(conv3)\n flatten = Flatten(name='flatten')(pool3)\n fc1 = Dense(units=128, activation='relu', name='fc1')(flatten)\n dropout = Dropout(rate=0.5, name='dropout')(fc1)\n predictions = Dense(units=num_class, activation='softmax', name='prediction')(dropout)\n model = models.Model(inputs=inputs, outputs=predictions)\n model.compile(\n optimizer=optimizers.Adam(),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n return model", "def initialize_model(model_name, num_classes, resume_from=None):\n model_ft = None\n input_size = 0 # Input images are input_size x input_size\n use_pretrained = False\n\n if model_name == \"resnet\":\n \"\"\" \n Resnet18\n \"\"\"\n model_ft = ResNet(block=BasicBlock, layers=[2, 2, 2, 2], num_classes=num_classes)\n input_size = 28\n\n elif model_name == \"alexnet\":\n \"\"\" \n Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=use_pretrained)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n input_size = 28\n\n elif model_name == \"vgg\":\n \"\"\"\n VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=use_pretrained)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n input_size = 28\n\n elif model_name == \"squeezenet\":\n \"\"\" \n Squeezenet\n \"\"\"\n model_ft = models.squeezenet1_0(pretrained=use_pretrained)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n input_size = 28\n\n elif model_name == \"densenet\":\n \"\"\" \n Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=use_pretrained)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n input_size = 28\n\n elif model_name == \"simple\":\n \"\"\"\n A simple custom CNN\n \"\"\"\n model_ft = SimpleCNN(num_classes)\n input_size = 28\n\n else:\n raise Exception(\"Invalid model name\")\n\n if resume_from is not None:\n print(\"Loading weights from %s\" % resume_from)\n model_ft.load_state_dict(torch.load(resume_from, map_location=torch.device(\"cpu\")))\n\n return model_ft, input_size", "def create(model_size=\"b0\", variant=\"std\", resolution=None):\n\n if variant not in (\"std\", \"aa\", \"adv-prop\", \"noisy-student\"):\n raise ValueError(f\"EfficientNet variant not supported: {variant}\")\n\n # Note that for the standard EfficientNet variant only B0-B5 architectures are\n # supported, B0-B7 for all other variants. 
Noisy-Student also supports L2\n # and L2_475 (with a resolution of 475).\n valid = (variant == \"std\" and model_size in {f\"b{i}\" for i in range(6)}) or \\\n (variant != \"std\" and model_size in {f\"b{i}\" for i in range(8)}) or \\\n (variant == \"noisy-student\" and model_size in (\"l2\", \"l2_475\"))\n if not valid:\n raise ValueError(\n f\"Invalid `model_size` {model_size!r} for EfficientNet `variant` \"\n f\"{variant!r}!\")\n\n if model_size.startswith(\"l2\"):\n noisy_student = hub.KerasLayer(MODEL_PATHS[variant + \"-l2\"].format(\n model_size))\n else:\n noisy_student = hub.KerasLayer(MODEL_PATHS[variant].format(model_size))\n\n @tf.function\n def model(features):\n images = features[\"image\"]\n return tf.nn.softmax(noisy_student(images), axis=-1)\n\n def preprocess_fn(features):\n # EfficientNet preprocessing with model-dependent input resolution.\n # Preprocessing mimicks that of the public EfficientNet code from\n # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py\n # (both `_resize_image` and `_decode_and_center_crop` taken from that code)\n\n def _resize_image(image, image_size, method=None):\n if method is not None:\n return tf1.image.resize([image], [image_size, image_size], method)[0]\n return tf1.image.resize_bicubic([image], [image_size, image_size])[0]\n\n def _decode_and_center_crop(image, image_size, resize_method=None):\n \"\"\"Crops to center of image with padding then scales image_size.\"\"\"\n shape = tf1.shape(image)\n image_height = shape[0]\n image_width = shape[1]\n\n padded_center_crop_size = tf1.cast(\n ((image_size / (image_size + CROP_PADDING)) *\n tf.cast(tf.minimum(image_height, image_width), tf.float32)),\n tf.int32)\n\n offset_height = ((image_height - padded_center_crop_size) + 1) // 2\n offset_width = ((image_width - padded_center_crop_size) + 1) // 2\n image = tf1.image.crop_to_bounding_box(image, offset_height, offset_width,\n padded_center_crop_size,\n padded_center_crop_size)\n image = _resize_image(image, image_size, resize_method)\n return image\n\n features[\"image\"] = _decode_and_center_crop(\n features[\"image\"], EFFICIENTNET_RESOLUTIONS[model_size])\n features[\"image\"] = tf1.cast(features[\"image\"], tf1.float32)\n # We assume the modules expect pixels in [-1, 1].\n features[\"image\"] = features[\"image\"] / 127.5 - 1.0\n\n return features\n\n if resolution is not None:\n preprocess_config_fmt = \"resize_small({})|central_crop({})|value_range(-1,1)\"\n preprocess_config = preprocess_config_fmt.format(\n int(1.15 * resolution), resolution)\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n\n return model, preprocess_fn", "def alexnet(pretrained=False, **kwargs):\n model = AlexNet(**kwargs)\n # # print (kwargs['num_classes'])\n # if pretrained:\n # # model_path = '/mnt/lustre/wangzhouxia/Data_t1/rl_exposure_fusion_v1/368/decision_ps_v2_401_r1_ssim_fc99_b16_lr5/policy_decision_ps_v2_401_r1_ssim_fc99_b16_lr5_checkpoint_19.path.tar'\n # model_path = '/mnt/lustre/wangzhouxia/Data_t1/rl_exposure_fusion_v1/0327/decision_gff_v2_438_ssim_lr4_c99/policy_decision_gff_v2_438_ssim_lr4_c99_checkpoint_29.path.tar'\n # # model_path = '/data1/models/rl_exposure_fusion/policy_decision_ps_v2_401_r1_ssim_fc99_b16_lr5_checkpoint_19.path.tar'\n # pre_model = torch.load(model_path)['state_dict']\n # # pdb.set_trace()\n # model_dict = model.state_dict()\n # for k in pre_model.keys():\n # if not model_dict.has_key(k):\n # del pre_model[k]\n # # 
pre_model['classifier.6.weight'] = torch.empty((kwargs['num_classes'], 4096)).normal_(0.0, 0.01)\n # # pre_model['classifier.6.bias'] = torch.zeros(kwargs['num_classes'])\n\n # model.load_state_dict(pre_model)\n\n # model = AlexNet(**kwargs)\n # print (kwargs['num_classes'])\n if pretrained:\n pre_model = model_zoo.load_url(model_urls['alexnet'])\n model_dict = model.state_dict()\n for k in pre_model.keys():\n if not model_dict.has_key(k):\n del pre_model[k]\n model.load_state_dict(pre_model)\n \n return model", "def buildNihModel(self, img_size, label_len):\n \n base_model = DenseNet121(weights='imagenet', include_top=False, \n input_shape = (img_size,img_size,3))\n x = base_model.output\n x = layers.GlobalAveragePooling2D()(x)\n predictions = layers.Dense(label_len, activation='sigmoid', name='last')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n if not self.weights == 'imagenet':\n model.load_weights(self.weights)\n return model", "def get_image_model(network, num_classes, img_width, img_height):\n input_shape = (img_width, img_height, 3)\n image_input = Input(shape=input_shape)\n base_model, last_layer_number = get_cnn_model(\n network, input_shape, image_input)\n\n #base_model.trainable = True\n\n # adding regularization\n #regularizer = l2(0.01)\n\n #for layer in base_model.layers:\n # for attr in ['kernel_regularizer']:\n # if hasattr(layer, attr):\n # setattr(layer, attr, regularizer)\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(1024, activation='relu')(x)\n predictions = Dense(num_classes, activation='softmax')(x)\n\n return Model(base_model.input, predictions), last_layer_number", "def train_network():\n if os.path.isfile('data/notes'):\n notes = pickle.load(open( \"data/notes\", \"rb\" ))\n\n else:\n notes = get_notes()\n\n # get amount of pitch names\n n_vocab = len(set(notes))\n embedding_input, network_input, network_output, note_to_int = prepare_sequences(notes, n_vocab)\n\n embedding_weights = create_embedding(embedding_input, n_vocab, note_to_int)\n\n\n model = create_network(network_input, n_vocab, embedding_weights)\n\n train(model, network_input, network_output)", "def __init__(self, mode=NET_MODE_TRAIN, dimension=2, n_hidden_units=128, n_mixtures=5, batch_size=100, sequence_length=120, layers=2):\n # network parameters\n self.dimension = dimension\n self.mode = mode\n self.n_hidden_units = n_hidden_units\n self.n_rnn_layers = layers\n self.n_mixtures = n_mixtures # number of mixtures\n # Training parameters\n self.batch_size = batch_size\n self.sequence_length = sequence_length\n self.val_split = 0.10\n # Sampling hyperparameters\n self.pi_temp = 1.5\n self.sigma_temp = 0.01\n\n if self.mode is NET_MODE_TRAIN:\n self.model = build_model(seq_len=self.sequence_length,\n hidden_units=self.n_hidden_units,\n num_mixtures=self.n_mixtures,\n layers=self.n_rnn_layers,\n out_dim=self.dimension,\n time_dist=True,\n inference=False,\n compile_model=True,\n print_summary=True)\n else:\n self.model = build_model(seq_len=1,\n hidden_units=self.n_hidden_units,\n num_mixtures=self.n_mixtures,\n layers=self.n_rnn_layers,\n out_dim=self.dimension,\n time_dist=False,\n inference=True,\n compile_model=False,\n print_summary=True)\n\n self.run_name = self.get_run_name()", "def build_model_audio(modelfile, meanstd_file, input_dim, excerpt_size):\n # Build CNN architecture\n net = {}\n net['input'] = InputLayer((None, 1, excerpt_size, input_dim))\n kwargs = dict(nonlinearity=lasagne.nonlinearities.leaky_rectify,\n W=lasagne.init.Orthogonal())\n 
net['Conv1_1'] = ConvLayer(net['input'], 64, 3, **kwargs)\n net['Conv1_2'] = ConvLayer(net['Conv1_1'], 32, 3, **kwargs)\n net['pool1'] = MaxPool2DLayer(net['Conv1_2'], 3)\n net['Conv2_1'] = ConvLayer(net['pool1'], 128, 3, **kwargs)\n net['Conv2_2'] = ConvLayer(net['Conv2_1'], 64, 3, **kwargs)\n net['pool2'] = MaxPool2DLayer(net['Conv2_2'], 3)\n net['fc3'] = DenseLayer(dropout(net['pool2'], 0.5), 256, **kwargs)\n net['fc4'] = DenseLayer(dropout(net['fc3'], 0.5), 64, **kwargs)\n net['score'] = DenseLayer(dropout(net['fc4'], 0.5), 1,\n nonlinearity=lasagne.nonlinearities.sigmoid,\n W=lasagne.init.Orthogonal())\n \n # load saved weights\n with np.load(modelfile) as f:\n lasagne.layers.set_all_param_values(\n net['score'], [f['param%d' % i] for i in range(len(f.files))])\n \n # - load mean/std \n with np.load(meanstd_file) as f:\n mean = f['mean']\n std = f['std']\n mean = mean.astype(floatX)\n istd = np.reciprocal(std).astype(floatX)\n\n return net, mean, istd", "def train_mobilenet_run(args):\n from .train.mobilenet import train\n\n train(\n args.epochs,\n args.batchsize,\n args.outputdir,\n use_class_weights=args.classweights,\n use_image_variations=args.imagevariations,\n )", "def resnet20(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3,3,3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet20']))\n return model", "def build_model():\n # Instantiate AlexNet\n inception = InceptionV3(weights='imagenet', include_top=False, input_shape=(139,139,3))\n\n # Create preprocessing layers\n inp = Input(shape=(160, 320, 3))\n norm = Lambda(lambda x: (x/255)-0.5)(inp) # normalize image\n crop = Cropping2D(cropping=((50, 20), (0,0)))(norm)\n resized = Lambda(lambda x: tf.image.resize_images(x, (139, 139)))(crop)\n \n # Attach the AlexNet with the preprocessing layers\n piped_inception = inception(resized)\n \n # Attach new classifier layers\n avg_pool = GlobalAveragePooling2D()(piped_inception)\n fc = Dense(512, activation='relu')(avg_pool)\n prediction = Dense(1)(fc)\n model = Model(input=inp, output=prediction)\n\n return model", "def resnet56(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [9,9,9], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet20']))\n return model", "def test_compileNxModel(self):\n\n inputShape = (7, 7, 1)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxConv2D(2, 3)(inputLayer.input)\n model = NxModel(inputLayer.input, outputLayer)\n model.clearTemp()", "def download():\n ResNet50(weights='imagenet', include_top=False)", "def init_network(self):\n # Load OpenVINO Inference Engine.\n self.get_logger().info(f\"Loading Inference Engine on {self.device}\")\n self.ie = IECore()\n\n # Read and load the network.\n self.net = self.ie.read_network(model=constants.MODEL_XML, weights=constants.MODEL_BIN)\n self.func = ng.function_from_cnn(self.net)\n self.ops = self.func.get_ordered_ops()\n self.exec_net = self.ie.load_network(network=self.net, device_name=self.device)\n\n # Read expected input image info from network and prepare input blobs.\n # n: batch size, c: no. 
of channels, h: input height, w: input width\n for self.input_key in self.net.input_info:\n self.input_name = self.input_key\n self.n, self.c, self.h, self.w = self.net.input_info[self.input_key].input_data.shape\n # Initializing to float for optimizing in later functions\n self.h = float(self.h)\n self.w = float(self.w)\n\n # Prepare output blobs\n self.out_blob = next(iter(self.net.outputs))", "def load_neural_data(self, *args, **kwargs) -> NoReturn:\n raise NotImplementedError", "def create(cfg, shapes, dtypes, num_classes):\n input_channels = shapes['input'][0]\n specification = cfg['model.arch']\n num_outputs = 1 if num_classes == 2 else num_classes\n specification = specification.replace('C', str(num_outputs))\n input_name = cfg.get('model.input_name', 'input')\n output_name = cfg.get('model.output_name', 'output')\n return custom_cnn(input_channels, specification, input_name, output_name,\n default_nonlin=cfg.get('model.nonlin', 'relu'),\n batch_norm=cfg.get('model.batch_norm', False))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders the template for the given step, returning an HttpResponse object. Override this method if you want to add a custom context, return a different MIME type, etc. If you only need to override the template name, use get_template() instead.
def render_template( self, request, form, previous_fields, step, context=None ): if IsDebug: print "render: %s" % step context = context or {} context.update(self.extra_context) context_instance = RequestContext(request) template = self.get_template(step) fieldsets = form.fieldsets data = request.POST.copy() data['step'] = step wizardForm = WizardForm( data, form, fieldsets ) response = dict(context, step_field=STEP_FIELD_NAME, step0=step, step=step + 1, step_count=self.num_steps(), form=form, wizardform=wizardForm, previous_fields=previous_fields, media=self.media, ) if form.cooked: for i, f, v, s in form.cooked: response[i] = request.POST.get(self.get_fieldname_for_step(s, f), None) or '' response['value_%s' % i] = request.POST.get(self.get_fieldname_for_step(s, v), None) or PK_EMPTY response['pk'] = form.instance.pk or PK_EMPTY if form.errors: # or step==2: errors = form.errors #raise 1 if IsDebug: print "OK" return render_to_response(template, response, context_instance=context_instance)
[ "def render(\n request, template_name, context=None, content_type=None, status=None, using=None\n):\n content = loader.render_to_string(template_name, context, request, using=using)\n return HttpResponse(content, content_type, status)", "def render_step(self, **kwargs):\n if self.templated:\n from jinja2 import Environment\n env = Environment().from_string(json.dumps(self._raw_representation)).render(\n core.config.config.JINJA_GLOBALS, **kwargs)\n self._update_json(updated_json=json.loads(env))", "def render_to_response(self, context, **response_kwargs):\n return SimpleTemplateResponse(\n template=self.get_template(),\n context=context,\n **response_kwargs\n )", "def render_template(self, template_name, output_name, context):\n raise NotImplementedError()", "def render(self, template, qcontext=None, lazy=True, **kw):\n response = Response(template=template, qcontext=qcontext, **kw)\n if not lazy:\n return response.render()\n return response", "def render_template(self, *args, **kwargs):\n return self.renderer.render(*args, **kwargs)", "def on_template_response(self, context, **kwargs):\n request = kwargs.setdefault(\"request\", self.req())\n\n res = TemplateResponse(request, \"some/template.html\", context)\n\n return self.on_response(res, **kwargs)", "def render_issue_response(self, request, context):\n return render(request, self.response_template, context)", "def _render_response(request, *args, **kwargs):\n httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}\n status = kwargs.pop('status', 200)\n if 'context_instance' not in kwargs:\n kwargs['context_instance'] = RequestContext(request)\n return HttpResponse(loader.render_to_string(*args, **kwargs),\n status=status, **httpresponse_kwargs)", "def render (self, request, dct):\n\t\treturn render_to_response ([self.template, self.fallback_template],\n\t\t\tdct, context_instance=RequestContext(request))", "def renderContent(self,state,context):\n if not isinstance(context,dict):\n raise Exception(\"context in {0} was not a dict\".format(self.getName()))\n\n content_type = self.getContentType()\n\n if content_type == self.CONTENT_TYPE_HTML:\n #Check to make sure that a template is defined\n if self.TEMPLATE is None:\n raise Exception(\"{0} does not define self.TEMPLATE\".format(self.getName()))\n\n #Render to TEMPLATE\n context[\"self\"] = self.getSelf()\n content = self.renderTemplate(self.TEMPLATE,context)\n content = self.renderScript(state,content)\n content = self.makeTemplateSafe(content)\n\n else:\n content = context\n\n return content", "def render(self, environment, typename, filetype, **kwargs):\n template_filename = \"{}.{}.{}.j2\".format(self.language(), typename, filetype)\n rendered = environment.get_template(template_filename).render(generator = self, **kwargs)\n return rendered", "def render_string(self, template_name, **kwargs):\n if 'tornado' == settings['TEMPLATE_ENGINE']:\n return super(BaseHandler, self).render_string(template_name, **kwargs)\n elif 'jinja2' == settings['TEMPLATE_ENGINE']:\n return jinja2_render(template_name, **kwargs)\n else:\n raise errors.SettingsError(\n '%s is not a supported TEMPLATE_ENGINE, should be `tornado` or `jinja2`'\n % settings['TEMPLATE_ENGINE'])", "def render_response(app, request, template, **context):\r\n template = app.templates.get_template(template)\r\n\r\n default_context = {\r\n \"config\": app.config,\r\n \"csrf_token\": functools.partial(helpers.csrf_token, request),\r\n \"gravatar_url\": helpers.gravatar_url,\r\n \"static_url\": 
functools.partial(helpers.static_url, app),\r\n \"url_for\": functools.partial(helpers.url_for, request),\r\n }\r\n\r\n return TemplateResponse(\r\n TemplateRenderer(template, context, default_context=default_context),\r\n mimetype=\"text/html\",\r\n )", "def render(self, template, **kw):\n t = jinja_env.get_template(template) \n self.response.out.write(t.render(kw))", "def render_template(name, context=None, type='html'):\n return template.render(get_template_path('%s.%s'% (name, type)), context)", "def dispatch_request(self, *args, **kwargs):\n path = request.path.lstrip(\"/\")\n matching_template = self._get_template(path)\n\n if not matching_template:\n abort(404, f\"Can't find page for: {path}\")\n\n if matching_template[-2:] == \"md\":\n with open(\n f\"{current_app.template_folder}/{matching_template}\"\n ) as f:\n file_content = f.read()\n parsed_file = load_frontmatter_from_markdown(file_content)\n wrapper_template = parsed_file.metadata.get(\"wrapper_template\")\n\n if not wrapper_template or not os.path.isfile(\n current_app.template_folder + \"/\" + wrapper_template\n ):\n abort(404, f\"Can't find page for: {path}\")\n\n context = parsed_file.metadata.get(\"context\", {})\n return self._render_markdown(\n parsed_file.content, wrapper_template, context\n )\n\n return render_template(matching_template, **self._get_context())", "def renderTemplate(self,template_path,context):\n html = render_to_string(template_path,context)\n return html", "def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the challenge tag list and get the combinations of 2, 3, and 4 tags.
def get_challenge_tag_combination_count() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]: def count_tag_combination(challenge_tag_it: Iterator) -> pd.DataFrame: tag_combinatio_count = (pd.DataFrame .from_records( pd.Series(challenge_tag_it) .apply(lambda l: [c if isinstance(c, str) else tuple(sorted(c)) for c in l]) ) .fillna('') .stack() .value_counts() .to_frame() .reset_index() .rename(columns={'index': 'tag', 0: 'count'})) return tag_combinatio_count.loc[tag_combinatio_count['tag'].astype(bool)].reset_index(drop=True) challenge_tags_cursor = DB.TopcoderMongo.run_challenge_aggregation([ *DB.TopcoderMongo.scoped_challenge_with_text_query, {'$project': {'tags': True, '_id': False}}, ]) it0, it1, it2, it3 = itertools.tee((doc['tags'] for doc in challenge_tags_cursor), 4) return ( count_tag_combination(it0), count_tag_combination(itertools.combinations(tags, 2) for tags in it1), count_tag_combination(itertools.combinations(tags, 3) for tags in it2), count_tag_combination(itertools.combinations(tags, 4) for tags in it3), )
[ "def getTags(src):\n tags = []\n if len(src) == 1:\n tags = ['S']\n elif len(src) == 2:\n tags = ['B', 'E']\n else:\n m_num = len(src) - 2\n tags.append('B')\n tags.extend(['M'] * m_num)\n tags.append('S')\n return tags", "def compute_tag_feature() -> list[dict]:\n tag_comb_softmax: list[pd.DataFrame] = get_tag_combination_softmax()\n challenge_tag = DB.TopcoderMongo.run_challenge_aggregation([\n *DB.TopcoderMongo.scoped_challenge_with_text_query,\n {'$project': {'id': True, 'tags': True, '_id': False}},\n ])\n\n def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n \"\"\" Encode the tag list into one-hot list and sum of softmax.\n Short var name `tc` stands for `tag_combination`.\n \"\"\"\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct\n\n return [{**cha, **map_tag_lst_to_softmax(cha['tags'])} for cha in challenge_tag]", "def extract_relevant(self, ner_tags: List[str]) -> List[str]:\n filtered_tags = []\n for gold_tag in ner_tags:\n matched = None\n for tag in self.tags2ix:\n if re.match(f\".*-{tag}\", gold_tag) is not None:\n matched = gold_tag\n if matched is None:\n filtered_tags.append(\"O\")\n else:\n filtered_tags.append(matched)\n return filtered_tags", "def tags(self) -> List:", "def get_tutor_tags(tags_tutor, tag_list):\n tag_count = []\n for tutor, values in tags_tutor.items():\n this_tutor = []\n this_tutor.append(tutor)\n i = 0\n while i < 5:\n this_tutor.append(tags_tutor[tutor][tag_list[i]])\n i += 1\n tag_count.append(this_tutor)\n return tag_count", "def make_id3v2_tags(metadata):\n _log.call(metadata)\n\n tags = _make_tagging_map(\"ID3v2\", metadata)\n\n # only use TCMP=1\n if tags[\"TCMP\"] == ['0']:\n del tags[\"TCMP\"]\n\n # lame automatically includes TSSE to identify itself\n tags[\"TENC\"] = [\"http://ninthtest.info/flac-mp3-audio-manager/\"]\n\n _log.return_(tags)\n return tags", "def get_tags(datasets):\n tags = set()\n for dataset in datasets:\n for sentence in dataset:\n for word, tag in sentence:\n tags.add(tag)\n return tags", "def get_challenges_by_tags(self, challenge_tags, limit=10, page=0):\n query_params = {\n \"tags\": str(challenge_tags),\n \"limit \": str(limit),\n \"page\": str(page)\n }\n response = self.get(\n endpoint=f\"/challenges/tags\",\n params=query_params\n )\n return response", "def biTag(train, test):\n return (evaluate_taggers(train, test)[0][3], evaluate_taggers(train, test)[1][3])", "def prob2(code):\n return [tag.name for tag in BeautifulSoup(code, 'html.parser').find_all(True)]", "def get_words_by_tag(self, tag_list, tags):\n return [(word, tag) for (word, tag) in tag_list if tag in tags]", "def read_tags(i, dataset):\n filepath = 'tags_train/' if dataset == TRAIN else 'tags_test/'\n filepath += str(i) + \".txt\"\n with open(filepath) as f:\n lines = f.read().splitlines()\n lines = list(filter(None, lines))\n imgtags = []\n for tag in lines:\n imgtags.append(tuple(tag.split(':')))\n return imgtags", "def extract_io(pre_tag_elements, url):\n sample_inputs = []\n sample_outputs = []\n for sample_io in pre_tag_elements:\n # 
finding heading / previous sibling of pre\n sibling = sample_io.previous_sibling\n while(not str(sibling).strip()):\n sibling = sibling.previous_sibling\n\n # converting sample_io to text\n iotext = str(sample_io.text)\n\n # standard codechef problems with input and output in same pre tag\n # OR sometimes input just above pre tag and output in pretag\n if((\"input\" in iotext.lower() or \"input\" in str(sibling).lower()) and\n \"output\" in iotext.lower()):\n in_index, out_index = iotext.lower().find(\n \"input\"), iotext.lower().find(\"output\")\n ki = 1 if (in_index == -1) else 5\n sample_input = sanitize(iotext[in_index+ki: out_index])\n sample_output = sanitize(iotext[out_index + 6:])\n\n if(len(sample_inputs) != len(sample_outputs)):\n sample_inputs = []\n sample_outputs = []\n sample_inputs.append(sample_input)\n sample_outputs.append(sample_output)\n\n # problem with input only like challenge problems\n # or input and output in seperate pre tags\n elif(\"input\" in str(sample_io.text).lower() or\n \"input\" in str(sibling).lower()):\n in_index = iotext.lower().find(\"input\")\n ki = 1 if (in_index == -1) else 5\n sample_input = sanitize(iotext[in_index+ki:])\n sample_inputs.append(sample_input)\n\n # problem with output only like printing 100! etc\n # or input and output in seperate pre tags\n elif(\"output\" in str(sample_io.text).lower() or\n \"output\" in str(sibling).lower()):\n out_index = iotext.lower().find(\"output\")\n ko = 1 if (out_index == -1) else 6\n sample_output = sanitize(iotext[out_index+ko:])\n sample_outputs.append(sample_output)\n\n return sample_inputs, sample_outputs", "def parse_tag(tag: PydicomTag) -> tuple:\n return int_to_tag_hex(tag.group), int_to_tag_hex(tag.element)", "def get_flags(self):\n flags = [ ]\n\n # Iterate through known flags based on tag naming convention. 
Tag flags\n # are listed here in order of priority.\n tag_flags = ('firstplace', 'secondplace', 'thirdplace', 'finalist')\n for p in tag_flags:\n for tag in self.taggit_tags.all():\n # TODO: Is this 'system:challenge' too hard-codey?\n if tag.name.startswith('system:challenge:%s:' % p):\n flags.append(p)\n\n # Featured is an odd-man-out before we had tags\n if self.featured:\n flags.append('featured')\n\n return flags", "def extract_hash_tags(text: str) -> List[str]:\n tags = []\n\n if text != '':\n hash_tags = re.findall(TagsProcessor.__HASHTAG_REGEX, text)\n tags = TagsProcessor.compress_tags(hash_tags)\n\n # del duplicates\n unique_tags = []\n for i in tags:\n if i not in unique_tags:\n unique_tags.append(i)\n\n return unique_tags", "def tags_with(self, word):\n return sorted(list(set([x[2] for x in self.tags if word in x[2]])))", "def get_possible_tags(word):\n if word in perWordTagCounts:\n tags_count = perWordTagCounts[word].most_common()\n tags = list(map(lambda x: x[0], tags_count))\n else:\n tags = list(allTagCounts.keys())\n return tags", "def find_tags(invite_title):\n # prepare for nlp with preprocessing pipeline\n text = nlp_pipeline(invite_title)\n # embed on vector\n count_vectorizer = CountVectorizer(stop_words='english')\n count_data = count_vectorizer.fit_transform([text])\n # chose 4 tags for now to represent each invite\n number_topics = 1\n number_words = 4\n # create and fit LDA model\n lda = LDA(n_components=number_topics, n_jobs=-1)\n lda.fit(count_data)\n \n # assign words as feature names for topics/tags \n words = count_vectorizer.get_feature_names()\n\n # get topics/tags from model using topic word distribution from lda\n topics = [[words[i] for i in topic.argsort()[:-number_words - 1:-1]] for (topic_idx, topic) in enumerate(lda.components_)]\n topics = np.array(topics).ravel()\n\n return topics" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the tag combination's softmax score from the frequency count. This logic is kept separate from `get_challenge_tag_combination_count` to preserve the total combination count DataFrame.
def get_tag_combination_softmax() -> list[pd.DataFrame]: def compute_softmax(tag_combination: pd.DataFrame): """ Calculate softmax for tag combination DataFrame.""" top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy() top_n['count_softmax'] = TML.softmax(np.log(top_n['count'])) return top_n return [compute_softmax(tag_combination) for tag_combination in get_challenge_tag_combination_count()]
[ "def compute_softmax(tag_combination: pd.DataFrame):\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n", "def compute_tag_feature() -> list[dict]:\n tag_comb_softmax: list[pd.DataFrame] = get_tag_combination_softmax()\n challenge_tag = DB.TopcoderMongo.run_challenge_aggregation([\n *DB.TopcoderMongo.scoped_challenge_with_text_query,\n {'$project': {'id': True, 'tags': True, '_id': False}},\n ])\n\n def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n \"\"\" Encode the tag list into one-hot list and sum of softmax.\n Short var name `tc` stands for `tag_combination`.\n \"\"\"\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct\n\n return [{**cha, **map_tag_lst_to_softmax(cha['tags'])} for cha in challenge_tag]", "def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct", "def get_challenge_tag_combination_count() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n def count_tag_combination(challenge_tag_it: Iterator) -> pd.DataFrame:\n tag_combinatio_count = (pd.DataFrame\n .from_records(\n pd.Series(challenge_tag_it)\n .apply(lambda l: [c if isinstance(c, str) else tuple(sorted(c)) for c in l])\n )\n .fillna('')\n .stack()\n .value_counts()\n .to_frame()\n .reset_index()\n .rename(columns={'index': 'tag', 0: 'count'}))\n return tag_combinatio_count.loc[tag_combinatio_count['tag'].astype(bool)].reset_index(drop=True)\n\n challenge_tags_cursor = DB.TopcoderMongo.run_challenge_aggregation([\n *DB.TopcoderMongo.scoped_challenge_with_text_query,\n {'$project': {'tags': True, '_id': False}},\n ])\n\n it0, it1, it2, it3 = itertools.tee((doc['tags'] for doc in challenge_tags_cursor), 4)\n return (\n count_tag_combination(it0),\n count_tag_combination(itertools.combinations(tags, 2) for tags in it1),\n count_tag_combination(itertools.combinations(tags, 3) for tags in it2),\n count_tag_combination(itertools.combinations(tags, 4) for tags in it3),\n )", "def _compute_tag_frequency(self):\n for tag_id in self:\n tag_id.tag_frequency = self.env['project.task'].search_count(\n [('user_id', '=', self._context.get('usr_id')), ('tag_ids', 'in', tag_id.id)])", "def predict_viterbi(x, f_map, tags_s, word_t_map, lib_model):\n y = []\n v = [{(extract.START_SYMBOL, extract.START_SYMBOL): 0.0}]\n bp = []\n for ind, word in enumerate(x):\n # Check if word was seen in the corpus.\n if word not in word_t_map:\n is_rare = True\n available_tags = tags_s\n 
else:\n is_rare = False\n # Pruning of tags to lower amount of possible tags for this word.\n available_tags = word_t_map[word]\n\n max_score = {}\n max_tags = {}\n # Calculate for each word best scores/probabilities and best tags for each word.\n for pp_t, p_t in v[ind]:\n for curr_tag in available_tags:\n word_features = extract.generate_word_features(is_rare, p_t, pp_t, word, ind, x)\n features_vec = features_to_vec(word_features, f_map)\n scores = lib_model.predict(features_vec)\n score = np.amax(scores)\n if (p_t, curr_tag) not in max_score or score > max_score[(p_t, curr_tag)]:\n max_score[(p_t, curr_tag)] = score\n max_tags[(p_t, curr_tag)] = pp_t\n\n v.append(max_score)\n bp.append(max_tags)\n # Calculate last 2 best tags.\n max_score = float(\"-inf\")\n prev_last_tag, last_tag = None, None\n for prev_t, curr_t in v[len(x)]:\n score = v[len(x)][(prev_t, curr_t)]\n if score > max_score:\n max_score = score\n last_tag = curr_t\n prev_last_tag = prev_t\n\n y.append(last_tag)\n if len(x) > 1:\n y.append(prev_last_tag)\n\n prev_t = last_tag\n prev_prev_t = prev_last_tag\n # By backtracking extract all the path of best tags for each word starting by last 2 tags we calculated above.\n for i in range(len(v) - 2, 1, -1):\n curr_t = bp[i][(prev_prev_t, prev_t)]\n y.append(curr_t)\n prev_t = prev_prev_t\n prev_prev_t = curr_t\n y = reversed(y)\n return y", "def tf_augmented_freq(self):\n ftd_max = self.TF_count.max(axis=1)\n augmented_freq = 0.5 + 0.5 * self.TF_count / ftd_max[:, None]\n return augmented_freq", "def get_entropy(self, examples_and_tags):\n tags = [tag for example, tag in examples_and_tags]\n if not tags:\n return 0\n tags_and_total_num = Counter()\n entropy = 0.0\n p_per_class = []\n for tag in tags:\n tags_and_total_num[tag] += 1\n for tag_total_num in tags_and_total_num:\n p_per_class.append(float(tags_and_total_num[tag_total_num]) / len(tags))\n for p in p_per_class:\n if p == 0:\n return 0\n entropy += -p * math.log(p, 2)\n return entropy", "def learn_params(tagged_sentences):\n global global_word_to_index\n num_of_sentences = len(tagged_sentences)\n all_possible_tags = []\n\n for sentence in tagged_sentences:\n prev_tag = START\n for word_tag in sentence:\n word, tag = word_tag\n allTagCounts[tag] += 1\n if perWordTagCounts.get(word) == None:\n perWordTagCounts[word] = Counter()\n if perWordTagCounts[word].get(tag) == None:\n perWordTagCounts[word][tag] = 0\n perWordTagCounts[word][tag] = perWordTagCounts.get((word), {}).get(tag, 0) + 1\n transitionCounts[(prev_tag, tag)] = transitionCounts.get((prev_tag, tag), 0) + 1\n emissionCounts[(tag, word)] = emissionCounts.get((tag, word), 0) + 1\n prev_tag = tag\n transitionCounts[(prev_tag, END)] = transitionCounts.get((prev_tag, END), 0) + 1\n # Calc A & B (Probabilities)\n total_number_of_tags = len(allTagCounts)\n for tag_t in [START] + list(allTagCounts.keys()):\n for tag_t1 in [END] + list(allTagCounts.keys()):\n A[(tag_t, tag_t1)] = transitionCounts.get((tag_t, tag_t1), 1) / (allTagCounts[tag_t] + total_number_of_tags)\n for word in perWordTagCounts.keys():\n for tag in allTagCounts.keys():\n B[(word, tag)] = perWordTagCounts[word].get(tag, 1) / (allTagCounts[tag] + total_number_of_tags)\n\n global_word_to_index = perWordTagCounts\n return [allTagCounts, perWordTagCounts, transitionCounts, emissionCounts, A, B]", "def compute_counts(training_data, order):\n # initialize variables\n num_of_tokens = 0\n count_tag_word = defaultdict(lambda: defaultdict(float))\n count_tag = defaultdict(float)\n count_tag12 = 
defaultdict(lambda: defaultdict(float))\n count_tag123 = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n end = len(training_data)\n\n # fill base cases and inductive cases based on order\n for i in range(end):\n pair = training_data[i]\n if pair:\n num_of_tokens += 1\n word2, tag2 = decompose_a_pair(training_data, i)\n count_tag_word[tag2][word2] += 1\n count_tag[tag2] += 1\n\n if i >= 1:\n word1, tag1 = decompose_a_pair(training_data, i - 1)\n count_tag12[tag1][tag2] += 1\n if order == 3 and i >= order - 1:\n word0, tag0 = decompose_a_pair(training_data, i - 2)\n count_tag123[tag0][tag1][tag2] += 1\n\n return num_of_tokens, count_tag_word, count_tag, count_tag12, count_tag123", "def baseline_tag_sentence(sentence, perWordTagCounts, allTagCounts):\n keys , values = zip(*allTagCounts.items())\n tup_sentence=[]\n for w in sentence:\n if not w in perWordTagCounts.keys():\n t = choices(keys,values)[0] \n else:\n t = max(perWordTagCounts[w].items() , key=operator.itemgetter(1))[0]\n \n tup_sentence.append((w,t))\n \n return tup_sentence", "def max_frequency(self):\n max = 0\n for term, frequency in self.vocabulary.items() :\n if frequency > max :\n max = frequency\n return max", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n predicted_orig_shape = predicted.shape\n outputVectors_orig_shape = outputVectors.shape\n\n # STEP 0: first let's make the notations consitent with the course and written assignments\n # let D=dimension of hidden layer |V|=number of tokens in outputvectors\n V_c = predicted.reshape(-1,1) # the input vector of predicted word --> D x 1\n U = outputVectors.reshape(-1, V_c.shape[0]) # ALL the output vectors --> |V| x D\n U_o = U[target, :] # the output vector of predicted word --> 1 x D\n #-----\n\n # STEP 1: since the softmax output value of all outputvectors is needed to compute all returned values\n # we compute it once and save its value to use it multiple times\n\n # Like in question 1, we remove the max_score before doing exp to avoid too large values and hence enhance numericall stability.\n # Again this is allowed because softmax is invariant to shift. softmax(x) = softmax(x + c)\n\n all_outputvectors_scores = U.dot(V_c) #--> |V| x 1\n all_outputvectors_softmax = softmax(all_outputvectors_scores.T).T\n del all_outputvectors_scores\n #-----\n\n # STEP 2: cost = - log (softmax(target))\n cost = -1. 
* np.log(all_outputvectors_softmax[target, :]) #--> 1 x 1 , scalar\n cost = np.asscalar(cost)\n #-----\n\n # STEP 3: gradPed = grad_Cost__wrt__V_c = -1 * U_o + sum_w( U_w * softmax(U_w) )\n gradPred = -1.*U_o + all_outputvectors_softmax.T.dot(U) #--> 1 x D\n gradPred = gradPred.reshape(predicted_orig_shape)\n #-----\n\n # STEP 4: grad : grad_Cost__wrt__all_outputvectors\n # for all output vectors (expect the target vector) the gradient is:\n grad = all_outputvectors_softmax.dot(V_c.T) #--> |V| x D : each row is the gradient wrt to an output vector\n\n #now we replace the row for the particular case of the targeted output\n grad[target, :] = (all_outputvectors_softmax[target, :] - 1.).dot(V_c.T)\n grad = grad.reshape(outputVectors_orig_shape)\n #-----\n\n assert predicted_orig_shape == gradPred.shape\n assert outputVectors_orig_shape == outputVectors.shape\n ### END YOUR CODE\n return cost, gradPred, grad", "def score(motifs):\n columns = [''.join(seq) for seq in zip(*motifs)]\n max_count = sum([max([c.count(nucleotide) for nucleotide in 'ACGT']) for c in columns])\n\n return len(motifs[0])*len(motifs) - max_count", "def reduceDimensionalityWithTF_IDF(cleansedDF, calculateMaxFeatures=True):\n print(\"Getting max unique body words count for maximum features...\")\n body_text_values = cleansedDF[\"body_text\"].values\n maxFeatures = 2 ** 13\n if calculateMaxFeatures:\n uniqueBodyWordsCount = pd.Series()\n uniqueBodyWordsCount[\"unique_body_words_count\"] = cleansedDF['body_text'].apply(lambda x: len(set(str(x).split())))\n maxFeatures = uniqueBodyWordsCount[\"unique_body_words_count\"].max()\n print(\"Reducing dimensionality using TF-IDF...\")\n vectorizer = TfidfVectorizer(max_features=maxFeatures)\n return vectorizer.fit_transform(body_text_values)", "def weakCount(dfZ, dfY, categories, reg=None):\n\n # Number of categories\n n_cat = len(categories)\n\n # These vectors are useful to convert binary vectors into integers.\n # To convert arbitrary binary vectors to a decimal\n p2 = np.array([2**n for n in reversed(range(n_cat))])\n # To convert single-1-vectors to position integers.\n ind = range(n_cat)\n\n # Convert weak label dataframe into matrix\n if type(dfZ) == pd.DataFrame:\n Z = dfZ[categories].values\n else:\n Z = dfZ\n\n # Initialize (and maybe regularize) the counting matrix\n if reg is None:\n S = csr_matrix((2**n_cat, n_cat))\n elif reg == 'Complete':\n S = csr_matrix(np.ones((2**n_cat, n_cat)))\n elif reg == 'Partial':\n S = csr_matrix((2**n_cat, n_cat))\n weak_list = list(set(Z.dot(p2))) # Flag vector of existing weak labels\n S[weak_list, :] = 1\n\n # Convert weak label dataframe into matrix\n if type(dfY) == pd.DataFrame:\n Y = dfY[categories].values\n else:\n Y = dfY\n\n # Start the weak label count\n for idx in dfY.index:\n\n # True label\n y = dfY.loc[idx].values\n c = y.dot(ind)\n\n # Weak label\n if idx in dfZ.index:\n z = dfZ.loc[idx, categories].values\n w = int(z.dot(p2))\n\n S[w, c] += 1\n\n return S", "def compute_choice_frequencies_to_model_output_frequencies(df):\n\n df_frequencies = compute_choice_frequencies(df)\n\n for index in [\"a\", \"b\", \"edu\", \"home\"]:\n if index not in df_frequencies.columns:\n df_frequencies[index] = 0\n\n df_frequencies.sort_index(axis=1, inplace=True)\n\n output_frequencies = {\"data\": np.array(fill_nan(df_frequencies))}\n\n return output_frequencies", "def pseudo_frequency(id2token, w, doc, model, alpha, sim_threshold):\n return sum([cossim(id2token[w], id2token[wd], alpha, model, sim_threshold) for wd in doc])", "def 
gain_calculate(merged_freq_dict): # {austen : [1232, 332], milton : [232, 622]}\n\tTOTAL = sum([i for a in merged_freq_dict.values() for i in a])\n\teach_small_big = [i for i in merged_freq_dict.values()];\n\tTOTAL_class = [sum(i) for i in each_small_big] \t\t#[982, 512, 1102(small+big in one class),...]\n\tTOTAL_entropy_in = [each/sum(TOTAL_class) for each in TOTAL_class]\n\tTOTAL_entropy = entropy(TOTAL_entropy_in)\n\tsmall_TOTAL \t = sum([ i[0] for i in each_small_big])/TOTAL\n\tbig_TOTAL \t\t = sum([ i[1] for i in each_small_big])/TOTAL\n\n\tclass_by_small, class_by_big = list(), list()\n\tfor c in merged_freq_dict:\n\t\tclass_by_small.append(merged_freq_dict[c][0])\n\t\tclass_by_big.append(merged_freq_dict[c][1])\n\t\n\tprob_class_by_small = [e/sum(class_by_small) for e in class_by_small]\n\tprob_class_by_big = [e/sum(class_by_big) for e in class_by_big]\n\n\tIG = TOTAL_entropy - (small_TOTAL)*entropy(prob_class_by_small) -(big_TOTAL)*entropy(prob_class_by_big)\n\t#print('head entropy is',entropy(total_small/total_big))\n\t#print('IG is',IG)\n\tif math.isnan(IG):\n\t\t#print('this is nan')\n\t\treturn(-5000) #jsut random minus value.\n\telse :\treturn(round(IG,5))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate softmax for tag combination DataFrame.
def compute_softmax(tag_combination: pd.DataFrame): top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy() top_n['count_softmax'] = TML.softmax(np.log(top_n['count'])) return top_n
[ "def get_tag_combination_softmax() -> list[pd.DataFrame]:\n def compute_softmax(tag_combination: pd.DataFrame):\n \"\"\" Calculate softmax for tag combination DataFrame.\"\"\"\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n\n\n return [compute_softmax(tag_combination) for tag_combination in get_challenge_tag_combination_count()]", "def softmax(x):\n x = x.T - np.max(x.T, axis=0)\n x = np.exp(x) / np.sum(np.exp(x),axis=0)\n\n return x.T", "def softmax(data):\n sum = 0.0\n for i in range(len(data)):\n sum += np.exp(data[i])\n for i in range(len(data)):\n data[i] = np.exp(data[i]) / sum", "def stable_softmax(x):\r\n z = x - np.max(x, axis=-1, keepdims=True)\r\n numerator = np.exp(z)\r\n denominator = np.sum(numerator, axis=-1, keepdims=True)\r\n softmax = numerator / denominator\r\n return softmax", "def softmax(a_arr):\n exp = np.exp(a_arr)\n return exp / np.sum(exp)", "def softmax(z):\n # TODO: Compute and return softmax(z)\n return np.exp(z) / np.sum(np.exp(z), axis=0)", "def softmax(z: np.ndarray) -> np.ndarray:\n return np.exp(z) / np.sum(np.exp(z), axis=0)", "def softmax_minus_max(x):\n\n exp_scores = np.exp(x - np.max(x, axis = 1, keepdims = True))\n probs = exp_scores/np.sum(exp_scores, axis = 1, keepdims = True)\n return probs", "def softmax(x):\n if len(x.shape) > 1:\n # Matrix\n # substracting max leaves function unchanged due to softmax's invariance to sums by a constant \n # keepdims= True, because broadcasting requires trailing shape entries to match\n x -= np.max(x, axis=1, keepdims=True)\n x = np.exp(x)\n sum_exp_xj = np.sum(x, axis=1, keepdims=True)\n x = np.divide(x, sum_exp_xj)\n else:\n # Vector\n x -= np.max(x)\n x = np.exp(x)\n sum_exp_xj = np.sum(x)\n x = np.divide(x, sum_exp_xj)\n return x", "def softmax(self, output):\n return np.exp(output) / np.sum(np.exp(output), axis=0)", "def compute_tag_feature() -> list[dict]:\n tag_comb_softmax: list[pd.DataFrame] = get_tag_combination_softmax()\n challenge_tag = DB.TopcoderMongo.run_challenge_aggregation([\n *DB.TopcoderMongo.scoped_challenge_with_text_query,\n {'$project': {'id': True, 'tags': True, '_id': False}},\n ])\n\n def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n \"\"\" Encode the tag list into one-hot list and sum of softmax.\n Short var name `tc` stands for `tag_combination`.\n \"\"\"\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct\n\n return [{**cha, **map_tag_lst_to_softmax(cha['tags'])} for cha in challenge_tag]", "def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': 
softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct", "def LogSoftmax(axis=-1):\n return Fn('LogSoftmax', lambda x: log_softmax(x, axis=axis))", "def __softmax(self, inputs: np.ndarray) -> np.ndarray:\n res = np.exp(inputs)\n sums = res.sum()\n return res / sums", "def softmax(x: np.array, beta=1.0):\n v = np.exp(beta*x)\n return v / np.sum(v)", "def softmax(self, w):\n e = np.exp(np.array(w))\n dist = e / np.sum(e)\n return dist", "def softmax(logits):\n # print(\"logit\", logits.shape)\n\n clas = np.exp(np.minimum(logits, 22.))\n clas = clas / np.maximum(np.sum(clas, axis=-1, keepdims=True), 1e-10)\n return clas", "def softmax(Z):\n Z_exp = np.exp(Z)\n return Z_exp/np.sum(Z_exp, axis=0)", "def stable_softmax(Z):\n\n shift_Z = Z - np.max(Z, axis=0)\n Z_exp = np.exp(shift_Z)\n return Z_exp / np.sum(Z_exp, axis=0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use the tag combination softmax table to calculate the softmax score of a challenge's tags, and encode the binary array.
def compute_tag_feature() -> list[dict]: tag_comb_softmax: list[pd.DataFrame] = get_tag_combination_softmax() challenge_tag = DB.TopcoderMongo.run_challenge_aggregation([ *DB.TopcoderMongo.scoped_challenge_with_text_query, {'$project': {'id': True, 'tags': True, '_id': False}}, ]) def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]: """ Encode the tag list into one-hot list and sum of softmax. Short var name `tc` stands for `tag_combination`. """ feature_dct = {} for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1): tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)] softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum() one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy() feature_dct.update({ f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score, f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array, }) return feature_dct return [{**cha, **map_tag_lst_to_softmax(cha['tags'])} for cha in challenge_tag]
[ "def get_tag_combination_softmax() -> list[pd.DataFrame]:\n def compute_softmax(tag_combination: pd.DataFrame):\n \"\"\" Calculate softmax for tag combination DataFrame.\"\"\"\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n\n\n return [compute_softmax(tag_combination) for tag_combination in get_challenge_tag_combination_count()]", "def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct", "def compute_softmax(tag_combination: pd.DataFrame):\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n", "def Hotencoding(word_tokens, sentences):\r\n\r\n # first we make arrays from the values\r\n values = array(word_tokens)\r\n label_encoder = LabelEncoder()\r\n\r\n # here we make labels for each word\r\n # from 0-402 labels in integers\r\n integer_encoded = label_encoder.fit_transform(values)\r\n\r\n # and here we make the binary encoding by using the one hot encoder which is function that has the fit transforms\r\n # with using the integers we had build from label encoder it turns each label to it's binary form for example\r\n # the word the label 4 will have a binary encode of [0,0,0,0,1,...,0]\r\n onehot_encoder = OneHotEncoder(sparse=False, dtype=int)\r\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n # and now we make a dictionary of the the encoded values with each of the words\r\n Dict_of_encoded_words = {}\r\n counter = 0\r\n for i in values:\r\n Dict_of_encoded_words[i] = list(onehot_encoded[counter])\r\n counter += 1\r\n\r\n # and now that we have the dict of words with there encoded values we\r\n # can now make a matrix of sentences and there binary codes\r\n\r\n Encoded_sentences = sentences.copy()\r\n\r\n for i in range(len(sentences)):\r\n for j in range(len(sentences[i])):\r\n Encoded_sentences[i][j] = Dict_of_encoded_words[sentences[i][j]]\r\n # after that we putted them in a list containg a matrix of sentences with their words being encoded\r\n # we output the results in a text file\r\n\r\n with open(args.txtfile.replace('txt','_1hot.txt'), 'w') as f:\r\n for item in Encoded_sentences:\r\n f.write(\"%s\\n\" % item)", "def sentiwordnet_encode(self):\n\n # we only consider the highest score and donesn't care about its pos or neg\n\n words_encode = np.zeros([len(self.words)], dtype = \"float32\")\n for index, word in enumerate(self.words):\n senti_score = list(swn.senti_synsets(word, 'a'))\n if senti_score:\n words_encode[index] = max(senti_score[0].pos_score(), senti_score[0].neg_score())\n \n return words_encode", "def encode_array(sequences):\n # Binarize the features to one-of-K encoding.\n # compute_alphabet returns {A,T,C,G}\n '''\n seq_lengths = Counter({1734: 1510, 1701: 1326, 1752: 247, 1744: 216, 1721: 176, 1777: 161,..})\n where e.g. 
[1734: 1510] is about particular viral sequence and represents \n [length of this sequence:no. of times this sequence occur]\n '''\n alphabet = compute_alphabet(sequences)\n seq_lengths = compute_seq_lengths(sequences) \n seq_array = seq2chararray(sequences)\n \n '''\n seq_array: \n [['G' 'G' 'A' ... '*' '*' '*']\n ['A' 'A' 'A' ... '*' '*' '*']\n ...\n ['A' 'A' 'A' ... '*' '*' '*']] \n \n alphabet (it's amino acid):\n {'A', 'G', 'C', 'N', 'T', 'S', 'W', 'K', 'Y', 'R', 'M'}\n \n list(alphabet) \n ['Y', 'N', 'A', 'K', 'T', 'G', 'C', 'R', 'M', 'S', 'W']\n\n seq_array.shape[0]=5591 (no. of different viral segments.)\n seq_array.shape[1]=1846 (length of viral seq)\n \n max(seq_lengths.keys()) * len(alphabet)) = viral sequence length * (total no. of different amino acid) --> for one-hot encoding. \n '''\n \n lb = LabelBinarizer()\n lb.fit(list(alphabet)) \n \n encoded_array = np.zeros(shape=(seq_array.shape[0],\n max(seq_lengths.keys()) * len(alphabet)))\n ''' \n I guess the following do one-hot enclding for all protein sequences. \n encoded_array[: --> means all rows\n i*len(alphabet):(i+1)*len(alphabet)]--> allocate the location to fit in encoded protein sequence\n the encode protein sequence is lb.transform(seq_array[:, i])\n seq_array[:, i] is each amino acid \n '''\n for i in range(seq_array.shape[1]):\n encoded_array[:, i*len(alphabet):(i+1)*len(alphabet)] = \\\n lb.transform(seq_array[:, i])\n\n return encoded_array", "def predict_viterbi(x, f_map, tags_s, word_t_map, lib_model):\n y = []\n v = [{(extract.START_SYMBOL, extract.START_SYMBOL): 0.0}]\n bp = []\n for ind, word in enumerate(x):\n # Check if word was seen in the corpus.\n if word not in word_t_map:\n is_rare = True\n available_tags = tags_s\n else:\n is_rare = False\n # Pruning of tags to lower amount of possible tags for this word.\n available_tags = word_t_map[word]\n\n max_score = {}\n max_tags = {}\n # Calculate for each word best scores/probabilities and best tags for each word.\n for pp_t, p_t in v[ind]:\n for curr_tag in available_tags:\n word_features = extract.generate_word_features(is_rare, p_t, pp_t, word, ind, x)\n features_vec = features_to_vec(word_features, f_map)\n scores = lib_model.predict(features_vec)\n score = np.amax(scores)\n if (p_t, curr_tag) not in max_score or score > max_score[(p_t, curr_tag)]:\n max_score[(p_t, curr_tag)] = score\n max_tags[(p_t, curr_tag)] = pp_t\n\n v.append(max_score)\n bp.append(max_tags)\n # Calculate last 2 best tags.\n max_score = float(\"-inf\")\n prev_last_tag, last_tag = None, None\n for prev_t, curr_t in v[len(x)]:\n score = v[len(x)][(prev_t, curr_t)]\n if score > max_score:\n max_score = score\n last_tag = curr_t\n prev_last_tag = prev_t\n\n y.append(last_tag)\n if len(x) > 1:\n y.append(prev_last_tag)\n\n prev_t = last_tag\n prev_prev_t = prev_last_tag\n # By backtracking extract all the path of best tags for each word starting by last 2 tags we calculated above.\n for i in range(len(v) - 2, 1, -1):\n curr_t = bp[i][(prev_prev_t, prev_t)]\n y.append(curr_t)\n prev_t = prev_prev_t\n prev_prev_t = curr_t\n y = reversed(y)\n return y", "def tags2onehot(tags, label_list):\n m = len(label_list)\n n = len(tags)\n y = np.zeros((n,m))\n \n for i in range(n):\n tags_i = tags[i].split()\n for tag in tags_i:\n index = label_list.index(tag)\n y[i, index] = 1\n return y", "def multi_hot_encoding(self, mask: np.ndarray) -> np.ndarray:\n encoded_mask = np.zeros((len(self.class_indexes), *mask.shape))\n for i, label in enumerate(self.class_indexes):\n encoded_mask[i,:,:,:] = 
np.where(mask == label, 1, 0)\n return encoded_mask", "def one_hot_encoding(class_list, num_classes):\r\n\r\n # Returns true for the class index, false otherwise\r\n booleans = (np.arange(num_classes) == class_list[:, None])\r\n\r\n # Converts all false entries to 0, and all true entries to 1\r\n encoded = booleans.astype(float)\r\n return encoded", "def __softmax(self, inputs: np.ndarray) -> np.ndarray:\n res = np.exp(inputs)\n sums = res.sum()\n return res / sums", "def one_hot_encoder(y):\n\n letter = np.zeros((10, 1))\n letter[int(y)] = 1\n return letter", "def __tag_vec(self, tag, class_size):\n one_hot = np.zeros(class_size, dtype=np.int32)\n tid = self.config.embvec.get_tid(tag)\n one_hot[tid] = 1\n return one_hot", "def __get_encoded(self, input_x):\r\n data = input_x\r\n for idx in numpy.arange(self.hidden_count):\r\n self.Result[idx] = self.activation(T.dot(data, self.W[idx]) + self.b[idx])\r\n data = self.Result[idx]\r\n self.Result[self.hidden_count] = T.tanh(T.dot(data, self.W[self.hidden_count]) + self.b[self.hidden_count])*float(0.5)\r\n return self.Result[self.hidden_count]", "def one_hot_encode(board):\n\n flat = (board.reshape(SIZE ** 2)).tolist()\n\n X = []\n for i in np.arange(1,17): \n encoding = np.zeros(SIZE ** 2)\n encoding[flat.index(i)] = 1\n\n X.append(encoding)\n\n X = (np.asarray(X).reshape(SIZE ** 4))\n\n # Potentially append Manhattan distance. \n # np.append(X, manhattan(board))\n\n return X", "def hmm_viterbi(self):\n char_list = list(TRAIN_LETTERS) # Converting tag_set to a list to have indexes to refer\n rows = len(char_list)\n cols = len(self.test_letters)\n vit_matrix = [[None] * cols for i in range(rows)]\n\n # Storing a tuple in each cell (index of the previous cell, probability of the current cell)\n for col_index in range(len(self.test_letters)):\n curr_emission_probs = self.get_emission_probs(col_index)\n\n for row_index, curr_char in enumerate(char_list):\n # Computing the probabilities for the first column\n if col_index == 0:\n init_prob = self.init_prob[curr_char] if curr_char in self.init_prob else max_val\n vit_matrix[row_index][col_index] = (-1, curr_emission_probs[curr_char] + init_prob)\n # Computing the probabilities of the other columns\n else:\n best_prob_tuple = (-1, 200000000.0)\n for prev_row_index, prev_char in enumerate(char_list):\n prev_prob = vit_matrix[prev_row_index][col_index - 1][1]\n curr_prob = prev_prob + self.trans_prob[prev_char][curr_char] + curr_emission_probs[curr_char]\n if curr_prob < best_prob_tuple[1]:\n best_prob_tuple = (prev_row_index, curr_prob)\n vit_matrix[row_index][col_index] = (best_prob_tuple[0], best_prob_tuple[1])\n\n # Backtracking to fetch the best path\n # Finding the cell with the max probability from the last column\n (max_index, max_prob) = (-1, max_val)\n for row in range(rows):\n curr_prob = vit_matrix[row][cols - 1][1]\n if curr_prob < max_prob:\n (max_index, max_prob) = (row, curr_prob)\n\n output_list = list() # List to store the output tags\n # Adding the best path to output list\n for col in range(cols - 1, 0, -1):\n output_list.insert(0, char_list[max_index])\n max_index = vit_matrix[max_index][col][0]\n output_list.insert(0, char_list[max_index])\n print 'HMM MAP:', ''.join(output_list)", "def one_hot_encode(digit):\n y = np.array([0] * 10)\n y[digit] = 1\n return y", "def one_hot_encode_y(y):\r\n y = y-1\r\n one_hot_y = np.zeros((m,10))\r\n one_hot_y[np.arange(y.size), y] = 1\r\n return one_hot_y", "def encode_binary(self,var=[]):\n from sklearn.preprocessing import LabelEncoder\n 
binary=self.find_binary()\n if len(var)!=0:\n for a in var:\n if len(self.data[a].value_counts())==2:\n self.data[a]=LabelEncoder().fit_transform(self.data[a])\n self.data[a]=self.data[a].astype('category')\n print(\"Binary encoding is done for: \" + str(var)+ \".\")\n else:\n raise ValueError(\"Input variable is not binary, please check it again!\")\n \n elif len(binary)!=0:\n for col in binary:\n self.data[col] = LabelEncoder().fit_transform(self.data[col])\n self.data[col]=self.data[col].astype('category')\n print(\"Binary encoding is done for: \" + str(var)+ \".\")\n else:\n print(\"No variable is binary.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode the tag list into a one-hot list and the sum of softmax. Short var name `tc` stands for `tag_combination`.
def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]: feature_dct = {} for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1): tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)] softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum() one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy() feature_dct.update({ f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score, f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array, }) return feature_dct
[ "def tags2onehot(tags, label_list):\n m = len(label_list)\n n = len(tags)\n y = np.zeros((n,m))\n \n for i in range(n):\n tags_i = tags[i].split()\n for tag in tags_i:\n index = label_list.index(tag)\n y[i, index] = 1\n return y", "def onehot_encode(seq):\n\n seq_list = list(seq)\n label_encoder = LabelEncoder()\n label_encoder.fit(np.array(['A', 'C', 'G', 'T', 'N']))\n integer_encoded = label_encoder.transform(seq_list)\n onehot_encoder = OneHotEncoder(sparse=False, dtype=int, categories=[range(5)])\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n return np.delete(onehot_encoded, -2, 1)", "def compute_tag_feature() -> list[dict]:\n tag_comb_softmax: list[pd.DataFrame] = get_tag_combination_softmax()\n challenge_tag = DB.TopcoderMongo.run_challenge_aggregation([\n *DB.TopcoderMongo.scoped_challenge_with_text_query,\n {'$project': {'id': True, 'tags': True, '_id': False}},\n ])\n\n def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n \"\"\" Encode the tag list into one-hot list and sum of softmax.\n Short var name `tc` stands for `tag_combination`.\n \"\"\"\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct\n\n return [{**cha, **map_tag_lst_to_softmax(cha['tags'])} for cha in challenge_tag]", "def one_hot_encode(board):\n\n flat = (board.reshape(SIZE ** 2)).tolist()\n\n X = []\n for i in np.arange(1,17): \n encoding = np.zeros(SIZE ** 2)\n encoding[flat.index(i)] = 1\n\n X.append(encoding)\n\n X = (np.asarray(X).reshape(SIZE ** 4))\n\n # Potentially append Manhattan distance. 
\n # np.append(X, manhattan(board))\n\n return X", "def __tag_vec(self, tag, class_size):\n one_hot = np.zeros(class_size, dtype=np.int32)\n tid = self.config.embvec.get_tid(tag)\n one_hot[tid] = 1\n return one_hot", "def Hotencoding(word_tokens, sentences):\r\n\r\n # first we make arrays from the values\r\n values = array(word_tokens)\r\n label_encoder = LabelEncoder()\r\n\r\n # here we make labels for each word\r\n # from 0-402 labels in integers\r\n integer_encoded = label_encoder.fit_transform(values)\r\n\r\n # and here we make the binary encoding by using the one hot encoder which is function that has the fit transforms\r\n # with using the integers we had build from label encoder it turns each label to it's binary form for example\r\n # the word the label 4 will have a binary encode of [0,0,0,0,1,...,0]\r\n onehot_encoder = OneHotEncoder(sparse=False, dtype=int)\r\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n # and now we make a dictionary of the the encoded values with each of the words\r\n Dict_of_encoded_words = {}\r\n counter = 0\r\n for i in values:\r\n Dict_of_encoded_words[i] = list(onehot_encoded[counter])\r\n counter += 1\r\n\r\n # and now that we have the dict of words with there encoded values we\r\n # can now make a matrix of sentences and there binary codes\r\n\r\n Encoded_sentences = sentences.copy()\r\n\r\n for i in range(len(sentences)):\r\n for j in range(len(sentences[i])):\r\n Encoded_sentences[i][j] = Dict_of_encoded_words[sentences[i][j]]\r\n # after that we putted them in a list containg a matrix of sentences with their words being encoded\r\n # we output the results in a text file\r\n\r\n with open(args.txtfile.replace('txt','_1hot.txt'), 'w') as f:\r\n for item in Encoded_sentences:\r\n f.write(\"%s\\n\" % item)", "def one_hot_encoding(X):\n X_cat = pd.get_dummies(X.select_dtypes(include=['object']))\n X_num = X.select_dtypes(exclude=['object'])\n res = pd.concat([X_num, X_cat], axis=1, sort=False)\n \n return res", "def one_hot_encoding(class_list, num_classes):\r\n\r\n # Returns true for the class index, false otherwise\r\n booleans = (np.arange(num_classes) == class_list[:, None])\r\n\r\n # Converts all false entries to 0, and all true entries to 1\r\n encoded = booleans.astype(float)\r\n return encoded", "def compute_softmax(tag_combination: pd.DataFrame):\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n", "def get_tag_combination_softmax() -> list[pd.DataFrame]:\n def compute_softmax(tag_combination: pd.DataFrame):\n \"\"\" Calculate softmax for tag combination DataFrame.\"\"\"\n top_n = tag_combination.head(S.CHALLENGE_TAG_COMB_TOP).copy()\n top_n['count_softmax'] = TML.softmax(np.log(top_n['count']))\n return top_n\n\n return [compute_softmax(tag_combination) for tag_combination in get_challenge_tag_combination_count()]", "def one_hot_encoding(labels):\n\tencoded_labels = [0]*NUM_CLASSES\n\tfor label in labels:\n\t\tencoded_labels[label] = 1\n\treturn encoded_labels", "def one_hot_encoding(labels, num_classes=10):\n one_hot_labels = []\n for label in labels:\n ohe = [0] * num_classes\n ohe[int(label)] = 1\n one_hot_labels.append(ohe)\n return np.array(one_hot_labels)", "def test_onehot_encoding():\n wordmap = {'C':0, '#':1, '!':2, 'E':3}\n x = np.array(['#C!'])\n result = DataUtils.onehot_encoding(x,5,wordmap)\n assert result.shape == (1,5,4)\n\n return", "def 
one_hot_encoder(y):\n\n letter = np.zeros((10, 1))\n letter[int(y)] = 1\n return letter", "def one_hot_encoder(data, keymap=None):\n\n if keymap is None:\n keymap = []\n for col in data.T:\n uniques = set(list(col))\n keymap.append(dict((key, i) for i, key in enumerate(uniques)))\n total_pts = data.shape[0]\n outdat = []\n for i, col in enumerate(data.T):\n km = keymap[i]\n num_labels = len(km)\n spmat = sparse.lil_matrix((total_pts, num_labels))\n for j, val in enumerate(col):\n if val in km:\n spmat[j, km[val]] = 1\n outdat.append(spmat)\n outdat = sparse.hstack(outdat).tocsr()\n return outdat, keymap", "def one_hot_encode(digit):\n y = np.array([0] * 10)\n y[digit] = 1\n return y", "def one_hot_encode(idx, vocab_size):\n # Initialize the encoded array\n one_hot = np.zeros(vocab_size)\n\n # Set the appropriate element to one\n one_hot[idx] = 1.0\n\n return one_hot", "def to_one_hot(category_id, num_labels=80):\n index = coco_categories.index(category_id)\n return [0 if i != index else 1 for i in range(num_labels)]", "def multi_hot_encoding(self, mask: np.ndarray) -> np.ndarray:\n encoded_mask = np.zeros((len(self.class_indexes), *mask.shape))\n for i, label in enumerate(self.class_indexes):\n encoded_mask[i,:,:,:] = np.where(mask == label, 1, 0)\n return encoded_mask" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the document vector representation of a challenge description with given similarity, frequency and token length threshold.
def compute_challenge_desc_docvec() -> dict[str, list]: model, corpus = train_challenge_desc_doc2vec() return {doc.tags[0]: model.docvecs[doc.tags[0]].tolist() for doc in corpus}
[ "def calculate_similarity(source_doc,\n target_doc,\n embedding=\"Glove\",\n threshold=0):\n def w2v_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = [i.lower().split() for i in doc]\n word_list = []\n for w in doc:\n w = [word for word in w if word not in stopwords.words('english')]\n word_list.append(w)\n vec_list = []\n for words in word_list:\n word_vecs = []\n for word in words:\n try:\n vec = w2v_model[word]\n word_vecs.append(vec)\n except KeyError:\n pass\n vector = np.mean(word_vecs, axis=0)\n vec_list.append(vector)\n vectors = np.mean(vec_list, axis=0)\n return vectors\n\n def glove_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = [i.lower().split() for i in doc]\n word_list = []\n for w in doc:\n w = [word for word in w if word not in stopwords.words('english')]\n word_list.append(w)\n vec_list = []\n for words in word_list:\n word_vecs = []\n for word in words:\n try:\n vec = glove_model[word]\n word_vecs.append(vec)\n except KeyError:\n pass\n vector = np.mean(word_vecs, axis=0)\n vec_list.append(vector)\n vectors = np.mean(vec_list, axis=0)\n return vectors\n\n def fasttext_vectorize(doc):\n \"\"\"Identify the vector values for each word in the given document\"\"\"\n doc = \" \".join(doc)\n doc = doc.lower()\n words = [w for w in doc.split(\" \")]\n word_vecs = []\n for word in words:\n try:\n vec = fasttext_model[word]\n word_vecs.append(vec)\n except KeyError:\n # Ignore, if the word doesn't exist in the vocabulary\n pass\n vector = np.mean(word_vecs, axis=0)\n return vector\n\n def cosine_sim(vecA, vecB):\n \"\"\"Find the cosine similarity distance between two vectors.\"\"\"\n csim = np.dot(vecA,\n vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))\n if np.isnan(np.sum(csim)):\n return 0\n return csim\n\n if embedding == \"Word2Vec\":\n w2v_model = KeyedVectors.load_word2vec_format(\n 'Semantic_Similarity/Word_Embedding/data/GoogleNews-vectors-negative300.bin',\n binary=True,\n )\n source_vec = w2v_vectorize(source_doc)\n target_vec = w2v_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score\n\n elif embedding == \"Glove\":\n source_vec = glove_vectorize(source_doc)\n\n target_vec = glove_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score\n\n elif embedding == \"FastText\":\n fasttext_model = FastText.load_fasttext_format(\n 'Semantic_Similarity/Word_Embedding/data/cc.en.300.bin')\n source_vec = fasttext_vectorize(source_doc)\n target_vec = fasttext_vectorize(target_doc)\n sim_score = cosine_sim(source_vec, target_vec)\n\n if sim_score > threshold:\n return sim_score", "def compute_doc_vector(tokens):\n length = 0\n\n doc_vec = []\n for (term, _, freq, _) in tokens:\n weighted_tf = calculate_weighted_tf(freq)\n length += weighted_tf ** 2\n doc_vec.append((term, weighted_tf))\n\n # Sort by descending weighted tf\n doc_vec = sorted(doc_vec, key=lambda term_tf : term_tf[1], reverse=True)\n\n return (sqrt(length), doc_vec)", "def sense2vec(phrase, threshold):\n res = requests.post(API_URL, {\n \"sense\": \"auto\",\n \"word\": phrase\n })\n results = res.json()[\"results\"]\n output = []\n for r in results:\n if r[\"score\"] > threshold or len(output) <= 10:\n output.append((r[\"score\"], r[\"text\"]))\n\n return output", "def _transform(self, raw_document, method):\n\t\t# TODO: Implement this method\n\t\tsize = 
len(self.vocabulary)\t\t\n\n\t\twords = raw_document.split() # get each word in the document as a list\n\t\tcounter = Counter(words)\t\t\t# store how many times each word appear in the document\n\n\t\tif method == 'count':\n\t\t\tfeature_vector = np.zeros((size,), int)\n\t\t\tfor i in range(size):\n\t\t\t\tfeature_vector[i] = counter[self.vocabulary[i]]\n\n\t\t\treturn feature_vector\n\n\t\telif method == 'existance':\n\t\t\tfeature_vector = np.zeros((size,), int)\n\t\t\tfor i in range(size):\n\t\t\t\tif counter[self.vocabulary[i]] > 0:\n\t\t\t\t\tfeature_vector[i] = 1\n\n\t\t\treturn feature_vector\n\n\t\telif method == 'tf-idf':\n\t\t\tfeature_vector = np.zeros((size,), float)\n\t\t\ti = 0\n\t\t\tfor word in self.vocabulary: \t\t\t\t# for each word in a document\n\t\t\t\ttf = counter[word] \t\t\t\t\t\t\t# find tf\n\n\t\t\t\ttf_idf = tf * self.term_idf_dict[word] \t# find tf-idf\n\t\t\t\t\n\t\t\t\tfeature_vector[i] = tf_idf\n\n\t\t\t\ti += 1\n\n\t\t\tnorm = np.linalg.norm(feature_vector)\n\t\t\tif norm > 0.0:\n\t\t\t\tnormalized = feature_vector / norm\n\t\t\t\treturn normalized\n\t\t\telse:\n\t\t\t\treturn feature_vector", "def word_movers(doc1, doc2, metric='cosine'):\n stringstore = StringStore()\n\n n = 0\n word_vecs = []\n for word in itertoolz.concatv(extract.words(doc1), extract.words(doc2)):\n if word.has_vector:\n if stringstore[word.text] - 1 == n: # stringstore[0] always empty space\n word_vecs.append(word.vector)\n n += 1\n distance_mat = pairwise_distances(np.array(word_vecs), metric=metric).astype(np.double)\n distance_mat /= distance_mat.max()\n\n vec1 = collections.Counter(\n stringstore[word.text] - 1\n for word in extract.words(doc1)\n if word.has_vector)\n vec1 = np.array([vec1[word_idx] for word_idx in range(len(stringstore))]).astype(np.double)\n vec1 /= vec1.sum() # normalize word counts\n\n vec2 = collections.Counter(\n stringstore[word.text] - 1\n for word in extract.words(doc2)\n if word.has_vector)\n vec2 = np.array([vec2[word_idx] for word_idx in range(len(stringstore))]).astype(np.double)\n vec2 /= vec2.sum() # normalize word counts\n\n return 1.0 - emd(vec1, vec2, distance_mat)", "def dictionary(raw_captions,threshold):\n caps = []\n for im in raw_captions:\n for s in raw_captions[im]:\n caps.append(s.split())\n\n word_freq = nltk.FreqDist(itertools.chain(*caps))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def create_tfidf_vectors():\n vector_list = {}\n vector_magnitude = {}\n for file,tokens in tf.items():\n \n \"\"\"calculates raw tf-idf\n For a given dict of tokens we extract keys using tokens.keys()\n Using Lambda we calculate tf-idf for each token in the tokens dict\n and then return a key:value pair dict\n where key -> token name , value -> un normalized tf-idf and store in vector_list\"\"\"\n vector_list[file] = dict(map(lambda token : (token,(1+log10(tokens[token]))*getidf(token)) ,tokens.keys()))\n \n \"\"\"calculates file magnitude\n Form the calculated vector_list using vector_list[file].values() \n Using Lambda we calculate magnitude of the each document\n and then return a key:value pair dict\n where key -> file name , value -> magnitude of the file\"\"\"\n vector_magnitude[file] = (sqrt(sum(map(lambda value : value * value ,vector_list[file].values()))))\n \n tfidf_vectors[file] = Counter()\n \n #normalization of each token with respect document in which they are present\n for token in 
vector_list[file]:\n tfidf_vectors[file][token] = vector_list[file][token] / vector_magnitude[file]", "def document_term_matrix(vectorizer, excerpt):\n return vectorizer.transform([preprocess(excerpt)]).toarray()", "def word2vec(self, query1, query2):\n s1 = jieba.lcut(query1)\n s2 = jieba.lcut(query2)\n msg1 = np.zeros((self.WORD2VEC_LENGTH), dtype=np.float32)\n msg2 = np.zeros((self.WORD2VEC_LENGTH), dtype=np.float32)\n for i in s1:\n try:\n msg1 += np.array(self.wv[i])\n except:\n pass\n for i in s2:\n # msg2 += self.wv[i]\n try:\n # print i, self.wv[i], len(self.wv[i])\n msg2 += self.wv[i]\n except:\n # print i\n pass\n msg1_ave = msg1 / len(s1)\n msg2_ave = msg2 / len(s2)\n msg = np.concatenate((msg1_ave, msg2_ave))\n return msg", "def document_vector(doc):\n doc = [word for word in doc if word in w2v.wv.vocab]\n #print(doc)\n x= np.mean(w2v[doc], axis=0)\n #print('-----------------------------------------------------------------------------------------')\n #print(x)\n #print('-----------------------------------------------------------------------------------------')\n return x", "def vector_space(self, query_term, doc):\n # Frequency of term in the document (document term frequency - dtf)\n fik = doc.get_dtf()\n # Number of documents in the collection\n N = self.inverted_index.get_total_docs()\n # Number of documents containing the term (document frequency - df)\n nk = self.inverted_index.get_df(query_term)\n \n score = 0\n if fik:\n score = (math.log(fik) + 1) * math.log(N / nk)\n return score", "def pseudo_frequency(id2token, w, doc, model, alpha, sim_threshold):\n return sum([cossim(id2token[w], id2token[wd], alpha, model, sim_threshold) for wd in doc])", "def idf_matrix(documents: dict) -> dict:\n # init dictionary\n print('begin creating idf_matrix')\n d = {\n 'doc_count': len(documents.keys()),\n 'doc_vector_lengths': {},\n 'words': {},\n 'docs': {}\n }\n\n # make an entry for each word \n # noting in which document it appeared and how many times\n # additionally make a list of unique words per each document\n for idx, doc in enumerate(documents, 1):\n if idx % 1000 == 0: print('processing document no:', idx)\n t = tokenize(documents[doc])\n d['docs'][doc] = set(t)\n for element in t:\n if d['words'].get(element) is None:\n d['words'][element] = {'occurences': {doc: 1}, 'total': 1}\n elif d['words'].get(element).get('occurences').get(doc) is None:\n d['words'][element]['occurences'][doc] = 1\n d['words'][element]['total'] += 1\n else:\n d['words'][element]['occurences'][doc] += 1\n d['words'][element]['total'] += 1\n\n # temp variable\n dc = d['doc_count']\n\n # calculate logarithm of inverse document frequency per word\n print('calculating logarithms')\n for word in d['words']:\n idf = dc/d['words'][word]['total']\n d['words'][word]['idf_logarithm'] = log2(idf)\n\n # calculate vector length for each document\n print('calculating vector length')\n for idx, doc in enumerate(d['docs'], 1):\n if idx % 1000 == 0: print('processing document no', idx)\n d['doc_vector_lengths'][doc] = sqrt(sum(\n [\n (d['words'][x]['idf_logarithm']*d['words'][x]['occurences'][doc])**2 \n for x in d['docs'][doc]\n if d['words'][x]['occurences'].get(doc) is not None\n ]\n ))\n print('finished preparing the dataset')\n\n return d", "def get_doc_word_vectorizer(vectorizer,ngram_range,list_docs):\n \n if vectorizer == 'cv':\n vec = CountVectorizer(ngram_range=ngram_range,stop_words='english')\n elif vectorizer == 'tfidf':\n vec = TfidfVectorizer(ngram_range=ngram_range,stop_words='english')\n doc_word = 
vec.fit_transform(list_docs)\n \n return vec, doc_word", "def getSimiliarWords(self, term, threshold=0.75, language=DEFAULT_LANGUAGE, common_length=-1):\n if not have_lv:\n raise LexiconError('Method not allowed. Please install the Levenshtein extension properly')\n tree = self._getTree(language)\n if common_length > -1:\n prefix = term[:common_length]\n words = tree.keys(prefix, prefix + u'\\uffff')\n else:\n words = tree.keys() \n return [(w, ratio(w,term)) for w in words if ratio(w, term) > threshold]", "def w2v_vectorize(doc):\n doc = [i.lower().split() for i in doc]\n word_list = []\n for w in doc:\n w = [word for word in w if word not in stopwords.words('english')]\n word_list.append(w)\n vec_list = []\n for words in word_list:\n word_vecs = []\n for word in words:\n try:\n vec = w2v_model[word]\n word_vecs.append(vec)\n except KeyError:\n pass\n vector = np.mean(word_vecs, axis=0)\n vec_list.append(vector)\n vectors = np.mean(vec_list, axis=0)\n return vectors", "def query_inverted_index(\n query: str, k: int, shelve_index: shelve.Shelf, doc_length_shelve: shelve.Shelf\n) -> Tuple[List[Tuple[float, int]], List[str], List[str]]:\n #process query\n processed_query = parse_query(query, shelve_index)\n terms = processed_query[0]\n #create the vector for the query... starts with just tf, gets updated with idf later\n if len(terms)==len(set(terms)): #most common... each word in query is used once\n w_tq = [text_processor.tf(1)]*len(terms) #vector of 1s\n else: #if someone did a query with repeated words, such as best car foreign car\n w_tq = []\n new_terms = list(set(terms))\n for term in new_terms:\n w_tq.append(text_processor.tf(terms.count(term)))\n terms = new_terms\n #initialize doc_scores\n doc_scores = {}\n\n #open shelf\n db = shelve.open(shelve_index)\n N = len(db)\n #go through term list and add doc lists to doc_scores\n for i in range(len(terms)):\n doc_list = db[terms[i]]\n idf = text_processor.idf(N, len(doc_list)) #updates w_tq of query to be tf-idf\n w_tq[i] *= idf\n for tup in doc_list: #if doc in dict already, add q x d for term, else add to dict\n if tup[0] in doc_scores:\n doc_scores[tup[0]] += tup[1]*idf*w_tq[i]\n else:\n doc_scores[tup[0]] = tup[1]*idf*w_tq[i]\n\n #normalize length by using doc_vec shelf\n doc_vec_shelf = shelve.open(doc_length_shelve)\n for key,val in doc_scores.items():\n doc_scores[key] /= doc_vec_shelf[str(key)]\n doc_vec_shelf.close()\n #get top k from doc score dict\n top_k = top_k_docs(doc_scores, k)\n top_k.sort(reverse=True)\n #close shelf\n db.close()\n return (top_k, processed_query[1], processed_query[2])", "def automated_readability_index(word_length, sentence_length):\n\n # compute formula\n return 4.71 * word_length + 0.5 * sentence_length - 21.43", "def document_distance(file1: str, file2: str):\n file1_text = process_file(file1)\n file2_text = process_file(file2)\n file1_words = get_words_from_text_list(file1_text)\n file2_words = get_words_from_text_list(file2_text)\n file1_word_freq = get_freq_count_from_words_dict(file1_words)\n file2_word_freq = get_freq_count_from_words_dict(file2_words)\n distance = vector_angle(file1_word_freq, file2_word_freq)\n print(distance)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that the certificate is currently within its validity period
def validate_certificate(self, certificate): dates = (certificate.not_valid_before.timestamp(),certificate.not_valid_after.timestamp()) date_now=datetime.now().timestamp() return dates[0]< date_now < dates[1]
[ "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def test_check_cert(certfile):\n cert = load_cert(certfile)\n\n now = datetime.datetime.utcnow()\n if now > cert.not_valid_after:\n raise Exception(\"Certificate has expired!\")\n\n elif now + datetime.timedelta(hours=20) > cert.not_valid_after:\n print('> Certificate expiring soon: %s' % cert.not_valid_after)\n\n elif now < cert.not_valid_before:\n raise Exception('Certificate is not yet valid!')", "def test_update_cloud_certificate(self):\n pass", "def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):\n # some parameters are unused, but this is the API\n # pylint: disable=W0613\n assert self._ssl_params, \"SSL not initialized\"\n\n return (self._ssl_cert.digest(\"sha1\") == cert.digest(\"sha1\") and\n self._ssl_cert.digest(\"md5\") == cert.digest(\"md5\"))", "def test_patch_certificate_signing_request_status(self):\n pass", "def verifyCrt():\n logging.debugv(\"functions/linux.py->verifyCrt()\", [])\n\n if os.access(locations.CRT, os.R_OK):\n cmd = locations.OPENSSL + ' verify -CAfile ' + locations.CA + ' ' + locations.CRT + ' 2>&1 | grep \": OK$\" 1>/dev/null 2>/dev/null'\n status = os.system(cmd)\n logging.debug(\"Sensor certificate verification status: %s\" % str(status))\n if status == 0:\n return True\n else:\n logging.error(\"Sensor certificate verification failed\")\n return False\n else:\n return False", "def test_replace_certificate_signing_request_status(self):\n pass", "def test_read_certificate_signing_request_status(self):\n pass", "def check_certif_version(self, cert, index):\n if cert.get_version() != crypto.x509.Version.v3.value:\n self.error(\"Invalid certificate version\")", "def test_validate_host_cert(self):\n cn = 'test.valid-cert.com'\n cert = SpokeHostCert(cn, self.ca_name)\n cert.create()\n self.assertTrue(cert._verify())\n cert.delete()", "def IsValidCert(fname: str) -> bool:\n try:\n with open(fname, encoding=\"ascii\", errors=\"replace\") as f:\n cert_bytes = f.read()\n except Exception:\n logging.error('IsValidCert:: Unable to open certificate file %s', fname)\n return False\n\n try:\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes) # type: ignore[attr-defined]\n except Exception:\n logging.error('IsValidCert:: Unable to load certificate %s', fname)\n return False\n\n x509_notafter = x509.get_notAfter()\n utc_time = datetime.datetime.strptime(x509_notafter.decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n time_notafter = int((utc_time - datetime.datetime(1970, 1, 1)).total_seconds())\n time_current = int(datetime.datetime.now().timestamp())\n time_remaining = time_notafter - time_current\n if time_remaining < 1:\n logging.error('IsValidCert:: Expired certificate %s', fname)\n return time_remaining > 300", "def _verify_ssl(self):\n ca_cert = self.api_ca_cert\n\n if ca_cert is None or 
ca_cert == 'changeme' or ca_cert == '':\n return False\n if not os.path.exists(ca_cert):\n LOG.error(\"Could not find %s CA certificate.\"\n \"No such file or directory\",\n ca_cert)\n return False\n return ca_cert", "def test_cert_verification(self, session):\n adapter = DummyAdapter()\n session.mount(\"https://\", adapter)\n client = corbeau.Client(self.dsn)\n client.captureMessage(\"oh noes!\")\n request = adapter.request\n kwargs = adapter.kwargs\n self.assertTrue(kwargs[\"verify\"])\n self.assertEqual(kwargs[\"timeout\"], 1)\n self.assertTrue(\"X-Sentry-Auth\" in request.headers)\n self.assertTrue(request.body)", "def test_https_expired(self):\n domain = inspect(\"expired.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_expired_cert)", "def refresh_cert_and_key(self):\n d = None\n\n if \"post_body\" in self.config[\"cfssl\"]:\n d = self.config[\"cfssl\"][\"post_body\"]\n else:\n d = {\n \"request\": self.config[\"cfssl\"][\"request\"]\n }\n\n url = \"{}/api/v1/cfssl/newcert\".format(self.config[\"cfssl\"][\"url\"])\n\n kwargs = {}\n\n if \"auth\" in self.config[\"cfssl\"]:\n kwargs[\"auth\"] = (self.config[\"cfssl\"][\"auth\"][\"user\"],\n self.config[\"cfssl\"][\"auth\"][\"password\"])\n\n if \"ca_bundle\" in self.config[\"cfssl\"]:\n kwargs[\"verify\"] = self.config[\"cfssl\"][\"ca_bundle\"]\n\n try:\n resp = requests.post(url, json=d, **kwargs)\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(\"cfssl refresh failed! {}\".format(e))\n\n if \"onfailure\" in self.config:\n if \"post_to_slack\" in self.config[\"onfailure\"]:\n\n msg_lines = [\n \"exception: `{}`\".format(e),\n \"request:\",\n \"```\",\n \"{}\".format(\n json.dumps(self.config[\"cfssl\"][\"request\"],\n indent=2)),\n \"```\"\n ]\n\n self._post_to_slack(\"cfssl refresh failed!\", msg_lines)\n\n return False\n\n r = resp.json()\n\n self._write_out_cert_files(r[\"result\"])\n\n if \"onsuccess\" in self.config:\n if \"execute_command\" in self.config[\"onsuccess\"]:\n args = shlex.split(\n self.config[\"onsuccess\"][\"execute_command\"]\n )\n\n child = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = child.communicate()\n\n if child.returncode != 0:\n if \"onfailure\" in self.config:\n if \"post_to_slack\" in self.config[\"onfailure\"]:\n msg_lines = [\n \"args: `{}`\".format(args),\n \"rc: {}\".format(child.returncode),\n \"stdout: `{}`\".format(stdout.strip()),\n \"stderr: `{}`\".format(stderr.strip()),\n ]\n\n self._post_to_slack(\n \"post cfssl refresh execute command failed!\",\n msg_lines)\n\n return False\n\n return True", "def _sanityCheckForSSL(self):\n if not self.requiresSsl():\n return 0\n\n if not self.sslCertPath:\n log.error(\"sslCertPath to be set - cannot start server\")\n return 1\n try:\n util.mkdirChain(os.path.dirname(self.sslCertPath))\n except OSError, err:\n log.error(\"Could not access sslCert dir %s: %s\" % os.path.dirname(self.sslCertPath), err)\n\n if self.caCertPath:\n log.warning(\"The caCertPath option is deprecated\")\n return self.makeCertificate()", "def _precheck_save_kubernetes_rootca_cert(self, update, temp_pem_contents):\n\n if update.state != kubernetes.KUBE_ROOTCA_UPDATE_STARTED:\n msg = \"A new root CA certificate already exists\"\n return dict(success=\"\", error=msg)\n\n if update.to_rootca_cert:\n LOG.info(\"root CA target with serial number %s will be overwritten\"\n % update.to_rootca_cert)\n\n # extract the certificate contained in PEM file\n try:\n cert = 
cutils.extract_certs_from_pem(temp_pem_contents)[0]\n except Exception as e:\n msg = \"Failed to extract certificate from file: %s\" % str(e)\n return dict(success=\"\", error=msg)\n\n if not cert:\n msg = \"No certificate have been added, \" \\\n \"no valid certificate found in file.\"\n LOG.info(msg)\n return dict(success=\"\", error=msg)\n\n # extract current k8s rootca\n current_cert = \\\n cutils.get_certificate_from_file(kubernetes.KUBERNETES_ROOTCA_CERT)\n if not current_cert:\n msg = \"Not able to get the current kube rootca\"\n return dict(success=\"\", error=msg)\n\n # validate certificate\n msg = cutils.check_cert_validity(cert)\n\n if msg is not None:\n return dict(success=\"\", error=msg)\n\n is_ca = cutils.is_ca_cert(cert)\n if not is_ca:\n msg = \"The certificate in the file is not a CA certificate\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n # extract information regarding the new rootca\n try:\n new_cert_id = cutils.build_cert_identifier(cert)\n except Exception:\n msg = \"Failed to extract subject and serial number \" \\\n \"from new root CA\"\n LOG.error(msg)\n return dict(success=\"\", error=msg)\n\n return dict(success=new_cert_id, error=\"\")", "def test_failedCertificateVerification(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=False,\n requireCertificate=False),\n sslverify.OpenSSLCertificateOptions(verify=True,\n requireCertificate=False, caCerts=[self.cCert]),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n\n return d.addCallback(afterLost)", "def test_patch_certificate_signing_request(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates the server certificate purpose
def validate_server_purpose(self,certificate): server_auth = x509.oid.ExtendedKeyUsageOID.SERVER_AUTH extended_key_usages = certificate.extensions.get_extension_for_oid(ExtensionOID.EXTENDED_KEY_USAGE) return any(extension for extension in extended_key_usages.value if extension.dotted_string == server_auth.dotted_string)
[ "def test_validHostnameInvalidCertificate(self):\n cProto, sProto, cWrapped, sWrapped, pump = self.serviceIdentitySetup(\n u\"valid.example.com\",\n u\"valid.example.com\",\n validCertificate=False,\n )\n\n self.assertEqual(cWrapped.data, b'')\n self.assertEqual(sWrapped.data, b'')\n\n cErr = cWrapped.lostReason.value\n sErr = sWrapped.lostReason.value\n\n self.assertIsInstance(cErr, SSL.Error)\n self.assertIsInstance(sErr, SSL.Error)", "def ssl_verify_server_cert(self):\n return \"\"\"--ssl-verify-server-cert\"\"\"", "def validate_certificate(self):\n # type: () -> int\n return self._get_property('validate_certificate')", "def ssl_check():\n return \"All ok, mm'kay.\"", "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def test_read_certificate_signing_request_status(self):\n pass", "def is_server_cert_verification_enabled(self):\n\n return self.need_server_auth", "def verify(self, conn, cert, errnum, depth, ok):\n # If there is already an error bail now\n if not ok:\n return ok\n\n # Only perform further verification on client certs\n if depth>0:\n return ok\n\n # At this point we know the certificate is signed by a\n # trusted CA, check the issuer OU matches the incoming cert\n # OU and the incoming cert is not a server cert\n # XXX: Should look at using something like nsCertType rather\n # than the CN field for this.\n s = cert.get_subject()\n i = cert.get_issuer()\n if s.OU != i.OU:\n log_warn(\"Rejected incoming connection from invalid \"\n \"SSL cert (%s). 
OU did not match.\" % s)\n return 0\n if s.CN == \"server\":\n log_warn(\"Rejected incoming connection from server SSL \"\n \"cert (%s).\" % s)\n return 0\n return 1", "def validate_certificate(self, value):\n self._set_property('validate_certificate', value)", "def test_https_self_signed_cert(self):\n domain = inspect(\"self-signed.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_self_signed_cert)", "def _sanityCheckForSSL(self):\n if not self.requiresSsl():\n return 0\n\n if not self.sslCertPath:\n log.error(\"sslCertPath to be set - cannot start server\")\n return 1\n try:\n util.mkdirChain(os.path.dirname(self.sslCertPath))\n except OSError, err:\n log.error(\"Could not access sslCert dir %s: %s\" % os.path.dirname(self.sslCertPath), err)\n\n if self.caCertPath:\n log.warning(\"The caCertPath option is deprecated\")\n return self.makeCertificate()", "def test_read_certificate_signing_request(self):\n pass", "def test_cert_verification(self, session):\n adapter = DummyAdapter()\n session.mount(\"https://\", adapter)\n client = corbeau.Client(self.dsn)\n client.captureMessage(\"oh noes!\")\n request = adapter.request\n kwargs = adapter.kwargs\n self.assertTrue(kwargs[\"verify\"])\n self.assertEqual(kwargs[\"timeout\"], 1)\n self.assertTrue(\"X-Sentry-Auth\" in request.headers)\n self.assertTrue(request.body)", "def verify_csdata(self) -> None:", "def test_https_bad_chain(self):\n domain = inspect(\"untrusted-root.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_chain)", "def ssl_certificate_validation_disabled():\n return bool(\n os.environ.get('LP_DISABLE_SSL_CERTIFICATE_VALIDATION', False))", "def validate_signature(self,issuer,subject):\r\n\t\tissuer_pub_key = issuer.public_key()\r\n\t\ttry:\r\n\t\t\tissuer_pub_key.verify(\r\n\t\t\t\tsubject.signature,\r\n\t\t\t\tsubject.tbs_certificate_bytes,\r\n\t\t\t\tpd.PKCS1v15(),\r\n\t\t\t\tsubject.signature_hash_algorithm,\r\n\t\t\t)\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Could not Validate the Signature of the Certificate\")\r\n\t\t\treturn False", "def test_create_certificate_signing_request(self):\n pass", "def validate_required_certs(member, gear):\n missing_certs = []\n for cert_required in gear.geartype.min_required_certs.all():\n if cert_required not in member.certifications.all():\n missing_certs.append(cert_required)\n if missing_certs:\n cert_names = [cert.title for cert in missing_certs]\n msg = f\"{member.get_full_name()} is missing the following certifications: {cert_names}\"\n logger.info(msg)\n raise ValidationError(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate the Signature of a Certificate. The issuer parameter represents the certificate of the issuer. The subject parameter represents the certificate we want to verify.
def validate_signature(self,issuer,subject): issuer_pub_key = issuer.public_key() try: issuer_pub_key.verify( subject.signature, subject.tbs_certificate_bytes, pd.PKCS1v15(), subject.signature_hash_algorithm, ) return True except: logger.error("Could not Validate the Signature of the Certificate") return False
[ "def ValidateCertificateSignature(self, signed_cert, signing_cert):\n # First the naive way -- note this does not check expiry / use etc.\n signed_m2 = M2_X509.load_cert_der_string(der_encoder.encode(signed_cert))\n signing_cert_text=der_encoder.encode(signing_cert)\n signing_m2 = M2_X509.load_cert_der_string(signing_cert_text)\n pubkey = signing_m2.get_pubkey()\n #XXX: eval! eval!!! \n #for openssl doesn't accept md2 as hash method. and such a cert has been used every where.\n #will not just trust it\n if hashlib.md5(signing_cert_text).hexdigest() == '10fc635df6263e0df325be5f79cd6767':\n return #10fc635df6263e0df325be5f79cd6767: Issuer: C=US, O=VeriSign, Inc., OU=Class 3 Public Primary Certification Authority\n #Serial Number:70:ba:e4:1d:10:d9:29:34:b6:38:ca:7b:03:cc:ba:bf\n v = signed_m2.verify(pubkey)\n if v != 1:\n self.openssl_error = M2_Err.get_error()\n raise Asn1Error('1: Validation of cert signature failed.')", "def verify_certificate(self, message, signature):\n\n # detach the signature from the message\n message_without_sign = message.split(\"&sign=\")[0]\n # decode base64 the signature\n binary_signature = base64.b64decode(signature)\n # create a pubkey object\n if self.production:\n key = RSA.importKey(\n settings.PAYBOX_PUBLIC_KEY\n )\n else:\n key = RSA.importKey(\n settings.PAYBOX_TEST_PUBLIC_KEY\n )\n # digest the message\n h = SHA.new(bytes(message_without_sign, encoding=\"utf8\"))\n # and verify the signature\n verifier = PKCS1_v1_5.new(key)\n assert verifier.verify(h, binary_signature), \"Signature Verification Failed\"\n\n return True", "def ValidateSignatures(self):\n # Encrypted digest is that of auth_attrs, see comments in ValidateHashes.\n signing_cert = self.certificates[self.signing_cert_id]\n v = self._ValidatePubkeyGeneric(signing_cert, self.digest_algorithm,\n self.computed_auth_attrs_for_hash,\n self.encrypted_digest)\n if v != 1:\n raise Asn1Error('1: Validation of basic signature failed.')\n\n if self.has_countersignature:\n signing_cert = self.certificates[self.counter_sig_cert_id]\n v = self._ValidatePubkeyGeneric(signing_cert, self.digest_algorithm_out,\n self.computed_counter_attrs_for_hash,\n self.encrypted_counter_digest)\n if v != 1:\n raise Asn1Error('2: Validation of counterSignature failed.')", "def test_signature_validity(curve, generator, Msg, Qx, Qy, R, S, expectedVerification):\n pubk = Public_key(generator, ellipticcurve.Point(curve, Qx, Qy))\n verificationRes = pubk.verifies(digest_integer(Msg), Signature(R, S))\n assert verificationRes == expectedVerification, \"Signature verification failed\"", "def validate_sig_integrity(signer_info: cms.SignedData,\n cert: x509.Certificate,\n expected_content_type: str,\n actual_digest: bytes) -> Tuple[bool, bool]:\n\n signature_algorithm: cms.SignedDigestAlgorithm = \\\n signer_info['signature_algorithm']\n digest_algorithm_obj = signer_info['digest_algorithm']\n md_algorithm = digest_algorithm_obj['algorithm'].native\n signature = signer_info['signature'].native\n\n # signed_attrs comes with some context-specific tagging\n # because it's an implicit field. This breaks validation\n signed_attrs = signer_info['signed_attrs'].untag()\n # TODO if there are no signed_attrs, we should validate the signature\n # against actual_digest. 
Find some real-world exmples to test this\n # Also, signed_attrs is mandatory if content_type is not id-data\n\n # check the CMSAlgorithmProtection attr, if present\n try:\n cms_algid_protection, = find_cms_attribute(\n signed_attrs, 'cms_algorithm_protection'\n )\n signed_digest_algorithm = \\\n cms_algid_protection['digest_algorithm'].native\n if signed_digest_algorithm != digest_algorithm_obj.native:\n raise SignatureValidationError(\n \"Digest algorithm does not match CMS algorithm protection \"\n \"attribute.\"\n )\n signed_sig_algorithm = \\\n cms_algid_protection['signature_algorithm'].native\n if signed_sig_algorithm is None:\n raise SignatureValidationError(\n \"CMS algorithm protection attribute not valid for signed data\"\n )\n elif signed_sig_algorithm != signature_algorithm.native:\n raise SignatureValidationError(\n \"Signature mechanism does not match CMS algorithm \"\n \"protection attribute.\"\n )\n except KeyError:\n pass\n except SignatureValidationError:\n raise\n except ValueError:\n raise SignatureValidationError(\n 'Multiple CMS protection attributes present'\n )\n\n signed_blob = signed_attrs.dump(force=True)\n try:\n content_type, = find_cms_attribute(signed_attrs, 'content_type')\n content_type = content_type.native\n if content_type != expected_content_type:\n raise SignatureValidationError(\n 'Content type did not match expected value'\n )\n except (KeyError, ValueError):\n raise SignatureValidationError(\n 'Content type not found in signature, or multiple content-type '\n 'attributes present.'\n )\n\n try:\n embedded_digest, = find_cms_attribute(signed_attrs, 'message_digest')\n embedded_digest = embedded_digest.native\n except (KeyError, ValueError):\n raise SignatureValidationError(\n 'Message digest not found in signature, or multiple message '\n 'digest attributes present.'\n )\n intact = actual_digest == embedded_digest\n\n try:\n _validate_raw(\n signature, signed_blob, cert, signature_algorithm, md_algorithm\n )\n valid = True\n except SignatureError:\n valid = False\n\n return intact, valid", "def verify_signature(request_body, signature, hmac_key):\n computed = hmac.new(hmac_key, request_body, hashlib.sha1)\n if not hmac.compare_digest(computed.hexdigest(), signature.encode('ascii', 'ignore')):\n raise SignatureError('Computed signature does not match request signature.')", "def verify(self, require_x509=True, x509_cert=None, ca_pem_file=None, ca_path=None, hmac_key=None,\n validate_schema=True, parser=None, uri_resolver=None, id_attribute=None):\n self.hmac_key = hmac_key\n self.require_x509 = require_x509\n self.x509_cert = x509_cert\n\n if x509_cert:\n self.require_x509 = True\n\n if id_attribute is None:\n self.id_attributes = (\"Id\", \"ID\")\n else:\n self.id_attributes = (id_attribute, )\n\n if isinstance(self.data, (str, bytes)):\n root = fromstring(self.data, parser=parser)\n else:\n root = self.data\n\n if root.tag == ds_tag(\"Signature\"):\n signature = root\n else:\n signature = self._find(root, \"Signature\")\n\n if validate_schema:\n _get_schema().assertValid(signature)\n\n signed_info = self._find(signature, \"SignedInfo\")\n c14n_method = self._find(signed_info, \"CanonicalizationMethod\")\n c14n_algorithm = c14n_method.get(\"Algorithm\")\n reference = self._find(signed_info, \"Reference\")\n transforms = self._find(reference, \"Transforms\", require=False)\n signed_info_c14n = self._c14n(signed_info, algorithm=c14n_algorithm)\n digest_algorithm = self._find(reference, \"DigestMethod\").get(\"Algorithm\")\n digest_value = 
self._find(reference, \"DigestValue\")\n\n payload = self._resolve_reference(root, reference, uri_resolver=uri_resolver)\n payload_c14n = self._apply_transforms(payload, transforms, signature, c14n_algorithm)\n\n if digest_value.text != self._get_digest(payload_c14n, self._get_digest_method(digest_algorithm)):\n raise InvalidDigest(\"Digest mismatch\")\n\n signature_method = self._find(signed_info, \"SignatureMethod\")\n signature_value = self._find(signature, \"SignatureValue\")\n signature_alg = signature_method.get(\"Algorithm\")\n raw_signature = b64decode(signature_value.text)\n x509_data = signature.find(\"ds:KeyInfo/ds:X509Data\", namespaces=namespaces)\n\n if x509_data is not None or self.require_x509:\n from OpenSSL.crypto import load_certificate, FILETYPE_PEM, verify, Error as OpenSSLCryptoError\n\n if self.x509_cert is None:\n if x509_data is None:\n raise InvalidInput(\"Expected a X.509 certificate based signature\")\n certs = [cert.text for cert in self._findall(x509_data, \"X509Certificate\")]\n cert_chain = [load_certificate(FILETYPE_PEM, add_pem_header(cert)) for cert in certs]\n verify_x509_cert_chain(cert_chain, ca_pem_file=ca_pem_file, ca_path=ca_path)\n else:\n cert_chain = [load_certificate(FILETYPE_PEM, add_pem_header(self.x509_cert))]\n\n signature_digest_method = self._get_signature_digest_method(signature_alg).name\n try:\n verify(cert_chain[-1], raw_signature, signed_info_c14n, signature_digest_method)\n except OpenSSLCryptoError as e:\n lib, func, reason = e.message[0]\n raise InvalidSignature(\"Signature verification failed: {}\".format(reason))\n elif \"hmac-sha\" in signature_alg:\n if self.hmac_key is None:\n raise InvalidInput('Parameter \"hmac_key\" is required when verifying a HMAC signature')\n\n from cryptography.hazmat.primitives.hmac import HMAC\n signer = HMAC(key=ensure_bytes(self.hmac_key),\n algorithm=self._get_hmac_digest_method(signature_alg),\n backend=default_backend())\n signer.update(signed_info_c14n)\n if raw_signature != signer.finalize():\n raise InvalidSignature(\"Signature mismatch (HMAC)\")\n else:\n key_value = signature.find(\"ds:KeyInfo/ds:KeyValue\", namespaces=namespaces)\n if key_value is None:\n raise InvalidInput(\"Expected to find either KeyValue or X509Data XML element in KeyInfo\")\n\n self._verify_signature_with_pubkey(signed_info_c14n, raw_signature, key_value, signature_alg)\n\n return payload", "def verify(self, subject, signature=None):\n sspairs = []\n\n # some type checking\n if not isinstance(subject, (type(None), PGPMessage, PGPKey, PGPUID, PGPSignature, str, bytes, bytearray)):\n raise TypeError(\"Unexpected subject value: {:s}\".format(str(type(subject))))\n if not isinstance(signature, (type(None), PGPSignature)):\n raise TypeError(\"Unexpected signature value: {:s}\".format(str(type(signature))))\n\n def _filter_sigs(sigs):\n _ids = {self.fingerprint.keyid} | set(self.subkeys)\n for sig in sigs:\n if sig.signer in _ids:\n yield sig\n\n # collect signature(s)\n if signature is None:\n if isinstance(subject, PGPMessage):\n for sig in _filter_sigs(subject.signatures):\n sspairs.append((sig, subject.message))\n\n if isinstance(subject, (PGPUID, PGPKey)):\n sspairs += [ (sig, subject) for sig in _filter_sigs(subject.__sig__) ]\n\n if isinstance(subject, PGPKey):\n # user ids\n for uid in subject.userids:\n for sig in _filter_sigs(uid.__sig__):\n sspairs.append((sig, uid))\n # user attributes\n for ua in subject.userattributes:\n for sig in _filter_sigs(ua.__sig__):\n sspairs.append((sig, ua))\n\n # subkey binding 
signatures\n for subkey in subject.subkeys.values():\n for sig in _filter_sigs(subkey.__sig__):\n sspairs.append((sig, subkey))\n\n elif signature.signer in {self.fingerprint.keyid} | set(self.subkeys):\n sspairs += [(signature, subject)]\n\n if len(sspairs) == 0:\n raise PGPError(\"No signatures to verify\")\n\n # finally, start verifying signatures\n sigv = SignatureVerification()\n for sig, subj in sspairs:\n if self.fingerprint.keyid != sig.signer and sig.signer in self.subkeys:\n sigv &= self.subkeys[sig.signer].verify(subj, sig)\n\n else:\n if isinstance(subj, PGPKey):\n self_verifying = sig.signer == subj.fingerprint\n else:\n self_verifying = False\n\n subkey_issues = self.check_soundness(self_verifying)\n signature_issues = self.check_primitives()\n\n if self_verifying:\n signature_issues &= ~SecurityIssues.HashFunctionNotCollisionResistant\n\n issues = signature_issues | subkey_issues\n if issues and issues.causes_signature_verify_to_fail:\n sigv.add_sigsubj(sig, self, subj, issues)\n else:\n verified = self._key.verify(sig.hashdata(subj), sig.__sig__, getattr(hashes, sig.hash_algorithm.name)())\n if verified is NotImplemented:\n raise NotImplementedError(sig.key_algorithm)\n\n sigv.add_sigsubj(sig, self, subj, SecurityIssues.WrongSig if not verified else SecurityIssues.OK)\n\n return sigv", "def verify_certificate_chain(self, certificates, now=None):\n if not certificates:\n raise ValueError(\"chain must have at least one certificate\")\n if now is None:\n now = int(time.time() * 1000)\n root_issuer = certificates[0].payload[\"iss\"]\n root_key = self.supportdocs.get_key(root_issuer)\n current_key = root_key\n for cert in certificates:\n if cert.payload[\"exp\"] < now:\n raise ExpiredSignatureError(\"expired certificate in chain\")\n if not cert.check_signature(current_key):\n raise InvalidSignatureError(\"bad signature in chain\")\n current_key = cert.payload[\"public-key\"]\n return cert", "def validate_certificate(self, certificate):\r\n\t\t\r\n\t\tdates = (certificate.not_valid_before.timestamp(),certificate.not_valid_after.timestamp())\r\n\t\tdate_now=datetime.now().timestamp()\r\n\t\treturn dates[0]< date_now < dates[1]", "def certify(self, subject, level=SignatureType.Generic_Cert, **prefs):\n hash_algo = prefs.pop('hash', None)\n sig_type = level\n if isinstance(subject, PGPKey):\n sig_type = SignatureType.DirectlyOnKey\n\n sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))\n\n # signature options that only make sense in certifications\n usage = prefs.pop('usage', None)\n exportable = prefs.pop('exportable', None)\n\n if usage is not None:\n sig._signature.subpackets.addnew('KeyFlags', hashed=True, flags=usage)\n\n if exportable is not None:\n sig._signature.subpackets.addnew('ExportableCertification', hashed=True, bflag=exportable)\n\n keyfp = self.fingerprint\n if isinstance(subject, PGPKey):\n keyfp = subject.fingerprint\n if isinstance(subject, PGPUID) and subject._parent is not None:\n keyfp = subject._parent.fingerprint\n\n if keyfp == self.fingerprint:\n # signature options that only make sense in self-certifications\n cipher_prefs = prefs.pop('ciphers', None)\n hash_prefs = prefs.pop('hashes', None)\n compression_prefs = prefs.pop('compression', None)\n key_expires = prefs.pop('key_expiration', None)\n keyserver_flags = prefs.pop('keyserver_flags', None)\n keyserver = prefs.pop('keyserver', None)\n primary_uid = prefs.pop('primary', None)\n attested_certifications = 
prefs.pop('attested_certifications', [])\n\n if key_expires is not None:\n # key expires should be a timedelta, so if it's a datetime, turn it into a timedelta\n if isinstance(key_expires, datetime):\n key_expires = key_expires - self.created\n\n sig._signature.subpackets.addnew('KeyExpirationTime', hashed=True, expires=key_expires)\n\n if cipher_prefs is not None:\n sig._signature.subpackets.addnew('PreferredSymmetricAlgorithms', hashed=True, flags=cipher_prefs)\n\n if hash_prefs:\n sig._signature.subpackets.addnew('PreferredHashAlgorithms', hashed=True, flags=hash_prefs)\n if sig.hash_algorithm is None:\n sig._signature.halg = hash_prefs[0]\n if sig.hash_algorithm is None:\n sig._signature.halg = HashAlgorithm.SHA256\n\n if compression_prefs is not None:\n sig._signature.subpackets.addnew('PreferredCompressionAlgorithms', hashed=True, flags=compression_prefs)\n\n if keyserver_flags is not None:\n sig._signature.subpackets.addnew('KeyServerPreferences', hashed=True, flags=keyserver_flags)\n\n if keyserver is not None:\n sig._signature.subpackets.addnew('PreferredKeyServer', hashed=True, uri=keyserver)\n\n if primary_uid is not None:\n sig._signature.subpackets.addnew('PrimaryUserID', hashed=True, primary=primary_uid)\n\n cert_sigtypes = {SignatureType.Generic_Cert, SignatureType.Persona_Cert,\n SignatureType.Casual_Cert, SignatureType.Positive_Cert,\n SignatureType.CertRevocation}\n # Features is always set on certifications:\n if sig._signature.sigtype in cert_sigtypes:\n sig._signature.subpackets.addnew('Features', hashed=True, flags=Features.pgpy_features)\n\n # If this is an attestation, then we must include a Attested Certifications subpacket:\n if sig._signature.sigtype == SignatureType.Attestation:\n attestations = set()\n for attestation in attested_certifications:\n if isinstance(attestation, PGPSignature) and attestation.type in cert_sigtypes:\n h = sig.hash_algorithm.hasher\n h.update(attestation._signature.canonical_bytes())\n attestations.add(h.digest())\n elif isinstance(attestation, (bytes, bytearray)) and len(attestation) == sig.hash_algorithm.digest_size:\n attestations.add(attestation)\n else:\n warnings.warn(\n 'Attested Certification element is neither a PGPSignature certification nor '\n 'a bytes object of size {:d}; ignoring'.format(sig.hash_algorithm.digest_size)\n )\n sig._signature.subpackets.addnew('AttestedCertifications', hashed=True, attested_certifications=b''.join(sorted(attestations)))\n\n else:\n # signature options that only make sense in non-self-certifications\n trust = prefs.pop('trust', None)\n regex = prefs.pop('regex', None)\n\n if trust is not None:\n sig._signature.subpackets.addnew('TrustSignature', hashed=True, level=trust[0], amount=trust[1])\n\n if regex is not None:\n sig._signature.subpackets.addnew('RegularExpression', hashed=True, regex=regex)\n\n return self._sign(subject, sig, **prefs)", "def signCertificateRequest(self,\n issuerDistinguishedName,\n requestData,\n verifyDNCallback,\n serialNumber,\n requestFormat=crypto.FILETYPE_ASN1,\n certificateFormat=crypto.FILETYPE_ASN1,\n secondsToExpiry=60 * 60 * 24 * 365, # One year\n digestAlgorithm='sha256'):\n hlreq = CertificateRequest.load(requestData, requestFormat)\n\n dn = hlreq.getSubject()\n vval = verifyDNCallback(dn)\n\n def verified(value):\n if not value:\n raise VerifyError(\"DN callback %r rejected request DN %r\" % (verifyDNCallback, dn))\n return self.signRequestObject(issuerDistinguishedName, hlreq,\n serialNumber, secondsToExpiry, digestAlgorithm).dump(certificateFormat)\n\n 
if isinstance(vval, Deferred):\n return vval.addCallback(verified)\n else:\n return verified(vval)", "def test_sign(self):\n user = 'jethro@example.com'\n passwd = 'hunter2'\n\n issuerName = \"fake certificate\"\n domainCert = makeCert(issuerName)\n\n class FakeAvatar(object):\n def signCertificateRequest(fa, certificateRequest, hostcert,\n suggestedSerial):\n self.assertEqual(hostcert, domainCert)\n return hostcert.signRequestObject(certificateRequest,\n suggestedSerial)\n\n class FakeStorage(object):\n def getPrivateCertificate(cs, subject):\n return domainCert\n\n def genSerial(cs, domain):\n return 1\n\n cr = CertificateRequest.load(makeCertRequest(user))\n class FakePortal(object):\n def login(fp, creds, proto, iface):\n self.assertEqual(iface, IQ2QUser)\n self.assertEqual(creds.username, user)\n self.assertEqual(creds.password, passwd)\n return succeed([None, FakeAvatar(), None])\n\n class FakeService(object):\n portal = FakePortal()\n certificateStorage = FakeStorage()\n\n q = Q2Q()\n q.service = FakeService()\n d = callResponder(Sign, {'certificate_request': cr,\n 'password': passwd},\n q)\n\n response = self.successResultOf(d)\n self.assertEqual(response['certificate'].getIssuer().commonName,\n issuerName)", "def mk_cacert(issuer, request, private_key):\n pkey = request.get_pubkey()\n cert = X509.X509()\n cert.set_serial_number(1)\n cert.set_version(2)\n mk_cert_valid(cert)\n cert.set_issuer(issuer)\n cert.set_subject(cert.get_issuer())\n cert.set_pubkey(pkey)\n cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))\n cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))\n cert.sign(private_key, 'sha256')\n return cert, private_key, pkey", "def verify_xml_signature(xml_file, certificate_path):\n # TODO - refactor such that this verifies for generic stuff\n tree = etree.parse(xml_file)\n root = tree.getroot()\n with open(certificate_path) as f:\n certificate = f.read()\n # for per_tag in root.iter('UAPermission'):\n # data_to_sign = per_tag\n try:\n verified_data = sx.XMLVerifier().verify(data=root, require_x509=True, x509_cert=certificate).signed_xml\n # The file signature is authentic\n return True\n except cryptography.exceptions.InvalidSignature:\n # print(verified_data)\n # add the type of exception\n return False", "def test_create_certificate_signing_request(self):\n pass", "def verify(self, public_key, message, signature):", "def test_idtoken_sign_validation(self):\n SIGKEYS = self._get_keys()\n RSAKEYS = [k for k in SIGKEYS if k.kty == 'RSA']\n\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n response_dic = json.loads(response.content.decode('utf-8'))\n\n JWS().verify_compact(response_dic['id_token'].encode('utf-8'), RSAKEYS)", "def test_read_certificate_signing_request(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates if a certificate is in the list of revoked certificates
def crl_validation(self, cert):
    return all(crl.get_revoked_certificate_by_serial_number(cert.serial_number) == None for crl in self.crls_list)
[ "def check_certif_revokation_list(self, cert, index):\n # 12. Revokation list check\n # - Subject public key\n # - Issuer or certificate serial number\n if (self.context_revoked_certificates_id or\n self.context_revoked_public_keys):\n self.error(\"Revokation list check not implemented\")", "def util_is_chain_valid(list_of_cert_dirs):\n if not list_of_cert_dirs or len(list_of_cert_dirs)==1:\n return False\n\n chain_keys = list_of_cert_dirs.keys()\n chain_keys.sort()\n \n next_index = int(chain_keys[0])\n for chain_index in chain_keys:\n if next_index != int(chain_index):\n return False\n next_index = int(chain_index) + 1\n\n return True", "def validate_cert_chain(self):\r\n\r\n\t\tchain = self.trusting_chain\r\n\t\tif len(self.trusting_chain) <= 1:\r\n\t\t\treturn False \r\n\t\tfor i in range(0, len(chain) - 1):\r\n\r\n\t\t\tif not self.validate_certificate(chain[i]):\r\n\t\t\t\treturn False\r\n\r\n\t\t\t#verifies if the signatures are valid \r\n\t\t\tif not self.validate_signature(chain[i+1], chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\t\t# verifies if the certificate is not on a CRL \r\n\t\t\tif not self.crl_validation(chain[i]):\r\n\t\t\t\treturn False\r\n\t\t\t\r\n\t\treturn True", "def verify_certificate_chain(self, certificates, now=None):\n if not certificates:\n raise ValueError(\"chain must have at least one certificate\")\n if now is None:\n now = int(time.time() * 1000)\n root_issuer = certificates[0].payload[\"iss\"]\n root_key = self.supportdocs.get_key(root_issuer)\n current_key = root_key\n for cert in certificates:\n if cert.payload[\"exp\"] < now:\n raise ExpiredSignatureError(\"expired certificate in chain\")\n if not cert.check_signature(current_key):\n raise InvalidSignatureError(\"bad signature in chain\")\n current_key = cert.payload[\"public-key\"]\n return cert", "def validate_cert_chain(certs, hostname):\n # TODO: Raise error codes with appropriate messages instead.\n encoded_certs, lengths = zip(*[\n (ffi.new(\"uint8_t[]\", cert), len(cert)) for cert in certs\n ])\n cert_ptr_buffer = ffi.new(\"uint8_t*[]\", encoded_certs)\n cert_size_buffer = ffi.new(\"size_t[]\", lengths)\n cert_count = ffi.new(\"int *\", len(certs))\n hostname = ffi.new(\"char[]\", hostname.encode('utf-8'))\n\n result = lib.validate_cert_chain(\n cert_ptr_buffer,\n cert_size_buffer,\n cert_count[0],\n hostname,\n )\n return result == 1", "def validate_certificate(self, certificate):\r\n\t\t\r\n\t\tdates = (certificate.not_valid_before.timestamp(),certificate.not_valid_after.timestamp())\r\n\t\tdate_now=datetime.now().timestamp()\r\n\t\treturn dates[0]< date_now < dates[1]", "def check_child_certs(conf, errs):\n\n z = Zookeeper(handle=conf.handle)\n req = Element(tag_msg, nsmap=nsmap, type=\"query\", version=version)\n SubElement(req, tag_list_published_objects,\n tag=\"list_published_objects\", tenant_handle=conf.handle)\n pdus = z.call_rpkid(req)\n for pdu in pdus:\n if pdu.get(\"uri\").endswith('.cer'):\n cert = X509(Base64=pdu.text)\n t = cert.getNotAfter()\n if t <= expire_time:\n e = 'expired' if t <= now else 'will expire'\n errs.write(\"%(handle)s's rescert for Child %(child)s %(expire)s on %(date)s uri=%(uri)s subject=%(subject)s\\n\" % {\n 'handle': conf.handle,\n 'child': pdu.get(\"child_handle\"),\n 'uri': pdu.get(\"uri\"),\n 'subject': cert.getSubject(),\n 'expire': e,\n 'date': t})", "def _are_certificates_identical(self):\n sni_cert = self.sni_data.get(\"Certificate_details\")\n non_sni_cert = self.non_sni_data.get(\"Certificate_details\")\n if all(cert for cert in 
(sni_cert, non_sni_cert) if cert) and sni_cert == non_sni_cert:\n return True\n return", "def test_list_cloud_certificates(self):\n pass", "def validate_required_certs(member, gear):\n missing_certs = []\n for cert_required in gear.geartype.min_required_certs.all():\n if cert_required not in member.certifications.all():\n missing_certs.append(cert_required)\n if missing_certs:\n cert_names = [cert.title for cert in missing_certs]\n msg = f\"{member.get_full_name()} is missing the following certifications: {cert_names}\"\n logger.info(msg)\n raise ValidationError(msg)", "def verifyCertChain(self, \n x509Cert2Verify=None, \n caX509Stack=None,\n rejectSelfSignedCert=True):\n \n if caX509Stack is None:\n caX509Stack = []\n \n n2Validate = len(self)\n if x509Cert2Verify:\n # One more to validate in addition to stack content\n n2Validate += 1\n else:\n # Validate starting from last on stack - but check first that it's\n # populated\n if n2Validate == 0:\n raise X509StackEmptyError(\"Empty stack and no x509Cert2Verify \"\n \"set: no cert.s to verify\")\n\n x509Cert2Verify = self[-1]\n \n # Exit loop if all certs have been validated or if find a self \n # signed cert.\n nValidated = 0\n issuerX509Cert = None\n while nValidated < n2Validate:\n issuerX509Cert = None\n issuerDN = x509Cert2Verify.issuer\n \n # Search for issuing certificate in stack\n for x509Cert in self:\n if x509Cert.dn == issuerDN:\n # Match found - the cert.'s issuer has been found in the \n # stack\n issuerX509Cert = x509Cert\n break\n \n if issuerX509Cert:\n # An issuing cert. has been found - use it to check the \n # signature of the cert. to be verified\n if not x509Cert2Verify.verify(issuerX509Cert.pubKey):\n X509CertInvalidSignature('Signature is invalid for cert. '\n '\"%s\"' % x509Cert2Verify.dn)\n \n # In the next iteration the issuer cert. will be checked:\n # 1) search for a cert. in the stack that issued it\n # 2) If found use the issuing cert. to verify\n x509Cert2Verify = issuerX509Cert\n nValidated += 1\n else:\n # All certs in the stack have been searched\n break\n\n\n if issuerX509Cert: \n # Check for self-signed certificate\n if (nValidated == 1 and rejectSelfSignedCert and \n issuerX509Cert.dn == issuerX509Cert.issuer):\n\n # If only one iteration occurred then it must be a self\n # signed certificate\n raise SelfSignedCert(\"Certificate is self signed: [DN=%s]\" %\n issuerX509Cert.dn)\n \n if not caX509Stack:\n caX509Stack = [issuerX509Cert]\n \n elif not caX509Stack:\n raise X509CertIssuerNotFound('No issuer certificate found for '\n 'certificate \"%s\"' % \n x509Cert2Verify.dn)\n \n for caCert in caX509Stack:\n issuerDN = x509Cert2Verify.issuer\n if caCert.dn == issuerDN:\n issuerX509Cert = caCert\n break\n \n if issuerX509Cert:\n if not x509Cert2Verify.verify(issuerX509Cert.pubKey):\n X509CertInvalidSignature('Signature is invalid for cert. \"%s\"' %\n x509Cert2Verify.dn)\n \n # Chain is validated through to CA cert\n return\n else:\n raise X509CertIssuerNotFound('No issuer cert. 
found for '\n 'certificate \"%s\"'%x509Cert2Verify.dn)\n \n # If this point is reached then an issuing cert is missing from the\n # chain \n raise X509CertIssuerNotFound('Can\\'t find issuer cert \"%s\" for '\n 'certificate \"%s\"' %\n (x509Cert2Verify.issuer, \n x509Cert2Verify.dn))", "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def verify_ssl_certificate(self):\n return all(driver.verify_ssl_certificate for driver in self.drivers)", "def test_list_certificate_signing_request(self):\n pass", "def verify_list(mailchimp, list_id, course_id):\n lists = mailchimp.lists(filters={'list_id': list_id})['data']\n\n if len(lists) != 1:\n log.error('incorrect list id')\n return False\n\n list_name = lists[0]['name']\n\n log.debug('list name: %s', list_name)\n\n # check that we are connecting to the correct list\n parts = course_id.replace('_', ' ').replace('/', ' ').split()\n count = sum(1 for p in parts if p in list_name)\n if count < 3:\n log.info(course_id)\n log.info(list_name)\n log.error('course_id does not match list name')\n return False\n\n return True", "def is_on_certificate_allowlist(user, course_key):\n return CertificateAllowlist.objects.filter(user=user, course_id=course_key, allowlist=True).exists()", "def check_cert_key_match(cert, private_key):\n try:\n cert_obj = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n except OpenSSL.crypto.Error:\n raise CertificateError('new Services Director service '\n 'certificate is not correct: %s' % cert)\n\n try:\n private_key_obj = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, private_key)\n except OpenSSL.crypto.Error:\n raise CertificateError('new Services Director service '\n 'private key is not correct: %s' % private_key)\n\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(private_key_obj)\n context.use_certificate(cert_obj)\n try:\n context.check_privatekey()\n return True\n except OpenSSL.SSL.Error:\n raise CertificateError(\n 'new sd service private key and new sd service certificate do not match: %s' % cert)", "def verify_ssl_certificate(self, value):\n for driver in self.drivers:\n driver.verify_ssl_certificate = value", "def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):\n # some parameters are unused, but this is the API\n # pylint: disable=W0613\n assert self._ssl_params, \"SSL not initialized\"\n\n return (self._ssl_cert.digest(\"sha1\") == cert.digest(\"sha1\") and\n self._ssl_cert.digest(\"md5\") == cert.digest(\"md5\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the server certificate chain is valid
def validate_cert_chain(self):
    chain = self.trusting_chain
    if len(self.trusting_chain) <= 1:
        return False
    for i in range(0, len(chain) - 1):
        if not self.validate_certificate(chain[i]):
            return False
        # verifies if the signatures are valid
        if not self.validate_signature(chain[i+1], chain[i]):
            return False
        # verifies if the certificate is not on a CRL
        if not self.crl_validation(chain[i]):
            return False
    return True
[ "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def validate_cert_chain(certs, hostname):\n # TODO: Raise error codes with appropriate messages instead.\n encoded_certs, lengths = zip(*[\n (ffi.new(\"uint8_t[]\", cert), len(cert)) for cert in certs\n ])\n cert_ptr_buffer = ffi.new(\"uint8_t*[]\", encoded_certs)\n cert_size_buffer = ffi.new(\"size_t[]\", lengths)\n cert_count = ffi.new(\"int *\", len(certs))\n hostname = ffi.new(\"char[]\", hostname.encode('utf-8'))\n\n result = lib.validate_cert_chain(\n cert_ptr_buffer,\n cert_size_buffer,\n cert_count[0],\n hostname,\n )\n return result == 1", "def _sanityCheckForSSL(self):\n if not self.requiresSsl():\n return 0\n\n if not self.sslCertPath:\n log.error(\"sslCertPath to be set - cannot start server\")\n return 1\n try:\n util.mkdirChain(os.path.dirname(self.sslCertPath))\n except OSError, err:\n log.error(\"Could not access sslCert dir %s: %s\" % os.path.dirname(self.sslCertPath), err)\n\n if self.caCertPath:\n log.warning(\"The caCertPath option is deprecated\")\n return self.makeCertificate()", "def verify(self, conn, cert, errnum, depth, ok):\n # If there is already an error bail now\n if not ok:\n return ok\n\n # Only perform further verification on client certs\n if depth>0:\n return ok\n\n # At this point we know the certificate is signed by a\n # trusted CA, check the issuer OU matches the incoming cert\n # OU and the incoming cert is not a server cert\n # XXX: Should look at using something like nsCertType rather\n # than the CN field for this.\n s = cert.get_subject()\n i = cert.get_issuer()\n if s.OU != i.OU:\n log_warn(\"Rejected incoming connection from invalid \"\n \"SSL cert (%s). 
OU did not match.\" % s)\n return 0\n if s.CN == \"server\":\n log_warn(\"Rejected incoming connection from server SSL \"\n \"cert (%s).\" % s)\n return 0\n return 1", "def test_https_bad_chain(self):\n domain = inspect(\"untrusted-root.badssl.com\")\n basic_check(domain.https)\n\n self.assertTrue(domain.https.https_bad_chain)", "def verify_ssl_certificate(self):\n return all(driver.verify_ssl_certificate for driver in self.drivers)", "def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):\n # some parameters are unused, but this is the API\n # pylint: disable=W0613\n assert self._ssl_params, \"SSL not initialized\"\n\n return (self._ssl_cert.digest(\"sha1\") == cert.digest(\"sha1\") and\n self._ssl_cert.digest(\"md5\") == cert.digest(\"md5\"))", "def util_is_chain_valid(list_of_cert_dirs):\n if not list_of_cert_dirs or len(list_of_cert_dirs)==1:\n return False\n\n chain_keys = list_of_cert_dirs.keys()\n chain_keys.sort()\n \n next_index = int(chain_keys[0])\n for chain_index in chain_keys:\n if next_index != int(chain_index):\n return False\n next_index = int(chain_index) + 1\n\n return True", "def verify_certificate_chain(self, certificates, now=None):\n if not certificates:\n raise ValueError(\"chain must have at least one certificate\")\n if now is None:\n now = int(time.time() * 1000)\n root_issuer = certificates[0].payload[\"iss\"]\n root_key = self.supportdocs.get_key(root_issuer)\n current_key = root_key\n for cert in certificates:\n if cert.payload[\"exp\"] < now:\n raise ExpiredSignatureError(\"expired certificate in chain\")\n if not cert.check_signature(current_key):\n raise InvalidSignatureError(\"bad signature in chain\")\n current_key = cert.payload[\"public-key\"]\n return cert", "def _verify_cert(self, peercert):\n if isinstance(self._ssl_options, dict):\n verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)\n elif isinstance(self._ssl_options, ssl.SSLContext):\n verify_mode = self._ssl_options.verify_mode\n\n assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)\n\n if verify_mode == ssl.CERT_NONE or self._server_hostname is None:\n return True\n cert = self._socket.getpeercert()\n if cert is None and verify_mode == ssl.CERT_REQUIRED:\n gen_log.warning(\"No SSL certificate given\")\n return False\n try:\n ssl_match_hostname(peercert, self._server_hostname)\n except SSLCertificateError:\n gen_log.warning(\"Invalid SSL certificate\", )\n return False\n else:\n return True", "def check_sign_chain_length(self, source):\n # 16. 
Chain length\n if (self.context_chain_length and\n len(self.cert_chains) < self.context_chain_length):\n self.error(\n \"Certificate chain length should be at least {} long,\"\n \" got {}\".format(\n self.context_chain_length, len(self.cert_chains)))", "def valid_chain(chain):\n\n if chain['length'] < MIN_NT_DISCREPANCY:\n return False\n\n if chain['method'] != 'SOLUTION NMR':\n return chain['resolution'] is not None and \\\n chain['resolution'] <= MAX_RESOLUTION_DISCREPANCY\n return True", "def is_server_cert_verification_enabled(self):\n\n return self.need_server_auth", "def test_wrong_cert(self):\n certfile = os.path.join(os.path.dirname(__file__) or os.curdir,\n \"keycert.pem\")\n server = ThreadedEchoServer(SIGNED_CERTFILE,\n certreqs=ssl.CERT_REQUIRED,\n cacerts=SIGNING_CA, chatty=False,\n connectionchatty=False)\n with server, \\\n socket.socket() as sock, \\\n test_wrap_socket(sock,\n certfile=certfile,\n ssl_version=ssl.PROTOCOL_TLSv1) as s:\n try:\n # Expect either an SSL error about the server rejecting\n # the connection, or a low-level connection reset (which\n # sometimes happens on Windows)\n s.connect((HOST, server.port))\n except ssl.SSLError as e:\n if support.verbose:\n sys.stdout.write(\"\\nSSLError is %r\\n\" % e)\n except OSError as e:\n if e.errno != errno.ECONNRESET:\n raise\n if support.verbose:\n sys.stdout.write(\"\\nsocket.error is %r\\n\" % e)\n else:\n self.fail(\"Use of invalid cert should have failed!\")", "def _verify_ssl(self):\n ca_cert = self.api_ca_cert\n\n if ca_cert is None or ca_cert == 'changeme' or ca_cert == '':\n return False\n if not os.path.exists(ca_cert):\n LOG.error(\"Could not find %s CA certificate.\"\n \"No such file or directory\",\n ca_cert)\n return False\n return ca_cert", "def IsValidCert(fname: str) -> bool:\n try:\n with open(fname, encoding=\"ascii\", errors=\"replace\") as f:\n cert_bytes = f.read()\n except Exception:\n logging.error('IsValidCert:: Unable to open certificate file %s', fname)\n return False\n\n try:\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes) # type: ignore[attr-defined]\n except Exception:\n logging.error('IsValidCert:: Unable to load certificate %s', fname)\n return False\n\n x509_notafter = x509.get_notAfter()\n utc_time = datetime.datetime.strptime(x509_notafter.decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n time_notafter = int((utc_time - datetime.datetime(1970, 1, 1)).total_seconds())\n time_current = int(datetime.datetime.now().timestamp())\n time_remaining = time_notafter - time_current\n if time_remaining < 1:\n logging.error('IsValidCert:: Expired certificate %s', fname)\n return time_remaining > 300", "def test_failedCertificateVerification(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=False,\n requireCertificate=False),\n sslverify.OpenSSLCertificateOptions(verify=True,\n requireCertificate=False, caCerts=[self.cCert]),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n\n return d.addCallback(afterLost)", "def test_validate_host_cert(self):\n cn = 'test.valid-cert.com'\n cert = SpokeHostCert(cn, self.ca_name)\n cert.create()\n self.assertTrue(cert._verify())\n cert.delete()", "def is_valid():\n \n # Get validity of 
blockchain\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n \n if is_valid: response = {'message': 'The blockchain is valid!'}\n else: response = {'message': 'Error, the blockchain is invalid!'}\n\n return jsonify(response), 200" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads Certificates from disk
def load_certs(self, path):
    try:
        with os.scandir(path) as it:
            for entry in it:
                if entry.name.endswith('crt') and entry.is_file():
                    with open(path + entry.name,'rb') as cert:
                        data=cert.read()
                        cr = x509.load_pem_x509_certificate(data)
                        if self.validate_certificate(cr):
                            self.issuers_certs[cr.subject.rfc4514_string()] = cr
        logger.info("Certificates loaded!")
    except:
        logger.error("Could not load certificates. Make sure to run this file on the /client directory")
[ "def load_ca_certs(ctx):\n for path in find_ca_cert_files():\n logging.debug('loading certs from %s', path)\n ctx.load_verify_locations(path)", "def load(filename):\n\t\tbuffer = [];\n\t\tb64_contents = \"\";\n\t\ttry:\n\t\t\thandle = open(filename, \"r\");\n\t\t\traw_contents = handle.readlines();\n\t\t\tfor line in raw_contents:\n\t\t\t\tif line.startswith(\"----\"):\n\t\t\t\t\tcontinue\n\t\t\t\tb64_contents += line.strip();\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Failed to read PEM file: \" + str(e));\n\t\tbuffer = b64decode(b64_contents);\n\t\treturn X509v3Certificate(buffer);", "def certs(path):\n import shutil\n from .frozen import resource_path\n\n cert_path = os.path.join(path, 'server.crt')\n shutil.copyfile(resource_path('ssl/server.crt'), cert_path)\n echo(cert_path)\n root_path = os.path.join(path, 'root.crt')\n shutil.copyfile(resource_path('ssl_root/root.crt'), root_path)\n echo(root_path)\n echo('Done!')", "def load_crl(self,path):\r\n\t\ttry:\r\n\t\t\twith os.scandir(path) as it:\r\n\t\t\t\tfor entry in it:\r\n\t\t\t\t\tif entry.name.endswith('crl') and entry.is_file():\r\n\t\t\t\t\t\twith open(path + entry.name,'rb') as f:\r\n\t\t\t\t\t\t\tcrl_data = f.read()\r\n\t\t\t\t\t\t\tcrl = x509.load_der_x509_crl(crl_data)\r\n\t\t\t\t\t\t\tself.crls_list.append(crl)\r\n\t\t\t\t\t\t\r\n\t\t\t\tlogger.info(\"Certicates loaded!\")\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Could not read Path!Make sure to run this file on the /client directory\")", "def _loadCAsFromDir(directoryPath):\n from twisted.internet import ssl\n\n caCerts = {}\n for child in directoryPath.children():\n if not child.basename().split('.')[-1].lower() == 'pem':\n continue\n try:\n data = child.getContent()\n except IOError:\n # Permission denied, corrupt disk, we don't care.\n continue\n try:\n theCert = ssl.Certificate.loadPEM(data)\n except ssl.SSL.Error:\n # Duplicate certificate, invalid certificate, etc. 
We don't care.\n pass\n else:\n caCerts[theCert.digest()] = theCert.original\n return caCerts.values()", "def _tryload_certificatefile(filename):\n\n filename = sanatizefilename(filename)\n\n if syhelpers.tls.load_certificate(filename) and syhelpers.tls.load_privatekey(filename):\n return True\n else:\n return False", "def load(self):\n data = self.get_data(\"certificates/%s\" % self.id)\n certificate = data[\"certificate\"]\n\n for attr in certificate.keys():\n setattr(self, attr, certificate[attr])\n\n return self", "def load_x509_certificate_pem(path):\n\n with open(path, 'rb') as f:\n cert = x509.load_pem_x509_certificate(f.read(), default_backend())\n return cert", "def download_load_certs(self, vpn_type, vpn_server_addr, ipsec_server_type):\n url = \"http://%s%s%s\" % (vpn_server_addr,\n self.cert_path_vpnserver,\n self.client_pkcs_file_name)\n local_cert_name = \"%s_%s_%s\" % (vpn_type.name,\n ipsec_server_type,\n self.client_pkcs_file_name)\n local_file_path = os.path.join(self.log_path, local_cert_name)\n try:\n ret = urllib.request.urlopen(url)\n with open(local_file_path, \"wb\") as f:\n f.write(ret.read())\n except:\n asserts.fail(\"Unable to download certificate from the server\")\n f.close()\n self.dut.adb.push(\"%s sdcard/\" % local_file_path)\n return local_cert_name", "def _download_ca_cert_and_key():\n ctx.logger.info('Downloading certificates to a local path...')\n ca_cert = os.path.join(_certs_dir(), CA_CERT)\n ca_key = os.path.join(_certs_dir(), CA_KEY)\n _download_file(CA_CERT, target=ca_cert)\n _download_file(CA_KEY, target=ca_key)\n return ca_cert, ca_key", "def load_certificate():\n db_uuid = read_file_first_line('odoo-db-uuid.conf')\n enterprise_code = read_file_first_line('odoo-enterprise-code.conf')\n if db_uuid and enterprise_code:\n url = 'https://www.odoo.com/odoo-enterprise/iot/x509'\n data = {\n 'params': {\n 'db_uuid': db_uuid,\n 'enterprise_code': enterprise_code\n }\n }\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n response = http.request(\n 'POST',\n url,\n body = json.dumps(data).encode('utf8'),\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n )\n result = json.loads(response.data.decode('utf8'))['result']\n if result:\n write_file('odoo-subject.conf', result['subject_cn'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/\"])\n Path('/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/root_bypass_ramdisks/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/etc/cups\"])\n subprocess.check_call([\"sudo\", \"service\", \"nginx\", \"restart\"])", "def load_pfx(file_path, password):\n\n with open(file_path, 'rb') as fp:\n return pkcs12.load_key_and_certificates(fp.read(), password.encode(), backends.default_backend())", "def loadCA(options):\n if not options.ca_cert:\n sys.exit(\"A CA certificate is required\")\n\n ca_x509 = X509.load_cert(options.ca_cert)\n\n if options.ca_key:\n keyPath = options.ca_key\n else:\n keyPath = 
options.ca_cert\n ca_evp = EVP.load_key(keyPath)\n\n return ca_evp, ca_x509", "def find_ca_cert_files():\n # Widely used locations for CA certificate files\n well_known_ca_cert_locations = [\n # Ubuntu\n '/etc/ssl/certs/ca-certificates.crt',\n # RedHat\n '/etc/pki/tls/certs/ca-bundle.crt',\n ]\n # Load all of the above locations that we can find\n for path in well_known_ca_cert_locations:\n if os.access(path, os.R_OK):\n yield path", "def _loadPEM(self):\n\n if self._cert is not None:\n return\n\n pem_filename = settings.AAP_PEM_FILE\n\n # log Loading public PEM certificate\n\n if not os.path.isfile(pem_filename):\n # The pem file hasn't been downloaded yet, grab it\n try:\n self.fetchPEM()\n except Exception:\n # log Error fetching PEM from AAP when trying to lead PEM\n raise\n\n try:\n with open(pem_filename, 'r') as cert_file:\n cert = load_pem(cert_file.read().encode(),\n default_backend())\n self.cert = cert.public_key()\n except Exception:\n # log Error loading PEM from local file\n # re-raise the exception received\n raise", "def load_certificate():\n params = demisto.params()\n cert = params.get(\"certificate\")\n cert = base64.b64decode(cert)\n passphrase = params.get('passphrase_creds', {}).get('password') or params.get(\"passphrase\", \"\")\n return cert, passphrase", "def test_source_certs(self):\n # Given a file based cert file\n file_certs = [\n os.path.join('auth', 'tests', 'files', 'rsacert.pem'),\n ]\n\n # when I attempt to source all configured certs\n kwargs = {\n 'file_certs': file_certs,\n 'federation_meta_uri': None,\n }\n received = auth.jwt.utils.source_certs(**kwargs)\n\n # then I should receive a list of certs\n msg = 'Certs list error'\n self.assertEqual(len(received), 1, msg)", "def _load_configurations(self):\n with open(self.config_file) as f:\n configs = f.read()\n config = ConfigParser.RawConfigParser(allow_no_value=True)\n config.readfp(io.BytesIO(configs))\n self.config = config\n #\n self.cert_file = self.config.get(\"cert-paths\", \"cert_file\")", "def _load_private_key(data_dir: str, filename: str) -> bytes:\n return load_binary(os.path.join(data_dir, 'keys_and_certs', filename))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads CRLs from disk
def load_crl(self,path):
    try:
        with os.scandir(path) as it:
            for entry in it:
                if entry.name.endswith('crl') and entry.is_file():
                    with open(path + entry.name,'rb') as f:
                        crl_data = f.read()
                        crl = x509.load_der_x509_crl(crl_data)
                        self.crls_list.append(crl)
            logger.info("CRLs loaded!")
    except:
        logger.error("Could not read Path! Make sure to run this file on the /client directory")
[ "def load_certs(self, path):\r\n\t\ttry:\r\n\t\t\twith os.scandir(path) as it:\r\n\t\t\t\tfor entry in it:\r\n\t\t\t\t\tif entry.name.endswith('crt') and entry.is_file():\r\n\t\t\t\t\t\twith open(path + entry.name,'rb') as cert:\r\n\t\t\t\t\t\t\tdata=cert.read()\r\n\t\t\t\t\t\t\tcr = x509.load_pem_x509_certificate(data)\r\n\t\t\t\t\t\t\tif self.validate_certificate(cr):\r\n\t\t\t\t\t\t\t\tself.issuers_certs[cr.subject.rfc4514_string()] = cr\r\n\t\t\t\t\t\t\t\r\n\t\t\t\tlogger.info(\"Certicates loaded!\")\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Could not load certificates.Make sure to run this file on the /client directory\")", "def load_ca_certs(ctx):\n for path in find_ca_cert_files():\n logging.debug('loading certs from %s', path)\n ctx.load_verify_locations(path)", "def read_lanc(path: str) -> admix.data.Lanc:\n lanc = admix.data.Lanc(path)\n return lanc", "def load_cache():\n Location._geocode_cache = Cache.get_file_objects(Location._geocode_cache_name)", "def ssl_crl(self):\n return \"\"\"--ssl-crl=file_name\"\"\"", "def _grab_crl(user_agent, url, timeout):\n request = Request(url)\n request.add_header('Accept', 'application/pkix-crl')\n request.add_header('User-Agent', user_agent)\n response = urlopen(request, None, timeout)\n data = response.read()\n if pem.detect(data):\n _, _, data = pem.unarmor(data)\n return crl.CertificateList.load(data)", "def load_file(self, file_path):\n ...", "def load(filename: str) -> RLAgent:\n pass", "def vpn_get_crl_path(self):\n return os.path.join(self.get_ejbca_home(), 'vpn', '%s.crl' % self.hostname)", "def load(self, crt_data: Optional[bytes] = None, crt_file: Optional[str] = None):\n if crt_data:\n self.crt = self.load_data(crt_data)\n elif crt_file:\n self.crt = self.load_file(path=crt_file)\n else:\n self.crt = None", "def load_id_lookup_table():\r\n dpath = csdldata.set_data_path()\r\n with open(dpath +'id_lookup_table.pkl', 'rb') as f:\r\n return pickle.load(f)", "def read_control_sequences(path):\n import os\n import sys\n trajectories = []\n for root, _, files in os.walk(path, followlinks=True):\n for file in files:\n if not file.lower().endswith('.prm.gz'):\n continue\n sys.stderr.write('\\rLoading {}...'.format(file))\n trajectories.append(read_control_sequence(os.path.join(root, file)))\n sys.stderr.write(\"\\rDone... 
\\n\")\n return trajectories", "def load_corpus_hashcode(codes_path):\n src_f = open(codes_path, \"r\")\n corpus_hashcodes = []\n for ln in src_f:\n corpus_hashcodes.append(int(ln.strip()))\n return corpus_hashcodes", "def load_climbs(self, ukc_name: str):\r\n # Get the logbook file name for the climber\r\n log_book = [file for file in os.listdir(ClimbManager.DIRECTORY) if file.split('_')[0] == ukc_name][0]\r\n with open(ClimbManager.DIRECTORY + '\\\\' + log_book) as file: # Load the file\r\n read_csv = csv.reader(file)\r\n first = True # First line contains the header so skip\r\n for row in read_csv:\r\n if first:\r\n first = False\r\n continue\r\n name = row[0]\r\n # When downloading info from UKC the grades and stars are located together\r\n # Seperate this information, store the grade and count the stars\r\n grade_info = row[1].split(' ')\r\n grade = ' '.join(grade_info[:-1])\r\n grade = grade.rstrip()\r\n stars = len(grade_info[-1])\r\n climb_style = row[2]\r\n # The grade of the route hides information about what kind of route it is, trad/sport/boulder etc.\r\n style = self.find_style(grade)\r\n partners = row[3].split(', ')\r\n notes = row[4]\r\n # Get the date the route was done\r\n date = self.determine_date(row[5])\r\n crag = row[6]\r\n climb = Climb(name, style, grade, stars, crag) # Create a Climb object and add\r\n if name + '_' + crag not in self.get_climbs_in_climb(): # If the climb not loaded then load\r\n self.add_climb(climb)\r\n log = Log(date, climb_style, partners, notes, climb) # Create a Log object and append\r\n self.add_log(log)\r\n else:\r\n # Find the correct climb and add a log of that climb\r\n load_climb = [climb for climb in self.get_climbs() if climb.name_crag() == name + '_' + crag][0]\r\n log = Log(date, climb_style, partners, notes, load_climb) # Create a Log object and append\r\n self.add_log(log)", "def load_links():\n # if .hn doesn't exist, return empty list\n if not os.path.isfile(HN_PATH):\n return []\n # otherwise, load it up\n hn_links = json.load(open(HN_PATH, 'r'))\n return hn_links", "def load_conll_dataset(filepath):\n observations = []\n lines = (x for x in open(filepath))\n for buf in generate_lines_for_sent(lines):\n conllx_lines = []\n for line in buf:\n conllx_lines.append(line.strip().split(\"\\t\"))\n embeddings = [None for x in range(len(conllx_lines))]\n observation = Observations(*zip(*conllx_lines), embeddings)\n observations.append(observation)\n return observations", "def _load_file(self):\n self.insertion_offset = 0\n self.populate_rand()\n self.file_name = self.randlist.popleft()\n with open(self.file_name) as f:\n self.file = list((f.readlines()))\n for x,i in enumerate(self.file):\n self.file[x] = i.rstrip('\\n')", "def _resolve_path_load(self, cdx, is_original, failed_files):\n\n if is_original:\n (filename, offset, length) = (cdx['orig.filename'],\n cdx['orig.offset'],\n cdx['orig.length'])\n else:\n (filename, offset, length) = (cdx['filename'],\n cdx['offset'],\n cdx['length'])\n\n # optimization: if same file already failed this request,\n # don't try again\n if failed_files is not None and filename in failed_files:\n raise ArchiveLoadFailed('Skipping Already Failed', filename)\n\n any_found = False\n last_exc = None\n last_traceback = None\n for resolver in self.path_resolvers:\n possible_paths = resolver(filename)\n\n if possible_paths:\n for path in possible_paths:\n any_found = True\n try:\n return self.record_loader.load(path, offset, length)\n\n except Exception as ue:\n last_exc = ue\n import sys\n last_traceback = 
sys.exc_info()[2]\n\n # Unsuccessful if reached here\n if failed_files is not None:\n failed_files.append(filename)\n\n if last_exc:\n #msg = str(last_exc.__class__.__name__)\n msg = str(last_exc)\n else:\n msg = 'Archive File Not Found'\n\n raise ArchiveLoadFailed(msg, filename), None, last_traceback", "def load_cluster_leaders(self, path_to_leaders):\n\n\t\tself._cluster_leader_dir = os.path.join(os.path.abspath(path_to_leaders), '')\n\t\tself._leader_set = glob.glob((self._cluster_leader_dir + \"*\"))\n\n\t\treturn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds the certificate chain of a given certificate
def build_cert_chain(self,certificate):
    chain = []
    last = None
    logger.info("Starting to build trusting chain..")
    while True:
        if last == certificate:
            self.trusting_chain = []
            return
        last = certificate
        chain.append(certificate)
        issuer = certificate.issuer.rfc4514_string()
        subject = certificate.subject.rfc4514_string()
        if issuer == subject and issuer in self.issuers_certs:
            break
        if issuer in self.issuers_certs:
            certificate = self.issuers_certs[issuer]
    logger.info("Chain Built with success")
    self.trusting_chain = chain
[ "def _get_cert_chain(ssl_info):\n\n cert = M2Crypto.X509.load_cert_string(ssl_info.get(\"cert\", \"\"))\n chain = M2Crypto.X509.X509_Stack()\n for c in ssl_info.get(\"chain\", []):\n aux = M2Crypto.X509.load_cert_string(c)\n chain.push(aux)\n return cert, chain", "def _GetSigningChain(cert_id):\n signing_chain = []\n next_cert_id = cert_id\n\n while next_cert_id:\n cert = _GetCertificate(next_cert_id)\n signing_chain.append(cert)\n next_cert_id = cert.parent_certificate_id\n\n return signing_chain", "def makeCertRequest(cn):\n key = KeyPair.generate()\n return key.certificateRequest(DN(CN=cn))", "def get_cert_chain(self, error: bool = False) -> t.List[cert_human.Cert]:\n if not (isinstance(self.URL_CERT_CHAIN, list) and self.URL_CERT_CHAIN):\n response: t.Optional[requests.Response] = self.safe_request(error=error)\n value = []\n if response:\n chain: t.List[OpenSSL.crypto.X509] = listify(\n response.raw.captured_chain,\n )\n source: dict = {\n \"url\": self.url,\n \"method\": f\"{self.get_cert_chain.__name__}\",\n }\n value = [cert_human.Cert(cert=x, source=source) for x in chain]\n self.URL_CERT_CHAIN = value\n return self.URL_CERT_CHAIN", "def mk_cacert(issuer, request, private_key):\n pkey = request.get_pubkey()\n cert = X509.X509()\n cert.set_serial_number(1)\n cert.set_version(2)\n mk_cert_valid(cert)\n cert.set_issuer(issuer)\n cert.set_subject(cert.get_issuer())\n cert.set_pubkey(pkey)\n cert.add_ext(X509.new_extension('basicConstraints', 'CA:TRUE'))\n cert.add_ext(X509.new_extension('subjectKeyIdentifier', cert.get_fingerprint()))\n cert.sign(private_key, 'sha256')\n return cert, private_key, pkey", "def mk_temporary_cert(cacert_file, ca_key_file, cn):\n cert_req, pk2 = mk_request(1024, cn=cn)\n if cacert_file and ca_key_file:\n cacert = X509.load_cert(cacert_file)\n pk1 = EVP.load_key(ca_key_file)\n else:\n cacert = None\n pk1 = None\n\n cert = mk_cert()\n cert.set_subject(cert_req.get_subject())\n cert.set_pubkey(cert_req.get_pubkey())\n\n if cacert and pk1:\n cert.set_issuer(cacert.get_issuer())\n cert.sign(pk1, 'sha256')\n else:\n cert.set_issuer(cert.get_subject())\n cert.sign(pk2, 'sha256')\n\n certf = namedtmp()\n certf.write(cert.as_pem())\n certf.write(pk2.as_pem(None))\n certf.flush()\n\n return certf", "def create_certificates(self, certificate, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_certificates_with_http_info(certificate, **kwargs)\n else:\n (data) = self.create_certificates_with_http_info(certificate, **kwargs)\n return data", "def create_certificate_signing_request(*props): # pylint: disable=unused-argument\n pass", "def test_creating_cert(self):\n\n certificate = keyper.Certificate(AppleKeychainTests.TEST_CERT_PATH, password=AppleKeychainTests.TEST_CERT_PASSWORD)\n self.assertEqual(certificate.sha1, \"75:22:4C:AD:D6:A0:BD:0C:88:5F:B1:77:85:2F:83:A4:F6:80:69:70\")\n self.assertEqual(certificate.common_name, \"TestCertificate_CodeSign\")\n self.assertEqual(certificate.private_key_name, \"TestCertificate_CodeSign\")", "def _get_certificates_arguments(\n self, ssl_cert_key, ssl_cert_crt, ssl_cert_generate):\n section = self._config[self._config_section]\n\n # Private key\n ssl_cert_key = ssl_cert_key or section['ssl_cert_key']\n\n # Public certificate\n if ssl_cert_crt is not False:\n ssl_cert_crt = ssl_cert_crt or section.get_literal('ssl_cert_crt')\n\n # Generated certificate\n ssl_cert_generate = (\n ssl_cert_generate or section.get_literal('ssl_cert_generate')\n or False)\n\n return ssl_cert_key, 
ssl_cert_crt, ssl_cert_generate", "def verify_certificate_chain(self, certificates, now=None):\n if not certificates:\n raise ValueError(\"chain must have at least one certificate\")\n if now is None:\n now = int(time.time() * 1000)\n root_issuer = certificates[0].payload[\"iss\"]\n root_key = self.supportdocs.get_key(root_issuer)\n current_key = root_key\n for cert in certificates:\n if cert.payload[\"exp\"] < now:\n raise ExpiredSignatureError(\"expired certificate in chain\")\n if not cert.check_signature(current_key):\n raise InvalidSignatureError(\"bad signature in chain\")\n current_key = cert.payload[\"public-key\"]\n return cert", "def _write_ca_chain(self, ks, ts, ca_chain_path):\n if os.path.exists(ca_chain_path):\n return\n\n ca_chain = \"\"\n for store in [ks, ts]:\n for alias, c in store.certs.items():\n ca_chain = ca_chain + self._bytes_to_pem_str(c.cert, \"CERTIFICATE\")\n\n with Path(ca_chain_path).open(\"w\") as f:\n f.write(ca_chain)", "def _parse_file_key_certs(certificate_file, validate = False):\n\n while True:\n keycert_content = _read_until_keywords('dir-key-certification', certificate_file)\n\n # we've reached the 'router-signature', now include the pgp style block\n block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]\n keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True)\n\n if keycert_content:\n yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate)\n else:\n break # done parsing file", "def create_certificates_with_http_info(self, certificate, **kwargs):\n\n all_params = ['certificate', 'names']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_certificates\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'certificate' is set\n if ('certificate' not in params) or (params['certificate'] is None):\n raise ValueError(\"Missing the required parameter `certificate` when calling `create_certificates`\")\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'names' in params:\n query_params.append(('names', params['names']))\n collection_formats['names'] = 'csv'\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'certificate' in params:\n body_params = params['certificate']\n # Authentication setting\n auth_settings = ['AuthTokenHeader']\n\n return self.api_client.call_api('/1.10/certificates', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='CertificateResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def _create_certificate (self, batch, certificate_data):\n with transaction.atomic ():\n certificate = Certificate.objects.create (batch=batch, **certificate_data)\n\n return certificate", "def install_certificate():\n stream = open(\"/bootflash/poap_device_recipe.yaml\", 'r')\n dictionary = yaml.load(stream)\n config_file_second = open(os.path.join(\"/bootflash\", 
options[\"split_config_second\"]), \"a+\")\n \n if (\"Trustpoint\" in dictionary):\n for ca in dictionary[\"Trustpoint\"].keys():\n ca_apply = 0\n for tp_cert, crypto_pass in dictionary[\"Trustpoint\"][ca].items():\n tp_cert = tp_cert.strip()\n file = tp_cert.split('/')[-1]\n if (file.endswith(\".p12\") or file.endswith(\".pfx\")):\n poap_log(\"Installing certificate file. %s\" % file)\n if (ca_apply == 0):\n config_file_second.write(\"crypto ca trustpoint %s\\n\" % ca)\n ca_apply = 1\n config_file_second.write(\"crypto ca import %s pkcs12 bootflash:poap_files/%s/%s %s\\n\" % (ca, ca, file, crypto_pass))\n poap_log(\"Installed certificate %s succesfully\" % file)", "def fetch_certs(certificate_list, user_agent=None, timeout=10):\n\n output = []\n\n if user_agent is None:\n user_agent = 'certvalidator %s' % __version__\n elif not isinstance(user_agent, str_cls):\n raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))\n\n for url in certificate_list.issuer_cert_urls:\n request = Request(url)\n request.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')\n request.add_header('User-Agent', user_agent)\n response = urlopen(request, None, timeout)\n\n content_type = response.headers['Content-Type'].strip()\n response_data = response.read()\n\n if content_type == 'application/pkix-cert':\n output.append(x509.Certificate.load(response_data))\n\n elif content_type == 'application/pkcs7-mime':\n signed_data = cms.SignedData.load(response_data)\n if isinstance(signed_data['certificates'], cms.CertificateSet):\n for cert_choice in signed_data['certificates']:\n if cert_choice.name == 'certificate':\n output.append(cert_choice.chosen)\n else:\n raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))\n\n return output", "def load_or_create_root_ca(key_filename, cert_filename, org_name='Microchip Technology Inc',\n common_name='Crypto Authentication Root CA 002'):\n key_filename = Path(key_filename)\n cert_filename = Path(cert_filename)\n rebuild_cert = True\n\n # Load or create key pair\n private_key = load_or_create_key_pair(filename=key_filename)\n\n # Look for root certificate\n certificate = None\n if cert_filename.is_file():\n rebuild_cert = False\n # Found cached certificate file, read it in\n with open(str(cert_filename), 'rb') as f:\n certificate = x509.load_pem_x509_certificate(f.read(), get_backend())\n\n if certificate:\n if get_org_name(certificate.subject) != org_name:\n rebuild_cert = True\n\n cert_pub_bytes = certificate.public_key().public_bytes(format=PublicFormat.SubjectPublicKeyInfo, encoding=Encoding.DER)\n key_pub_bytes = private_key.public_key().public_bytes(format=PublicFormat.SubjectPublicKeyInfo, encoding=Encoding.DER)\n if cert_pub_bytes != key_pub_bytes:\n rebuild_cert = True\n\n if rebuild_cert:\n print(\"Building new root certificate\")\n # Build new certificate\n builder = ExtBuilder()\n builder = builder.subject_name(x509.Name([\n x509.NameAttribute(x509.oid.NameOID.ORGANIZATION_NAME, org_name),\n x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, common_name)]))\n builder = builder.issuer_name(builder._subject_name) # Names are the same for a self-signed certificate\n builder = builder.not_valid_before(datetime.utcnow().replace(tzinfo=timezone.utc))\n builder = builder.not_valid_after(builder._not_valid_before + timedelta(days=365*40))\n #builder = builder.not_valid_after(datetime(9999, 12, 31, 23, 59, 59, tzinfo=timezone.utc))\n builder = 
builder.public_key(private_key.public_key())\n builder = builder.serial_number(random_cert_sn(16))\n builder = builder.add_extension(\n x509.SubjectKeyIdentifier.from_public_key(builder._public_key),\n critical=False)\n builder = builder.add_extension(\n x509.AuthorityKeyIdentifier.from_issuer_public_key(builder._public_key),\n critical=False)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True)\n\n # Sign certificate with its own key\n certificate_new = builder.sign(\n private_key=private_key,\n algorithm=hashes.SHA256(),\n backend=get_backend())\n\n certificate = update_x509_certificate(certificate, certificate_new, cert_filename)\n else:\n print(\"Using cached root certificate\")\n\n return {'private_key': private_key, 'certificate': certificate}", "def get_path(temp_dir=None, cache_length=24, cert_callback=None):\n\n ca_path, temp = _ca_path(temp_dir)\n\n # Windows and OS X\n if temp and _cached_path_needs_update(ca_path, cache_length):\n empty_set = set()\n\n any_purpose = '2.5.29.37.0'\n apple_ssl = '1.2.840.113635.100.1.3'\n win_server_auth = '1.3.6.1.5.5.7.3.1'\n\n with path_lock:\n if _cached_path_needs_update(ca_path, cache_length):\n with open(ca_path, 'wb') as f:\n for cert, trust_oids, reject_oids in extract_from_system(cert_callback, True):\n if sys.platform == 'darwin':\n if trust_oids != empty_set and any_purpose not in trust_oids \\\n and apple_ssl not in trust_oids:\n if cert_callback:\n cert_callback(Certificate.load(cert), 'implicitly distrusted for TLS')\n continue\n if reject_oids != empty_set and (apple_ssl in reject_oids\n or any_purpose in reject_oids):\n if cert_callback:\n cert_callback(Certificate.load(cert), 'explicitly distrusted for TLS')\n continue\n elif sys.platform == 'win32':\n if trust_oids != empty_set and any_purpose not in trust_oids \\\n and win_server_auth not in trust_oids:\n if cert_callback:\n cert_callback(Certificate.load(cert), 'implicitly distrusted for TLS')\n continue\n if reject_oids != empty_set and (win_server_auth in reject_oids\n or any_purpose in reject_oids):\n if cert_callback:\n cert_callback(Certificate.load(cert), 'explicitly distrusted for TLS')\n continue\n if cert_callback:\n cert_callback(Certificate.load(cert), None)\n f.write(armor('CERTIFICATE', cert))\n\n if not ca_path:\n raise CACertsError('No CA certs found')\n\n return ca_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the client has already negotiated algorithms
def has_negotiated(self): return not (self.cipher is None or self.digest is None)
[ "def negotiate_algs(self):\r\n\r\n\t\tdata = {\r\n\t\t\t'method': \"NEGOTIATE_ALG\",\r\n\t\t\t'ciphers': self.ciphers,\r\n\t\t\t'digests': self.digests,\r\n\t\t\t'ciphermodes': self.ciphermodes\r\n\t\t}\r\n\t\trequest = requests.post(f'{SERVER_URL}/api/protocols',json=data, headers={'Content-Type': 'application/json'})\r\n\r\n\r\n\r\n\t\tresponse = json.loads(request.text)\r\n\t\t\r\n\t\tif response['method'] == 'NACK':\r\n\t\t\tlogger.info('ERROR NEGOTIATING ALGORITHMS')\r\n\t\telse:\r\n\t\t\tlogger.info('NEGOTIATED ALGORITHMS WITH SUCCESS')\r\n\t\t\tself.session_id = response['id']\r\n\t\t\tself.cipher, self.digest, self.ciphermode = response['cipher'], response['digest'], response['mode']\r\n\t\t\t\r\n\t\t\tcert = base64.b64decode(response['cert'])\r\n\t\t\tcert = x509.load_pem_x509_certificate(cert)\r\n\t\t\tself.build_cert_chain(cert)\r\n\t\t\tif self.validate_cert_chain() and self.validate_server_purpose(cert):\r\n\t\t\t\tlogger.info(\"Server Certificate OK\")\r\n\t\t\t\tself.server_cert = cert\r\n\t\t\t\tself.state = 'NEGOTIATE_ALG'\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"Certificate is not valid\")\r\n\t\t\t\texit(1)\t\t# TODO: ver\r\n\r\n\t\t\t#self.server_cert=cert\r", "def test_nextProtocolMechanismsALPNIsSupported(self):\n supportedProtocols = sslverify.protocolNegotiationMechanisms()\n self.assertTrue(\n sslverify.ProtocolNegotiationSupport.ALPN in\n supportedProtocols\n )", "def has_apikeys(self):\n return self.api_keys.count() > 0", "def AllExpectationsUsed(self):\n return not self._expected_responses", "def test_backends_are_idle(self):\n for impalad in ImpalaCluster.get_e2e_test_cluster().impalads:\n verifier = MetricVerifier(impalad.service)\n verifier.wait_for_backend_admission_control_state()", "def test_NPNAndALPNNoAdvertise(self):\n protocols = [b'h2', b'http/1.1']\n negotiatedProtocol, lostReason = negotiateProtocol(\n clientProtocols=protocols,\n serverProtocols=[],\n )\n self.assertIsNone(negotiatedProtocol)\n self.assertIsNone(lostReason)", "def check_used_once(g):\n mng = g.manager\n return sum(mng.graph_users[g].values()) == 1", "def _verify_ledger_apis_access() -> None:\n path = Path(DEFAULT_AEA_CONFIG_FILE)\n agent_loader = ConfigLoader(\"aea-config_schema.json\", AgentConfig)\n fp = path.open(mode=\"r\", encoding=\"utf-8\")\n aea_conf = agent_loader.load(fp)\n\n for identifier, _value in aea_conf.ledger_apis.read_all():\n if identifier not in SUPPORTED_LEDGER_APIS:\n ValueError(\"Unsupported identifier in ledger apis.\")\n\n fetchai_ledger_api_config = aea_conf.ledger_apis.read(FETCHAI)\n if fetchai_ledger_api_config is None:\n logger.debug(\"No fetchai ledger api config specified.\")\n else:\n network = cast(str, fetchai_ledger_api_config.get(\"network\"))\n host = cast(str, fetchai_ledger_api_config.get(\"host\"))\n port = cast(int, fetchai_ledger_api_config.get(\"port\"))\n if network is not None:\n _try_to_instantiate_fetchai_ledger_api(network=network)\n elif host is not None and port is not None:\n _try_to_instantiate_fetchai_ledger_api(host=host, port=port)\n else:\n raise ValueError(\"Either network or host and port must be specified.\")\n ethereum_ledger_config = aea_conf.ledger_apis.read(ETHEREUM)\n if ethereum_ledger_config is None:\n logger.debug(\"No ethereum ledger api config specified.\")\n else:\n address = cast(str, ethereum_ledger_config.get(\"address\"))\n if address is not None:\n _try_to_instantiate_ethereum_ledger_api(address)\n else:\n raise ValueError(\"Address must be specified.\")", "def check_beam_connections(self):\n pass", "def 
can_activate(self):\n return IPossibleLocalAgencyInfo.providedBy(self.context) and \\\n not ILocalAgencyInfo.providedBy(self.context)", "def test_NPNAndALPNNoOverlap(self):\n clientProtocols = [b'h2', b'http/1.1']\n serverProtocols = [b'spdy/3']\n negotiatedProtocol, lostReason = negotiateProtocol(\n serverProtocols=clientProtocols,\n clientProtocols=serverProtocols,\n )\n self.assertIsNone(negotiatedProtocol)\n self.assertEqual(lostReason.type, SSL.Error)", "def all_auxiliary_public_keys_received(self) -> bool:\n return (\n len(self._guardian_auxiliary_public_keys)\n == self.ceremony_details.number_of_guardians\n )", "def available_for_protocol(self, protocol):\n if self.protocol == ALL or protocol == ALL:\n return True\n\n return protocol in ensure_sequence(self.protocol)", "def exists(self):\n try:\n asg = self.client.get_asg(self.env, self.name, retries=1)\n return True\n except:\n return False", "def alternatives_available(self):\n return 1", "def test_unused_recent_badge_awards(self):\n request = HttpRequest()\n request.user = self.testuser\n request.COOKIES[LAST_CHECK_COOKIE_NAME] = '1156891591.492586'\n self.mw.process_request(request)\n ok_(hasattr(request, 'recent_badge_awards'))\n\n response = HttpResponse()\n self.mw.process_response(request, response)\n ok_(LAST_CHECK_COOKIE_NAME not in response.cookies)", "def verify_connection(self, request, client_address):\n return 1", "def _do_admission_control_check():\n return not sstbf.is_sstbf_configured()", "def all_election_public_keys_received(self) -> bool:\n return (\n len(self._guardian_election_public_keys)\n == self.ceremony_details.number_of_guardians\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Negotiates algorithms with the server. Sends all the available algorithms and waits for the server to choose one
def negotiate_algs(self): data = { 'method': "NEGOTIATE_ALG", 'ciphers': self.ciphers, 'digests': self.digests, 'ciphermodes': self.ciphermodes } request = requests.post(f'{SERVER_URL}/api/protocols',json=data, headers={'Content-Type': 'application/json'}) response = json.loads(request.text) if response['method'] == 'NACK': logger.info('ERROR NEGOTIATING ALGORITHMS') else: logger.info('NEGOTIATED ALGORITHMS WITH SUCCESS') self.session_id = response['id'] self.cipher, self.digest, self.ciphermode = response['cipher'], response['digest'], response['mode'] cert = base64.b64decode(response['cert']) cert = x509.load_pem_x509_certificate(cert) self.build_cert_chain(cert) if self.validate_cert_chain() and self.validate_server_purpose(cert): logger.info("Server Certificate OK") self.server_cert = cert self.state = 'NEGOTIATE_ALG' else: logger.info("Certificate is not valid") exit(1) # TODO: ver #self.server_cert=cert
[ "def run_default(self):\n\n if self.complete_pipelines or self.dataset.depth + 1 >= self.max_pipeline_depth:\n candidates = CLASSIFIERS.keys()\n else:\n candidates = ALGORITHMS.keys()\n for algo_type in candidates:\n try:\n LOGGER.info('Starting new algorithm \\'{}\\'...'.format(algo_type))\n algorithm = Algorithm(algo_type,\n input_dataset=self.dataset.id,\n output_dataset=None,\n status=AlgorithmStatus.RUNNING,\n start_time=datetime.now(),\n host=HOSTNAME)\n\n \"\"\"Save a random configuration of the algorithms hyperparameters in params\"\"\"\n params = algorithm.default_config()\n algorithm.hyperparameter_values = params\n\n param_info = 'Chose parameters for algorithm :'.format(algorithm.hyperparameter_values)\n for k in sorted(params.keys()):\n param_info += '\\n\\t{} = {}'.format(k, params[k])\n LOGGER.debug(param_info)\n except Exception as ex:\n if isinstance(ex, KeyboardInterrupt):\n raise ex\n LOGGER.error('Failed to select hyperparameters', ex)\n raise AlgorithmError(str(ex), traceback.format_exc())\n\n # Run algorithm and store result\n self._run_algorithm(algorithm, params)", "def check_for_specs(self):\n while True:\n # FIXME configurable default idle time.\n self.idle_time = 5\n\n # try to send a request for specifications. If URL is unreachable means that the Supervisor (or Client) has\n # most probably died, so we need to re-register capabilities\n try:\n logger.info(\"Polling for specifications at \" + self.specification_url)\n res = self.send_message(self.specification_url, \"GET\")\n except Exception as e:\n logger.warning(\"Specification poll at \" + self.specification_url + \"failed :\" + repr(e))\n logger.warning(\"Attempting reregistration\")\n self.register_to_client()\n\n if res.status == 200:\n # specs retrieved: split them if there is more than one\n env = mplane.model.parse_json(res.data.decode(\"utf-8\"))\n for spec in env.messages():\n # handle callbacks\n # FIXME NO NO NO see issue #3\n if spec.get_label() == \"callback\":\n self.idle_time = spec.when().timer_delays()[1]\n break\n\n # hand spec to scheduler, making sure the callback is called after\n with self._callback_lock:\n reply = self.scheduler.process_message(self._client_identity, spec, callback=self.return_results)\n if not isinstance(spec, mplane.model.Interrupt):\n self._result_url[spec.get_token()] = spec.get_link()\n\n # send receipt to the Client/Supervisor\n res = self.send_message(self._result_url[spec.get_token()], \"POST\", reply)\n\n # not registered on supervisor, need to re-register\n # FIXME what's 428 for? 
See issue #4\n elif res.status == 428:\n logger.warning(\"Specification poll got 428, attempting reregistration\")\n self.register_to_client()\n\n else:\n logger.critical(\"Specification poll to \"+self.specification_url+\" failed:\"+\n str(res.status) + \" - \" + res.data.decode(\"utf-8\"))\n \n sleep(self.idle_time)", "def process_negotiation(self, message: str):\n logger.debug(f\"Process Negotation: {message}\")\n\n symetric_ciphers = message[\"algorithms\"][\"symetric_ciphers\"]\n chiper_modes = message[\"algorithms\"][\"chiper_modes\"]\n digest_algorithms = message[\"algorithms\"][\"digest_algorithms\"]\n\n for sm_cipher in symetric_ciphers:\n if sm_cipher in self.symetric_ciphers:\n self.used_symetric_cipher = sm_cipher\n break\n\n for cipher_md in chiper_modes:\n if cipher_md in self.cipher_modes:\n self.used_cipher_mode = cipher_md\n break\n\n for digest_alg in digest_algorithms:\n if digest_alg in self.digest_algorithms:\n self.used_digest_algorithm = digest_alg\n break\n\n message = {\n \"type\": \"NEGOTIATION_REP\",\n \"algorithms\": {\n \"symetric_cipher\": self.used_symetric_cipher,\n \"cipher_mode\": self.used_cipher_mode,\n \"digest_algorithm\": self.used_digest_algorithm,\n },\n }\n\n if (\n self.used_symetric_cipher is not None\n and self.used_cipher_mode is not None\n and self.used_digest_algorithm is not None\n ):\n self._send(message)\n return True\n\n return False", "async def __delayed_handshake(self):\n await asyncio.sleep(1)\n self.create_task(self.local_client.register_local_data_watcher(), 'local data watcher')\n self.create_task(self.local_client.register_classic_games_updater(), 'classic games updater')", "def wait_on_request(self):\n \n self.sock1.listen(5)\n self.sock2.listen(5)\n self.sock3.listen(5)\n self.sock1.setblocking(1)\n self.sock2.setblocking(1)\n self.sock3.setblocking(1)\n inputs = [self.sock1,self.sock2,self.sock3]\n incoming, a, b = select.select(inputs, [], inputs)\n if incoming[0]:\n recev_ad = (incoming[0].getsockname());\n self.connection,address = incoming[0].accept()\n data_in = self.connection.recv(64); \n if self.verify_dt_request(data_in) == True:\n self.lang_type = self.portarray.index(recev_ad[1])+1\n print('Received a request for #{} language and type {}'.format(self.lang_type,self.req_type))\n self.response()\n self.wait_on_request()", "def algorithm(self):\n # init\n self.transfer = Transfer()\n # Read info from files written by PostJobs and bookkeeping from previous run.\n self.transfer.readInfo()\n self.rucioClient = self._initRucioClient(self.transfer.username, self.transfer.restProxyFile)\n # Get info what's already in Rucio containers\n self.transfer.readInfoFromRucio(self.rucioClient)\n self.crabRESTClient = self._initCrabRESTClient(\n self.transfer.restHost,\n self.transfer.restDBInstance,\n self.transfer.restProxyFile,\n )\n # build dataset\n BuildDBSDataset(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 1\n RegisterReplicas(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 2\n MonitorLockStatus(self.transfer, self.rucioClient, self.crabRESTClient).execute()", "def LoadAlgorithms(self):\n\n\n self.__api = AlgorithmApi\n self.__apiexc = ApiException\n\n DxDomainList()\n\n try:\n api_instance = self.__api(self.__engine.api_client)\n api_response = api_instance.get_all_algorithms()\n\n if self.__sync:\n synclist = DxSyncList()\n sync = synclist.get_all_algorithms()\n\n if api_response.response_list:\n for c in api_response.response_list:\n alg = DxAlgorithm(self.__engine, 
existing_object=c)\n\n dom = DxDomainList.get_domain_by_algorithm(c.algorithm_name, report_error=False)\n\n if dom:\n alg.domain_name = dom\n else:\n alg.domain_name = ''\n\n if self.__sync:\n if c.algorithm_name in sync:\n alg.sync = 1\n else:\n alg.sync='N/A'\n self.__algorithmList[c.algorithm_name] = alg\n else:\n print_error(\"No algorithm found\")\n self.__logger.error(\"No algorithm found\")\n return 1\n\n return None\n\n except self.__apiexc as e:\n print_error(e.body)\n self.__logger.error(e.body)\n return 1", "def test_reset_protocol_work(self):\n try:\n from openmm import app\n except ImportError: # OpenMM < 7.6\n from simtk.openmm import app\n\n parameter_name = 'lambda_electrostatics'\n temperature = 298.0 * unit.kelvin\n parameter_initial = 1.0\n parameter_final = 0.0\n platform_name = 'CPU'\n nonbonded_method = 'CutoffPeriodic'\n\n # Creating the test system with a high frequency barostat.\n testsystem = testsystems.AlchemicalAlanineDipeptide(nonbondedMethod=getattr(app, nonbonded_method))\n context, integrator = self.create_system(testsystem, parameter_name, parameter_initial, temperature, platform_name)\n\n # Number of NCMC steps\n nsteps = 20\n niterations = 3\n\n # Running several rounds of configuration updates and NCMC\n for i in range(niterations):\n integrator.step(5)\n # Reseting the protocol work inside the integrator\n integrator.reset_protocol_work()\n integrator.reset()\n external_protocol_work, integrator_protocol_work = self.run_ncmc(context, integrator, temperature, nsteps, parameter_name, parameter_initial, parameter_final)\n assert abs(external_protocol_work - integrator_protocol_work) < 1.E-5", "def waitForNetwork(self):\n time.sleep(0.1)", "def main():\n simulation_mode = input('Enter -1 for simulation, or anything else for main program.')\n simulation_mode = True if simulation_mode == '-1' else False\n adversarial_mode = input('Enter -1 to enable adversarial mode or anything else for a normal election.\\n')\n adversarial_mode = True if adversarial_mode == '-1' else False\n\n simulation_map = {\n 1: {'description': 'Valid voters casting valid votes', 'adversarial': False, 'kwargs': {'ballot_config_path': 'configs/simulation/simulation_1_ballot_config.json'}},\n 2: {'description': 'Unknown voter attempting to cast vote', 'adversarial': False, 'kwargs': {'num_unregistered_voters': 10, 'ballot_config_path': 'configs/simulation/simulation_2_ballot_config.json'}},\n 3: {'description': 'Valid voter attempting to cast extra vote', 'adversarial': False, 'kwargs': {'num_double_voting_voters': 5, 'ballot_config_path': 'configs/simulation/simulation_3_ballot_config.json'}}, # voter will vote twice so effectively 10 voters\n # note about 4: this isn't necessarily an adversarial scenario, but we choose to treat it as one here.\n 4: {'description': 'Valid voters attempting to cast invalid vote', 'adversarial': True, 'kwargs': {'voting_node_adversary_class': InvalidBallotVotingComputer, \n 'additional_selections': [{'position': 'FakePosition', 'candidate': 'Jai Punjwani'}],\n 'ballot_config_path': 'configs/simulation/simulation_4_ballot_config.json'}},\n 5: {'description': 'Node broadcasting invalid transaction', 'adversarial': True, 'kwargs': {'voter_node_adversary_class': UnrecognizedVoterAuthenticationBooth, 'ballot_config_path': 'configs/simulation/simulation_5_ballot_config.json'}},\n 6: {'description': 'Adversarial node creating invalid claim tickets', 'adversarial': True, 'kwargs': {'voter_node_adversary_class': AuthBypassVoterAuthenticationBooth, 
'ballot_config_path': 'configs/simulation/simulation_6_ballot_config.json'}},\n 7: {'description': 'Adversarial node not participating in consensus round', 'adversarial': True, 'kwargs': {'voting_node_adversary_class': DOSVotingComputer, 'ballot_config_path': 'configs/simulation/simulation_7_ballot_config.json'}},\n 8: {'description': 'Custom', 'adversarial': adversarial_mode} # TODO - future work\n }\n adversary_simulation_indexes = [k for k,v in simulation_map.items() if v['adversarial']]\n setup_kwargs = {}\n\n # allow user to choose which simulation to run\n if simulation_mode:\n for n in simulation_map:\n # print either adversarial or non-adversarial simulations\n if simulation_map[n]['adversarial'] == adversarial_mode:\n print('({}) {}'.format(n, simulation_map[n]['description']))\n simulation_number = int(input('Enter a simulation number: '))\n\n try:\n simulation = simulation_map[simulation_number]\n if simulation_number in adversary_simulation_indexes and not adversarial_mode:\n print('Wrong index. Defaulting to (1)')\n simulation_number = 1\n setup_kwargs.update(simulation.get('kwargs', {}))\n if simulation_number == 8:\n exit('Custom mode is for future development.')\n except KeyError:\n print (\"Wrong index. Defaulting to (1)\")\n simulation_number = 1\n\n # adversarial in normal program mode \n elif adversarial_mode:\n # prompt user to select adversary of choice\n voting_node_key = 'voting_node_adversary_class'\n voter_node_key = 'voter_node_adversary_class'\n adversary_classes = {\n voter_node_key: [UnrecognizedVoterAuthenticationBooth, AuthBypassVoterAuthenticationBooth],\n voting_node_key: [InvalidBallotVotingComputer, DOSVotingComputer]\n }\n\n blockchain_names = {\n voter_node_key: 'Voter Blockchain',\n voting_node_key: 'Ballot Blockchain'\n }\n\n for i, blockchain_key in enumerate([voter_node_key, voting_node_key]):\n blockchain_name = blockchain_names[blockchain_key]\n _input = input('Enter {} for a {} adversary or anything else to skip.\\n'.format(i, blockchain_name))\n if _input == str(i):\n for index, adversary_class in enumerate(adversary_classes[blockchain_key]):\n print ('({}) {}'.format(index, adversary_class.__name__))\n node_index = int(input('Choose an adversary node.'))\n try:\n setup_kwargs.update(\n {blockchain_key: adversary_classes[blockchain_key][node_index]}\n )\n except (TypeError, KeyError) as e:\n print('Invalid index. exiting..')\n exit()\n\n program = Simulation() if simulation_mode else VotingProgram()\n consensus_round_interval = 6 if simulation_mode else 30\n \n print(\"Setting up election...\")\n program.setup(\n adversarial_mode=adversarial_mode, \n consensus_round_interval=consensus_round_interval,\n **setup_kwargs\n )\n input('Set up complete. Press enter to begin election\\n')\n program.begin_program()", "def approach_server(self):\n self.add_phase(timing_phases.ApproachServer_4_1)\n # There's no certainty that phase 4_3 will happen. 
We register\n # choices to decide whether to add it.\n self.game.register_choice_provider(\n timing_phases.ApproachServer_4_2, self, 'approach_4_2_actions')\n self.add_phase(timing_phases.ApproachServer_4_2)", "def body(self):\n waitfor = set()\n for a in self.acceptors:\n self.sendMessage(a, P2aMessage(self.id, self.ballot_number, self.slot_number, self.command))\n waitfor.add(a)\n\n while True:\n msg = self.getNextMessage()\n if isinstance(msg, P2bMessage):\n if self.ballot_number == msg.ballot_number and msg.src in waitfor:\n waitfor.remove(msg.src)\n if len(waitfor) < float(len(self.acceptors))/2:\n for r in self.replicas:\n self.sendMessage(r, DecisionMessage(self.id, self.slot_number, self.command))\n return\n else:\n self.sendMessage(self.leader, PreemptedMessage(self.id, msg.ballot_number))\n return", "def main():\n initData()\n\n # Loop to simulate data connections\n # Current set up\n # Once an hour send hsk\n # Every 10 min send hsk\n # Every 5 min send spec and nrbd\n # This can be changed for different cycles\n while 1:\n connection([\"time\",\"spec\",\"nrbd\",\"hsk\"])\n for j in range(0,10):\n sleep(5*60)\n connection([\"spec\",\"nrbd\"])\n sleep(5*60)\n connection([\"spec\",\"nrbd\",\"hsk\"])", "def offline_client_garbler_phase(env, client_storage, server_storage):\n\n # key generation\n now = env.now\n yield env.timeout(measurements.off_client_compute_keygen) # client generates key\n yield env.timeout(measurements.off_client_write_key / upload_bandwidth) # client sends key to server\n # simulate linear layers\n yield env.timeout(measurements.off_client_compute_he_encrypt.sum())\n yield env.timeout(measurements.off_client_write_linear.sum() / upload_bandwidth)\n yield env.timeout(he_models.he_eval_latency(args.dataset, args.network, args.num_threads_server_he))\n yield env.timeout(measurements.off_server_write_linear.sum() / download_bandwidth)\n yield env.timeout(measurements.off_client_compute_he_decrypt.sum())\n\n # simulate ReLU layers\n yield env.timeout(gc_models.garble_latency(\"client\", measurements.NUM_RELU, args.num_threads_client)) \n yield env.timeout(measurements.off_client_compute_encode)\n yield env.timeout(measurements.off_client_write_garbled_c / upload_bandwidth) # client sends garbled circuit to server\n\n # oblivious transfer protocol (protocol 4 of https://eprint.iacr.org/2016/602)\n yield env.timeout(measurements.off_server_write_base_ot / download_bandwidth) # server sends labels (k_0, k_1)..... 
BASE OT\n yield client_storage.put(client_storage_per_inf)\n yield server_storage.put(server_storage_per_inf)", "def run_interactions(self, sentinel=False):\n\n self.bind_job = self.driver.job_bind()\n self.bind_transfer = self.driver.transfer_bind()\n poller_time = time.time()\n poller_interval = 128\n\n while True:\n current_time = time.time()\n if current_time > poller_time + 64:\n if poller_interval != 2048:\n self.log.info(\"Directord server entering idle state.\")\n poller_interval = 2048\n elif current_time > poller_time + 32:\n if poller_interval != 1024:\n self.log.info(\"Directord server ramping down.\")\n poller_interval = 1024\n\n if self.driver.bind_check(\n bind=self.bind_transfer, constant=poller_interval\n ):\n poller_interval, poller_time = 64, time.time()\n\n (\n identity,\n msg_id,\n control,\n command,\n _,\n info,\n _,\n _,\n ) = self.driver.socket_recv(socket=self.bind_transfer)\n if command == b\"transfer\":\n transfer_obj = info.decode()\n self.log.debug(\n \"Executing transfer for [ %s ]\", transfer_obj\n )\n self._run_transfer(\n identity=identity,\n verb=b\"ADD\",\n file_path=os.path.abspath(\n os.path.expanduser(transfer_obj)\n ),\n )\n elif control == self.driver.transfer_end:\n self.log.debug(\n \"Transfer complete for [ %s ]\", info.decode()\n )\n self._set_job_status(\n job_status=control,\n job_id=msg_id.decode(),\n identity=identity.decode(),\n job_output=info.decode(),\n )\n elif self.driver.bind_check(\n bind=self.bind_job, constant=poller_interval\n ):\n poller_interval, poller_time = 64, time.time()\n (\n identity,\n msg_id,\n control,\n command,\n data,\n info,\n stderr,\n stdout,\n ) = self.driver.socket_recv(socket=self.bind_job)\n node = identity.decode()\n node_output = info.decode()\n if stderr:\n stderr = stderr.decode()\n if stdout:\n stdout = stdout.decode()\n\n try:\n data_item = json.loads(data.decode())\n except Exception:\n data_item = dict()\n\n self._set_job_status(\n job_status=control,\n job_id=msg_id.decode(),\n identity=node,\n job_output=node_output,\n job_stdout=stdout,\n job_stderr=stderr,\n execution_time=data_item.get(\"execution_time\", 0),\n recv_time=time.time(),\n )\n\n if command == b\"QUERY\":\n # NOTE(cloudnull): When a command return is \"QUERY\" an ARG\n # is resent to all known workers.\n try:\n query_value = json.loads(node_output)\n except Exception as e:\n self.log.error(\n \"Query value failed to load, VALUE:%s, ERROR:%s\",\n node_output,\n str(e),\n )\n else:\n if query_value and data_item:\n targets = self.workers.keys()\n task = data_item[\"task\"] = utils.get_uuid()\n data_item[\"skip_cache\"] = True\n data_item[\"extend_args\"] = True\n data_item[\"verb\"] = \"ARG\"\n data_item[\"args\"] = {\n \"query\": {\n node: {data_item.pop(\"query\"): query_value}\n }\n }\n data_item.pop(\"task_sha256sum\", None)\n data_item[\"task_sha256sum\"] = utils.object_sha256(\n data_item\n )\n self.create_return_jobs(\n task=task, job_item=data_item, targets=targets\n )\n self.log.debug(\n \"Runing query against with DATA: %s\",\n data_item,\n )\n for target in targets:\n self.log.debug(\n \"Runing query ARG update against\"\n \" TARGET: %s\",\n target.decode(),\n )\n self.driver.socket_send(\n socket=self.bind_job,\n identity=target,\n command=data_item[\"verb\"].encode(),\n data=json.dumps(data_item).encode(),\n )\n elif self.workers:\n poller_interval, poller_time = self.run_job()\n\n if sentinel:\n break", "def main_loop(self):\n self.receive_initial_data()\n self.allocate_range()\n print(f\"ranges are: start -> 
{self._range_start_finish[0]}, finish -> {self._range_start_finish[1]}\") # debug\n self._client_socket.sendall(pickle.dumps(tuple(self._range_start_finish)))\n print(\"ranges sent\")\n try:\n encoded_packet = self._client_socket.recv(BUFFER_SIZE)\n except ConnectionResetError as e:\n print(\"Thread disconnected\")\n else:\n packet = pickle.loads(encoded_packet)\n print(\"answer received\")\n # if the tuple has only a 0 its a failed attempt, if it has a number its the answer.\n if packet[0] != 0:\n print(f\"The result is:{packet[0]}\")\n else:\n print(\"No matches in thread\")", "def connection(dataTypes):\n #Simulates one cycle of data files\n global resends\n resends = 0\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((tcpAddr, tcpPort))\n s.settimeout(connectionTimeout)\n s.send(Wakeup)\n\n print(\"Sent: %s\" % Wakeup)\n\n response = s.recv(bufferSize)\n\n print(\"Recieved: %s\" % response)\n\n #Add more elif for other command flows\n if response == DataRequest:\n #Get the date and time\n date = datetime.datetime.utcnow().strftime(\"%d%m%y\")\n time = datetime.datetime.utcnow().strftime(\"%H%M%S.%f\")\n time = time[:10]\n\n #Loop thorugh all the file types given and send them\n for fileType in dataTypes:\n print(\"Sending File Type %s\" % fileType)\n if fileType == \"time\":\n status = sendTimeData(s, date, time)\n if status == 1:\n s.send(ConnectionEnd)\n return 1\n continue\n\n elif fileType == \"spec\":\n status = sendSpecData(s,date,time)\n if status == 1:\n s.send(ConnectionEnd)\n return 1\n continue\n elif fileType == \"nrbd\":\n status = sendNrbdData(s,date,time)\n if status == 1:\n s.send(ConnectionEnd)\n return 1\n continue\n elif fileType == \"hsk\":\n status = sendHskData(s, date, time)\n if status == 1:\n s.send(ConnectionEnd)\n return 1\n continue\n\n s.send(DataEnd)\n print(\"Sent: %s\" % DataEnd)\n response = s.recv(bufferSize)\n print(\"Recieved: %s\" % response)\n if response == ConnectionEnd:\n try:\n s.send(ConnectionEnd)\n except:\n print(\"Done Connection\")\n return 0", "def doTest(language_name):\n \n elapsed = -1\n bytesSent = 0\n\n try:\n start_client(language_name)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((BIND_IP, PROXY_PORT))\n sock.listen(100)\n\n new_stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n new_stream.connect((BIND_IP, CLIENT_PORT))\n\n conn, addr = sock.accept()\n\n conn.settimeout(0.1)\n\n startTimer = time.time()\n expected_msg = random_string(BYTES_TO_SEND)\n elapsed = -1\n for i in range(TRIALS):\n bytesSent += BYTES_TO_SEND\n actual_msg = ''\n expected_buffer = expected_msg\n while True:\n if expected_buffer:\n bytessent = new_stream.send(expected_buffer[:BLOCK_SIZE])\n expected_buffer = expected_buffer[bytessent:]\n else:\n break\n while True:\n time.sleep(0.00001)\n [is_alive, data] = fteproxy.network_io.recvall_from_socket(conn)\n actual_msg += data\n assert expected_msg.startswith(actual_msg)\n if (actual_msg == expected_msg):\n break\n if elapsed > TIMEOUT or not is_alive:\n raise Exception(\n \"!!!!!!!!!!! 
System tests failed to execute properly\")\n elapsed = time.time() - startTimer\n\n fteproxy.network_io.close_socket(conn)\n fteproxy.network_io.close_socket(new_stream)\n fteproxy.network_io.close_socket(sock)\n finally:\n stop_client()\n\n return elapsed, bytesSent", "def prompt_alg():\n algorithms = {0: 'random', 1: 'greedy', 2: 'greedy2', 3: 'hillclimber', 4: 'simulated_annealing'}\n print(\"What algorithm should be used (type INFO to get description of algorithms)\")\n print(''.join(['{0}{1}'.format(str(key) + ': ', value + ' ') for key, value in algorithms.items()]), end=' ')\n user_in = input('\\n> ')\n command(user_in)\n try:\n user_in = int(user_in)\n except ValueError:\n print('Invalid number, choose one from list below')\n return prompt_alg()\n if user_in not in algorithms:\n print('Invalid algorithm, choose one from list below')\n return prompt_alg()\n else:\n return algorithms[user_in]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exchanges keys with the server
def dh_exchange_key(self): if self.state =='DH_START': logger.info('Sending POST Request to exchange DH Shared key') key = self.public_key.public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo) data = { 'method': 'KEY_EXCHANGE', 'pub_key': key } # POST request sending public key request = requests.post(f'{SERVER_URL}/api/key', json=data, headers={'Content-Type': 'application/json','session_id' : str(self.session_id)}) data = json.loads(request.text) method = data['method'] if method == 'ACK': logger.info('Server confirmed the exchange') self.shared_key = self.private_key.exchange(self.srvr_publickey) self.state='KEY_EXCHANGE' else: logger.error('Could not exchange a key with the server') else: return False
[ "def put_zmqkeys(appname):\n pub_key, secret_key = zmq.curve_keypair()\n\n try:\n etcd_put(\"/Publickeys/\" + appname, pub_key)\n except RuntimeError:\n logging.error(\"Error putting Etcd public key for %s\", appname)\n\n try:\n etcd_put(\"/\" + appname + \"/private_key\", secret_key)\n except RuntimeError:\n logging.error(\"Error putting Etcd private key for %s\", appname)", "def update_host_keys(self):\n assert not self.remote, \"Updating host key only works for local connections.\"\n cmd = \"ssh-keygen -R {host} && ssh-keyscan {host} >> ~/.ssh/known_hosts\".format(host=self.host)\n proc = run_in_subprocess(cmd, True)\n if proc.returncode != 0:\n raise RuntimeError(\n \"Could not update host keys! Please handle this manually. The \"\n \"error was:\\n\" + '\\n'.join([proc.stdout.decode('utf-8'), proc.stderr.decode('utf-8')])\n )", "def set_keys(self, client_private_key, server_public_key):\n d_client_private_key = Data(client_private_key)\n d_server_public_key = Data(server_public_key)\n status = self._lib_vsce_uokms_client.vsce_uokms_client_set_keys(self.ctx, d_client_private_key.data, d_server_public_key.data)\n VsceStatus.handle_status(status)", "def manage_keys(config):\n choice = 0\n while choice != 8:\n choice = manage_menu()\n if choice == 1:\n print 'You currently have the following secret keys installed:'\n print\n os.system(config['PROGRAM_GPG'] + ' --list-secret-keys --with-fingerprint')\n print 'Please note that the uid of your secret key and the value of'\n print 'the ID parameter set in signcontrol.conf should be the same.'\n elif choice == 2:\n print\n print '-----------------------------------------------------------------------'\n print 'Please put the e-mail address from which you will send control articles'\n print 'in the key ID (the real name field). And leave the other fields blank,'\n print 'for better compatibility with Usenet software.'\n print 'Choose a 2048-bit RSA key which never expires.'\n print 'You should also provide a passphrase, for security reasons.'\n print 'There is no need to edit the key after it has been generated.'\n print\n print 'Please note that the key generation may not finish if it is launched'\n print 'on a remote server, owing to a lack of enough entropy. 
Use your own'\n print 'computer instead and import the key on the remote one afterwards.'\n print '-----------------------------------------------------------------------'\n print\n os.system(config['PROGRAM_GPG'] + ' --gen-key --allow-freeform-uid')\n print\n print 'After having generated these keys, you should export your PUBLIC key'\n print 'and make it public (in the web site of your hierarchy, along with'\n print 'a current checkgroups, and also announce it in news.admin.hierarchies).'\n print 'You can also export your PRIVATE key for backup only.'\n elif choice == 3:\n print 'The key will be written to the file public-key.asc.'\n key_name = raw_input('Please enter the uid of the public key to export: ')\n os.system(config['PROGRAM_GPG'] + ' --armor --output public-key.asc --export \"=' + key_name + '\"')\n elif choice == 4:\n print 'The key will be written to the file private-key.asc.'\n key_name = raw_input('Please enter the uid of the secret key to export: ')\n os.system(config['PROGRAM_GPG'] + ' --armor --output private-key.asc --export-secret-keys \"=' + key_name + '\"')\n if os.path.isfile('private-key.asc'):\n os.chmod('private-key.asc', 0400)\n print\n print 'Be careful: it is a security risk to export your private key.'\n print 'Please make sure that nobody has access to it.'\n elif choice == 5:\n raw_input('Please put it in a file named secret-key.asc and press enter.')\n os.system(config['PROGRAM_GPG'] + ' --import secret-key.asc')\n print\n print 'Make sure that both the secret and public keys have properly been imported.'\n print 'Their uid should be put as the value of the ID parameter set in signcontrol.conf.'\n elif choice == 6:\n key_name = raw_input('Please enter the uid of the key to *remove*: ')\n os.system(config['PROGRAM_GPG'] + ' --delete-secret-and-public-key \"=' + key_name + '\"')\n elif choice == 7:\n key_name = raw_input('Please enter the uid of the secret key to revoke: ')\n os.system(config['PROGRAM_GPG'] + ' --gen-revoke \"=' + key_name + \"'\")\n print", "def send_public_key(self):\n self.send(str(self.PUBLIC_KEY[0]) + \",\" + str(self.PUBLIC_KEY[1]))", "def upload_ssh_key(self):\n click.echo(\"Uploading key \" + self.ssh_key.name)\n self.server.addsshkey(self.ssh_key.name, self.ssh_key.public_key)", "def update_keys(self):\n from ...main import add_api_key\n add_api_key(\"google_api_key\", self.google_api_key.get())\n add_api_key(\"soundcloud_client_id\", self.soundcloud_client_id.get())", "def encrypt(self, public_key, message):", "def negotiate(self, my_key, his_key):\n pass", "def handleSharedKey(sharedKey):\n decryptedSharedKey = decryptSharedKey(sharedKey,privKey)\n keyStoreHandle.setKeyfor(receiver.uid, decryptedSharedKey)\n keyStoreHandle.ackKeyfor(receiver.uid)\n message = packMessage('', MsgType.KEY_ESTABLISHED)\n time.sleep(5)\n sendMessage(message,receiver.uid)", "async def thesauruskey(self, ctx, *, key):\n # Load\n config_boards = await self.config.apikey()\n # Set\n await self.config.apikey.set(key)\n await ctx.send(\"The apikey has been added.\")", "def main(args=None):\n if not args:\n args = sys.argv[1:]\n\n CONF.setup(args)\n\n host = CONF.get_value('keyserver', 'host') # fallbacks are in defaults.ini\n port = CONF.get_value('keyserver', 'port', conv=int)\n\n # health_check_url = 'http://{}:{}{}'.format(host, port, CONF.get_value('keyserver', 'health_endpoint'))\n # status_check_url = 'http://{}:{}{}'.format(host, port, CONF.get_value('keyserver', 'status_endpoint'))\n\n ssl_certfile = Path(CONF.get_value('keyserver', 
'ssl_certfile')).expanduser()\n ssl_keyfile = Path(CONF.get_value('keyserver', 'ssl_keyfile')).expanduser()\n LOG.debug(f'Certfile: {ssl_certfile}')\n LOG.debug(f'Keyfile: {ssl_keyfile}')\n sslcontext = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n sslcontext.check_hostname = False\n sslcontext.load_cert_chain(ssl_certfile, ssl_keyfile)\n\n loop = asyncio.get_event_loop()\n\n keyserver = web.Application(loop=loop)\n keyserver.router.add_routes(routes)\n\n # Keystore\n store = KeysConfiguration(args)\n global _cache\n _cache = Cache()\n # Load all the keys in the store\n for section in store.sections():\n _unlock_key(section, **dict(store.items(section))) # includes defaults\n keyserver['store'] = store\n\n LOG.info(f\"Start keyserver on {host}:{port}\")\n web.run_app(keyserver, host=host, port=port, shutdown_timeout=0, ssl_context=sslcontext)", "def generate_keys(self, sender, receiver):", "def put_private_key(self):\n self.__logger.info(\"Put private keypair in manager\")\n scpc = scp.SCPClient(self.ssh.get_transport())\n scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')\n (_, stdout, stderr) = self.ssh.exec_command(\n \"sudo docker cp ~/cloudify_ims.pem \"\n \"cfy_manager_local:/etc/cloudify/ && \"\n \"sudo docker exec cfy_manager_local \"\n \"chmod 444 /etc/cloudify/cloudify_ims.pem\")\n self.__logger.debug(\"output:\\n%s\", stdout.read().decode(\"utf-8\"))\n self.__logger.debug(\"error:\\n%s\", stderr.read().decode(\"utf-8\"))", "def send_key_to_box(self,key: str):\n payload = (\n '{\"type\":\"CPE.KeyEvent\",\"status\":{\"w3cKey\":\"'\n + key\n + '\",\"eventType\":\"keyDownUp\"}}'\n )\n self.mqttClient.publish(self._householdId+ \"/\" + self.box_id, payload)\n self._request_settop_box_state()", "def send(self, key, value):\n try:\n data = json.dumps({\"key\": key, \"value\": value})\n r = requests.post(url=self.endpoint, data=data)\n r.close()\n except:\n print(\"couldn't update peer\", self.endpoint)", "def send_pubkeys(self):\n for agent_name, agent in self.directory.clients.items():\n pubkey = self.pubkeyList[agent_name] # retrieve pubkey for client we're sending to\n body = {'pubkey': pubkey}\n msg = Message(sender_name=self.name, recipient_name=agent_name, body=body)\n agent.receive_pubkey(msg) # invoke method of receiving agent", "def key(self, name, secret):\n return self._send_command(\"key %s %s\" % (name, secret))", "def set_evp_key(self,e):\n _ldns.ldns_key_set_evp_key(self,e)\n #parameters: ldns_key *,EVP_PKEY *,\n #retvals: " ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads citizen card certificates and private key
def read_cc(self): print("-------+---------") pkcs11 = PyKCS11.PyKCS11Lib() pkcs11.load(lib) self.slots =pkcs11.getSlotList() for slot in self.slots: print(pkcs11.getTokenInfo(slot)) #slot=pkcs11.getSlotList(tokenPresent=Tru)[0] self.session=pkcs11.openSession(slot) all_attributes = list(PyKCS11.CKA.keys()) all_attributes = [e for e in all_attributes if isinstance(e, int)] obj = self.session.findObjects([(PyKCS11.CKA_LABEL, 'CITIZEN AUTHENTICATION CERTIFICATE')])[0] attributes = self.session.getAttributeValue(obj, all_attributes) attributes = dict(zip(map(PyKCS11.CKA.get, all_attributes), attributes)) self.certificate=x509.load_der_x509_certificate(bytes(attributes['CKA_VALUE'])) cc_num = self.certificate.subject.get_attributes_for_oid(NameOID.SERIAL_NUMBER) self.private_key_cc = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY), (PyKCS11.CKA_LABEL, 'CITIZEN AUTHENTICATION KEY')])[0] self.mechanism = PyKCS11.Mechanism(PyKCS11.CKM_SHA1_RSA_PKCS, None)
[ "def get_citizen_certificates(self, library=\"/usr/local/lib/libpteidpkcs11.so\"):\n # init certificates as a list\n certificates = []\n # start session\n card_session = self._start_session(library)\n if not isinstance(card_session, PyKCS11.Session):\n return certificates\n # retrieve certificates\n try:\n # cycles trough card objects\n for entry in card_session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_CERTIFICATE)]):\n # convert entry to dictionary\n dict_entry = entry.to_dict()\n # get certificate\n cert = OpenSSL.crypto.load_certificate(type=OpenSSL.crypto.FILETYPE_ASN1,\n buffer=''.join(chr(c) for c in dict_entry['CKA_VALUE']))\n # build certificate\n certificate = {\n 'TYPE': dict_entry['CKA_LABEL'],\n 'CERTIFICATE_PEM': OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, cert=cert),\n }\n # add to certificate list\n certificates.append(certificate)\n except Exception as exception:\n if self.debug:\n print \"Couldn't retrieve certificates\\nException: \" + str(exception)\n else:\n print \"Couldn't retrieve certificates\"\n finally:\n card_session.closeSession()\n # returns None or found certificates\n return certificates", "def read_certificates():\n\n cert_input = get_stdin().read()\n\n return [crt.strip() + '\\n' + PEM_FOOTER + '\\n' for crt in cert_input.split(PEM_FOOTER) if len(crt.strip()) > 0]", "def read_private_key( key_path ):\n return read_key( key_path, public=False )", "def _read_para_cert(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n _ctgp = self._read_unpack(1)\n _ctct = self._read_unpack(1)\n _ctid = self._read_unpack(1)\n _cttp = self._read_unpack(1)\n _ctdt = self._read_fileng(clen-4)\n\n cert = dict(\n type=desc,\n critical=cbit,\n length=clen,\n group=_GROUP_ID.get(_ctgp),\n count=_ctct,\n id=_ctid,\n cert_type=_CERT_TYPE.get(_cttp),\n certificate=_ctdt,\n )\n\n _plen = length - clen\n if _plen:\n self._read_fileng(_plen)\n\n return cert", "def test_read_certificate_signing_request(self):\n pass", "def _download_ca_cert_and_key():\n ctx.logger.info('Downloading certificates to a local path...')\n ca_cert = os.path.join(_certs_dir(), CA_CERT)\n ca_key = os.path.join(_certs_dir(), CA_KEY)\n _download_file(CA_CERT, target=ca_cert)\n _download_file(CA_KEY, target=ca_key)\n return ca_cert, ca_key", "def load_certificate():\n params = demisto.params()\n cert = params.get(\"certificate\")\n cert = base64.b64decode(cert)\n passphrase = params.get('passphrase_creds', {}).get('password') or params.get(\"passphrase\", \"\")\n return cert, passphrase", "def read_RSA_private_key(private_key_files):\n for private_key_file in private_key_files:\n private_key = open(private_key_file, \"r\").read()\n if private_key.find(\"BEGIN RSA PRIVATE KEY\") != -1:\n return private_key\n return None", "def __init__(self, path):\n\n self._base64Key = list()\n self._base16Key = list()\n\n if not os.path.isfile(path):\n sys.exit(\"Path \" + path + \" does not exist or is not a file!\")\n\n pkFile = open(path, 'r').readlines()\n base64Key = \"\"\n lineNo = 1\n certNo = 1\n inCert = False\n for line in pkFile:\n line = line.strip()\n # Are we starting the certificate?\n if line == \"-----BEGIN CERTIFICATE-----\":\n if inCert:\n sys.exit(\"Encountered another BEGIN CERTIFICATE without \" +\n \"END CERTIFICATE on line: \" + str(lineNo))\n\n inCert = True\n\n # Are we ending the ceritifcate?\n elif line == \"-----END CERTIFICATE-----\":\n if not inCert:\n sys.exit(\"Encountered END CERTIFICATE before \" +\n \"BEGIN CERTIFICATE on line: \" + 
str(lineNo))\n\n # If we ended the certificate trip the flag\n inCert = False\n\n # Sanity check the input\n if len(base64Key) == 0:\n sys.exit(\"Empty certficate , certificate \" + str(certNo) +\n \" found in file: \" + path)\n\n # ... and append the certificate to the list\n # Base 64 includes uppercase. DO NOT tolower()\n self._base64Key.append(base64Key)\n try:\n # Pkgmanager and setool see hex strings with lowercase,\n # lets be consistent\n self._base16Key.append(base64.b16encode(base64.b64decode(base64Key)).lower())\n except TypeError:\n sys.exit(\"Invalid certificate, certificate \" +\n str(certNo) + \" found in file: \" + path)\n\n # After adding the key, reset the accumulator as pem files\n # may have subsequent keys\n base64Key = \"\"\n\n # And increment your cert number\n certNo = certNo + 1\n\n # If we haven't started the certificate, then we should not record\n # any data\n elif not inCert:\n lineNo += 1\n continue\n\n # else we have started the certificate and need to append the data\n elif inCert:\n base64Key += line\n\n else:\n # We should never hit this assert, if we do then an unaccounted\n # for state was entered that was NOT addressed by the\n # if/elif statements above\n assert(False == True)\n\n # The last thing to do before looping up is to increment line number\n lineNo = lineNo + 1", "def CertInfo(fname: str) -> RET:\n try:\n with open(fname, encoding = \"ascii\", errors = \"replace\") as f:\n cert_bytes = f.read()\n except Exception:\n return RET(2, '', f'File >>>{fname}<<< not found') # ENOENT /* No such file or directory */\n\n try:\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_bytes)\n except Exception:\n return RET(5, '', f'Could not load certificate >>>{fname}<<<') # EIO /* I/O error */\n\n utc_time_notafter = datetime.datetime.strptime(x509.get_notAfter().decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n utc_time_notbefore = datetime.datetime.strptime(x509.get_notBefore().decode(\"utf-8\"), \"%Y%m%d%H%M%SZ\")\n issuer = '/'.join([f'{k.decode(\"utf-8\")}={v.decode(\"utf-8\")}' for k, v in x509.get_issuer().get_components()])\n subject = '/'.join([f'{k.decode(\"utf-8\")}={v.decode(\"utf-8\")}' for k, v in x509.get_subject().get_components()])\n info = f'DN >>> {subject}\\nISSUER >>> {issuer}\\nBEGIN >>> {utc_time_notbefore}\\nEXPIRE >>> {utc_time_notafter}'\n return RET(0, info)", "def _initkeycertificate(self):\n\n if self.privatekey and self.fingerprint and self.certificate and self.publickeyxml:\n return # all set up\n\n filename = sanatizefilename(self.options['STAGECERTIFICATEFILE']['Value'])\n\n self.privatekey = syhelpers.tls.load_privatekey(filename)\n if not self.privatekey:\n print_error(\"Failed to load privatekey, please check STAGECERTIFICATEFILE\")\n return\n self.certificate = syhelpers.tls.load_certificate(filename)\n if not self.certificate:\n print_error(\"Failed to load certificate, please check STAGECERTIFICATEFILE\")\n return\n self.publickeyxml = self._getrsapublickeyxml()\n self.fingerprint = self._getfingerprint()", "def get_certificate_der(self, filename):\n pkcs7message = self.get_buff(filename)\n\n pkcs7obj = cms.ContentInfo.load(pkcs7message)\n cert = pkcs7obj['content']['certificates'][0].chosen.dump()\n return cert", "def get_revoked_certificate_by_serial_number(self, serial_number):", "def load_certs() -> Dict[str, x509.Certificate]:\n re_clean_fname = re.compile(r\"[^A-Za-z0-9_-]\")\n\n eu_url = \"https://ec.europa.eu/information_society/policy/esignature/trusted-list/tl-mp.xml\"\n log.info(\"Downloading EU 
index from %s\", eu_url)\n eu_tl = load_url(eu_url)\n it_url = eu_tl.get_tsl_pointer_by_territory(\"IT\")\n log.info(\"Downloading IT data from %s\", it_url)\n trust_service_status_list = load_url(it_url)\n\n by_name = defaultdict(list)\n for tsp in trust_service_status_list.trust_service_provider_list.trust_service_provider:\n for tsp_service in tsp.tsp_services.tsp_service:\n si = tsp_service.service_information\n if si.service_status not in (\n \"http://uri.etsi.org/TrstSvc/TrustedList/Svcstatus/recognisedatnationallevel\",\n \"http://uri.etsi.org/TrstSvc/TrustedList/Svcstatus/granted\"):\n continue\n if si.service_type_identifier not in (\n \"http://uri.etsi.org/TrstSvc/Svctype/CA/QC\",):\n continue\n # print(\"identifier\", si.service_type_identifier)\n # print(\"status\", si.service_status)\n cert = []\n sn = []\n for di in si.service_digital_identity.digital_id:\n if di.x509_subject_name is not None:\n sn.append(di.x509_subject_name)\n # if di.x509_ski is not None:\n # print(\" SKI:\", di.x509_ski)\n if di.x509_certificate is not None:\n from cryptography import x509\n from cryptography.hazmat.backends import default_backend\n der = base64.b64decode(di.x509_certificate)\n cert.append(x509.load_der_x509_certificate(der, default_backend()))\n\n if len(cert) == 0:\n raise RuntimeError(\"{} has no certificates\".format(sn))\n elif len(cert) > 1:\n raise RuntimeError(\"{} has {} certificates\".format(sn, len(cert)))\n else:\n from cryptography.x509.oid import NameOID\n cert = cert[0]\n cn = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value\n # print(\"sn\", sn)\n # print(cert)\n # print(\"full cn\", cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME))\n # print(\"cn\", cn)\n fname = re_clean_fname.sub(\"_\", cn)\n by_name[fname].append(cert)\n\n res = {}\n for name, certs in by_name.items():\n if len(certs) == 1:\n if name in res:\n raise RuntimeError(\"{} already in result\".format(name))\n res[name] = certs[0]\n else:\n for idx, cert in enumerate(certs, start=1):\n idxname = name + \"_a38_{}\".format(idx)\n if idxname in res:\n raise RuntimeError(\"{} already in result\".format(name))\n res[idxname] = cert\n return res", "def test_certificate_get(self):\n response = self.client.open(\n '/api/v1.0/domain/{domainName}/certificate/{certificateId}'.format(domainName='domainName_example', certificateId='certificateId_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def _extract_keys_from_pem(mode, pem_contents, cert_format,\n passphrase=None):\n\n if passphrase:\n passphrase = str(passphrase)\n\n private_bytes = None\n private_mode = False\n temp_pem_contents = pem_contents.encode(\"utf-8\")\n if mode in [constants.CERT_MODE_SSL,\n constants.CERT_MODE_DOCKER_REGISTRY,\n constants.CERT_MODE_OPENSTACK,\n constants.CERT_MODE_OPENLDAP,\n constants.CERT_MODE_OPENLDAP_CA,\n ]:\n private_mode = True\n\n if private_mode:\n # extract private_key with passphrase\n try:\n private_key = serialization.load_pem_private_key(\n temp_pem_contents,\n password=passphrase,\n backend=default_backend())\n except Exception as e:\n raise exception.SysinvException(_(\"Error loading private key \"\n \"from PEM data: %s\" % e))\n\n if not isinstance(private_key, rsa.RSAPrivateKey):\n raise exception.SysinvException(_(\n \"Only RSA encryption based Private Keys are supported.\"))\n\n try:\n private_bytes = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=cert_format,\n 
encryption_algorithm=serialization.NoEncryption())\n except Exception as e:\n raise exception.SysinvException(_(\"Error loading private \"\n \"bytes from PEM data: %s\"\n % e))\n\n certs = cutils.extract_certs_from_pem(temp_pem_contents)\n cert_list = []\n for cert in certs:\n # format=serialization.PrivateFormat.TraditionalOpenSSL,\n try:\n public_bytes = cert.public_bytes(\n encoding=serialization.Encoding.PEM)\n except Exception as e:\n raise exception.SysinvException(_(\"Error loading public \"\n \"bytes from PEM data: %s\"\n % e))\n\n # check if the cert is a CA cert\n is_ca = cutils.is_ca_cert(cert)\n\n hash_subject = cutils.get_cert_subject_hash(cert)\n\n signature = mode + '_' + str(cert.serial_number)\n if len(signature) > 255:\n LOG.info(\"Truncating certificate serial no %s\" % signature)\n signature = signature[:255]\n LOG.info(\"config_certificate signature=%s\" % signature)\n\n cert_list.append({'cert': cert,\n 'is_ca': is_ca,\n 'public_bytes': public_bytes,\n 'signature': signature,\n 'hash_subject': hash_subject})\n\n return cert_list, private_bytes", "def load(filename):\n\t\tbuffer = [];\n\t\tb64_contents = \"\";\n\t\ttry:\n\t\t\thandle = open(filename, \"r\");\n\t\t\traw_contents = handle.readlines();\n\t\t\tfor line in raw_contents:\n\t\t\t\tif line.startswith(\"----\"):\n\t\t\t\t\tcontinue\n\t\t\t\tb64_contents += line.strip();\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Failed to read PEM file: \" + str(e));\n\t\tbuffer = b64decode(b64_contents);\n\t\treturn RSAPrivateKey(buffer);", "def test_read_certificate_signing_request_approval(self):\n pass", "def _get_certificates_arguments(\n self, ssl_cert_key, ssl_cert_crt, ssl_cert_generate):\n section = self._config[self._config_section]\n\n # Private key\n ssl_cert_key = ssl_cert_key or section['ssl_cert_key']\n\n # Public certificate\n if ssl_cert_crt is not False:\n ssl_cert_crt = ssl_cert_crt or section.get_literal('ssl_cert_crt')\n\n # Generated certificate\n ssl_cert_generate = (\n ssl_cert_generate or section.get_literal('ssl_cert_generate')\n or False)\n\n return ssl_cert_key, ssl_cert_crt, ssl_cert_generate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a key that identifies the given chunk and media
def chunk_identification(self, chunk_id, media_id): return (self.shared_key.decode('latin') + media_id + str(chunk_id)).encode('latin')
[ "def _generate_s3_metadata_key(self, file_id: str, location: str) -> str:\n return f'{location}/{file_id}.{self._get_metadata_location_extension(location)}'", "def objkey(oid, gen, mkey):\n return md5(mkey[:16] + struct.pack(\"<HBH\", oid&0xFFFF, oid>>16, gen) + b'sAlT')", "def _get_thumbnail_asset_key(asset, course_key):\n # note, due to the schema change we may not have a 'thumbnail_location' in the result set\n thumbnail_location = asset.get('thumbnail_location', None)\n thumbnail_asset_key = None\n\n if thumbnail_location:\n thumbnail_path = thumbnail_location[4]\n thumbnail_asset_key = course_key.make_asset_key('thumbnail', thumbnail_path)\n return thumbnail_asset_key", "def cache_token_key_for_record(record):\n klass = record.__class__\n return \":\".join(map(str, [klass.__module__, klass.__name__, record.pk]))", "def _get_key(sample, project):\n return sample + \".\" + project", "def get_vineyard_object_id(self, chunk_key):\n raise NotImplementedError", "def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)", "def keys(request: Request) -> str:\n if request.method != \"POST\":\n return http_response(\"Only POST requests are supported\", 400)\n\n try:\n request_json = request.get_json(silent=True)\n if not request_json:\n return http_response(\"no request body was provided\", 400)\n\n media_id = request_json.get(\"mediaId\")\n if not media_id:\n return http_response(\"'mediaId' field must be specified\", 400)\n provider_key = request_json.get(\"provider\")\n if not provider_key:\n return http_response(\n f\"'provider' field must be specified. supported providers: {PROVIDERS.keys()}\",\n 400,\n )\n if provider_key not in PROVIDERS:\n return http_response(\n f\"'{provider_key}' is not a valid provider. 
supported providers: {PROVIDERS.keys()}\",\n 400,\n )\n key_ids = request_json.get(\"keyIds\")\n if not key_ids:\n return http_response(\n \"at least one key ID must be specified via the 'keyIds' field\", 400\n )\n\n cpix_client = PROVIDERS[provider_key]()\n env_error = validate_environment(cpix_client)\n if env_error:\n return env_error\n\n key_info = cpix_client.fetch_keys(media_id, key_ids)\n version_name = write_secret(media_id, json.dumps(key_info, indent=2))\n current_app.logger.info(\"wrote encryption key secret to %s\", version_name)\n return version_name\n\n # pylint: disable=broad-except\n except Exception as ex:\n current_app.logger.exception(ex)\n return http_response(str(ex), 500)", "def _create_key(self):\n return uuid.uuid4().hex", "def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)", "def entity_key(entity):\n key = entity.key or entity.string\n return ':'.join([entity.resource.path, key])", "def _generate_blob_key(time_func=time.time, random_func=random.random):\n timestamp = str(time_func())\n tries = 0\n while tries < 10:\n number = str(random_func())\n digester = hashlib.md5()\n digester.update(timestamp)\n digester.update(number)\n blob_key = base64.urlsafe_b64encode(digester.digest())\n datastore_key = datastore.Key.from_path(blobstore.BLOB_INFO_KIND, blob_key,\n namespace='')\n try:\n datastore.Get(datastore_key)\n tries += 1\n except datastore_errors.EntityNotFoundError:\n return blob_key\n raise _TooManyConflictsError()", "def get_key(filename):\n return 'names-{0}'.format(filename)", "def generate_content_key(key_id, key_seed):\n if len(key_seed) < 30:\n raise Exception(\"seed must be >= 30 bytes\")\n key_seed = b64decode(key_seed)\n # key ID should be a UUID\n if isinstance(key_id, str):\n key_id = uuid.UUID(key_id)\n elif isinstance(key_id, bytes):\n key_id = uuid.UUID(str(key_id, \"ASCII\"))\n elif isinstance(key_id, uuid.UUID):\n pass\n else:\n raise TypeError(\"key_id should be a uuid\")\n\n key_id = key_id.bytes_le\n\n sha = SHA256.new()\n sha.update(key_seed)\n sha.update(key_id)\n sha_a = [x for x in sha.digest()]\n\n sha = SHA256.new()\n sha.update(key_seed)\n sha.update(key_id)\n sha.update(key_seed)\n sha_b = [x for x in sha.digest()]\n\n sha = SHA256.new()\n sha.update(key_seed)\n sha.update(key_id)\n sha.update(key_seed)\n sha.update(key_id)\n sha_c = [x for x in sha.digest()]\n\n content_key = b\"\"\n for i in range(16):\n content_key += (\n sha_a[i] ^ sha_a[i + 16] ^ sha_b[i] ^ sha_b[i + 16] ^ sha_c[i] ^\n sha_c[i + 16]).to_bytes(1, byteorder='big')\n\n return b16encode(content_key)", "def chunk_filename(self, filename, chunks, chunk):\n return \"%s_%s_%s\" % (\n filename,\n chunks,\n chunk,\n )", "def key( self, digram ):\n\t\ta,b = digram.refdigram()\n\t\treturn str( a ) + self.keyseparator + str( b )", "def media_image_hash(self):\n if self._albumart_url:\n return super().media_image_hash\n if self._albumart:\n return hashlib.md5(self._albumart).hexdigest()[:5]\n return None", "def _build_key(self, prefix, file_name):\r\n if self._top_level_prefix:\r\n return os.path.join(\r\n self._top_level_prefix,\r\n prefix,\r\n os.path.basename(file_name)\r\n )\r\n else:\r\n return os.path.join(\r\n prefix,\r\n os.path.basename(file_name)\r\n )", "def generate_key(self):\n try:\n return self.proto.genuid()\n except ValueError:\n return uuid.uuid4()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The client sends its certificate to the server for validation, along with a challenge.
def start_challenge(self):
    if self.state=='KEY_EXCHANGE':
        logger.info("Starting Challenge")
        nonce = os.urandom(16)
        self.challenge_nonce = nonce
        key, salt = self.derive_key(self.shared_key)
        if self.session_id != None:
            headers = {
                'Content-Type': 'application/json',
                'session_id' : str(self.session_id)
            }
        message = json.dumps({
            'method': 'START_CHALLENGE',
            'nonce': nonce.decode('latin'),
            'cert': self.certificate.public_bytes(serialization.Encoding.PEM).decode('latin'),
        }).encode('latin')
        data,iv = self.encrypt_message(message,key)

        logger.info("Sucessfuly encrypted challenge and certificate")

        message = {
            'data': base64.b64encode(data),
            'iv': base64.b64encode(iv),
            'hmac': base64.b64encode(self.add_hmac(data,key)),
            'salt': base64.b64encode(salt)
        }

        logger.info("Sending POST Challenge and Client Certificate")
        request = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)

        response = json.loads(request.text)
        message, key, iv, salt, hmac = self.receive_message(response)
        #iv = base64.b64decode(response['iv'])
        #hmac = base64.b64decode(response['hmac'])
        #salt = base64.b64decode(response['salt'])
        #msg = base64.b64decode(response['message'])

        #key, _ = self.derive_key(self.shared_key,salt)
        if not self.verify_hmac(hmac,message,key):
            exit(0)
        else:
            logger.info("HMAC OK")
            message = self.decrypt_message(message,iv,key)
            message = json.loads(message)
            nonce = message['snonce'].encode('latin')
            nonce2 = message['nonce2'].encode('latin')
            self.state='START_CHALLENGE'
            if self.verify_challenge(nonce):
                self.accept_challenge(nonce2)
            else:
                return False
    else:
        return False
[ "def test_handshake(self):\n cli, svr, p = connectedServerAndClient(\n ServerClass=SecurableProto,\n ClientClass=SecurableProto)\n\n okc = OKCert()\n svr.certFactory = lambda : okc\n\n cli.callRemote(\n amp.StartTLS, tls_localCertificate=okc,\n tls_verifyAuthorities=[PretendRemoteCertificateAuthority()])\n\n # let's buffer something to be delivered securely\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n # once for client once for server\n self.assertEqual(okc.verifyCount, 2)\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n self.assertEqual(L[0], {'pinged': True})", "def verify(self, conn, cert, errnum, depth, ok):\n # If there is already an error bail now\n if not ok:\n return ok\n\n # Only perform further verification on client certs\n if depth>0:\n return ok\n\n # At this point we know the certificate is signed by a\n # trusted CA, check the issuer OU matches the incoming cert\n # OU and the incoming cert is not a server cert\n # XXX: Should look at using something like nsCertType rather\n # than the CN field for this.\n s = cert.get_subject()\n i = cert.get_issuer()\n if s.OU != i.OU:\n log_warn(\"Rejected incoming connection from invalid \"\n \"SSL cert (%s). OU did not match.\" % s)\n return 0\n if s.CN == \"server\":\n log_warn(\"Rejected incoming connection from server SSL \"\n \"cert (%s).\" % s)\n return 0\n return 1", "def accept_challenge(self,nonce2):\r\n\t\tlogger.info(\"Sending POST to accept Challenge\")\r\n\t\tif self.state=='START_CHALLENGE':\r\n\t\t\tsnonce2 = self.sign_message(nonce2)\r\n\t\t\tself.challenge_nonce2 = snonce2\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Content-Type': 'application/json',\r\n\t\t\t\t\t'session_id': str(self.session_id)\r\n\t\t\t\t}\r\n\t\t\tmessage = json.dumps({\r\n\t\t\t\t'method': 'ACCEPT_CHALLENGE',\r\n\t\t\t\t'snonce2':snonce2.decode('latin'),\r\n\t\t\t\t'protocols':json.dumps({'cipher':self.ciphers,'mode':self.ciphermodes,'digest':self.digests})\r\n\t\t\t}).encode('latin')\r\n\t\t\tdata, iv = self.encrypt_message(message,key)\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sucessfuly encrypted challenge,certificate and communication protocols.\")\r\n\r\n\t\t\t\r\n\t\t\tmessage = {\r\n\t\t\t\t'data': base64.b64encode(data),\r\n\t\t\t\t'iv': base64.b64encode(iv),\r\n\t\t\t\t'salt': base64.b64encode(salt),\r\n\t\t\t\t'hmac': base64.b64encode(self.add_hmac(data,key))\t\t\r\n\t\t\t}\r\n\r\n\r\n\t\t\tlogger.info(\"Sending POST Challenge\")\r\n\t\t\trequest = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)\r\n\t\t\tresponse = json.loads(request.text)\r\n\r\n\t\t\tmessage, key, iv, salt, hmac = self.receive_message(response)\r\n\r\n\t\t\tif not self.verify_hmac(hmac,message,key):\r\n\t\t\t\texit(0)\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"HMAC OK\")\r\n\t\t\t\tmessage = self.decrypt_message(message,iv,key)\r\n\t\t\t\tmessage=json.loads(message)\r\n\t\t\t\tif message['method'] == 'ACK':\r\n\t\t\t\t\tself.state='ACCEPT_CHALLENGE'\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tlogger.error(message['content'])\r\n\t\t\t\t\treturn False\r\n\t\telse:\r\n\t\t\treturn False", "def verify_challenge(self,crypt):\r\n\t\ttry:\r\n\t\t\tself.server_cert.public_key().verify(\r\n\t\t\t\tcrypt,\r\n\t\t\t\tself.challenge_nonce,\r\n\t\t\t\tpd.PSS(\r\n\t\t\t\tmgf=pd.MGF1(hashes.SHA256()),\r\n\t\t\t\tsalt_length=pd.PSS.MAX_LENGTH),\r\n\t\t\t\thashes.SHA256()\r\n\t\t\t)\r\n\t\t\tlogger.info(\"Challenge OK\")\r\n\t\t\treturn 
True\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Challenge wrong. Comms Compromised\")\r\n\t\t\treturn False", "def test_wrong_cert(self):\n certfile = os.path.join(os.path.dirname(__file__) or os.curdir,\n \"keycert.pem\")\n server = ThreadedEchoServer(SIGNED_CERTFILE,\n certreqs=ssl.CERT_REQUIRED,\n cacerts=SIGNING_CA, chatty=False,\n connectionchatty=False)\n with server, \\\n socket.socket() as sock, \\\n test_wrap_socket(sock,\n certfile=certfile,\n ssl_version=ssl.PROTOCOL_TLSv1) as s:\n try:\n # Expect either an SSL error about the server rejecting\n # the connection, or a low-level connection reset (which\n # sometimes happens on Windows)\n s.connect((HOST, server.port))\n except ssl.SSLError as e:\n if support.verbose:\n sys.stdout.write(\"\\nSSLError is %r\\n\" % e)\n except OSError as e:\n if e.errno != errno.ECONNRESET:\n raise\n if support.verbose:\n sys.stdout.write(\"\\nsocket.error is %r\\n\" % e)\n else:\n self.fail(\"Use of invalid cert should have failed!\")", "def test_cert_verification(self, session):\n adapter = DummyAdapter()\n session.mount(\"https://\", adapter)\n client = corbeau.Client(self.dsn)\n client.captureMessage(\"oh noes!\")\n request = adapter.request\n kwargs = adapter.kwargs\n self.assertTrue(kwargs[\"verify\"])\n self.assertEqual(kwargs[\"timeout\"], 1)\n self.assertTrue(\"X-Sentry-Auth\" in request.headers)\n self.assertTrue(request.body)", "def verify_SSL_key_and_cert(keyfile, certfile):\r\n\r\n if not (os.path.exists(keyfile) and os.path.exists(certfile)):\r\n # key/cert does not exist. Create.\r\n import subprocess\r\n from Crypto.PublicKey import RSA\r\n from twisted.conch.ssh.keys import Key\r\n\r\n print \" Creating SSL key and certificate ... \",\r\n\r\n try:\r\n # create the RSA key and store it.\r\n KEY_LENGTH = 1024\r\n rsaKey = Key(RSA.generate(KEY_LENGTH))\r\n keyString = rsaKey.toString(type=\"OPENSSH\")\r\n file(keyfile, 'w+b').write(keyString)\r\n except Exception, e:\r\n print \"rsaKey error: %(e)s\\n WARNING: Evennia could not auto-generate SSL private key.\" % {'e': e}\r\n print \"If this error persists, create game/%(keyfile)s yourself using third-party tools.\" % {'keyfile': keyfile}\r\n sys.exit(5)\r\n\r\n # try to create the certificate\r\n CERT_EXPIRE = 365 * 20 # twenty years validity\r\n # default:\r\n #openssl req -new -x509 -key ssl.key -out ssl.cert -days 7300\r\n exestring = \"openssl req -new -x509 -key %s -out %s -days %s\" % (keyfile, certfile, CERT_EXPIRE)\r\n #print \"exestring:\", exestring\r\n try:\r\n #, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\r\n subprocess.call(exestring)\r\n except OSError, e:\r\n string = \"\\n\".join([\r\n \" %s\\n\" % e,\r\n \" Evennia's SSL context factory could not automatically\",\r\n \" create an SSL certificate game/%(cert)s.\" % {'cert': certfile},\r\n \" A private key 'ssl.key' was already created. 
Please\",\r\n \" create %(cert)s manually using the commands valid\" % {'cert': certfile},\r\n \" for your operating system.\",\r\n \" Example (linux, using the openssl program): \",\r\n \" %s\" % exestring])\r\n print string\r\n sys.exit(5)\r\n print \"done.\"", "def handleCertCallResponse(self, result, node, challenge):\n _log.debug(\"KademliaProtocolAppend::handleCertCallResponse\"\n \"\\n\\tresult={}\"\n \"\\n\\tnode={}\"\n \"\\n\\tchallenge={}\".format(result, node.id.encode('hex'), challenge))\n try:\n signature = result[1]['signature'].decode('hex')\n cert_str = result[1]['value']\n except Exception as err:\n _log.error(\"handleCertCallResponse::incorrectly formated result\"\n \"\\n\\terr={}\"\n \"\\n\\tresult={}\".format(err))\n self.router.removeContact(node)\n return (False, None)\n try:\n id = dhtidhex_from_certstring(cert_str)\n except Exception as err:\n _log.error(\"Failed to extract id from certstr\"\n \"\\n\\terr={}\"\n \"\\n\\tid={}\".format(err, node.id.encode('hex')))\n return (False, None)\n if node.id.encode('hex') == id:\n try:\n payload = self.payload_to_be_signed(self.sourceNode.id,\n challenge,\n \"signed_cert_response\",\n value=cert_str)\n verified = self.handle_verify_signature(node.id, payload, signature, cert_str=cert_str)\n except:\n _log.error(\n \"Invalid signature on certificate \"\n \"response from {}\".format(node.id.encode('hex')))\n self.router.addContact(node)\n self.storeCert(cert_str, id)\n if self.router.isNewNode(node):\n self.transferKeyValues(node)\n else:\n _log.error(\"RETFALSENONE: Certificate from {} does not match claimed node id\".format(node.id.encode('hex')))\n return (False, None)\n return result", "def check_host_certificate(host=\"www.google.com\"):\n port = 443\n conn = ssl.create_connection((host, port))\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n sock = context.wrap_socket(conn, server_hostname=host)\n raw_pem_cert = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))\n\n try:\n parsed_cert = x509.load_pem_x509_certificate(raw_pem_cert.encode(\"UTF-8\"), default_backend())\n end_date = parsed_cert.not_valid_after\n time_to_expiry = (end_date - datetime.now()).days\n subject = str(parsed_cert.subject)\n serial = parsed_cert.serial_number\n logger.info(\"Parsed Certificate Sucessfully Using Cryptography.io\")\n logger.info(subject)\n except:\n end_date = datetime.now()\n time_to_expiry = 0\n subject = \"\"\n serial = 0\n logger.warn(\"Failed to Parse Certificate Using Cryptography.io -- using Placeholder Variables\")\n return {\"end_date\":end_date,\"time_to_expiry\":time_to_expiry,\"subject\":subject,\"serial\":serial}", "def IssueCertificate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_server_certificate(addr,ssl_version=PROTOCOL_SSLv3,ca_certs=None):\n\tpass", "def test_create_certificate_signing_request(self):\n pass", "def accept_certificate(cls, path, username=None, password=None,\n local_site_name=None, certificate=None):\n client = cls.build_client(path, username, password,\n local_site_name=local_site_name)[1]\n\n return client.accept_ssl_certificate(path)", "def _SSLVerifyCallback(self, conn, cert, errnum, errdepth, ok):\n # some parameters are unused, but this is the API\n # pylint: disable=W0613\n assert self._ssl_params, \"SSL not initialized\"\n\n return (self._ssl_cert.digest(\"sha1\") == cert.digest(\"sha1\") and\n self._ssl_cert.digest(\"md5\") == cert.digest(\"md5\"))", 
"def _verify_cert(self, peercert):\n if isinstance(self._ssl_options, dict):\n verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)\n elif isinstance(self._ssl_options, ssl.SSLContext):\n verify_mode = self._ssl_options.verify_mode\n\n assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)\n\n if verify_mode == ssl.CERT_NONE or self._server_hostname is None:\n return True\n cert = self._socket.getpeercert()\n if cert is None and verify_mode == ssl.CERT_REQUIRED:\n gen_log.warning(\"No SSL certificate given\")\n return False\n try:\n ssl_match_hostname(peercert, self._server_hostname)\n except SSLCertificateError:\n gen_log.warning(\"Invalid SSL certificate\", )\n return False\n else:\n return True", "def test_failedCertificateVerification(self):\n onServerLost = defer.Deferred()\n onClientLost = defer.Deferred()\n self.loopback(sslverify.OpenSSLCertificateOptions(privateKey=self.sKey,\n certificate=self.sCert, verify=False,\n requireCertificate=False),\n sslverify.OpenSSLCertificateOptions(verify=True,\n requireCertificate=False, caCerts=[self.cCert]),\n onServerLost=onServerLost,\n onClientLost=onClientLost)\n\n d = defer.DeferredList([onClientLost, onServerLost],\n consumeErrors=True)\n def afterLost(result):\n ((cSuccess, cResult), (sSuccess, sResult)) = result\n self.assertFalse(cSuccess)\n self.assertFalse(sSuccess)\n\n return d.addCallback(afterLost)", "def handshakeClientCert(self, certChain=None, privateKey=None,\n session=None, settings=None, checker=None,\n async=False):\n handshaker = self._handshakeClientAsync(certParams=(certChain,\n privateKey), session=session, settings=settings,\n checker=checker)\n if async:\n return handshaker\n for result in handshaker:\n pass", "def check_certificate():\n server = get_odoo_server_url()\n if server:\n path = Path('/etc/ssl/certs/nginx-cert.crt')\n if path.exists():\n with path.open('r') as f:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())\n cert_end_date = datetime.datetime.strptime(cert.get_notAfter().decode('utf-8'), \"%Y%m%d%H%M%SZ\") - datetime.timedelta(days=10)\n for key in cert.get_subject().get_components():\n if key[0] == b'CN':\n cn = key[1].decode('utf-8')\n if cn == 'OdooTempIoTBoxCertificate' or datetime.datetime.now() > cert_end_date:\n _logger.info(_('Your certificate %s must be updated') % (cn))\n load_certificate()\n else:\n _logger.info(_('Your certificate %s is valid until %s') % (cn, cert_end_date))\n else:\n load_certificate()", "def test_certificate_create(self):\n body = CertificatePayload()\n response = self.client.open(\n '/api/v1.0/domain/{domainName}/certificate/'.format(domainName='domainName_example'),\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies the server response to the challenge
def verify_challenge(self,crypt):
    try:
        self.server_cert.public_key().verify(
            crypt,
            self.challenge_nonce,
            pd.PSS(
                mgf=pd.MGF1(hashes.SHA256()),
                salt_length=pd.PSS.MAX_LENGTH),
            hashes.SHA256()
        )
        logger.info("Challenge OK")
        return True
    except:
        logger.error("Challenge wrong. Comms Compromised")
        return False
[ "def accept_challenge(self,nonce2):\r\n\t\tlogger.info(\"Sending POST to accept Challenge\")\r\n\t\tif self.state=='START_CHALLENGE':\r\n\t\t\tsnonce2 = self.sign_message(nonce2)\r\n\t\t\tself.challenge_nonce2 = snonce2\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Content-Type': 'application/json',\r\n\t\t\t\t\t'session_id': str(self.session_id)\r\n\t\t\t\t}\r\n\t\t\tmessage = json.dumps({\r\n\t\t\t\t'method': 'ACCEPT_CHALLENGE',\r\n\t\t\t\t'snonce2':snonce2.decode('latin'),\r\n\t\t\t\t'protocols':json.dumps({'cipher':self.ciphers,'mode':self.ciphermodes,'digest':self.digests})\r\n\t\t\t}).encode('latin')\r\n\t\t\tdata, iv = self.encrypt_message(message,key)\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sucessfuly encrypted challenge,certificate and communication protocols.\")\r\n\r\n\t\t\t\r\n\t\t\tmessage = {\r\n\t\t\t\t'data': base64.b64encode(data),\r\n\t\t\t\t'iv': base64.b64encode(iv),\r\n\t\t\t\t'salt': base64.b64encode(salt),\r\n\t\t\t\t'hmac': base64.b64encode(self.add_hmac(data,key))\t\t\r\n\t\t\t}\r\n\r\n\r\n\t\t\tlogger.info(\"Sending POST Challenge\")\r\n\t\t\trequest = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)\r\n\t\t\tresponse = json.loads(request.text)\r\n\r\n\t\t\tmessage, key, iv, salt, hmac = self.receive_message(response)\r\n\r\n\t\t\tif not self.verify_hmac(hmac,message,key):\r\n\t\t\t\texit(0)\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"HMAC OK\")\r\n\t\t\t\tmessage = self.decrypt_message(message,iv,key)\r\n\t\t\t\tmessage=json.loads(message)\r\n\t\t\t\tif message['method'] == 'ACK':\r\n\t\t\t\t\tself.state='ACCEPT_CHALLENGE'\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tlogger.error(message['content'])\r\n\t\t\t\t\treturn False\r\n\t\telse:\r\n\t\t\treturn False", "def test_challenge_response(self):\n api = self.api\n session = {'expire':datetime.now()+timedelta(1), 'challenge':None, 'auth':False}\n api.sessions = {0:session}\n \n challenge = api.authenticate(0)\n self.assert_(challenge, 'Challenge should not be none')\n \n result = api.challenge_response(0, session['challenge'])\n \n self.assertEqual(True, result)\n self.assertEqual(True, session['auth'])\n self.assertEqual(None, session['challenge'])", "def test_response(self):\n d = self._createAndDecodeChallenge()\n\n def _test(creds):\n self.failUnless(creds.checkPassword('password'))\n return d.addCallback(_test)", "def test_challenge_response_invalid(self):\n api = self.api\n session = {'expire':datetime.now()+timedelta(1), 'challenge':None, 'auth':False}\n api.sessions = {0:session}\n \n challenge = api.authenticate(0)\n self.assert_(challenge, 'Challenge should not be none')\n \n result = api.challenge_response(0, 'INVALID_RESPONSE')\n \n self.assertEqual(False, result)\n self.assertEqual(False, session['auth'])\n self.assertEqual(None, session['challenge'])", "def verify_problem_answer(self, answer: models.ProblemAnswer):", "def _check_worker_response(self, s, n, x):\n print \"Received response from worker\"\n s_in_ascii = PowHelper.binary_to_ascii(s)\n hash_verified = PowHelper.verify_hash(s_in_ascii, x, n)\n if hash_verified:\n print \"Successfully verified hash calculation from worker\"\n else:\n print \"Hash from worker NOT verified !\"\n\n return hash_verified", "def test_challenge_response_no_challenge(self):\n api = self.api\n session = {'expire':datetime.now()+timedelta(1), 'challenge':None, 'auth':False}\n api.sessions = {0:session}\n \n result = api.challenge_response(0, 'PREEMPTIVE_RESPONSE')\n 
self.assertEqual(False, result)\n self.assertEqual(False, session['auth'])\n self.assertEqual(None, session['challenge'])", "def check_solve():\n data = request.form or request.get_json()\n\n team_id = str(data[\"team_id\"])\n\n try:\n challenge = challenges[team_id]\n except KeyError:\n abort(401)\n\n return \"Success\"", "def is_correct_response(response):\r\n return response == 'success'", "def start_challenge(self):\r\n\t\tif self.state=='KEY_EXCHANGE':\r\n\r\n\t\t\tlogger.info(\"Starting Challenge\")\r\n\t\t\tnonce = os.urandom(16)\r\n\t\t\tself.challenge_nonce = nonce\r\n\t\t\tkey, salt = self.derive_key(self.shared_key)\r\n\t\t\tif self.session_id != None:\r\n\t\t\t\theaders = {\r\n\t\t\t\t\t'Content-Type': 'application/json',\r\n\t\t\t\t\t'session_id' : str(self.session_id)\r\n\t\t\t\t\t}\t\r\n\t\t\tmessage = json.dumps({\r\n\t\t\t\t'method': 'START_CHALLENGE',\r\n\t\t\t\t'nonce': nonce.decode('latin'), \r\n\t\t\t\t'cert': self.certificate.public_bytes(serialization.Encoding.PEM).decode('latin'),\r\n\t\t\t}).encode('latin')\t\t\r\n\t\t\tdata,iv = self.encrypt_message(message,key)\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sucessfuly encrypted challenge and certificate\")\r\n\t\t\t\r\n\t\t\tmessage = {\r\n\t\t\t\t'data': base64.b64encode(data),\r\n\t\t\t\t'iv': base64.b64encode(iv),\r\n\t\t\t\t'hmac': base64.b64encode(self.add_hmac(data,key)),\r\n\t\t\t\t'salt': base64.b64encode(salt)\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tlogger.info(\"Sending POST Challenge and Client Certificate\")\r\n\t\t\trequest = requests.post(f'{SERVER_URL}/api',json=message, headers=headers)\r\n\t\t\t\r\n\t\t\tresponse = json.loads(request.text)\r\n\t\t\tmessage, key, iv, salt, hmac = self.receive_message(response)\r\n\t\t\t#iv = base64.b64decode(response['iv'])\r\n\t\t\t#hmac = base64.b64decode(response['hmac'])\r\n\t\t\t#salt = base64.b64decode(response['salt'])\r\n\t\t\t#msg = base64.b64decode(response['message'])\r\n\t\t\t\r\n\t\t\t#key, _ = self.derive_key(self.shared_key,salt)\r\n\t\t\tif not self.verify_hmac(hmac,message,key):\r\n\t\t\t\texit(0)\r\n\t\t\telse:\r\n\t\t\t\tlogger.info(\"HMAC OK\")\r\n\t\t\t\tmessage = self.decrypt_message(message,iv,key)\r\n\t\t\t\tmessage = json.loads(message)\r\n\t\t\t\tnonce = message['snonce'].encode('latin')\r\n\t\t\t\tnonce2 = message['nonce2'].encode('latin')\r\n\t\t\t\tself.state='START_CHALLENGE'\r\n\t\t\t\tif self.verify_challenge(nonce):\r\n\t\t\t\t\tself.accept_challenge(nonce2)\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn False\r\n\r\n\t\telse:\r\n\t\t\treturn False", "def getDigestResponse(self, challenge, ncount):\n nonce = challenge.get('nonce')\n algo = challenge.get('algorithm').lower()\n qop = challenge.get('qop')\n\n expected = digest.calcResponse(\n digest.calcHA1(algo,\n \"username\",\n \"test realm\",\n \"password\",\n nonce,\n cnonce),\n algo, nonce, ncount, cnonce, qop, \"GET\", \"/write/\", None\n )\n return expected", "def verify_hmac(self, recv_hmac):\n \n recv_hmac = bso.hex_to_bytes(recv_hmac)\n return MaliciousSRPServer.verify_hmac(self, recv_hmac)", "def challenge_response(\n serial: Optional[str],\n host: str,\n user: str,\n prompt: str,\n credential_id: str,\n challenge: str,\n udp: bool,\n) -> None:\n\n nkfido2.find().simple_secret(\n credential_id,\n challenge,\n host=host,\n user_id=user,\n serial=serial,\n prompt=prompt,\n output=True,\n udp=udp,\n )", "def generate_nt_response_mschap(challenge,password):\n password_hash=nt_password_hash(password)\n return challenge_response(challenge,password_hash)", "def test_validate_response(self):\n\n\t\texpected_result 
= True # expected function result\n\n\t\tresponse_obj = requests.Response()\n\t\tresponse_obj.status_code = 200\n\n\t\tresponse = self.calc_obj.validate_response(response_obj)\n\n\t\ttry:\n\t\t\tself.assertEqual(response, expected_result)\n\n\t\tfinally:\n\t\t\ttab = [[response], [expected_result]]\n\t\t\tprint(\"\\n\")\n\t\t\tprint(inspect.currentframe().f_code.co_name)\n\t\t\tprint(tabulate(tab, headers='keys', tablefmt='rst'))\n\t\t\t\n\t\treturn", "def challenge( self, request, response, **kw ):\n # If browser is coming back with auth, yet we are still challenging\n # that means there is insufficient privs.\n if request._auth and request._auth.startswith(self.auth_scheme):\n return False\n response.addHeader('WWW-Authenticate', self.auth_scheme)\n response.addHeader('Connection', 'keep-alive')\n response.setStatus(401)\n m = \"<strong>You are not authorized to access this resource.</strong>\"\n response.setBody(m, is_error=1)\n return True", "def generate_nt_response_mschap2(authenticator_challenge,peer_challenge,username,password):\n challenge=challenge_hash(peer_challenge,authenticator_challenge,username)\n password_hash=nt_password_hash(password)\n return challenge_response(challenge,password_hash)", "def verify_response(\n self,\n request: str,\n expected_response: str,\n response_number: int,\n user: str = \"foo@example.com\",\n ) -> None:\n\n bot, bot_handler = self._get_handlers()\n message = self.make_request_message(request, user)\n bot_handler.reset_transcript()\n\n bot.handle_message(message, bot_handler)\n\n responses = [message for (method, message) in bot_handler.transcript]\n\n first_response = responses[response_number]\n self.assertEqual(expected_response, first_response[\"content\"])", "def test_multiResponse(self):\n d = self._createAndDecodeChallenge()\n\n def _test(creds):\n self.failUnless(creds.checkPassword('password'))\n\n def _test2(_):\n d2 = self._createAndDecodeChallenge(\"00000002\")\n return d2.addCallback(_test)\n return d.addCallback(_test)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }