Dataset columns:
- query: string (length 9 to 9.05k)
- document: string (length 10 to 222k)
- negatives: sequence of strings (19 to 20 items)
- metadata: dict
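For reference, a minimal sketch of iterating over rows with this schema, assuming the dataset is hosted on the Hugging Face Hub and loadable with the `datasets` library; the dataset path below is a placeholder, not the real identifier.

    from datasets import load_dataset

    ds = load_dataset("user/code-retrieval-triplets", split="train")  # placeholder path
    row = ds[0]
    print(row["query"])           # short natural-language description of the code
    print(row["document"][:200])  # the matching code snippet
    print(len(row["negatives"]))  # 19-20 non-matching snippets
    print(row["metadata"])        # training objective specification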
This builds your guide. Use keyword arguments to update any options at build time.
def build_guide(self, **kwargs):
    # This builds your guide master and updates your options
    self.create_guide_master(**kwargs)

    prefix = self.prefix              # Naming prefix. Use this for every new node you create and there should be no name clashes.
    options = self.options            # Build options
    mirror_value = self.mirror_value  # 1.0 for left and center sided parts and -1.0 for right sided parts.

    mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)

    l_prefix = prefix.replace('C', 'L', 1)
    r_prefix = prefix.replace('C', 'R', 1)
    mirror_values = [1, -1]
    enable_steering = options.get('enableSteering')
    colors = ['green', 'red']

    for mi, prefix in enumerate([l_prefix, r_prefix]):
        mirror_value = mirror_values[mi]
        color = colors[mi]

        l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)

        # Create hub
        hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')
        hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')
        mc.xform(hub_end_zero, r=1, t=[1, 0, 0])
        mc.parent(hub_end_jnt, hub_jnt)
        mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value, 0, 0], u=[0, 1, 0], wu=[0, 1, 0], wut='vector')
        mc.parentConstraint(hub_plc, hub_end_zero, mo=1)

        # Create steering arm
        steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')
        mc.xform(steer_zero, r=1, t=[-1, 0, 0])
        mc.parent(hub_jnt, steer_jnt)

        # Create shocks
        shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')
        shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')
        mc.xform(shock_a_zero, ws=1, t=[-2, 2, 0])
        mc.xform(shock_b_zero, ws=1, t=[-0.5, 0.25, 0])
        mc.parent(shock_b_jnt, shock_a_jnt)
        mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value, 0, 0], u=[0, 0, 1], wu=[0, 0, 1], wut='vector')
        mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value, 0, 0], u=[0, 0, 1], wu=[0, 0, 1], wut='vector')

        # Upper arm
        up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')
        up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')
        mc.xform(up_arm_end_zero, r=1, t=[-3.5, 1, 0])
        mc.xform(up_arm_zero, r=1, t=[-1, 0.5, 0])
        mc.parent(up_arm_end_jnt, up_arm_jnt)
        mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value, 0, 0], u=[0, 0, 1],
                         wu=[0, 0, mirror_value], wut='objectRotation', wuo=up_arm_plc)

        # Lower arm
        lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')
        lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')
        mc.xform(lo_arm_end_zero, r=1, t=[-4, -0.5, 0])
        mc.xform(lo_arm_zero, r=1, t=[-1, -0.5, 0])
        mc.parent(lo_arm_end_jnt, lo_arm_jnt)
        mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value, 0, 0], u=[0, 0, 1],
                         wu=[0, 0, mirror_value], wut='objectRotation', wuo=lo_arm_plc)

        # Steering arm (optional)
        if enable_steering:
            steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')
            steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')
            mc.xform(steeringArm_b_zero, r=1, t=[-1.5, 0, 1])
            mc.xform(steeringArm_a_zero, r=1, t=[-4, 0, 1])
            mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)
            mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, aim=[mirror_value, 0, 0], u=[0, 0, 1], wu=[0, 0, 1], wut='vector')

        # Create control
        zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color,
                                     shape='circle', axis='X', scale=[3]*3, create_pivot=0)
        mc.setAttr(ctrl+'.numOffsetCtrls', 1)
        mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)
        mc.xform(ctrl.replace('_CTL', '_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)
        control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)

        # suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt,
        #                                                    axis='X', shape='pyramid', color=color, scale=[1.5, 1, 1], alt_prefix=prefix)
        ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)
        mc.delete(mc.pointConstraint(hub_jnt, ground_zero))

        # Constrain placers to the main placer
        childs = [prefix+'_wheelhub_JNT_PLC_ZERO',
                  prefix+'_steeringArm_JNT_PLC_ZERO',
                  prefix+'_shock_A_JNT_PLC_ZERO',
                  prefix+'_shock_B_JNT_PLC_ZERO',
                  prefix+'_upperArm_JNT_PLC_ZERO',
                  prefix+'_upperArm_end_JNT_PLC_ZERO',
                  prefix+'_lowerArm_JNT_PLC_ZERO',
                  prefix+'_lowerArm_end_JNT_PLC_ZERO']
        for c in childs:
            mc.parentConstraint(l_main_plc, c, mo=1)

        mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)

    # Place it all
    hub_pos = mc.ls(options.get('hubCenter') or '')
    if hub_pos:
        loc = utils.snap_locator(hub_pos)
        mc.delete(mc.pointConstraint(loc, self.guide_master))
        mc.setAttr(self.guide_master+'.tx', 0)
        mc.delete(mc.pointConstraint(loc, l_main_plc), loc)

        hub_end_pos = mc.ls(options.get('hubEndCenter') or '')
        if hub_end_pos:
            loc = utils.snap_locator(hub_end_pos)
            mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)
    else:
        mc.xform(self.guide_master, ws=1, t=[0, 2, 10])
        mc.xform(l_main_plc, r=1, t=[mirror_value*6, 0, 0])

    mc.setAttr(self.guide_master+'.jointAxisVis', 1)

    l = utils.snap_locator(hub_jnt)
    mc.setAttr(l+'.ty', 0)
    mc.delete(mc.pointConstraint(l, ground_zero), l)

    chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)
    mc.setAttr(chassis_plc+'.radius', 1)
    mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)
    mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string')
    mc.setAttr(chassis_plc+'.type', 18)
    mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)
    utils.set_attrs(chassis_plc, l=1, k=0)

    # This finalizes your guide.
    self.finalize_guide()
    self.mirror_guide()
[ "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n prefix, options = self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n\n noxform_grp = self.guide_master + '_NOX'\n\n if mc.objExists ('hips_Mid_jnt'):\n mc.delete ('hips_Mid_jnt')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\legs_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n snaps=[u'heel_Rt_plc', u'tiltIn_Rt_plc', u'tiltOut_Rt_plc', u'toeTip_Rt_plc', u'heel_Lt_plc', u'tiltOut_Lt_plc',\n u'tiltIn_Lt_plc', u'toeTip_Lt_plc', u'hips_Mid_jnt', u'legBase_Lt_jnt',\n u'thigh_Lt_jnt', u'knee_Lt_jnt', u'legEnd_Lt_jnt', u'foot_Lt_bind', u'toe_Lt_bind', u'toeEnd_Lt_jnt',\n u'knee_Lt_VJ_bind', u'uprKnee_Lt_jnt', u'lwrKnee_Lt_jnt', u'legBase_Rt_jnt', u'thigh_Rt_jnt', u'knee_Rt_jnt',\n u'legEnd_Rt_jnt', u'foot_Rt_bind', u'toe_Rt_bind', u'toeEnd_Rt_jnt', u'knee_Rt_VJ_bind', u'uprKnee_Rt_jnt',\n u'lwrKnee_Rt_jnt']\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap, mo=0))\n\n if mc.objExists ('snap_root_Mid_jnt'):\n mc.delete (mc.parentConstraint('snap_root_Mid_jnt', 'drivenLeg_root03_Mid_anim', mo=0))\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n foot_plcrs=['drivenLeg_root03_Mid_anim', u'heel_Rt_plc', u'tiltIn_Rt_plc', u'tiltOut_Rt_plc', u'toeTip_Rt_plc', u'heel_Lt_plc', u'tiltOut_Lt_plc', u'tiltIn_Lt_plc', u'toeTip_Lt_plc']\n\n mc.parent ('hips_Mid_jnt', foot_plcrs, jnts_grp)\n\n self.finalize_guide()", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n num_joints = options.get('numberJoints')\n single_joint = options.get('singleJoint')\n pickWalk_parent = options.get('pickWalkParent')\n\n num_joints += 1\n if single_joint:\n num_joints = 1\n\n # Builde joints\n if single_joint:\n jnt_zero, plc, jnt = self.guide_joint(constraint_type='parent')\n zero, ctrl = self.guide_ctrl(shape='circle', color='light_blue', driver=jnt, axis='X')\n ctrls = [ctrl]\n zeros = [zero]\n\n else:\n jnt_zeros, plcs, jnts = self.guide_joint_chain('', num_joints=num_joints)\n zeros, ctrls = [], []\n for i, jnt in enumerate(jnts[:-1]):\n letter = utils.letters[i]\n zero, ctrl = self.guide_ctrl(name=letter, shape='circle',\n color='light_blue', driver=jnt, axis='X')\n zeros.append(zero)\n ctrls.append(ctrl)\n\n mc.xform(zeros, jnt_zeros, r=1, t=[-1*self.mirror_value, 0, 0])\n\n # lock stuff\n pivots = [mc.listRelatives(c, p=1)[0] for c in ctrls]\n utils.set_attrs(zeros, l=1, k=0)\n utils.set_attrs(pivots, 't s', l=1, k=0)\n\n mc.setAttr(self.guide_master+'.offsetTranslateX', -0.5*self.mirror_value)\n\n # This finalizes your guide.\n self.finalize_guide()", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n number_mid_ctrl = options.get('numberMidCtrls')\n num_joints = options.get('numberJoints')\n create_jaw = options.get('createJaw')\n create_skull = options.get('createReverseJaw')\n surface = options.get('createSurfaceDriver')\n create_fk_ctrls = options.get('createFKShaperCtrls')\n\n noxform_grp = self.guide_master + '_NOX'\n\n 
if mc.objExists ('drivenNeck_chest_Mid_bind'):\n mc.delete ('drivenNeck_chest_Mid_bind')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\neck_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n if mc.objExists ('snap_chest_Mid_jnt'):\n mc.delete (mc.parentConstraint ('snap_chest_Mid_bind', 'drivenNeck_chest_Mid_bind'))\n\n\n snaps=[u'head_Mid_bind', u'headEnd_Mid_jnt', u'eye_Lt_bind', u'eye_Rt_bind', u'headTop_Mid_bind',\n u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind', u'neck01_Mid_bind', u'neck02_Mid_bind',\n u'neck03_Mid_bind', u'neckEnd_Mid_jnt']\n\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap))\n\n\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n mc.parent ('drivenNeck_chest_Mid_bind', jnts_grp)\n\n self.finalize_guide()", "def build(ctx, cmd='help'):\n ctx.run('sphinx-build -M {} {} {}'.format(cmd, source, dest))", "def cli(ctx, **kwds):\n invalid = _validate_kwds(kwds)\n if invalid:\n ctx.exit(invalid)\n tool_description = tool_builder.build(**kwds)\n tool_builder.write_tool_description(ctx, tool_description, **kwds)", "def run(self):\n\t\tself.sphinx_instance.build(force_all=False, filenames=None)\n\t\treturn None", "def main():\n opts = build_parser(sys.argv)\n execute_build(opts)\n\n if opts.docs is not None:\n build_docs(make_opts=opts.docs)", "def build_documentation(context):\n context.render_full_static_site()\n cli_message(\n \"\"\"\nTo view the generated data documentation, open this file in a web browser:\n <green>great_expectations/uncommitted/documentation/index.html</green>\n\"\"\")", "def _BuildCommand(self):\n pass", "def auto_add_guides(self):\r\n self.refresh_guide_data()\r\n geoTypes = {\r\n 'body': '*_body_*_msh__',\r\n 'wheel': ['*_tire_*_msh__', '*_wheel_*_msh__', '*_rims_*_msh__', '*_rim_*_msh__'],\r\n 'door': '*_door_*_msh__',\r\n 'seat': ['*_seat_*_msh__', '*_seats_*_msh__'],\r\n 'steering': ['*_steering_*_msh__', '*_drive_wheel_*_msh__'],\r\n }\r\n rootNames = ['|root|x__base__grp__', '|root|x__model__grp__', 'x__model__grp__']\r\n for geoKey in geoTypes:\r\n geoFilter = [x for x in pm.ls(geoTypes[geoKey], type='transform') if x.getParent().name() in rootNames]\r\n if geoKey == 'wheel':\r\n # filter out steering wheels from the tire wheels.\r\n geoFilter = [x for x in geoFilter if not any([match in x.name() for match in ['steering', 'drive']])]\r\n self.add_guide(geoKey, geoFilter)\r\n self.refresh_guide_data()\r\n print('Vehicle guides successfully built.')", "async def build_options_embed():\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. 
Here are the supported \"\n + \"classes and HP modifiers for your reference\",\n )\n embed.add_field(\n name=\"List of supported classes\",\n value=helper.alias_builder(constants.DND_ALIASES()),\n inline=False,\n )\n embed.add_field(\n name=\"List of supported HP modifiers\",\n value=helper.alias_builder(constants.HP_MOD_ALIASES()),\n inline=False,\n )\n embed.set_footer(\n text=\"/help - main help command\\n\" + \"/links - to view some helpful links\"\n )\n\n return embed", "def run_builder(cls): \n if cls.built == False:\n try:\n sphinxbuild(shlex.split(cls.sphinxopts))\n except SystemExit as e:\n if e.code != 0:\n raise AssertionError(\"Error running sphinx-build (exited with code %s)\" % e.code)\n\n cls.built = True", "def update_documentation():\n root_path = '/opt/webshop-demo'\n\n with cd(root_path):\n with prefix(\"source %s/bin/activate\" % root_path):\n run('pip install sphinx_rtd_theme')\n with cd('nereid-webshop'):\n run('python setup.py build_sphinx')", "def make_docs(self):\n from pypeapp.lib.Terminal import Terminal\n from pypeapp import execute\n\n self._initialize()\n t = Terminal()\n\n source_dir_setup = os.path.join(\n os.environ.get(\"PYPE_SETUP_PATH\"), \"docs\", \"source\")\n build_dir_setup = os.path.join(\n os.environ.get(\"PYPE_SETUP_PATH\"), \"docs\", \"build\")\n\n source_dir_pype = os.path.join(\n os.environ.get(\"PYPE_SETUP_PATH\"), \"repos\",\n \"pype\", \"docs\", \"source\")\n build_dir_pype = os.path.join(\n os.environ.get(\"PYPE_SETUP_PATH\"), \"repos\",\n \"pype\", \"docs\", \"build\")\n\n t.echo(\">>> Generating documentation ...\")\n t.echo(\" - Cleaning up ...\")\n execute(['sphinx-build', '-M', 'clean',\n source_dir_setup, build_dir_setup],\n shell=True)\n execute(['sphinx-build', '-M', 'clean',\n source_dir_pype, build_dir_pype],\n shell=True)\n t.echo(\" - generating sources ...\")\n execute(['sphinx-apidoc', '-M', '-f', '-d', '4', '--ext-autodoc',\n '--ext-intersphinx', '--ext-viewcode', '-o',\n source_dir_setup, 'pypeapp'], shell=True)\n vendor_ignore = os.path.join(\n os.environ.get(\"PYPE_SETUP_PATH\"), \"repos\",\n \"pype\", \"pype\", \"vendor\")\n execute(['sphinx-apidoc', '-M', '-f', '-d', '6', '--ext-autodoc',\n '--ext-intersphinx', '--ext-viewcode', '-o',\n source_dir_pype, 'pype',\n '{}{}*'.format(vendor_ignore, os.path.sep)], shell=True)\n t.echo(\" - Building html ...\")\n execute(['sphinx-build', '-M', 'html',\n source_dir_setup, build_dir_setup],\n shell=True)\n execute(['sphinx-build', '-M', 'html',\n source_dir_pype, build_dir_pype],\n shell=True)\n t.echo(\">>> Done. Documentation id generated:\")\n t.echo(\"*** For pype-setup: [ {} ]\".format(build_dir_setup))\n t.echo(\"*** For pype: [ {} ]\".format(build_dir_pype))", "def add_guideline():\n addGuideline()", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def build_step(self):\n pass", "def _build(self):\n raise NotImplementedError", "def build(self) -> None:\n self.builder.call_all_build_entrypoints()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add fu code to list.
def add_fu(self, state):
    self._fu_set.add(state)
[ "def add_code(self, code):\n self.code += code", "def add_code(self, code):\n self.custom_code.append(code)", "def addFriend(self, f):\n\t\tself.friends.append(f)", "def set_function_list(self, L):\n\t\tself.function_list = L", "def list_add(self,data):\r\n self.list.append(data)", "def add_hook(f, h):\n if f in hooks:\n hooks[f] += [h]\n else:\n hooks[f] = [h]", "def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines", "def add(list_, *args, **kwargs):\n def wrapped_func(f, *args, **kwargs):\n list_.append(f.__name__)\n return f\n\n return wrapped_func", "def add_handler ( handler_list, handler_function ):\n if not (handler_function in handler_list):\n handler_list.append ( handler_function )\n \n #cellblender_added_handlers", "def add_to_list(pn, sl, override = False):", "def _add_function(self, __assistant):\r\n # TODO: Consider refactoring _add_function; current McCabe metric = 11\r\n # Find out how many Functions to add. Defaults to one Function if the\r\n # user hasn't entered and value.\r\n try:\r\n _n_functions = int(self.txtQuantity.get_text())\r\n except ValueError:\r\n _n_functions = 1\r\n\r\n # If specified, the same base code will be used for _n_function newly\r\n # added Functions.\r\n _basecode = self.txtFunctionCode.get_text()\r\n\r\n # If specified, the same base name will be used for _n_function newly\r\n # added Functions.\r\n _name = self.txtFunctionName.get_text()\r\n\r\n # The same remarks will be used for _n_function newly added Functions.\r\n _remarks = self.txtRemarks.get_text(*self.txtRemarks.get_bounds())\r\n\r\n # By default we add the new function as a top-level function.\r\n if self._parent_id is None:\r\n self._parent_id = -1\r\n\r\n _error_codes = []\r\n for i in range(_n_functions):\r\n # Create the Function code for the new Function.\r\n if _basecode == '' or _basecode is None:\r\n _code = '{0:s}-{1:d}'.format(str(Configuration.RTK_PREFIX[2]),\r\n Configuration.RTK_PREFIX[3])\r\n else:\r\n _code = '{0:s}-{1:d}'.format(_basecode,\r\n Configuration.RTK_PREFIX[3])\r\n\r\n # Create the Function name if one hasn't been specified.\r\n if _name == '' or _name is None:\r\n _name = 'New Function {0:d}'.format(i + 1)\r\n\r\n # Add the new Function. If there was an error adding the function\r\n # update the error_codes list with a tuple (Iteration, Error Code)\r\n # otherwise add a new functional FMEA for new Function and add the\r\n # new Function to each of the functional Matrix.\r\n (_results,\r\n _error_code,\r\n _function_id) = self._controller.dtcFunction.add_function(\r\n self._revision_id, self._parent_id, _code, _name, _remarks)\r\n\r\n if _error_code != 0:\r\n _error_codes.append((i, _error_code))\r\n else:\r\n # Add a FMEA with one failure mode to the new Function.\r\n self._controller.dtcFMEA.add_fmea(None, _function_id)\r\n self._controller.dtcFMEA.add_mode(None, _function_id)\r\n\r\n # Add the new Function to each of the Function matrices.\r\n for _matrix_id in [0, 1, 2]:\r\n self._controller.dtcMatrices.add_row(_matrix_id,\r\n self._parent_id,\r\n _function_id,\r\n val1=_code,\r\n val2=_name)\r\n\r\n # Increment the Function index.\r\n Configuration.RTK_PREFIX[3] += 1\r\n\r\n # Handle any errors returned. 
Write each of them to the debug log and\r\n # then raise an error dialog.\r\n for __, _code in enumerate(_error_codes):\r\n _content = \"rtk.function.Assistant._add_function: \" \\\r\n \"Received error code {1:d} while adding function \" \\\r\n \"{0:d} of {3:d}.\".format(_code[0], _code[1],\r\n _n_functions)\r\n self._modulebook.mdcRTK.debug_log.error(_content)\r\n\r\n if len(_error_codes) > 0:\r\n _prompt = _(u\"An error occurred while attempting to add one or \"\r\n u\"more functions.\")\r\n Widgets.rtk_error(_prompt)\r\n\r\n self._modulebook.request_load_data(self._controller.project_dao,\r\n self._revision_id)\r\n\r\n return False", "def code():", "def test_add_file_to_list(self):\n pass", "def append_function_index(self, node):\n ilist = self.function_index\n node._function_index = len(ilist)\n # node.fmtdict.function_index = str(len(ilist)) # debugging\n ilist.append(node)", "def copyin_fields(self, listw, namelist, flist):\n\n dlu = dict()\n for p in namelist:\n dlu[p[0]] = p[1]\n for fn in flist:\n try:\n descr = dlu[fn]\n item = QListWidgetItem(descr)\n item.setData(Qt.UserRole, QtCore.QVariant(fn))\n listw.addItem(item)\n except KeyError:\n pass", "def fdb_add(self, fdb_entries):\n raise NotImplementedError()", "def fpa(tokens):\r\n varname = tokens[0][0]\r\n self.functions_used.add(varname)", "def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)", "def AddEntriesAuto(self, values: List[bytes]):\n idx = 0\n for value in values:\n self.aiFunctionNames.insert(idx, AiFunction(value, 0))\n idx += 1\n\n self.UpdateOffsets()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allow the user to scroll through the raw data of the selected CSV file
def see_raw_data(city):
    while True:
        try:
            see_raw_data_input = input('\nIn addition of the stats above, would you like to scroll through the raw data? (y/n)\n')
            if see_raw_data_input not in ('y', 'n'):
                raise Exception('Invalid answer')
            if see_raw_data_input == 'n':
                break
            if see_raw_data_input == 'y':
                with open(CITY_DATA[city], 'r') as f:
                    reader = csv.reader(f)
                    count_row_start_iteration = 0
                    count_row_read = 0
                    for row in reader:
                        print(row)
                        count_row_read += 1
                        if count_row_read == count_row_start_iteration + 6:
                            continue_scroll = input('\nDo you want to continue scrolling 5 more rows through the raw data? (y/n): ')
                            if continue_scroll == 'n':
                                break
                            else:
                                count_row_start_iteration += 5
        except Exception:
            print("Please answer 'y' or 'n'\n")
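An alternative sketch of the same paging behaviour (not the implementation above): itertools.islice pulls the next chunk of rows from the csv reader without manual row counters. CITY_DATA and the 5-row page size are carried over as assumptions from the original script.

    import csv
    from itertools import islice

    def see_raw_data_islice(city, page_size=5):
        with open(CITY_DATA[city], newline='') as f:   # CITY_DATA assumed to map city name -> csv path
            reader = csv.reader(f)
            while True:
                page = list(islice(reader, page_size))  # next page of rows, empty at end of file
                if not page:
                    break
                for row in page:
                    print(row)
                if input('Scroll 5 more rows? (y/n): ') != 'y':
                    break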
[ "def process_loading_file(self):\n column_headers = []\n column_headers_all = []\n\n # Open the file once to get idea of the total rowcount to display progress\n with open(self.csv_file_path[0], newline='') as csv_file:\n self.progress_max.emit(len(csv_file.readlines()) - 2)\n\n with open(self.csv_file_path[0], newline='') as csv_file:\n\n self.csv_data_table.setRowCount(0)\n self.csv_data_table.setColumnCount(0)\n\n csv_file_read = csv.reader(csv_file, delimiter=',', quotechar='|')\n\n # Fetch the column headers and move the iterator to actual data\n column_headers = next(csv_file_read)\n\n # Reflect back the changes in the reference to the column headers\n for header in column_headers:\n self.column_headers.append(header)\n # A backup to keep a list of all the headers to toogle their view later\n self.column_headers_all.append(header)\n\n # TODO: Increase the reading speed by decreasing load on actual table population\n\n # self.csv_data_table.hide()\n\n for row_data in csv_file_read:\n\n self.relay.emit(self.csv_data_table.rowCount())\n # self.relay.emit(self.x)\n # self.x = self.x + 1\n row = self.csv_data_table.rowCount()\n self.csv_data_table.insertRow(row)\n self.csv_data_table.setColumnCount(len(row_data))\n for column, stuff in enumerate(row_data):\n item = QTableWidgetItem(stuff)\n self.csv_data_table.setItem(row, column, item)\n\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)\n\n # Set WordWrap to True to make the cells change height according to content\n # Currently set it to false as it looks very decent and makes cell size uniform throughout\n self.csv_data_table.setWordWrap(False)\n # Uncomment below line to stretch to fill the column width according to content\n # self.csv_data_table.resizeColumnsToContents()\n self.csv_data_table.resizeRowsToContents()\n\n # Update the bottom toolbar to reflect changes\n self.update_bottom_toolbar.emit()\n self.finished.emit()", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def csv_view(file, encoding, delimiter, quotechar, cols, i):\n if file is None:\n click.echo('File is required!')\n if file is not None:\n csv = CSVData(file, encoding, delimiter, quotechar)\n txt = csv.pretty_print()\n click.echo(txt)\n if i:\n menu(txt)", "def read_csv_file(self):\n pass", "def browse_1(self):\r\n file = QFileDialog()\r\n filter_name = \"Csv files (*.csv);;Text files (*.txt);;Xls files (*.xls);; Xlsx files (*.xlsx)\"\r\n file.setNameFilter(filter_name)\r\n if file.exec():\r\n filenames = file.selectedFiles()\r\n self.browseLine.setText(str(filenames[0]))", "def test_preview_csv(self):\n pass", "def openFile(self):\n self.filepath = askopenfilename(\n #(\"Text Files\", \"*.txt\"),\n filetypes=[(\"All Files\", \"*.*\")])\n sep = self.config[\"default\"][\"sep\"]\n sep = sep[1:-1]\n with open(self.filepath, \"r\") as input_file:\n df = pd.read_csv(input_file, sep=sep, engine= \"python\", skip_blank_lines=True)\n if not self.filepath:\n return \n self.model.dictdata.clear()\n self.model.storeRecords(df)\n self.view.showData()\n self.view.setTitle()", "def openData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. 
If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n self.createTab(pandaData, name=filename)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")", "def scrollTo(self, item):\n cidx = self._model.fileToIndex(item)\n self._view.scrollTo(cidx)", "def reload_csv(self):\n self.load_csv()\n self.tableView.insert_data(self.database)\n self.update()", "def get_contents(self, limit: int, offset: int = 0) -> \"RowSliceView\":\n contents = petl.fromcsv(self.download_path)\n return petl.rowslice(contents, offset, offset + limit)", "def user_csv_prompter(location):\r\n print('Fill in the details in the file:', location)\r\n\r\n # Opens .csv file in default editor.\r\n open_file(location)\r\n\r\n input(\"Press any key to continue.\")", "def _handleLoadFile(self) -> None:\n\n dialog: ChooseFileDialog = self._makeChooseFileDialog()\n result: DialogResult = dialog.show()\n if result == DialogResult.Ok:\n file: str = dialog.getSelectedFile()\n self._setWindowTitle(file)\n data: List[List[Any]] = csvReader.readFile(file)\n self.__spreadsheet.setData(data)", "def OpenCSVFile(self, filename:str):\n \n #if the given file is specified to have a header \"infer\" takes the first row of the dataframe and renames the columns of \n #the dataframe with the first row else no header will be choosen and the default header from the regular expressions from \n #dataframeAndHeaderHandler will be shown\n if self.opened_files_dict[filename][\"hasHeader\"]:\n header = \"infer\" \n else:\n header = None\n #slices the string filename to open the file\n if self.multiple_files_counter <= 1:\n endswith_slice = -2\n else:\n endswith_slice = floor(log(self.multiple_files_counter, 10)+2)\n endswith_slice *= -1\n if filename.endswith(\"_\", endswith_slice, -1):\n tmp_filename = filename[:endswith_slice:]\n else:\n tmp_filename = filename\n try:\n #converts the csv file with the specified parameters to a dataframe\n new_dataframe = pd.read_csv(tmp_filename,\n header = header,\n encoding=self.opened_files_dict[filename][\"Encoding\"],\n sep=self.opened_files_dict[filename][\"Delimiter\"],\n quotechar= self.opened_files_dict[filename][\"QuoteChar\"],\n skipinitialspace=self.opened_files_dict[filename][\"skipInitSpace\"],\n lineterminator=self.opened_files_dict[filename][\"lineTerminator\"],\n quoting=self.opened_files_dict[filename][\"Quoting\"])\n column_amount = len(new_dataframe.columns)\n \n #appends the new dataframe to main_dataframe\n self.main_dataframe = self.importer.ImportFile(new_dataframe, column_amount, self.opened_files_dict[filename][\"hasHeader\"])\n \n #Error Handling:\n #if the csv File cannot be opened, because it doesn't exist for example\n except OSError as e:\n self.opened_files_dict.pop(filename)\n raise OSError(e)\n #if the encoding is invalid it will be reset to the default value\n except (UnicodeDecodeError,LookupError):\n if self.multiple_files_counter <= 1:\n endswith_slice = -2\n else:\n endswith_slice = floor(log(self.multiple_files_counter, 10)+2)\n endswith_slice *= -1\n if filename.endswith(\"_\", endswith_slice, -1):\n tmp_filename = filename[:endswith_slice:]\n else:\n tmp_filename = filename\n enc = detect(Path(tmp_filename).read_bytes())\n self.opened_files_dict[filename][\"Encoding\"] = enc[\"encoding\"] \n self.update_dataframe()\n raise LookupError\n #if the types of the dataframes don't correspond or the csv file cannot 
be parsed to a dataframe\n except (ValueError,pd.errors.ParserError) as value_error:\n raise ValueError(value_error)\n \n return self.main_dataframe", "def display_csv_file(self):\n print(pd.read_csv(self.csv_file))", "def scroll(self, lines=1): # real signature unknown; restored from __doc__\n pass", "def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)", "def _read_csv_with_offset_pyarrow_on_ray(\n fname, num_splits, start, end, kwargs, header\n ): # pragma: no cover\n bio = open(fname, \"rb\")\n # The header line for the CSV file\n first_line = bio.readline()\n bio.seek(start)\n to_read = header + first_line + bio.read(end - start)\n bio.close()\n table = csv.read_csv(\n BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)\n )\n chunksize = get_default_chunksize(table.num_columns, num_splits)\n chunks = [\n pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])\n for i in range(num_splits)\n ]\n return chunks + [\n table.num_rows,\n pandas.Series(\n [t.to_pandas_dtype() for t in table.schema.types],\n index=table.schema.names,\n ),\n ]", "def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialization function. Sets the model name and function, path to input data, and the output filename.
def __init__(self, sfs, model, popnames, output):
    self.sfs = self.load_sfs(sfs)
    self.modelname = model
    # Make an extrapolating version of the function
    self.modelfunc = dadi.Numerics.make_extrap_log_func(
        self.set_model_func(model))
    self.params = self.set_parameters()
    self.popnames = popnames
    self.output = '_'.join(popnames + [output, model]) + '.txt'
    self.figout = '_'.join(popnames + [output, model]) + '_Comp.pdf'
    return
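For context, a hedged usage sketch: dadi's make_extrap_log_func returns a wrapper that is called with (params, sample_sizes, grid_points). The class name ModelRunner, the constructor arguments, the grid sizes, and the choice of ll_multinom below are illustrative assumptions, not part of the original code.

    import dadi

    pts_l = [40, 50, 60]                                    # assumed extrapolation grid sizes
    runner = ModelRunner(sfs='data.sfs', model='IM',        # hypothetical class name and arguments
                         popnames=['popA', 'popB'], output='run1')
    expected = runner.modelfunc(runner.params, runner.sfs.sample_sizes, pts_l)
    ll = dadi.Inference.ll_multinom(expected, runner.sfs)   # multinomial log-likelihood of the fit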
[ "def __init__(self, model_paths, raw_data, real_label, output_folder):\n self._model_paths = model_paths\n self.raw_data = raw_data\n self.real_label = real_label\n self.output_folder = output_folder", "def __init__(self, model_name=\"\", model=\"\", snapshot=\"\", module_names=[]):\n\n if snapshot:\n self.load_from_snapshot(snapshot)\n return\n\n if not model_name:\n raise AttributeError('A model file name or a snap shot must be supplied')\n self.model_name = model_name\n self.model = model\n if not model:\n self.model = gp.read(model_name)\n self.date_created = datetime.now()\n self.model_description = \"\"\n self.solve_count = 0\n self.optimal = False\n\n # Store an list of tuples of the functions, their args, and their kwargs\n # that have been applied to the model\n self.function_list = []\n\n self.snapshot = \"\"\n self.update_filename()\n self.module_names = module_names\n self.modules = {}\n if self.module_names:\n self.add_modules(self.module_names)", "def initialize(self) -> None:\n self.model = load(self.path)", "def initialize(self, runInfo, inputs, initDict=None) :\n # if 'externalFunction' in initDict.keys(): self.externalFunction = initDict['externalFunction']\n self.inputs = inputs\n self._workingDir = runInfo['WorkingDir']", "def setup(self):\n source_file = os.path.join(self.path.specfem_data, self.source_prefix)\n self._f0 = getpar(key=\"f0\", file=source_file)[1]\n\n par_file = os.path.join(self.path.specfem_data, \"Par_file\")\n if self.multiples:\n setpar(key=\"absorbtop\", val=\".false.\", file=par_file)\n else:\n setpar(key=\"absorbtop\", val=\".true.\", file=par_file)\n\n super().setup()\n\n # Copy in coordinate files to the Model definition so we can plot\n self._export_starting_models(parameters=[\"x\", \"z\"])", "def makeModel(datafn,niter=20,targetrms=1.0,nlayers=100,nlperdec=30,\r\n z1layer=50,bwidth=200,trigger=.75,savepath=None,rhostart=100,\r\n occampath=r\"c:\\Peacock\\PHD\\OCCAM\\MakeFiles\"):\r\n #get the base name of data file \r\n dfnb=os.path.basename(datafn)\r\n \r\n #put data file into the same directory as MakeModel2DMT\r\n if os.path.dirname(datafn)!=occampath:\r\n shutil.copy(datafn,os.path.join(occampath,dfnb))\r\n \r\n #write input file for MakeModel2DMT\r\n mmfid=open(os.path.join(occampath,'inputMakeModel.txt'),'w')\r\n mmfid.write(dfnb+'\\n')\r\n mmfid.write(str(niter)+'\\n') \r\n mmfid.write(str(targetrms)+'\\n') \r\n mmfid.write(str(nlayers)+'\\n')\r\n mmfid.write(str(nlperdec)+'\\n')\r\n mmfid.write(str(z1layer)+'\\n')\r\n mmfid.write(str(bwidth)+'\\n')\r\n mmfid.write(str(trigger)+'\\n')\r\n mmfid.write('\\n')\r\n mmfid.close()\r\n \r\n #get current working directory\r\n cdir=os.getcwd() \r\n \r\n #change directory path to occam path\r\n os.chdir(occampath) \r\n \r\n #---call MakeModel2DMT---\r\n subprocess.os.system(\"MakeModel2DMT < inputMakeModel.txt\")\r\n \r\n #change back to original working directory \r\n os.chdir(cdir)\r\n \r\n if savepath==None:\r\n savepath=os.path.dirname(datafn)\r\n \r\n if not os.path.exists(savepath):\r\n os.mkdir(savepath)\r\n \r\n meshfn=os.path.join(savepath,'MESH') \r\n inmodelfn=os.path.join(savepath,'INMODEL') \r\n startupfn=os.path.join(savepath,'startup') \r\n \r\n #copy ouput files to savepath\r\n shutil.copy(os.path.join(occampath,'MESH'),meshfn)\r\n shutil.copy(os.path.join(occampath,'INMODEL'),inmodelfn)\r\n shutil.copy(os.path.join(occampath,'startup'),startupfn)\r\n if not os.path.exists(os.path.join(savepath,dfnb)):\r\n shutil.copy(datafn,os.path.join(savepath,dfnb))\r\n if 
os.path.getctime(os.path.join(savepath,dfnb))<\\\r\n os.path.getctime(datafn):\r\n shutil.copy(datafn,os.path.join(savepath,dfnb))\r\n \r\n #rewrite mesh so it contains the right number of columns and rows\r\n rewriteMesh(meshfn)\r\n \r\n #write startup file to have the starting desired starting rho value\r\n ifid=open(startupfn,'r')\r\n ilines=ifid.readlines()\r\n ifid.close()\r\n \r\n if rhostart!=100:\r\n #make startup model a homogeneous half space of rhostart\r\n rhostart=np.log10(rhostart)\r\n ifid=open(startupfn,'w')\r\n for line in ilines:\r\n if line.find('2.000000')>=0:\r\n line=line.replace('2.000000','%.6f' % rhostart)\r\n ifid.write(line)\r\n ifid.close()\r\n \r\n print 'Be sure to check the INMODEL file for clumped numbers near the bottom.'\r\n print 'Also, check the MESH and startup files to make sure they are correct.'\r\n \r\n return meshfn,inmodelfn,startupfn", "def __init__(self,FILE_INPUT_CALL):\n self.FILE_INPUT= FILE_INPUT_CALL", "def __init__(self, model_path, logger):\n self.logger = logger\n self.model = pickle.load(open(model_path, 'rb'))\n self.logger.info(\"Starting: IrisPredictor\")", "def __init__( self, checkpoint ):\n\t\tself.model = _tf.keras.models.load_model(checkpoint)", "def compile(self):\n self.model_forward_run = self.train_model.init_model()", "def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):\n #The library mathFuncs serves to both only allow functions supported\n #functions in SBML/user defined functions, but also the python equivalent\n \n np.set_printoptions(threshold=sys.maxsize)\n \n \n \n outputFile = open(outputFilePath, \"w\")\n\n parameters = modelData.parameters\n compartments = modelData.compartments\n species = modelData.species\n reactions = modelData.reactions\n functions = modelData.functions\n \n assignmentRules = modelData.assignmentRules\n rateRules = modelData.rateRules\n initialAssignments = modelData.initialAssignments\n \n mathFuncs = {'abs' : 'abs',\n 'max' : 'max',\n 'min' : 'min',\n 'pow' : 'pow',\n 'exp' : 'math.exp',\n 'floor' : 'np.floor',\n 'ceiling' : 'math.ceil',\n 'exp' : 'math.exp',\n 'ln' : 'math.log',\n 'log' : 'math.log10',\n 'factorial' : 'math.factorial',\n 'sqrt' : 'math.sqrt',\n \n 'eq' : 'operator.eq',\n 'neq' : 'operator.ne',\n 'gt' : 'operator.gt',\n 'lt' : 'operator.lt',\n 'geq' : 'operator.ge',\n 'leq' : 'operator.le',\n \n 'and' : 'operator.and_',\n 'or' : 'operator.or_',\n 'xor' : 'operator.xor_',\n 'not' : 'operator.not_',\n \n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'tan' : 'np.tan',\n 'sec' : '1/np.cos',\n 'csc' : '1/np.sin',\n 'cot' : '1/np.tan',\n 'sinh' : 'np.sinh',\n 'cosh' : 'np.cosh',\n 'tanh' : 'np.tanh',\n 'sech' : '1/np.cosh',\n 'csch' : '1/np.sinh',\n 'coth' : '1/np.tanh',\n 'arcsin' : 'np.arcsin',\n 'arccos' : 'np.arccos',\n 'arctan' : 'np.arctan',\n 'arcsinh' : 'np.arcsinh',\n 'arccosh' : 'np.arccosh',\n 'arctanh' : 'np.arctanh',\n \n 'true' : 'True',\n 'false' : 'False',\n 'notanumber' : 'np.nan',\n 'pi' : 'np.pi',\n 'infinity' : 'np.inf',\n 'exponentiale' : 'np.e',\n 'piecewise' : 'Piecewise'\n } \n #Add in user defined functions\n# for function in functions:\n# mathFuncs[function] = \"self.\" + function\n\t\t\n #Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving the reactions\n reactantCounter = 0\n reactantIndex = {}\n reactionCounter = 0\n reactionIndex = {}\n \n rateRuleVars = []\n rateParams = 0\n for specie in species:\n reactantIndex[specie] = reactantCounter\n reactantCounter += 1\n for key, rateRule in rateRules.items():\n if 
rateRule.variable in parameters or rateRule.variable in compartments:\n rateParams += 1\n reactantIndex[rateRule.variable] = reactantCounter\n reactantCounter += 1\n rateRuleVars.append(rateRule.variable)\n elif rateRule.variable in species:\n pass\n else:\n raise Exception(\"Rate Rule adjusting something other than specie amount, parameter value, or compartment size.\")\n\n \t\t\n stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions),1)])\n \n for rxnId in reactions:\n reactionIndex[rxnId] = reactionCounter\n reactionCounter += 1\n reaction = reactions[rxnId]\n for reactant in reaction.reactants:\n if reactant[1] not in reactantIndex:\n reactantIndex[reactant[1]] = reactantCounter\n reactantCounter += 1\n if not (species[reactant[1]].isBoundarySpecies == \"True\"):\n stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]\n\n \t\n # for reaction in reactions:\n # for reactant in reactions[reaction][0]:\n # if reactant[1] not in reactantIndex:\n # reactantIndex[reactant[1]] = reactantCounter\n # reactantCounter += 1\n # if not species[reactant[1]][4]:\n # stoichCoeffMat[reactantIndex[reactant[1]], reaction-1] += reactant[0]\n #print(rateParams)\n #print(stoichCoeffMat)\n \n outputFile.write(\"from sbmltopyode.SBMLModelClasses import *\\n\")\n outputFile.write(\"from scipy.integrate import odeint\\n\")\n outputFile.write(\"import numpy as np\\n\")\n outputFile.write(\"import operator\\n\")\n outputFile.write(\"import math\\n\\n\")\n \n outputFile.write(\"class \" + objectName +\":\\n\\n\")\n \n outputFile.write(\"\\tdef __init__(self):\\n\\n\")\n outputFile.write(\"\\t\\tself.p = {} #Dictionary of model parameters\\n\")\n for paramId in parameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \\'\"+ paramId + \"\\', \" + str(parameters[paramId].isConstant) +\")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.c = {} #Dictionary of compartments\\n\")\n for compartmentId in compartments:\n outputFile.write(\"\\t\\tself.c[\\'\" + compartmentId + \"\\'] = Compartment(\" + str(compartments[compartmentId].size) + \", \" + str(compartments[compartmentId].dimensionality)+ \", \" + str(compartments[compartmentId].isConstant) + \")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.s = {} #Dictionary of chemical species\\n\")\n for speciesId in species:\n outputFile.write(\"\\t\\tspeciesMetadata = SBMLMetadata('\" + species[speciesId].name +\"')\\n\")\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\'] = Species(\" + str(species[speciesId].value) + \", '\" + species[speciesId].valueType + \"', self.c['\" + species[speciesId].compartment + \"'], \" + str(species[speciesId].hasOnlySubstanceUnits) + \", constant = \" + str(species[speciesId].isConstant) + \")\\n\")\n for key, rule in assignmentRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n for key, rule in rateRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n \n \n outputFile.write(\"\\n\\t\\tself.r = {} #Dictionary of reactiions\\n\")\n for reactionId in reactions:\n outputFile.write(\"\\t\\tself.r[\\'\" + reactionId + \"\\'] = \" + reactionId + \"(self, SBMLMetadata('\" + reactions[reactionId].name + \"'))\\n\")\n \n outputFile.write(\"\\t\\tself.time = 0\\n\\n\")\n \n outputFile.write(\"\\t\\tself.reactionMetadata = {\")\n commaFlag = 0\n for 
reactionId in reactions:\n if commaFlag == 0:\n commaFlag = 1\n outputFile.write(\"\\n\\t\\t\")\n else:\n outputFile.write(\",\\n\\t\\t\")\n outputFile.write(\"self.Reaction\" + reactionId + \": SBMLMetadata('\" + reactions[reactionId].name + \"')\")\n outputFile.write(\"\\n\\t\\t}\\n\")\n \n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n \n outputFile.write(\"\\n\\n\")\n outputFile.write(\"\\tdef AssignmentRules(self):\\n\\n\")\n #These functions are defined here due to reading variables in the parent function's namespace\n #These are not intended to be used elsewhere\n def ParseLHS(rawLHS):\n returnLHS = ''\n if rawLHS in parameters:\n returnLHS = \"self.p[\\'\" + rawLHS + \"\\'].value = \"\n elif rawLHS in species:\n if not species[rawLHS].hasOnlySubstanceUnits: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].concentration = '\n else: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].amount = '\n elif rawLHS in compartments:\n returnLHS = 'self.c[\\'' + rawLHS + '\\'].size = '\n else:\n raise(Exception(\"New case: rule LHS not in p: \" + rawLHS))\n\n return returnLHS\n\t\n def ParseRHS(rawRHS, extendedParams = [], objectText = \"self\"):\n #objectText is not \"self\" when parsing reaction math\n \n #The main purpose of this function is to turn math strings given by libSBML into\n #code formated to properly call members of the resulting class\n #For example k_1*C_A may turn to\n \n \n rawRHS = rawRHS.replace(\"^\", \"**\") #Replaces carrot notation for exponentiation with ** operator\n variables = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rawRHS): #look for variable names\n #ToDo: check for function calls\n variables.append([rawRHS[match.start():match.end()], match.span()])\n \n #rule[1] contains the right hand side\n returnRHS = ''\n oldSpan = None\n if variables != []:\n for variable in variables:\n if oldSpan == None and variable[1][0] != 0:\n returnRHS += rawRHS[0:variable[1][0]]\n elif oldSpan != None:\n returnRHS += rawRHS[oldSpan[1]:variable[1][0]]\n oldSpan = variable[1]\n if variable[0] in parameters:\n returnRHS += objectText + '.p[\\'' + variable[0] + '\\'].value'\n elif variable[0] in species:\n if not species[variable[0]].hasOnlySubstanceUnits == \"True\": \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].concentration'\n else: \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].amount'\n elif variable[0] in compartments:\n returnRHS += objectText + '.c[\\'' + variable[0] + '\\'].size'\n elif variable[0] in mathFuncs:\n returnRHS += mathFuncs[variable[0]]\n elif variable[0] in functions:\n returnRHS += objectText + '.' 
+ variable[0]\n elif variable[0] in extendedParams:\n if objectText == \"self\":\n returnRHS += variable[0]\n else:\n returnRHS += \"self.p[\\'\" + variable[0] + \"\\'].value\"\n\n elif variable[0] == \"time\":\n returnRHS += objectText + '.time'\n elif variable[0] == \"pi\":\n returnRHS += \"np.pi\"\n else:\n raise(Exception('New case: unkown RHS variable: ' + variable[0]))\n returnRHS += rawRHS[variable[1][1]:len(rawRHS)]\n # print(rule[1][variable[1][1]])\n #print(rule[1][-1])\n else:\n returnRHS = rawRHS\n\t\t\n return returnRHS\n\n ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]\n for key, assignment in initialAssignments.items():\n ruleDefinedVars.append(assignment.variable)\n \n for key, rule in assignmentRules.items():\n rule.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rule.math): #look for variable names\n rule.dependents.append(rule.math[match.start():match.end()])\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] not in ruleDefinedVars:\n rule.dependents.pop(originalLen- i-1)\n \n for key, assignment in initialAssignments.items():\n assignment.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', assignment.math): #look for variable names\n assignment.dependents.append(assignment.math[match.start():match.end()])\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :\n assignment.dependents.pop(originalLen- i-1)\n \n# breakVar = False\n while True:\n continueVar = False\n breakVar = True\n varDefinedThisLoop = None\n for key, rule in assignmentRules.items():\n if rule.dependents == []:\n ruleLHS = ParseLHS(rule.variable)\n ruleRHS = ParseRHS(rule.math)\n outputFile.write(\"\\t\\t\" + ruleLHS + ruleRHS + '\\n\\n')\n varDefinedThisLoop = rule.variable\n rule.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n if not continueVar:\n for key, assignment in initialAssignments.items():\n if assignment.dependents == []:\n assignmentLHS = ParseLHS(assignment.variable)\n assignmentRHS = ParseRHS(assignment.math)\n outputFile.write(\"\\t\\tif self.time <= 0 :\\n\")\n if assignment.variable in parameters:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.p['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in species:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.s['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in compartment:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.c['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n \n 
varDefinedThisLoop = assignment.variable\n assignment.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n for rule in assignmentRules.values():\n if not rule.dependents == None:\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] == varDefinedThisLoop:\n rule.dependents.pop(originalLen - i -1)\n# print(rule.variable + ':' + str(rule.dependents))\n\n for assignment in initialAssignments.values():\n if not assignment.dependents == None:\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:\n assignment.dependents.pop(originalLen - i - 1)\n# print(assignment.variable + ':' + str(assignment.dependents))\n \n if continueVar:\n continue\n elif breakVar:\n break\n else:\n raise Exception('Algebraic Loop in AssignmentRules')\n \n outputFile.write(\"\\t\\treturn\\n\\n\")\n \n for functionId in functions:\n arguments = functions[functionId].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write(\"\\tdef \" + functionId + \"(self, \" + argumentString + \"):\\n\")\n outputFile.write(\"\\t\\treturn \" + functions[functionId].mathString.replace(\"^\", \"**\") + \"\\n\")\n \n for reactionId in reactions:\n outputFile.write(\"\\tdef Reaction\" + str(reactionId) + \"(self):\\n\\n\")\n\n rxnParameters = []\n for param in reactions[reactionId].rxnParameters:\n outputFile.write(\"\\t\\t\" + param[0] + \" = \" + str(param[1]) + \"\\n\")\n rxnParameters.append(param[0])\n\t\t\t\n rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)\n \n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n rateRuleLHSVars = []\n for key, rateRule in rateRules.items():\n rateRuleLHSVars.append(rateRule.variable)\n outputFile.write(\"\\tdef Rate\" + rateRule.variable + \"(self):\\n\\n\")\n rateLaw = ParseRHS(rateRule.math)\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n \n yArray = ''\n i = 0\n yArrayVars = [0 for x in range(len(species) + rateParams)]\n for variable, index in reactantIndex.items():\n yArrayVars[index] = variable\n \n for index in range(len(yArrayVars)):\n # print(yArrayVars[index])\n if index != 0:\n yArray += ', '\n \n if yArrayVars[index] in species:\n yArray += 'self.s[\\'' + yArrayVars[index] + '\\'].amount'\n continue\n \n if yArrayVars[index] in parameters:\n yArray += 'self.p[\\'' + yArrayVars[index] + '\\'].value'\n continue\n \n if yArrayVars[index] in compartments:\n yArray += 'self.c\\'' + yArrayVars[index] + '\\'].size'\n continue\n \n\n \n outputFile.write('\\tdef _SolveReactions(self, y, t):\\n\\n')\n outputFile.write('\\t\\tself.time = t\\n')\n outputFile.write('\\t\\t' + yArray + ' = y\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n rateArray = '[ '\n i = 0\n rateArrayVars = [0 for x in range(len(species) + rateParams)]\n \n for variable, index in reactantIndex.items():\n if variable in rateRuleLHSVars:\n rateArrayVars[index] = variable\n \n\n \n for variable in rateArrayVars:\n if i != 0:\n rateArray += ', '\n i += 1\n if variable == 0:\n rateArray += '0'\n else:\n rateArray += 'self.Rate' + variable + '()'\n \n \n \n \n rateArray += ']'\n outputFile.write('\\t\\trateRuleVector = np.array(' + str(rateArray) 
+ ', dtype = np.float64)\\n\\n') \n \n outputFile.write('\\t\\tstoichiometricMatrix = np.array(' + re.sub('\\n,', ',\\n\\t\\t\\t\\t\\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\treactionVelocities = np.array([')\n reactionElements = ''\n if reactions:\n for reactionId in reactions:\n if reactionElements == '':\n reactionElements += ('self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements += (', self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements = '0'\n outputFile.write(reactionElements + '], dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\\n\\n')\n outputFile.write('\\t\\treturn rateOfSpeciesChange\\n\\n')\n \n outputFile.write('\\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\\n\\n')\n \n outputFile.write('\\t\\tfinalTime = self.time + deltaT\\n')\n outputFile.write('\\t\\ty0 = np.array([' + yArray + '], dtype = np.float64)\\n')\n outputFile.write('\\t\\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\\n')\n outputFile.write('\\t\\tself.time = finalTime\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n outputFile.write('\\n')\n \n for key in reactions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.p = {}\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n for param in reactions[key].rxnParameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + param[0] + \"\\'] = Parameter(\" + str(param[1]) + \", '\" + param[0] + \"')\\n\")\n #\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \"+ paramId + \", \" + str(parameters[paramId].isConstant) +\")\\n\"\n \n outputFile.write('\\n\\tdef __call__(self):\\n')\n# print(key)\n# print(reactions[key].rxnParameters)\n rxnParamNames = [param[0] for param in reactions[key].rxnParameters]\n rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, \"self.parent\")\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n \n for key in functions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n\n arguments = functions[key].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write('\\tdef __call__(self, ' + argumentString + '):\\n')\n outputFile.write(\"\\t\\treturn \" + functions[key].mathString.replace(\"^\", \"**\") + \"\\n\\n\")\n\n outputFile.close()", "def __init__(self, mesh, out_dir='./results/', use_periodic=False):\n s = \"::: INITIALIZING 2D MODEL :::\"\n print_text(s, cls=self)\n \n Model.__init__(self, mesh, out_dir, use_periodic)", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget 
https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def WriteSourceFileForSrnModel(filename, model):\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(srn_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Functiond efinitions\n funct_defn_str = GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Initialise function\n initialise_fn = 
GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(srn_model_name + \".cpp written!\\n\")", "def __init__(\n self,\n data_columns: Sequence[str] = None,\n bijector: Tuple[InitFunction, Bijector_Info] = None,\n latent: distributions.LatentDist = None,\n conditional_columns: Sequence[str] = None,\n data_error_model: Callable = None,\n condition_error_model: Callable = None,\n autoscale_conditions: bool = True,\n seed: int = 0,\n info: Any = None,\n file: str = None,\n _dictionary: dict = None,\n ) -> None:\n\n # validate parameters\n if data_columns is None and file is None and _dictionary is None:\n raise ValueError(\"You must provide data_columns OR file.\")\n if any(\n (\n data_columns is not None,\n bijector is not None,\n conditional_columns is not None,\n latent is not None,\n data_error_model is not None,\n condition_error_model is not None,\n info is not None,\n )\n ):\n if file is not None:\n raise ValueError(\n \"If providing a file, please do not provide any other parameters.\"\n )\n if _dictionary is not None:\n raise ValueError(\n \"If providing a dictionary, please do not provide any other parameters.\"\n )\n if file is not None and _dictionary is not None:\n raise ValueError(\"Only provide file or _dictionary, not both.\")\n\n # if file or dictionary is provided, load everything from it\n if file is not None or _dictionary is not None:\n save_dict = self._save_dict()\n if file is not None:\n with open(file, \"rb\") as handle:\n save_dict.update(pickle.load(handle))\n else:\n save_dict.update(_dictionary)\n\n if save_dict[\"class\"] != self.__class__.__name__:\n raise TypeError(\n f\"This save file isn't a {self.__class__.__name__}. 
\"\n f\"It is a {save_dict['class']}\"\n )\n\n # load columns and dimensions\n self.data_columns = save_dict[\"data_columns\"]\n self.conditional_columns = save_dict[\"conditional_columns\"]\n self._input_dim = len(self.data_columns)\n self.info = save_dict[\"info\"]\n\n # load the latent distribution\n self._latent_info = save_dict[\"latent_info\"]\n self.latent = getattr(distributions, self._latent_info[0])(\n *self._latent_info[1]\n )\n\n # load the error models\n self.data_error_model = save_dict[\"data_error_model\"]\n self.condition_error_model = save_dict[\"condition_error_model\"]\n\n # load the bijector\n self._bijector_info = save_dict[\"bijector_info\"]\n if self._bijector_info is not None:\n init_fun, _ = build_bijector_from_info(self._bijector_info)\n _, self._forward, self._inverse = init_fun(\n random.PRNGKey(0), self._input_dim\n )\n self._params = save_dict[\"params\"]\n\n # load the conditional means and stds\n self._condition_means = save_dict[\"condition_means\"]\n self._condition_stds = save_dict[\"condition_stds\"]\n\n # set whether or not to automatically standard scale any\n # conditions passed to the normalizing flow\n self._autoscale_conditions = save_dict[\"autoscale_conditions\"]\n\n # if no file is provided, use provided parameters\n else:\n self.data_columns = tuple(data_columns)\n self._input_dim = len(self.data_columns)\n self.info = info\n\n if conditional_columns is None:\n self.conditional_columns = None\n self._condition_means = None\n self._condition_stds = None\n else:\n self.conditional_columns = tuple(conditional_columns)\n self._condition_means = jnp.zeros(\n len(self.conditional_columns)\n )\n self._condition_stds = jnp.ones(len(self.conditional_columns))\n\n # set whether or not to automatically standard scale any\n # conditions passed to the normalizing flow\n self._autoscale_conditions = autoscale_conditions\n\n # set up the latent distribution\n if latent is None:\n self.latent = distributions.Uniform(self._input_dim, 5)\n else:\n self.latent = latent\n self._latent_info = self.latent.info\n\n # make sure the latent distribution and data_columns have the\n # same number of dimensions\n if self.latent.input_dim != len(data_columns):\n raise ValueError(\n f\"The latent distribution has {self.latent.input_dim} \"\n f\"dimensions, but data_columns has {len(data_columns)} \"\n \"dimensions. 
They must match!\"\n )\n\n # set up the error models\n if data_error_model is None:\n self.data_error_model = gaussian_error_model\n else:\n self.data_error_model = data_error_model\n if condition_error_model is None:\n self.condition_error_model = gaussian_error_model\n else:\n self.condition_error_model = condition_error_model\n\n # set up the bijector\n if bijector is not None:\n self.set_bijector(bijector, seed=seed)\n # if no bijector was provided, set bijector_info to None\n else:\n self._bijector_info = None", "def load_model(self, filename):\r\n pass", "def __init__(self, path_model):\n self._interpreter = Interpreter.load(path_model)", "def _init_param(self):\n self.activation_func = self.activation_functions[self.activation]\n self.derivative_func = self.derivative_functions[self.activation]\n\n # output for regression\n if self.classes_ is None:\n self.output_func = _identity\n # output for multi class\n elif len(self.classes_) > 2 and self.multi_label is False:\n self.output_func = _softmax\n # output for binary class and multi-label\n else:\n self.output_func = logistic_sigmoid", "def makeModelFiles(self,niter=20,targetrms=1.0,nlayers=100,nlperdec=30,\n z1layer=50,bwidth=200,trigger=.75,savepath=None,rhostart=100,\n occampath=r\"c:\\Peacock\\PHD\\OCCAM\\MakeFiles\"):\n #get the base name of data file \n dfnb=os.path.basename(self.datafn)\n \n #put data file into the same directory as MakeModel2DMT\n if os.path.dirname(self.datafn)!=occampath:\n shutil.copy(self.datafn,os.path.join(occampath,dfnb))\n \n #write input file for MakeModel2DMT\n mmfid=open(os.path.join(occampath,'inputMakeModel.txt'),'w')\n mmfid.write(dfnb+'\\n')\n mmfid.write(str(niter)+'\\n') \n mmfid.write(str(targetrms)+'\\n') \n mmfid.write(str(nlayers)+'\\n')\n mmfid.write(str(nlperdec)+'\\n')\n mmfid.write(str(z1layer)+'\\n')\n mmfid.write(str(bwidth)+'\\n')\n mmfid.write(str(trigger)+'\\n')\n mmfid.write('\\n')\n mmfid.close()\n \n #get current working directory\n cdir=os.getcwd() \n \n #change directory path to occam path\n os.chdir(occampath) \n \n #---call MakeModel2DMT---\n subprocess.os.system(\"MakeModel2DMT < inputMakeModel.txt\")\n \n #change back to original working directory \n os.chdir(cdir)\n \n if savepath==None:\n savepath=os.path.dirname(self.datafn)\n \n if not os.path.exists(savepath):\n os.mkdir(savepath)\n \n meshfn=os.path.join(savepath,'MESH') \n inmodelfn=os.path.join(savepath,'INMODEL') \n startupfn=os.path.join(savepath,'startup') \n \n #copy ouput files to savepath\n shutil.copy(os.path.join(occampath,'MESH'),meshfn)\n shutil.copy(os.path.join(occampath,'INMODEL'),inmodelfn)\n shutil.copy(os.path.join(occampath,'startup'),startupfn)\n shutil.copy(os.path.join(occampath,'inputMakeModel.txt'),\n os.path.join(savepath,'inputMakeModel.txt'))\n if not os.path.exists(os.path.join(savepath,dfnb)):\n shutil.copy(self.datafn,os.path.join(savepath,dfnb))\n if os.path.getctime(os.path.join(savepath,dfnb))<\\\n os.path.getctime(self.datafn):\n shutil.copy(self.datafn,os.path.join(savepath,dfnb))\n \n \n# #rewrite mesh so it contains the right number of columns and rows\n# rewriteMesh(meshfn)\n \n #write startup file to have the starting desired starting rho value\n ifid=open(startupfn,'r')\n ilines=ifid.readlines()\n ifid.close()\n \n if rhostart!=100:\n #make startup model a homogeneous half space of rhostart\n rhostart=np.log10(rhostart)\n ifid=open(startupfn,'w')\n for line in ilines:\n if line.find('2.000000')>=0:\n line=line.replace('2.000000','%.6f' % rhostart)\n ifid.write(line)\n 
ifid.close()\n \n print('Be sure to check the INMODEL file for clumped numbers near the bottom.')\n print('Also, check the MESH and startup files to make sure they are correct.')\n \n self.meshfn=meshfn\n self.inmodelfn=inmodelfn\n self.startupfn=startupfn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the dadi SFS file and return it as a Spectrum object. Dadi will do basic checking of the spectrum, but we will be more thorough.
def load_sfs(self, sfs):
    try:
        fs = dadi.Spectrum.from_file(sfs)
    except:
        print 'The spectrum file you provided is not valid!'
        exit(1)
    return fs
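For orientation, here is a minimal standalone sketch of the same idea, assuming dadi is installed; the module-level wrapper and the placeholder path 'example.fs' are illustrative and not part of the original class:

import sys

import dadi


def load_sfs(sfs_path):
    # Delegate parsing and basic validation to dadi; fail loudly otherwise.
    try:
        return dadi.Spectrum.from_file(sfs_path)
    except Exception:
        sys.exit('The spectrum file you provided is not valid!')


if __name__ == '__main__':
    # 'example.fs' is a hypothetical path to a dadi-formatted SFS file.
    fs = load_sfs('example.fs')
    print(fs)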
[ "def stis_spectrum_loader(file_obj, **kwargs):\n\n with read_fileobj_or_hdulist(file_obj, **kwargs) as hdulist:\n header = hdulist[0].header\n meta = {'header': header}\n\n unit = Unit(\"erg/cm**2 Angstrom s\")\n disp_unit = Unit('Angstrom')\n data = hdulist[1].data['FLUX'].flatten() * unit\n dispersion = hdulist[1].data['wavelength'].flatten() * disp_unit\n uncertainty = StdDevUncertainty(hdulist[1].data[\"ERROR\"].flatten() * unit)\n\n sort_idx = dispersion.argsort()\n dispersion = dispersion[sort_idx]\n data = data[sort_idx]\n uncertainty = uncertainty[sort_idx]\n\n return Spectrum1D(flux=data,\n spectral_axis=dispersion,\n uncertainty=uncertainty,\n meta=meta)", "def load_spectrum(inputfile):\n if inputfile.endswith(\"fits\"):\n wav, flux = spectrum_sdss_fits(inputfile)\n imodel = False\n inu = False\n\n else:\n f = open(inputfile, \"r\")\n # Read header\n try:\n nn = int(f.tell())\n f.readline()\n except BaseException:\n pass\n\n # Read first line\n f.readline()\n # Check format of second line\n test = f.readline()\n f.seek(0) # rewind to begining\n\n # Read data\n if (len(test.split()) == 10) or (len(test.split()) == 6): # test62\n wav, flux = spectrum_test62(f)\n imodel = True\n inu = True\n\n elif len(test.split(\",\")) == 2 or len(test.split(\",\")) == 4: # csv\n wav, flux = spectrum_csv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 2: # tsv\n wav, flux = spectrum_tsv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 3: # tsv with uncertainties\n wav, flux = spectrum_tsv3(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 5 or len(test.split()) == 7: # mics format\n wav, flux = spectrum_misc(f)\n imodel = False\n inu = False\n\n else:\n\n raise ValueError(f\"Unknown format for {inputfile}.\")\n\n f.close()\n\n return Spectrum(wav, flux, (imodel, inu))", "def read_spectrum(filename):\n with fits.open(filename) as hdulist:\n primary_hdu = hdulist[0]\n identifier = primary_hdu.header['OBJECT']\n wave = compute_wave(hdulist)\n flux = primary_hdu.data\n return identifier, wave, flux", "def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)", "def spectrum_sdss_fits(f):\n\n hdul = fits.open(f)\n\n if \"SDSS\" in hdul[0].header[\"TELESCOP\"]:\n # .fits from SDSS\n data = hdul[1].data\n\n # log10(wav) in the .fits\n wav = 10.0 ** data.field(1) # Angstrom\n\n # flux F_lambda in units of de 1e-17 erg/...\n flux = data.field(0) * 1e-17 # erg/cm^2/s/Ang\n\n # c_ang = speed of light in angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n\n hdul.close()\n return wav, flux\n\n else:\n raise Exception(\"Unknown .fits format.\")", "def read(cls,infile):\n data,head = fits.getdata(infile,header=True)\n return Spectrum(data[:,0],data[:,1])", "def read_spectrum(specfile):\n hdu = pyfits.open(specfile)\n w = [a[0] for a in hdu[0].data]\n f = [a[1] for a in hdu[0].data]\n if 'cassis' in specfile.name:\n ef = [a[2] for a in hdu[0].data]\n colS = 'b'\n elif 'sws' in specfile.name:\n ef = [a[3] for a in hdu[0].data]\n colS = 'g'\n \n f2, ef2 = [], []\n for i in range(0, len(f)):\n f2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[0])\n ef2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[1])\n \n wvlen = [a[0] for a in sorted(zip(w,f2))]\n flux = [a[1] for a in sorted(zip(w,f2))]\n eflux = [a[1] for a in sorted(zip(w,ef2))]\n \n return wvlen,flux,eflux,colS", "def spectrum_inst():\n spectrum_file = file_ref(\"binary.vot\")\n return 
analyzer.Spectrum.read_spectrum(spectrum_file)", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def readSpec(filename):\n header = parseSigprocHeader(filename)\n hdrlen = header[\"hdrlen\"]\n f = File(filename,\"r\",nbits=32)\n f.seek(hdrlen)\n data = np.fromfile(f,dtype=\"complex32\")\n return FourierSeries(data,header)", "def spectre_sdss_fits(f):\n hdul = fits.open(f)\n \n if 'SDSS' in hdul[0].header['TELESCOP']:\n # .fits from SDSS\n data = hdul[1].data\n \n # log10(wav) dans les .fits\n wav = 10.**data.field(1) # Angstrom\n \n # flux F_lambda en unités de 1e-17 erg/...\n flux = data.field(0)*1e-17 # erg/cm^2/s/Ang\n \n # c_ang = vitesse de la lumière en angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n \n hdul.close()\n return wav, flux\n \n else:\n raise Exception('.fits format inconnu')", "def build_spectrum(spectrum_filename):\n hdulist = fits.open(spectrum_filename)\n data = hdulist[1].data\n \n spec = Spectrum(data['wave'], data['flux'], data['error'])\n \n return spec", "def deimos_spectrum2D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n hdulist[1].header['CTYPE2'] = 'Spatial Y'\n wcs = WCS(hdulist[1].header)\n # original WCS has both axes named \"LAMBDA\", glue requires unique component names\n\n data.coords = coordinates_from_wcs(wcs)\n data.header = hdulist[1].header\n data.add_component(hdulist[1].data['FLUX'][0], 'Flux')\n data.add_component(1/np.sqrt(hdulist[1].data['IVAR'][0]), 'Uncertainty')\n return data", "def 
parseDigitalSValRecord(self, f):\n try:\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n except IndexError:\n newsize = len(self.digitalsvalrecords) + DEFNDIGITALSVALRECORDS\n self.digitalsvalrecords.resize(newsize, refcheck=False)\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n #junk, junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('<hiqhhi', f.read(22))\n #junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('qqhhi', f.read(24))\n junk, r['TimeStamp'], r['SVal'], junk, junk = unpackdsvalrec(f.read(24))\n self.ndigitalsvalrecords += 1", "def parse_spectrum(path):\n\n with open(path, \"r\") as fp:\n contents = fp.read().split(\"\\n\")[3:-3]\n\n return np.array([point.split() for point in contents], dtype=float)", "def parse_linetools_spectrum_format(hdulist, **kwargs):\n if 'WAVELENGTH' not in hdulist:\n pdb.set_trace()\n xspec1d = XSpectrum1D.from_spec1d(spec1d)\n else:\n wave = hdulist['WAVELENGTH'].data * u.AA\n fx = hdulist['FLUX'].data\n\n # Error array\n if 'ERROR' in hdulist:\n sig = hdulist['ERROR'].data\n else:\n sig = None\n\n if 'CONTINUUM' in hdulist:\n co = hdulist['CONTINUUM'].data\n else:\n co = None\n\n xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, co), **kwargs)\n\n if 'METADATA' in hdulist[0].header:\n # Prepare for JSON (bug fix of sorts)\n metas = hdulist[0].header['METADATA']\n ipos = metas.rfind('}')\n try:\n xspec1d.meta.update(json.loads(metas[:ipos+1]))\n except:\n # TODO: fix this in a better manner, if possible\n print(\"Bad METADATA; proceeding without\")\n\n return xspec1d", "def openObservedSpectrumFC(self):\n\t\tself.wavelength,self.fluxl,self.fluxlErr = n.loadtxt(self.path_to_spectrum , unpack=True )", "def nirspec_spectrum2d_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n data.header = hdulist['DATA'].header\n data.coords = coordinates_from_header(hdulist[1].header)\n data.add_component(hdulist['DATA'].data, 'Flux')\n data.add_component(np.sqrt(hdulist['VAR'].data), 'Uncertainty')\n\n return data", "def _read(self, spec_file: IO[AnyStr], filename: str) -> List[Spectrum]:\n raise NotImplementedError(SpectrumReader._read.__qualname__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a model name, set the function that has to be called to run that model. This should be safe because we restrict the user input for the models at the argument parsing stage.
def set_model_func(self, model):
    if model == 'SI':
        import cavefish_dadi.Models.si
        return cavefish_dadi.Models.si.si
    elif model == 'SC':
        import cavefish_dadi.Models.sc
        return cavefish_dadi.Models.sc.sc
    elif model == 'IM':
        import cavefish_dadi.Models.im
        return cavefish_dadi.Models.im.im
    elif model == 'AM':
        import cavefish_dadi.Models.am
        return cavefish_dadi.Models.am.am
    elif model == 'SC2M':
        import cavefish_dadi.Models.sc2m
        return cavefish_dadi.Models.sc2m.sc2m
    elif model == 'IM2M':
        import cavefish_dadi.Models.im2m
        return cavefish_dadi.Models.im2m.im2m
    elif model == 'AM2M':
        import cavefish_dadi.Models.am2m
        return cavefish_dadi.Models.am2m.am2m
    else:
        return None
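As a design note, the same dispatch can be expressed as a lookup table plus importlib, which keeps the mapping in one place; this sketch assumes the cavefish_dadi.Models submodules exist exactly as named in the original and is not part of the source package:

import importlib

# Model label -> module path; each module exposes a function of the same
# name as its leaf module (e.g. cavefish_dadi.Models.si.si).
_MODEL_MODULES = {
    'SI': 'cavefish_dadi.Models.si',
    'SC': 'cavefish_dadi.Models.sc',
    'IM': 'cavefish_dadi.Models.im',
    'AM': 'cavefish_dadi.Models.am',
    'SC2M': 'cavefish_dadi.Models.sc2m',
    'IM2M': 'cavefish_dadi.Models.im2m',
    'AM2M': 'cavefish_dadi.Models.am2m',
}


def set_model_func(model):
    # Return the model function for a known label, or None for anything else.
    module_path = _MODEL_MODULES.get(model)
    if module_path is None:
        return None
    module = importlib.import_module(module_path)
    return getattr(module, module_path.rsplit('.', 1)[-1])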
[ "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner", "def addModel(self, model, modelName):\n self.modelFunction = model\n self.modelName = modelName", "async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")", "def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]:\n arch_name = get_arch_name(model_name)\n if module_filter and arch_name not in _module_to_models.get(module_filter, {}):\n raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.')\n return _model_entrypoints[arch_name]", "def get_model_function(name: str):\n if name not in REGISTRY:\n names = \", \".join(sorted(REGISTRY.keys()))\n raise KeyError(f\"Model {name} not found in registry. Available names: {names}\")\n return REGISTRY[name]", "def set_model_name(self, model_name: str = \"355M\") -> None:\n self.model_name = model_name", "def test_model_processor():\n global model_processor_called\n\n model_str = 'first 34 45 7 A 45 65 B true C \"dfdf\"'\n\n metamodel = metamodel_from_str(grammar)\n metamodel.register_model_processor(model_processor)\n\n metamodel.model_from_str(model_str)\n\n assert model_processor_called", "def set_model(*, name: str, model: typing.Type) -> None:\n setattr(open_alchemy.models, name, model)", "def get_model(name, *args, **kwargs):\n assert name in models.keys(), \\\n \"Unknown model name \" + name\n return models[name](*args, **kwargs)", "def model_name(self, model_name: str):\n\n self._model_name = model_name", "def set_models_eval(self):\n raise NotImplementedError", "def select_model(model_name: str):\r\n global predictor, currently_selected_model\r\n predictor = FeatureExtractor(model_name)\r\n currently_selected_model = model_name", "def get_option_setter(model_name):\n model_class = find_model_using_name(model_name)\n return model_class.modify_commandline_options", "def main():\n print('1. Linear Model')\n print('2. 
K-NN')\n\n choice = input(\"Enter your choice::\")\n\n if choice=='1':\n Run_linear()\n else:\n Run_knn()", "def _call_adanet_model_fn(self, input_fn, mode, params):\n\n with tf.Graph().as_default():\n tf.set_random_seed(self.config.tf_random_seed)\n # Create global step before calling model_fn as does superclass.\n tf.train.get_or_create_global_step()\n features, labels = input_fn()\n self._adanet_model_fn(features, labels, mode, params)", "def model_fn(self, features, labels, mode, params):\n raise NotImplementedError", "def interprocess_function(name):\n interproc_funcs = set([\"peak_fitting\", \"transform\"])\n assert name in interproc_funcs\n if name == \"peak_fitting\":\n return peak_fitting \n elif name == \"transform\":\n return transform", "def set_model_name(self, name):\n self.model_name = name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Summarize the replicate runs and convert the parameter estimates into meaningful numbers.
def summarize(self, locuslen):
    # First, calculate the mean of the parameter estimates from each
    # of the replicates
    hot_means = []
    for r_t in zip(*self.hot_params):
        v = [x for x in r_t if not math.isnan(x)]
        hot_means.append(sum(v)/len(v))
    cold_means = []
    for r_t in zip(*self.cold_params):
        v = [x for x in r_t if not math.isnan(x)]
        cold_means.append(sum(v)/len(v))
    bfgs_means = []
    for r_t in zip(*self.opt_params):
        v = [x for x in r_t if not math.isnan(x)]
        bfgs_means.append(sum(v)/len(v))
    theta_mean = sum(self.theta) / len(self.theta)
    # Then, convert the parameters into meaningful values
    # the theta estimate is 4*Na*u*L
    anc_ne = theta_mean / (4 * 3e-9 * locuslen)
    # Then, the parameters are scaled by that. Population sizes are scaled
    # by theta (4Na), and times and migration rates are given in units of
    # 2N.
    scaled_params = []
    for name, val in zip(self.params['Names'], bfgs_means):
        if name.startswith('N'):
            scaled_params.append(val * anc_ne)
        elif name.startswith('m'):
            scaled_params.append(val / (anc_ne * 2))
        elif name.startswith('T'):
            scaled_params.append(val * anc_ne * 2)
        else:
            scaled_params.append(val)
    # Write these values into the class data
    self.hot_mean = hot_means
    self.cold_mean = cold_means
    self.bfgs_mean = bfgs_means
    self.theta_mean = theta_mean
    self.Na = anc_ne
    self.scaled_params = scaled_params
    return
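To make the rescaling concrete, a short worked example with made-up numbers (the theta estimate and locus length below are hypothetical; the 3e-9 per-site mutation rate is the one hard-coded above):

theta_mean = 1200.0        # hypothetical dadi theta estimate (theta = 4*Na*u*L)
mu = 3e-9                  # per-site mutation rate used in the code above
locuslen = 1000000         # hypothetical total sequence length in bp

anc_ne = theta_mean / (4 * mu * locuslen)
print(anc_ne)              # 100000.0 -> ancestral effective population size

# A dadi size parameter of N1 = 2.5 then scales to 2.5 * Na individuals,
# and a dadi time of T = 0.1 scales to 0.1 * 2 * Na generations.
print(2.5 * anc_ne)        # 250000.0
print(0.1 * 2 * anc_ne)    # 20000.0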
[ "def get_runs(self) -> int:", "def representation_size_experiments(\n experimental_run,\n n_samples=100, \n min_features=50,\n max_features=100, \n increment=50,\n loss=LOG_LOSS,\n test_metric=accuracy_score,\n random_state=None): \n all_results = []\n for n_features in range(min_features, max_features+1, increment):\n dataset = artificial_dataset(\n n_samples=100,\n n_features=n_features,\n n_classes=3,\n random_state=random_state)\n results = run_experiments(\n experimental_run,\n dataset,\n loss=loss,\n test_metric=accuracy_score,\n random_state=random_state,\n dataset_name=n_features)\n all_results.append(results)\n return all_results", "def load_run_summary(self):\n vrun_attrs = {}\n print 'Loading summary of {:} runs for {:} from SQL database'.format( \\\n len(self.runs),self.exp)\n print 'Estimate loading time ~{:} sec'.format(len(self.runs)/4)\n for run in range(1,self.runs[-1]['num']+1):\n run_attr = experiment_info.run_attributes(self.instrument,self.exp,run)\n for a in run_attr:\n if a['name'] not in vrun_attrs:\n vrun_attrs[a['name']] = {'class': a['class'], 'desc': a['descr'], \n 'type': a['type'], 'val': \n [None for i in range(1,run)]} \n vrun_attrs[a['name']]['val'].append(a['val'])\n self.run_summary = vrun_attrs", "def crunch_numbers(self):\n self.num_unique_loops = len(self.list_loops)\n self.lens_unique_loops = [len(loop) for loop in self.list_loops]\n lens_pre_loops = [len(run['pre_loop']) for run in\n self.list_of_runs]\n\n self.avg_len_pre_loop = sum(lens_pre_loops)/len(self.lens_pre_loops)", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def run_sim(self, runs):\n self.data = [[step for step in self.voter_generator()] for _ in\n range(runs)]", "def capture_stats(self, cust_game=None):\n rewards = 
[]\n optimals = []\n for i in range(self.n_epochs):\n game = self.make_game()\n if cust_game: \n game = cust_game\n game.run_sim()\n rewards.append(game.bandit.rewards)\n optimals.append(game.bandit.optimal)\n sys.stdout.write(f'\\rRunning Epoch: {i}')\n tweak = True if cust_game else False \n self.epochs_run[str(self.num_runs)] = {'rewards': rewards, \n 'optimals': optimals, \n 'sigmas': game.slots.sigmas, \n 'mus':game.slots.mus, \n 'epsilon': game.bandit.epsilon,\n 'steps': game.steps, \n 'tweak': tweak}\n self.num_runs += 1", "def repeat_simulation(simulate, runs, **kwargs):\n \n # Do first run to determine number of outputs\n first_run = simulate(**kwargs)\n results = [[out] for out in first_run]\n \n # Do the rest of the simulations\n for run in range(runs-1):\n outputs = simulate(**kwargs)\n for i, out in enumerate(outputs):\n results[i].append(out)\n \n return results", "def run_inference(self, num_rounds, num_simulations_per_round):\n\n round_description = \"\"\n tbar = tqdm(range(num_rounds))\n for round_ in tbar:\n\n tbar.set_description(round_description)\n\n # Generate parameters from prior in first round, and from most recent posterior\n # estimate in subsequent rounds.\n if round_ == 0:\n parameters, observations = simulators.simulation_wrapper(\n simulator=self._simulator,\n parameter_sample_fn=lambda num_samples: self._prior.sample(\n (num_samples,)\n ),\n num_samples=num_simulations_per_round,\n )\n else:\n parameters, observations = simulators.simulation_wrapper(\n simulator=self._simulator,\n parameter_sample_fn=lambda num_samples: self.sample_posterior(\n num_samples\n ),\n num_samples=num_simulations_per_round,\n )\n\n # Store (parameter, observation) pairs.\n self._parameter_bank.append(torch.Tensor(parameters))\n self._observation_bank.append(torch.Tensor(observations))\n\n # Fit neural likelihood to newly aggregated dataset.\n self._fit_likelihood()\n\n # Update description for progress bar.\n round_description = (\n f\"-------------------------\\n\"\n f\"||||| ROUND {round_ + 1} STATS |||||:\\n\"\n f\"-------------------------\\n\"\n f\"Epochs trained: {self._summary['epochs'][-1]}\\n\"\n f\"Best validation performance: {self._summary['best-validation-log-probs'][-1]:.4f}\\n\\n\"\n )\n\n # Update TensorBoard and summary dict.\n self._summarize(round_)", "def make_summary(self): \n sumWT = 0\n OT = 0\n IT = 0\n WT_by_group = {}\n crowding_by_group = {}\n for g in self.patient_groups:\n WT_by_group[g] = 0\n crowding_by_group = 0\n for iter in range(self.n_iterations):\n IT += self.idle_time_observations[iter]\n OT += self.overtime_observations[iter]\n for id in self.wait_time_observations_by_patient:\n obs = self.wait_time_observations_by_patient[id]\n g = self.patient_groups[id]", "def aggregate_runs(learn_results):\n rel_name = learn_results['rel_name']\n rel_type = learn_results['rel_type']\n emb_model_results = learn_results['emb_model_results']\n result = []\n for emb in emb_model_results:\n emb_results = emb_model_results[emb]\n emress = EmbeddingModelResults(emb_results)\n basic_agg = {\n 'rel_type': rel_type,\n 'rel_name': rel_name,\n 'emb': emb,\n }\n for test_agg in emress.calc_test_aggregates():\n ba = {**basic_agg, **test_agg}\n ba['result_type'] = 'test'\n result.append(ba)\n for rand_agg in emress.calc_randpredict_aggregates():\n ra = {**basic_agg, **rand_agg}\n ra['result_type'] = 'random'\n result.append(ra)\n return result", "def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n 
#Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def estimate_rate_of_reanalysis(self):\n \n self.rate_of_reanalysis = {}\n self.rate_of_reanalysis_d1 = {}\n self.rate_of_reanalysis_d2 = {}\n \n for idx, changes in self.results['log_changes'].items():\n self.rate_of_reanalysis[idx] = self._get_rate_of_reanalysis(changes)\n self.rate_of_reanalysis_d1[idx] = self._get_rate_of_reanalysis_d1(changes)\n self.rate_of_reanalysis_d2[idx] = self._get_rate_of_reanalysis_d2(changes)", "def finalize_experiment(self):\n guard_rotations_all = [x.total_guard_rotations for x in self.simulation_runs]\n guard_rotations_all = filter(None, guard_rotations_all) # remove any Nones out of there\n self.avg_guard_rotations = sum(guard_rotations_all) / len(guard_rotations_all)\n\n time_to_g1_all = [x.time_to_g1 for x in self.simulation_runs]\n time_to_g1_all = filter(None, time_to_g1_all) # remove any Nones out of there\n self.avg_secs_to_deanon = sum(time_to_g1_all) / len(time_to_g1_all)\n\n time_to_g2_all = [x.time_to_g2 for x in self.simulation_runs]\n time_to_g2_all = filter(None, time_to_g2_all) # remove any Nones out of there\n self.avg_secs_to_guard_discovery = sum(time_to_g2_all) / len(time_to_g2_all)\n\n time_to_g3_all = [x.time_to_g3 for x in self.simulation_runs]\n time_to_g3_all = filter(None, time_to_g3_all) # remove any Nones out of there\n self.avg_secs_to_g3 = sum(time_to_g3_all) / len(time_to_g3_all)", "def add_runs(self,runs):\n for r in runs:\n self.add(r)", "def generate_run_summary(run_dir):\n run_number = get_run_number(run_dir)\n run_id = 'run' + str(run_number)\n summary = RunSummary(run_id)\n summary.add_param_field('run_number', get_run_number(run_dir))\n add_fields_from_run_dir(summary, run_dir)\n summary.timeseries = get_growth_data(run_dir)\n return summary", "def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store 
results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results", "def compute_experiment_statistics(df, experiment_proto):\n experiment_proto = copy.deepcopy(experiment_proto)\n\n for round_proto in experiment_proto.rounds.values():\n for reads in [round_proto.positive_reads, round_proto.negative_reads]:\n if reads.name:\n counts = df[reads.name]\n\n reads.statistics.total_depth = counts.sum()\n reads.statistics.num_uniques = (counts > 0).sum()\n reads.statistics.mean = counts.mean()\n # normalize by N, not N-1\n reads.statistics.std_dev = counts.std(ddof=0)\n\n log_counts = np.log(counts + 1)\n reads.statistics.mean_log_plus_one = log_counts.mean()\n reads.statistics.std_dev_log_plus_one = log_counts.std(ddof=0)\n\n return experiment_proto" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drive the UV plane combination. Functionally, this means: performing concatenation, cleaning the concatenated MS in the UV plane, and imaging the concatenated MS.
def _drive_uv(param_dict, clargs, output_basename, casa_instance):
    script = []
    if glob.glob('{}.concat.ms'.format(output_basename)) and clargs.overwrite:
        os.system('rm -rf {}.concat.ms'.format(output_basename))
        # casa_instance.run_script(script)
    # todo
    # write an extension of the drivecasa command for imstat, which will let
    # us do the imstat work to do the inference for clean params.
    # perform concatenation
    if not glob.glob('{}.concat.ms'):
        concat_vis = drivecasa.commands.reduction.concat(
            script,
            [param_dict['twelve_meter_filename'],
             param_dict['seven_meter_filename']],
            out_path='./{}.concat.ms'.format(output_basename))
    # clean + image
    thresh, clean_args = utils.param_dict_to_clean_input(
        param_dict, seven_meter=False)
    clean_args.update(
        {'spw': str(param_dict['seven_meter_spw'] + ',' + param_dict['twelve_meter_spw'])})
    clean_image = drivecasa.commands.clean(
        script, concat_vis, niter=10000, threshold_in_jy=thresh,
        other_clean_args=clean_args)
    if param_dict['moments']:
        for moment in param_dict['moments']:
            _ = additional_casa_commands.immoments(
                script, clean_image.image, clean_image.image, moment)
    if clargs.verbose:
        utils.eprint(script)
    if not clargs.generate:
        _ = casa_instance.run_script(script, timeout=None)
    if clargs.generate:
        utils.output_to_file(script, output_basename)
    if clargs.verbose:
        utils.eprint("Data products present in {}".format(clean_image))
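For reference, the concatenation command built above is ultimately executed as CASA's concat task; a bare-bones sketch of that underlying call is shown below (CASA 6 style import, with placeholder measurement-set names; treat the exact form drivecasa emits as an assumption):

from casatasks import concat  # CASA 6 modular import; older CASA exposes concat globally

# Placeholder measurement sets standing in for the 7m and 12m inputs.
concat(vis=['seven_meter.ms', 'twelve_meter.ms'],
       concatvis='output_basename.concat.ms')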
[ "def computeUV(mdl,xx,yy,uu,vv):\n dshape=uu.shape\n uu=uu.flatten()\n vv=vv.flatten()\n corr=np.zeros(uu.shape,dtype=complex)\n for ind in range(uu.shape[0]):\n corr[ind]=idft2d(mdl,xx,yy,uu[ind],vv[ind],norm=mdl.size)\n return np.reshape(corr,dshape)", "def plane_unwrapping():\r\n pass", "def apply_uvs(mesh, bsp_verts):\n\n mesh.uv_textures.new(\"UVs\")\n bm = bmesh.new()\n bm.from_mesh(mesh)\n\n if hasattr(bm.faces, \"ensure_lookup_table\"): \n bm.faces.ensure_lookup_table()\n\n uv_layer = bm.loops.layers.uv[0]\n\n for face_idx, current_face in enumerate(bm.faces):\n current_face.loops[0][uv_layer].uv = bsp_verts[current_face.loops[0].vert.index][1]\n current_face.loops[1][uv_layer].uv = bsp_verts[current_face.loops[1].vert.index][1]\n current_face.loops[2][uv_layer].uv = bsp_verts[current_face.loops[2].vert.index][1]\n \n bm.to_mesh(mesh)", "def mergeVOIs(data):\n\n lz, ly, lx = data[0].shape # The shape of the data (VTK is inverted wrt numpy)\n \n numberOfDataSets = len(data)\n \n cV = camphorBlendedVOIs()\n cV.data = data\n\n cV.importer = [vtk.vtkImageImport() for i in range(numberOfDataSets)]\n cV.image = [vtk.vtkImageMapToColors() for i in range(numberOfDataSets)]\n cV.slice = [vtk.vtkImageResliceToColors() for i in range(numberOfDataSets)]\n\n for i in range(numberOfDataSets):\n cV.importer[i].SetWholeExtent(0, lx - 1, 0, ly - 1, 0, lz - 1)\n cV.importer[i].SetDataExtentToWholeExtent()\n cV.importer[i].SetDataScalarTypeToUnsignedChar()\n cV.importer[i].SetImportVoidPointer(data[i])\n cV.importer[i].Modified()\n\n # Creates the colormap\n table = VOILookupTables(numberOfDataSets)\n\n # This map is for the RGB components - we render in independentComponents mode\n # so we assign each component a color transfer function and scalar opacity \n opacityMap = vtk.vtkPiecewiseFunction()\n opacityMap.AddPoint(0, 0)\n opacityMap.AddPoint(1, 0.5)\n \n # This map is for the alpha component - we do not display it\n nullMap = vtk.vtkPiecewiseFunction()\n nullMap.AddPoint(0, 0)\n nullMap.AddPoint(1, 0)\n\n colorMaps = [vtk.vtkColorTransferFunction() for i in range(3)]\n for i in range(3):\n colorMaps[i].SetColorSpaceToRGB()\n colorMaps[i].AddRGBPoint(0, 0, 0, 0)\n colorMaps[i].AddRGBPoint(1, int(i == 0), int(i == 1), int(i == 2))\n\n # Matrix to initialize the slices reslice plane\n sagittal = vtk.vtkMatrix4x4()\n sagittal.DeepCopy((0, 1, 0, 0,\n 0, 0, 1, 127.5,\n 1, 0, 0, 0,\n 0, 0, 0, 1))\n\n # Initializes the slices\n for i in range(numberOfDataSets):\n cV.slice[i].SetOutputDimensionality(2)\n cV.slice[i].SetInterpolationModeToLinear()\n cV.slice[i].SetResliceAxes(sagittal)\n \n # Sets the volume property's color and opacity maps\n cV.volumeProperty.IndependentComponentsOn()\n for i in range(3):\n cV.volumeProperty.SetColor(i, colorMaps[i])\n cV.volumeProperty.SetScalarOpacity(i, opacityMap)\n cV.volumeProperty.SetScalarOpacity(3, nullMap)\n\n # Creates color image from the VOI data\n for i in range(numberOfDataSets):\n cV.image[i].SetLookupTable(table[i])\n cV.image[i].SetInputConnection(cV.importer[i].GetOutputPort())\n\n # Merges the image together\n cV.blender.RemoveAllInputConnections(0)\n cV.sliceBlender.RemoveAllInputConnections(0)\n cV.blender.SetBlendModeToNormal()\n cV.sliceBlender.SetBlendModeToNormal()\n\n for i in range(numberOfDataSets):\n cV.blender.AddInputConnection(cV.image[i].GetOutputPort())\n cV.slice[i].SetInputConnection(cV.importer[i].GetOutputPort())\n # For some reason, if the first input is RGBA then subsequent inputs become dimmer and dimmer\n if i == 0:\n 
cV.slice[i].SetOutputFormatToRGB()\n else:\n cV.slice[i].SetOutputFormatToRGBA()\n cV.slice[i].SetLookupTable(table[i])\n cV.sliceBlender.AddInputConnection(cV.slice[i].GetOutputPort())\n cV.blender.SetOpacity(i, 0.5)\n cV.sliceBlender.SetOpacity(i, 0.5)\n\n # Adds the alpha channel to the sliceBlender output\n cV.luminance.SetInputConnection(cV.sliceBlender.GetOutputPort())\n cV.append.AddInputConnection(cV.sliceBlender.GetOutputPort())\n cV.append.AddInputConnection(cV.luminance.GetOutputPort())\n cV.append.Update()\n\n # Connects the objects to their mapper\n cV.volumeMapper.SetInputConnection(cV.blender.GetOutputPort())\n cV.sliceMapper.SetInputConnection(cV.append.GetOutputPort())\n\n # Adjusts the properties of the slice\n cV.sliceProperty.SetColorLevel(20)\n cV.sliceProperty.SetColorWindow(20)\n cV.sliceProperty.SetInterpolationTypeToNearest()\n\n cV.volume.SetMapper(cV.volumeMapper)\n cV.sliceActor.SetMapper(cV.sliceMapper)\n\n cV.volume.SetProperty(cV.volumeProperty)\n cV.sliceActor.SetProperty(cV.sliceProperty)\n\n cV.numberOfDataSets = numberOfDataSets\n cV.numberOfTimeFrames = 1\n cV.dimensions = [lx, ly, lz]\n \n return cV", "def fix_uvsets(cls):\n for node in pm.selected():\n shape = node.getShape()\n\n # get current uvset\n uvset_names = pm.polyUVSet(shape, query=True, allUVSets=True)\n\n if \"DiffuseUV\" in uvset_names:\n if len(uvset_names) == 1:\n # Copy values of uvset \"DiffuseUV\" to \"map1\"\n pm.polyUVSet(shape, copy=True, nuv=\"map1\", uvSet=\"DiffuseUV\")\n\n # set current uvset to map1\n pm.polyUVSet(shape, currentUVSet=True, uvSet=\"map1\")\n\n # delete uv set\n # pm.polyUVSet( shape, delete=True, uvSet='DiffuseUV')\n else:\n if \"map1\" in uvset_names:\n # set current uvset to map1\n uvs = shape.getUVs(uvSet=\"map1\")\n\n if len(uvs[0]) == 0:\n # Copy values of uvset \"DiffuseUV\" to \"map1\"\n pm.polyUVSet(\n shape, copy=True, nuv=\"map1\", uvSet=\"DiffuseUV\"\n )", "def render_planes(self):\n\n # create outline which provides context around the data\n outlineData = vtk.vtkOutlineFilter()\n outlineData.SetInputConnection(self.reader.GetOutputPort())\n outlineData.Update()\n\n mapOutline = vtk.vtkPolyDataMapper()\n mapOutline.SetInputConnection(outlineData.GetOutputPort())\n\n outline = vtk.vtkActor()\n colors = vtk.vtkNamedColors()\n outline.SetMapper(mapOutline)\n outline.GetProperty().SetColor(colors.GetColor3d(\"Black\"))\n\n # create black/white lookup table\n bwLut = vtk.vtkLookupTable()\n bwLut.SetTableRange(0,2000)\n bwLut.SetSaturationRange(0,0)\n bwLut.SetHueRange(0,0)\n bwLut.SetValueRange(0,1)\n bwLut.Build()\n\n # create lookup table of full hue circle (HSV)\n hueLut = vtk.vtkLookupTable()\n hueLut.SetTableRange(0,2000)\n hueLut.SetSaturationRange(1,1)\n hueLut.SetHueRange(0,1)\n hueLut.SetValueRange(1,1)\n hueLut.Build()\n\n # create lookup table of single hue (having range in saturation of hue)\n satLut = vtk.vtkLookupTable()\n satLut.SetTableRange(0,2000)\n satLut.SetSaturationRange(0,1)\n satLut.SetHueRange(0.6,0.6)\n satLut.SetValueRange(1,1)\n satLut.Build()\n\n # create sagittal plane (1/3)\n sagittalColors = vtk.vtkImageMapToColors()\n sagittalColors.SetInputConnection(self.reader.GetOutputPort())\n sagittalColors.SetLookupTable(bwLut)\n sagittalColors.Update()\n\n sagittal = vtk.vtkImageActor()\n sagittal.GetMapper().SetInputConnection(sagittalColors.GetOutputPort())\n # sagittal.SetDisplayExtent(minX, maxX, minY, maxY, minZ, maxZ)\n sagittal.SetDisplayExtent(128,128, 0,255, 0,92)\n sagittal.ForceOpaqueOn()\n\n # create axial plane (2/3)\n 
axialColors = vtk.vtkImageMapToColors()\n axialColors.SetInputConnection(self.reader.GetOutputPort())\n axialColors.SetLookupTable(hueLut)\n axialColors.Update()\n\n axial = vtk.vtkImageActor()\n axial.GetMapper().SetInputConnection(axialColors.GetOutputPort())\n axial.SetDisplayExtent(0,255, 0,255, 46,46)\n axial.ForceOpaqueOn()\n\n # create coronal plane (3/3)\n coronalColors = vtk.vtkImageMapToColors()\n coronalColors.SetInputConnection(self.reader.GetOutputPort())\n coronalColors.SetLookupTable(satLut)\n coronalColors.Update()\n\n coronal = vtk.vtkImageActor()\n coronal.GetMapper().SetInputConnection(coronalColors.GetOutputPort())\n coronal.SetDisplayExtent(0,255, 128,128, 0,92)\n coronal.ForceOpaqueOn()\n\n # render planes\n self.ren.AddActor(outline)\n self.ren.AddActor(sagittal)\n self.ren.AddActor(axial)\n self.ren.AddActor(coronal)", "def transfer_uvs(meshes):\n\n def mfn_object(mesh):\n \"\"\"Get shape mesh function\"\"\"\n sel_list = om2.MGlobal.getSelectionListByName(mesh.name())\n base = sel_list.getDagPath(0)\n mfn_object = om2.MFnMesh(base)\n return mfn_object\n\n # Get source UVs\n source_dag = mfn_object(meshes[0])\n source_uvs = source_dag.getUVs()\n\n # Find Orig Shape if exists in targets.\n for mesh in meshes[1:]:\n\n target_mesh_function = None\n\n # Try to find Orig shape. This will exist for skinned meshes.\n for shape in mesh.listRelatives():\n if shape.find('Orig') != -1:\n target_mesh_function = mfn_object(shape)\n\n if not target_mesh_function:\n target_mesh_function = mfn_object(mesh.getShape())\n\n target_mesh_function.setUVs(source_uvs[0], source_uvs[1], uvSet='map1')\n\n pymel.ogs(reset=True) # update the viewport, clean out cached geo\n pymel.dgdirty(allPlugs=True) # Making sure everything in the scene has evaluated.", "def InterpolateSurfaceVectorsWithPlane():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Plane...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,Vectors1,50,8)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.5)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.5)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Plane\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfacePlaneVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def 
makeVOIs(data):\n\n lz, ly, lx = data.shape # The shape of the data (VTK is inverted wrt numpy)\n\n numberOfDataSets = 1\n\n cV = camphorVOIs()\n cV.data = data\n\n cV.importer = [vtk.vtkImageImport()]\n cV.image = [vtk.vtkImageMapToColors()]\n cV.slice = [vtk.vtkImageResliceToColors()]\n\n cV.importer[0].SetWholeExtent(0, lx - 1, 0, ly - 1, 0, lz - 1)\n cV.importer[0].SetDataExtentToWholeExtent()\n cV.importer[0].SetDataScalarTypeToUnsignedChar()\n cV.importer[0].SetImportVoidPointer(data)\n cV.importer[0].Modified()\n\n # Creates the colormap\n table = VOILookupTables(numberOfDataSets)\n\n # This map is for the RGB components - we render in independentComponents mode\n # so we assign each component a color transfer function and scalar opacity\n opacityMap = vtk.vtkPiecewiseFunction()\n opacityMap.AddPoint(0, 0)\n opacityMap.AddPoint(1, 0.5)\n\n # This map is for the alpha component - we do not display it\n nullMap = vtk.vtkPiecewiseFunction()\n nullMap.AddPoint(0, 0)\n nullMap.AddPoint(1, 0)\n\n colorMaps = [vtk.vtkColorTransferFunction() for i in range(3)]\n for i in range(3):\n colorMaps[i].SetColorSpaceToRGB()\n colorMaps[i].AddRGBPoint(0, 0, 0, 0)\n colorMaps[i].AddRGBPoint(1, int(i == 0), int(i == 1), int(i == 2))\n\n # Matrix to initialize the slices reslice plane\n sagittal = vtk.vtkMatrix4x4()\n sagittal.DeepCopy((0, 1, 0, 0,\n 0, 0, 1, 127.5,\n 1, 0, 0, 0,\n 0, 0, 0, 1))\n\n # Initializes the slices\n cV.slice[0].SetOutputDimensionality(2)\n cV.slice[0].SetInterpolationModeToLinear()\n cV.slice[0].SetResliceAxes(sagittal)\n cV.slice[0].SetInputConnection(cV.importer[0].GetOutputPort())\n cV.slice[0].SetLookupTable(table[0])\n\n # Sets the volume property's color and opacity maps\n cV.volumeProperty.IndependentComponentsOn()\n for i in range(3):\n cV.volumeProperty.SetColor(i, colorMaps[i])\n cV.volumeProperty.SetScalarOpacity(i, opacityMap)\n cV.volumeProperty.SetScalarOpacity(3, nullMap)\n\n # Creates color image from the VOI data\n cV.image[0].SetLookupTable(table[0])\n cV.image[0].SetInputConnection(cV.importer[0].GetOutputPort())\n\n # Connects the objects to their mapper\n cV.volumeMapper.SetInputConnection(cV.image[0].GetOutputPort())\n cV.sliceMapper.SetInputConnection(cV.slice[0].GetOutputPort())\n\n # Adjusts the properties of the slice\n cV.sliceProperty.SetColorLevel(20)\n cV.sliceProperty.SetColorWindow(20)\n cV.sliceProperty.SetInterpolationTypeToNearest()\n\n cV.volume.SetMapper(cV.volumeMapper)\n cV.sliceActor.SetMapper(cV.sliceMapper)\n\n cV.volume.SetProperty(cV.volumeProperty)\n cV.sliceActor.SetProperty(cV.sliceProperty)\n\n cV.numberOfDataSets = numberOfDataSets\n cV.numberOfTimeFrames = 1\n cV.dimensions = [lx, ly, lz]\n\n return cV", "def __makeColorDataSpace__(self):\r\n\r\n # compU = np.ndarray.flatten( self.image[:,:,1] )\r\n # compV = np.ndarray.flatten( self.image[:,:,2] )\r\n compU = np.reshape( self.image[:,:,1], (-1,1) )\r\n compV = np.reshape( self.image[:,:,2], (-1,1) )\r\n compUV = np.transpose(np.array((compU[:,0],compV[:,0])))\r\n # print compU.shape, compV.shape, compUV.shape\r\n # self.colorSpace[ compU,compV ] = 1\r\n for u,v in compUV :\r\n # print (u, v)\r\n self.colorSpace[ u,v ] += 1\r\n\r\n # print self.segmentedImage\r\n # print compV.max(), compU.max()\r\n # plt.plot( compV.tolist(), compU.tolist(),'bo' )\r\n # plt.show()\r\n # exit(1)\r", "def weingarten(self, X, U, V):", "def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = 
np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v", "def test_uv_controller(self):\n def check_uv_controller(nif):\n # XXX some uv controlled geometries need such node...\n #nif_animnode = nif.root_blocks[0].find(\n # block_type = NifFormat.NiBSAnimationNode)\n #assert(nif_animnode)\n nif_node = nif.root_blocks[0].find(\n block_type=NifFormat.NiTriBasedGeom)\n assert(nif_node.name.startswith(\"TestUVController\"))\n nif_uvctrl = nif_node.get_controllers()[0]\n assert(nif_uvctrl)\n nif_uvdata = nif_uvctrl.data\n assert(nif_uvdata)\n nif_ofsu = nif_uvdata.uv_groups[0]\n assert(nif_ofsu.num_keys == 2)\n\n # loading the test nif\n # (this nif has emit color 1,0,1 and emitmulti 3)\n # stencil test\n nif = self.test(filename='test/nif/mw/test_uvcontroller.nif')\n # double check that nif has the claimed values\n check_uv_controller(nif)\n # check that the controllers are present in blender\n obj = Blender.Object.Get(\"TestUVController\")\n mat = obj.data.materials[0]\n # check that there is material offset animation data\n assert(mat.ipo)\n assert(mat.ipo[Blender.Ipo.MA_OFSX])\n # export\n nif = self.test(\n filename='test/nif/mw/_test_uvcontroller.nif',\n config=dict(EXPORT_VERSION='Morrowind'),\n selection=['TestUVController'],\n next_layer=False)\n # check that nif was correctly exported\n check_uv_controller(nif)\n # export again, with BSAnimationNode\n nif = self.test(\n filename='test/nif/mw/_test_bsanimation_uvcontroller.nif',\n config=dict(\n EXPORT_VERSION='Morrowind',\n EXPORT_MW_BS_ANIMATION_NODE=True),\n selection=['TestUVController'],\n next_layer=True)\n # check that nif was correctly exported\n check_uv_controller(nif)\n assert(isinstance(nif.root_blocks[0], NifFormat.NiBSAnimationNode))\n assert(nif.root_blocks[0].flags == 42)", "def camUvDeleteAnim ():\n\n help(camUvDeleteAnim)\n\n import hou\n nodeSelect= hou.selectedNodes()\n pink=hou.Color((0.98,0.275,0.275))\n black=hou.Color ((0,0,0))\n\n for node in nodeSelect:\n parent = node.parent()\n parentString= parent.name()\n getName= node.name()\n connectNode = node.outputs()\n outNull = node.createOutputNode(\"null\",\"inCamDeleteAnim\")\n outNull.setPosition(node.position())\n outNull.move([0, -.75])\n outNull.setColor(black)\n #create left branch\n blackColor= outNull.createOutputNode(\"color\",\"black\")\n blackColor.move([-.75, -.75])\n blackColor.setParms({\"colorr\":0,\"colorg\":0,\"colorb\":0})\n #create right branch\n wrangleSnippet=outNull.createOutputNode(\"attribwrangle\",\"camUvDelete\")\n wrangleSnippet.setColor(pink)\n wrangleSnippet.setParms({\"snippet\":\"\"\"\nvector ndc=toNDC(\"/obj/root/transform/camera/cambaked/ppCam/ppCamShape\",@P); // DONT FORGET TO FILL CAM PATH\n@Cd = ndc; // viz\nfloat secu = 0.1;\nif(ndc.x+secu<0||ndc.x-secu>1||ndc.y+secu<0||ndc.y-secu>1||ndc.z>1){\nremovepoint(geoself(),@ptnum);\n}\"\"\"}) \n wrangleSnippet.move([0.75, -.75])\n redColor= wrangleSnippet.createOutputNode(\"color\",\"red\")\n redColor.move([0, -.35])\n redColor.setParms({\"colorr\":1,\"colorg\":0,\"colorb\":0}) \n #create solver\n mysolver = blackColor.createOutputNode(\"solver\",\"transferColor\")\n solverName = mysolver.name()\n mysolver.move([0, -1.5]) \n mysolver.setInput(1,redColor)\n mytransfert = hou.node('/obj/'+parentString+'/'+solverName+'/d/s').createNode('attribtransfer','transferUv')\n hou.node('/obj/'+parentString+'/'+solverName+'/d/s/transferUv').setInput(0,hou.node('/obj/'+parentString+'/'+solverName+'/d/s/Prev_Frame'))\n 
hou.node('/obj/'+parentString+'/'+solverName+'/d/s/transferUv').setInput(1,hou.node('/obj/'+parentString+'/'+solverName+'/d/s/Input_2'))\n mytransfert.setDisplayFlag(True) #set display flag true\n mytransfert.setParms({\"thresholddist\":0.1})\n time = mysolver.createOutputNode(\"timeshift\",\"Fend\")\n time.parm(\"frame\").deleteAllKeyframes()\n time.setParms({\"frame\":240})\n \n removePointVex=time.createOutputNode(\"attribwrangle\",\"deleteGeo\")\n removePointVex.setColor(pink)\n removePointVex.setParms({\"snippet\":\"\"\"\nif(@Cd.x<0.5)removepoint(0,@ptnum);\n\"\"\"}) \n removePointVex.setDisplayFlag(True)\n\n print(\"--- Don't forget to check the campath in camUvdelete ---\")", "def add_subdivision(self):\n temp_sub_vertices = []\n for plane in (self.subdivision_list):\n current_mids = []\n mid_m_01 = Vec3d(0, 0, 0, 0)\n mid_m_12 = Vec3d(0, 0, 0, 0)\n mid_m_20 = Vec3d(0, 0, 0, 0)\n\n mid_m_01.x = (plane[0].x + plane[1].x) / 2\n mid_m_01.y = (plane[0].y + plane[1].y) / 2\n mid_m_01.z = (plane[0].z + plane[1].z) / 2\n mid_m_01.w = plane[0].w\n\n mid_m_12.x = (plane[1].x + plane[2].x) / 2\n mid_m_12.y = (plane[1].y + plane[2].y) / 2\n mid_m_12.z = (plane[1].z + plane[2].z) / 2\n mid_m_12.w = plane[1].w\n\n mid_m_20.x = (plane[2].x + plane[0].x) / 2\n mid_m_20.y = (plane[2].y + plane[0].y) / 2\n mid_m_20.z = (plane[2].z + plane[0].z) / 2\n mid_m_20.w = plane[2].w\n\n current_mids = [mid_m_01, mid_m_12, mid_m_20]\n temp_sub_vertices.append(current_mids)\n\n for index in range(len(current_mids)):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = plane[index].x\n v0.y = plane[index].y\n v0.z = plane[index].z\n\n v1.x = current_mids[index].x\n v1.y = current_mids[index].y\n v1.z = current_mids[index].z\n\n v2.x = current_mids[index - 1].x\n v2.y = current_mids[index - 1].y\n v2.z = current_mids[index - 1].z\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices", "def blackbodyUV(temp):\n lam=lambda wl: planckian(temp, wl)\n xyz=spectrumToTristim(perfectrefl, lam)\n uvy=xyzTouvY(xyz)\n return [uvy[0], uvy[1]*2.0/3]", "def setUVs(self, uArray, vArray, faceIds, uvSet=None):\n \n pass", "def _getuv(self):\n\n # we need to sample the geometry, just do uniformly now\n nu = 20\n nv = 20\n\n # define the points on the parametric domain to sample\n ul = np.linspace(0, 1, nu + 1)\n vl = np.linspace(0, 1, nv + 1)\n uu, vv = np.meshgrid(ul, vl)\n uu = uu.flatten()\n vv = vv.flatten()\n\n # now create a concentrated uv array\n uv = np.dstack((uu, vv)).squeeze()\n\n return uv.copy()", "def create_vtu_file_v_and_a(\n path,\n nNodes,\n v_value_mesh_twice_interp,\n a_value_mesh_twice_interp,\n filename,\n orig_vel,\n orig_alpha,\n iTime,\n nDim=3,\n):\n nDim += 1\n velocity_field = np.zeros((nNodes, 3))\n velocity_field[:, 0:nDim] = np.transpose(\n v_value_mesh_twice_interp[0:nDim, :]\n )\n\n # streamwise component only\n v_difference = np.zeros((nNodes, 3))\n\n v_difference[:, 0:nDim] = (\n np.transpose(v_value_mesh_twice_interp[0:nDim, :]) - orig_vel\n )\n\n # streamwise component only\n v_difference = v_difference / np.max(velocity_field)\n\n #### ALPHA ####\n alpha_field = np.zeros((nNodes, 3))\n alpha_field[:, 0:nDim] = np.transpose(a_value_mesh_twice_interp[0:nDim, :])\n\n # streamwise component only\n a_difference = np.zeros((nNodes, 3))\n a_difference[:, 0:nDim] = (\n np.transpose(a_value_mesh_twice_interp[0:nDim, :]) - orig_alpha\n )\n\n # streamwise component only\n a_difference = a_difference / np.max(alpha_field)\n\n clean_vtk 
= get_clean_vtk_file(filename)\n new_vtu = vtktools.vtu()\n new_vtu.ugrid.DeepCopy(clean_vtk.ugrid)\n new_vtu.filename = path + \"recon_\" + str(iTime) + \".vtu\"\n new_vtu.AddField(\"Velocity\", velocity_field)\n new_vtu.AddField(\"V_Original\", orig_vel)\n new_vtu.AddField(\"Velocity_diff\", v_difference)\n\n new_vtu.AddField(\"Alpha\", alpha_field)\n new_vtu.AddField(\"A_Original\", orig_alpha)\n new_vtu.AddField(\"Alpha_diff\", a_difference)\n new_vtu.Write()\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drive the feather combination. Functionally, this means cleaning the individual measurement sets (ms) separately, imaging each of them, and feathering the two together.
def _drive_feather(param_dict, clargs, output_basename, casa_instance): # todo later -> the imstat stuff script = [] thresh, seven_meter_clean_args = utils.param_dict_to_clean_input( param_dict, seven_meter=True) _, twelve_meter_clean_args = utils.param_dict_to_clean_input( param_dict, seven_meter=False) if clargs.verbose: utils.eprint('Seven meter clean args {}'.format( seven_meter_clean_args)) utils.eprint('Twelve meter clean args {}'.format( twelve_meter_clean_args)) utils.eprint('Running individual cleaning...') seven_meter_cleaned = drivecasa.commands.reduction.clean( script, niter=10000, vis_paths=param_dict['seven_meter_filename'], threshold_in_jy=thresh, other_clean_args=seven_meter_clean_args, out_path=os.path.abspath(output_basename)) twelve_meter_cleaned = drivecasa.commands.reduction.clean( script, niter=10000, vis_paths=param_dict['twelve_meter_filename'], threshold_in_jy=thresh, other_clean_args=twelve_meter_clean_args, out_path=os.path.abspath(output_basename)) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) if clargs.verbose: utils.eprint('Individual cleanings complete. Now feathering.') script = [] feathered_image = additional_casa_commands.feather(script, output_basename=output_basename, highres=twelve_meter_cleaned.image, lowres=seven_meter_cleaned.image, weighting=_calc_feather_weighting(param_dict)) if clargs.verbose: utils.eprint("Feather script") utils.eprint(script) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) script = [] if param_dict['moments']: for moment in param_dict['moments']: _ = additional_casa_commands.immoments( script, feathered_image, feathered_image, moment) if clargs.verbose: utils.eprint("Moments") utils.eprint(script) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) return
[ "def formationFlying(): ### PENDING\n\tpass", "def feather_clean(in_directory):\n # in_directory = UpdateSP500Data.TOP_LEVEL_PATH / 'feather'\n Path.is_dir(in_directory)\n all_files = os.listdir(in_directory)\n for item in all_files:\n if item.endswith('.feather'):\n # Remove options with strikes at 5$\n option_df = pd.read_feather(in_directory / item)\n idx = option_df['strike'] == 5\n option_df = option_df.drop(option_df.index[idx])\n # # Remove Quarterly options\n # idx2 = option_df['root'] == 'SPXQ'\n # option_df = option_df.drop(option_df.index[idx2])\n # # Remove Monthly options\n # idx2 = option_df['root'] == 'SPXM'\n # option_df = option_df.drop(option_df.index[idx2])\n # feather.write_dataframe(option_df, str(in_directory / item))\n option_df.to_feather(str(in_directory / item))", "def feather_one(sd_map,int_map,int_pb,tag=''):\n\n outfile=int_map+tag+'.feather'\n outfile_pbcord=outfile+'.pbcor'\n outfile_uncorr=outfile\n #rmtables(outfile_uncorr)\n #rmtables(outfile_pbcord)\n rmtables(sd_map+\".TMP.intGrid.intPb\")\n imregrid(imagename=sd_map,template=int_map,output=sd_map+\".TMP.intGrid\",overwrite=True)\n immath(imagename=[sd_map+\".TMP.intGrid\",int_pb],expr='IM0*IM1',outfile=sd_map+\".TMP.intGrid.intPb\")\n feather(imagename=outfile_uncorr,highres=int_map,lowres=sd_map+\".TMP.intGrid.intPb\")\n immath(imagename=[outfile_uncorr,int_pb],expr='IM0/IM1',outfile=outfile_pbcord)\n\n # clean up after self\n rmtables(sd_map+\".TMP.intGrid.intPb\")\n rmtables(sd_map+\".TMP.intGrid\")\n\n return [outfile_pbcord,outfile_uncorr]", "def cleanImage(datasetPath: str, imageName: str, medullaMinPart=0.05, onlyMasks=False):\n # Defining all the useful paths\n currentImageDirPath = os.path.join(datasetPath, imageName)\n imagesDirPath = os.path.join(currentImageDirPath, 'images')\n imageFileName = os.listdir(imagesDirPath)[0]\n imagePath = os.path.join(imagesDirPath, imageFileName)\n image = cv2.imread(imagePath)\n\n # Getting the cortex image\n medulla = np.zeros((image.shape[0], image.shape[1]), np.uint8)\n cortexDirPath = os.path.join(currentImageDirPath, 'cortex')\n cortexPresent = os.path.exists(cortexDirPath)\n if cortexPresent:\n cortexFileName = os.listdir(cortexDirPath)[0]\n cortexFilePath = os.path.join(cortexDirPath, cortexFileName)\n if not onlyMasks:\n # Copying the full image into the correct directory\n fullImageDirPath = os.path.join(currentImageDirPath, 'full_images')\n os.makedirs(fullImageDirPath, exist_ok=True)\n cv2.imwrite(os.path.join(fullImageDirPath, imageFileName), image)\n\n # Cleaning the image\n cortex = cv2.imread(cortexFilePath)\n image = cv2.bitwise_and(image, cortex)\n cv2.imwrite(imagePath, image)\n\n # Starting medulla mask creation\n cortex = cv2.imread(cortexFilePath, cv2.IMREAD_UNCHANGED)\n invertedCortex = np.bitwise_not(cortex)\n medulla = cortex\n\n backgroundDirPath = os.path.join(currentImageDirPath, 'fond')\n if os.path.exists(backgroundDirPath):\n for backgroundFile in os.listdir(backgroundDirPath):\n # Second Medulla mask creation step\n backgroundFilePath = os.path.join(backgroundDirPath, backgroundFile)\n backgroundImage = cv2.imread(backgroundFilePath, cv2.IMREAD_UNCHANGED)\n medulla = np.bitwise_or(medulla, backgroundImage)\n\n # Cleaning background image if cortex mask is present\n if cortexPresent:\n # background = background && not(cortex)\n backgroundImage = np.bitwise_and(backgroundImage, invertedCortex)\n cv2.imwrite(backgroundFilePath, backgroundImage)\n\n # Last Medulla mask creation step if medulla > 1% of image\n black, white = 
getBWCount(medulla)\n bwRatio = black / (black + white)\n if bwRatio >= medullaMinPart:\n # medulla = !(background || cortex)\n medulla = np.bitwise_not(medulla)\n medullaDirPath = os.path.join(currentImageDirPath, 'medullaire')\n os.makedirs(medullaDirPath, exist_ok=True)\n medullaFilePath = os.path.join(medullaDirPath, imageName + '_medulla.png')\n cv2.imwrite(medullaFilePath, medulla)", "def watering():", "def test_apply_father_wavelet_dirac(self):\n pass", "def new_farm_damage(turbineX,turbineY,windDirections,windFrequencies,atm_free,atm_close,atm_far,Omega_free,Omega_waked,free_speed,waked_speed,TI=0.11):\n\n damage = np.zeros_like(turbineX)\n nDirections = len(windDirections)\n nTurbines = len(turbineX)\n\n for j in range(nDirections):\n turbineXw, turbineYw = fast_calc_aep.windframe(windDirections[j], turbineX, turbineY)\n for i in range(nTurbines):\n cc_moments = new_get_cc_loads(turbineXw,turbineYw,i,Omega_free,Omega_waked,free_speed,waked_speed,len(atm_free),TI=0.11,wind_speed=8.)\n for k in range(len(cc_moments)):\n cc_moments[k] = round(cc_moments[k],4)\n damage_cc = calc_damage(cc_moments,windFrequencies[j],fos=3)\n damage_atm = new_damage_atm(turbineXw,turbineYw,i,atm_free,atm_close,atm_far,np.mean(cc_moments),windFrequencies[j],TI=0.11,wind_speed=8.,rotor_diameter=126.4)\n damage[i] += damage_cc*2.\n damage[i] += damage_atm\n return damage", "def runBrighterFatter():\n RunData([getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/')[0],], out='I800nmlow',\n wavelength='l800l')\n RunData([getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/')[2],], out='I800nmmed',\n wavelength='l800m')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[4],], out='I800nmhigh',\n wavelength='l800h')", "def process_fuel(self):\n\n # read bumat1 (output composition)\n self.bumat_dict, self.mat_def = self.read_bumat(self.input_file, 1)\n self.core = self.dict_to_array(self.bumat_dict)\n\n # record core composition before reprocessing to db_0\n self.bu_adens_db_0[self.current_step, :] = self.core\n\n # start reprocessing and refilling\n # reprocess out pa233\n # every 1 step = 3days\n th232_id = self.find_iso_indx('Th232')\n # add back u233 to core\n # !! where is this refill coming from?\n u233_to_add = self.tank_adens_db[self.current_step, self.find_iso_indx(\n 'Pa233')]\n self.refill(self.find_iso_indx('U233'), u233_to_add)\n\n # remove volatile gases\n # every 1 step = 3 days\n volatile_gases = self.find_iso_indx(['Kr', 'Xe', 'Se', 'Nb', 'Mo', 'Tc', 'Ru',\n 'Rh', 'Pd', 'Ag', 'Sb', 'Te'])\n self.rem_adens[0, ] = self.remove_iso(volatile_gases, 1)\n\n # !! 
this rem_adens indexing looks wrong\n # remove seminoble metals\n # every 67 steps = 201 days\n if self.current_step % 67 == 0:\n se_noble_id = self.find_iso_indx(['Zr', 'Cd', 'In', 'Sn'])\n self.rem_adens[1, ] = self.remove_iso(se_noble_id, 1)\n\n # remove volatile fluorides\n # every 20 steps = 60 days\n if self.current_step % 20 == 0:\n vol_fluorides = self.find_iso_indx(['Br', 'I'])\n self.rem_adens[2, ] = self.remove_iso(vol_fluorides, 1)\n\n # remove REEs\n # evrey 17 steps = 50 days\n if self.current_step % 17 == 0:\n rees_id = self.find_iso_indx(['Y', 'Gd', 'La', 'Ce', 'Pr',\n 'Nd', 'Pm', 'Sm'])\n self.rem_adens[3, ] = self.remove_iso(rees_id, 1)\n\n # remove Eu\n # evrey 167 steps = 500 days\n if self.current_step % 167 == 0:\n eu_id = self.find_iso_indx('Eu')\n self.rem_adens[4, ] = self.remove_iso(eu_id, 1)\n\n # remove Rb, Sr, Cs, Ba\n # every 1145 steps = 3435 days\n if self.current_step % 1145 == 0:\n discard_id = self.find_iso_indx(['Rb', 'Sr', 'Cs', 'Ba'])\n self.rem_adens[4, ] = self.remove_iso(discard_id, 1)\n\n # remove np-237, pu-242\n # every 1946 steps = 16 years\n if self.current_step % 1946 == 0:\n higher_nuc = self.find_iso_indx(['Pu237', 'Pu242'])\n self.rem_adens[4, ] = self.remove_iso(higher_nuc, 1)\n\n # refill th232 to keep adens constant\n # do it every time\n # if want to do it less often do:\n # if current_step % time == 0:\n self.th_adens_db[self.current_step, ] = self.maintain_const(th232_id,\n self.th232_adens_0)\n\n # write the processed material to mat file for next run\n self.write_mat_file()", "def applyMorphologicalCleaning(self, image):", "def ChangeWeather():\n if interface.weather == \"spring\":\n print(\"Lake expands in spring\")\n \n # I'm recording the elevation of all water pixel and non-water within 15 pixels (if gains elevation no more than 1 meter)\n # It will be easier to determine if the pixel gains evelation more than 1 meter\n tempElevations = interface.elevations.copy() \n \n # Found the water edge\n waterEdge = []\n # Find the edge of water\n for x in range(1, interface.width):\n for y in range(1, interface.height):\n if isWater(x, y):\n # Determine if water edge, find the function below\n if checkSurrending(x, y):\n waterEdge.append((x, y));\n # Increase the elevation by 1 here to reduce the time complexity\n tempElevations[y][x] = tempElevations[y][x] + 1\n \n \n # Find and color the surging water\n for i in range(15):\n # temp for BFS, store the water edge of each level\n temp = [] \n for point in waterEdge:\n # search all 8 pixels around\n for i in range(-1, 2):\n for j in range(-1, 2):\n tempX = point[0] + i\n tempY = point[1] + j\n # check if pixel still in image\n if (tempX > 0) and (tempX < interface.width) and (tempY > 0) and (tempY < interface.height):\n # find the non-water pixel\n if interface.pix[tempX, tempY][:3] != (0,0,255) and interface.pix[tempX, tempY][:3] != (205,0,101):\n # find the pixel will be underwater\n if tempElevations[tempY][tempX] < tempElevations[point[1]][point[0]]:\n # then it's underwater now\n tempElevations[tempY][tempX] = tempElevations[point[1]][point[0]]\n interface.draw.point((tempX, tempY), fill=(135,72,12))\n temp.append((tempX, tempY))\n # prepare for next level of searching\n waterEdge = temp\n elif interface.weather == \"summer\":\n print(\"Nothing changed, summer is the default weather.\") # Default weather, no action required\n elif interface.weather == \"fall\":\n print(\"The speed on easy movement forest is now slower\") # No map changeing required in fall\n elif interface.weather == 
\"winter\":\n print(\"Lake partially freeze in winter\") # Winter, similar to spring\n # Found the water edge\n waterEdge = []\n # Find the edge of water\n for x in range(1, interface.width):\n for y in range(1, interface.height):\n if isWater(x, y):\n if checkSurrending(x, y):\n waterEdge.append((x, y));\n \n \n # Found and color the frozen water\n for level in range(7):\n temp = []\n for point in waterEdge:\n \n interface.draw.point(point, fill=(66,233,245))\n \n # Draw first then search, no need for search if on level 6 (7th)\n if level < 6:\n for i in range(-1, 2):\n for j in range(-1, 2):\n tempX = point[0] + i\n tempY = point[1] + j\n if (tempX > 0) and (tempX < interface.width) and (tempY > 0) and (tempY < interface.height):\n if interface.pix[tempX, tempY][:3] == (0,0,255):\n temp.append((tempX, tempY))\n \n waterEdge = temp\n else:\n raise Exception(\"Not a valid weather type\")", "def processFoil(self):\n \n # Split airfoil in upper and lower portions\n self.__airfoilSplit()\n \n # Interpolate\n self.__hinterpolate()", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def drive(self, kilometres_driven):\n self.fuel -= (self.litres_per_kilometre * kilometres_driven)", "def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))", "def removeInsignificant(self):\n #TODO make sure this method now works AFTER meanCurves and analyseCures have been run\n \n # Searching for curves that are in the noise\n if len(self.plate.noProtein) > 0:\n thresholdm, i = rh.meanSd([self.originalPlate.wells[x].monoThresh for x in self.plate.noProtein])\n for well in self.originalPlate.wells:\n if not self.originalPlate.wells[well].contents.isControl and well not in self.delCurves:\n if self.originalPlate.wells[well].monoThresh > thresholdm/1.15:\n #self.wells[well].fluorescence = None\n self.delCurves.append(well)\n\n # Searching for curves that have overloaded the sensor\n for well in self.wells:\n if well not in self.delCurves:\n mini = self.wells[well].fluorescence[0]\n maxi = self.wells[well].fluorescence[0]\n\n maxInd = 0\n for i in range(len(self.wells[well].fluorescence)):\n if self.wells[well].fluorescence[i] > maxi:\n maxi = self.wells[well].fluorescence[i]\n maxInd = i\n if self.wells[well].fluorescence[i] < mini:\n mini = self.wells[well].fluorescence[i]\n\n diff = maxi - mini\n\n # A boundry defining how much the points can fluctuate and still be considered flat\n lowFlatBoundry = maxi - 0.005*diff\n\n # Look each way to see how many temperature steps the curve stays flat for\n count = 0\n ind = maxInd - 1\n while ind>=0:\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1\n ind -= 1\n else:\n break\n ind = maxInd+1\n while ind<len(self.wells[well].fluorescence):\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1 \n ind += 1\n else:\n break\n if well not in self.delCurves and count >= 10:\n self.delCurves.append(well) \n return", "def run_some_frozen_fermi(defects,T,nelect, e_gap, spin_polarised, dopant_chg, dopant_conc):\n out = []\n scf = frozen_SCFermi( defects, nelect, e_gap, T, dopant_chg, dopant_conc, spin_polarised)\n scf.output()\n with open(\"out.txt\", 'w') as f:\n sp = subprocess.run([\"./frozen-sc-fermi\"],stdout=f)\n text_file = 
open((\"out.txt\") , \"r\")\n lines = text_file.readlines()\n for i in defects:\n #print(i.label)\n for line in lines:\n if re.search(str(i.label)+' '+r'.*?Charge',line) is not None:\n for h in range(i.n_charge_states):\n joop = lines[lines.index(line)+(h+1)]\n coop = joop.split()\n x = coop[1]\n y = float(coop[-2])\n a_dict = {(i.label)+'_'+str(x) : y}\n out.append(a_dict)\n #print(a_dict)\n flat = {k: v for d in out for k, v in d.items()}\n for line in lines:\n if re.search('\\(electrons\\)', line) is not None:\n flat.update({'c_e' : float(line.split()[3])})\n if re.search('\\(holes\\)', line) is not None:\n flat.update({'c_h' : float(line.split()[3])})\n if re.search('\\(eV\\)', line) is not None:\n flat.update({'Fermi_level' : float(line.split()[4])})\n return flat", "def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"", "def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... 
done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate weightings to use for the feather task
def _calc_feather_weighting(param_dict): weightings = param_dict['weightings'] if not isinstance(weightings, (list, tuple)): return 1.0 return float(weightings[1]) / float(weightings[0])
[ "def weight(self):", "def calc_weight(self):\n self.weight = (self.Profile.Atot*1e-6 * self.lStiff*1e-3\n * self.Material.density\n )\n pass", "def get_weights(self):", "def get_weight(self):\n pass", "def CalculateWeightedSum( self ):\n if self.On( BehaviorType.WALL_AVOID ):\n self.SteeringForce += self.WallAvoidance()\n if self.On( BehaviorType.OBSTACLE_AVOID ):\n obstaclelist = list()\n for city in self.Owener.World.Cities:\n for building in city.Buildings:\n if building.IsObstacle:\n obstaclelist.append( building )\n pass\n avoidForce = self.ObstacleAvoidance( obstaclelist ) * self.WeightObstacleAvoidance\n self.SteeringForce += avoidForce\n\n if self.On( BehaviorType.EVADE ):\n if self.TargetAgent1 is not None:\n self.SteeringForce += self.Evade( self.SetTargetAgent1 ) * self.WeightEvade\n\n\n # The next three can be combined for flocking behavior\n if not self.CellSpaceOn:\n pass ########### Since we always use spacial partition, this is not necessary\n else:\n if self.On( BehaviorType.SEPARATION ):\n self.SteeringForce += self.Separation( self.Owener.World.People ) * self.WeightSeparation\n pass\n if self.On( BehaviorType.ALLIGNMENT ):\n self.SteeringForce += self.Alignment( self.Owener.World.People ) * self.WeightAlignment\n pass\n if self.On( BehaviorType.COHESION ):\n self.SteeringForce += self.Cohesion( self.Owener.World.People ) * self.WeightCohesion\n pass\n\n # TODO: A lot of things you should do here.\n if self.On( BehaviorType.WANDER ):\n self.SteeringForce += self.Wander() * self.WanderWeight\n pass\n\n if self.On( BehaviorType.SEEK ):\n self.SteeringForce += self.Seek( self.Owener.World.Crosshair ) * self.WeightSeek\n pass\n \n if self.On( BehaviorType.FLEE ):\n self.SteeringForce += self.Flee( self.Owener.World.Crosshair ) * self.WeightFlee\n pass\n\n if self.On( BehaviorType.ARRIVE ):\n self.SteeringForce += self.Arrive( self.Owener.World.Crosshair, self.ArriveMode ) * self.WeightArrive\n\n if self.On( BehaviorType.PURSUIT ) and self.TargetAgent1 is not None:\n self.SteeringForce += self.Pursuit( self.TargetAgent1 ) * self.WeightPursuit\n\n if self.On( BehaviorType.OFFSET_PURSUIT ):\n # TODO: OFFSET_PURSUIT\n pass\n\n if self.On( BehaviorType.INTERPOSE ):\n if self.TargetAgent1 is not None and self.TargetAgent2 is not Nones:\n self.SteeringForce += self.Interpose( self.TargetAgent1, self.TargetAgent2 ) * self.WeightInterpose;\n\n if self.On( BehaviorType.HIDE ) and self.TargetAgent1 is not None:\n # TODO: HIDE\n pass\n\n if self.On( BehaviorType.FOLLOW_PATH ):\n self.SteeringForce += self.FollowPath() * self.WeightFollowPath\n \n\n # make sure this steering force is not larger than the max force allowed\n self.SteeringForce.Truncate( self.Owener.MaxForce )\n\n return self.SteeringForce", "def calc_total_weight(self):\n panWeight = []\n stiffWeight = []\n for i in range(0, self.Panel.__len__()):\n self.Panel[i].calc_weight()\n panWeight = panWeight + [self.Panel[i].weight]\n panWeight = sum(panWeight)\n try:\n for i in range(0, self.Stiffener.__len__()):\n self.Stiffener[i].calc_weight()\n stiffWeight = stiffWeight + [self.Stiffener[i].weight]\n stiffWeight = sum(stiffWeight)\n except:\n stiffWeight = 0\n\n self.weight = panWeight + stiffWeight\n pass", "def calc_weight(self, auv_state, shark_state):\n theta_auv = auv_state.theta\n \n x_shark, y_shark, z_shark_range, _, _ = shark_state\n\n # Constants used in the calculation\n SIGMA_ALPHA = 0.5 # alpha weight\n SIGMA_RANGE = 100 # range weight\n CONSTANT = 1.2533141375\n MINIMUM_WEIGHT = .001\n\n particle_alpha = 
self.calc_particle_alpha(x_shark, y_shark, theta_auv)\n particle_range = self.calc_particle_range(x_shark, y_shark)\n\n if particle_alpha > 0:\n weight_p = 0.001 + (1.0/(constant) * (math.e**(((-((angle_wrap(float(particle_alpha) - z_shark_range)**2))))/(0.5))))\n elif particle_alpha == 0:\n weight_p = 0.001 + (1.0/(constant) * (math.e**(((-((angle_wrap(float(particle_alpha) - z_shark_range)**2))))/(0.5))))\n else:\n weight_p = 0.001 + (1.0/(constant) * (math.e**(((-((angle_wrap(float(particle_alpha) - z_shark_range)**2))))/(0.5))))\n\n # multiply weights\n weight_p *= MINIMUM_WEIGHT + (1/(SIGMA_RANGE * constant)* (math.e**(((-((particle_range - z_shark_range)**2)))/(20000))))\n\n return weight_p", "def get_weights(self):\n pass", "def get_weight():\n # Set a weight for energy function. Adjusting this value will give different\n # behaviour: the larger the weight, the more influence the constraints have.\n # The weight value should be >= 0.0\t\n return 10.0", "def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight", "def totalWeighting(distance, count, data, n):\n\n weighting = (data)*(distance)*count\n weighting = weighting/(np.sum(np.sum(weighting))) \n return weighting", "def weight_features(self):\n self.train, self.test, self.feature_weight = self.metric.fit_transform()", "def _compute_weight(self):\n sign = self.invoice_id.type in ['in_refund', 'out_refund'] and -1 or 1\n #sign = 1\n for line in self:\n factor = line.uom_id.factor * line.product_id.uom_id.factor\n if factor != 0: \n weight = line.product_id.weight * (line.quantity / factor)\n else:\n weight = line.product_id.weight * line.quantity \n weight = round(weight,3)\n \n weight_signed = weight * sign\n \n if weight!=0:\n if line.discount >= 100: \n price_weight = (line.price_unit * line.quantity) / weight \n else: \n price_weight = line.price_subtotal / weight\n else: \n price_weight = 0\n \n price_weight = round(price_weight ,3) \n \n line.update({\n 'weight': weight,\n 'weight_signed': weight_signed,\n 'price_weight': price_weight\n })", "def icing_weight(self, tech_factor):\n\n R = self.radius*m2f\n chord = self.chord*m2f\n Ablades = self.nblade*self.nrotors *self.radius * self.chord*m2f*m2f # blade plan-form area, sq.ft \n\n wght_DIelect = k_elect * Ablades*tech_factor\n\n# heating element weights \n wght_DIsys = k_rotor * Ablades*tech_factor\n\n#total weight done outside for the rotor\n # total_wt = wght_DIelect + wght_DIsys \n\n deicing = {'blades': wght_DIelect*lb2kg, \n 'equip': wght_DIsys *lb2kg}\n\n# increment assembly mass\n self.mass_assembly = self.mass_assembly + lb2kg*(wght_DIsys + wght_DIelect)/self.nrotors\n return deicing", "def weight(self) -> int:\n weight = 0\n if self.quirk_classes:\n weight += 501 - (\n 1 if callable(self.quirk_classes) else len(self.quirk_classes)\n )\n\n if self.models:\n weight += 401 - (1 if callable(self.models) else len(self.models))\n\n if self.manufacturers:\n weight += 301 - (\n 1 if callable(self.manufacturers) else len(self.manufacturers)\n )\n\n weight += 10 * len(self.cluster_handler_names)\n weight += 5 * len(self.generic_ids)\n if isinstance(self.aux_cluster_handlers, frozenset):\n weight += 1 * len(self.aux_cluster_handlers)\n return weight", "def calculate_weight(self, element, total_cores_used, total_disk_used,\n 
total_memory_used):\n cpu_capacity = self.model.get_resource_from_id(\n resource.ResourceType.cpu_cores).get_capacity(element)\n\n disk_capacity = self.model.get_resource_from_id(\n resource.ResourceType.disk).get_capacity(element)\n\n memory_capacity = self.model.get_resource_from_id(\n resource.ResourceType.memory).get_capacity(element)\n\n score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /\n float(cpu_capacity))\n\n # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0\n if disk_capacity == 0:\n score_disk = 0\n else:\n score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /\n float(disk_capacity))\n\n score_memory = (\n 1 - (float(memory_capacity) - float(total_memory_used)) /\n float(memory_capacity))\n # TODO(jed): take in account weight\n return (score_cores + score_disk + score_memory) / 3", "def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()", "def calculate_weights(self):\n class_col = []\n for c in self.backgrounds:\n class_col.extend([c for _ in range(len(self.trace[\"eps\"]))])\n\n weight_by_class = pd.DataFrame(\n {\n \"Class\": class_col,\n \"Weights\": np.array(\n [self.trace[\"b\"][:, x] for x in range(len(self.backgrounds))]\n ).ravel(),\n }\n )\n self.weights = weight_by_class", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Automatically generate a basename or else use the one provided.
def _gen_basename(param_dict, clargs): if param_dict['output_basename'] in ['', 'auto']: return clargs.input_fname.lower().split('.json')[0] else: return param_dict['output_basename']
[ "def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))", "def basename(arg):\n if not isinstance(arg, str):\n return arg\n try:\n name = os.path.basename(arg)\n except:\n name = arg\n\n fileExtensions = ['el', 'txt', 'dat', 'csv', 'exe', 'out']\n for ext in fileExtensions:\n name = re.sub(r'\\.'+'{}$'.format(ext), '', name)\n name = re.sub(r'\\.', '_', name)\n return name", "def get_basename(cls):\n if not getattr(cls, 'basename', None):\n setattr(cls ,'basename', cls._meta.label_lower.split('.',1)[1])\n return cls.basename", "def purebasename(self):\n return self.namebase", "def built_file_basename(self, name, type=None, **kw):\n if not kw.get('bare'):\n if type == self.EXECUTABLE:\n name = name + self._exe\n elif type == self.STATIC_LIB:\n name = self.lib_ + name + self._lib\n elif type == self.SHARED_LIB:\n name = self.dll_ + name + self._dll\n return name", "def make_basename(readfiles,prefix=None):\n if prefix:\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n# else:\n# if os.listdir(prefix):\n# ## When running under HTCondor, the execution might be restarted\n# ## However, this code in not tolerant of restarts\n# print \"ERROR: Directory {} not empty, exiting!\".format(prefix)\n# return\n\n prefixParentDir, prefix = os.path.split(prefix)\n if not prefix:\n # if prefix has a trailing /, prefixParentDir will have the / stripped and prefix will be empty.\n # so try again\n prefix = os.path.split(prefixParentDir)[1]\n\n\n return prefixParentDir,prefix\n\n ## --prefix is not set on cmd line; Write output to subdir in . \n basename = os.path.split(readfiles[0])[1].split('_')[0]\n\n if not os.path.exists(basename):\n os.makedirs(basename)\n return '.', basename", "def _set_basename(self, value):\n # TODO move to reactor as well (see path)\n if value is None:\n value = (self.__class__.__name__ +\n '_' + self.name.replace(' ', '_'))\n self._basename = value\n self.__log.debug('basename: %s', self.basename)\n self._out_csv = os.path.join(self.path, self.basename + '.csv')\n self.__log.debug('Out CSV %s', self._out_csv)", "def _get_backup_name(basename):\n\n utctime = datetime.datetime.utcnow()\n utctime = utctime.strftime(\"%Y%m%d_%H%M%S\")\n return '{}-{}.json'.format(basename, utctime)", "def base_name(path):\n return os.path.basename(path)", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def make_suffix_string(args, suffix_key):\n try:\n file_suffix = args[suffix_key]\n if file_suffix != \"\" and not file_suffix.startswith('_'):\n file_suffix = '_' + file_suffix\n except KeyError:\n file_suffix = ''\n\n return file_suffix", "def get_basename(cls, length, width, ht, m0, bc=''):\n return (f'mbp_l_{int(round(length * 1e9))}_w_{int(round(width * 1e9))}'\n f'_ht_{int(round(ht * 1e9))}{f\"_bc_{bc}\" if bc != \"\" else \"\"}'\n f'_{m0}')", "def _gen_subname(self, videofile, language, ext):\n root, basename = os.path.split(videofile)\n name, _ = os.path.splitext(basename)\n unique = os.urandom(4).hex()\n unique = os.urandom(4).hex()\n subname = '{basename}.{language}.{ext}'.format(\n basename=name, \n language=language,\n ext=ext)\n p = os.path.join(root, subname)\n return p", "def _generate_file_name(file_extension: str):\n uid = uuid.uuid1()\n return secure_filename('{}{}'.format(uid, file_extension))", "def gen_fake_filename():\n filename = 'notarealfile'\n import 
os\n # if the file exists, keep adding to it\n while os.path.isfile(filename):\n filename += 'x'\n\n return filename", "def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)", "def fname( file_, base=None, new_base=None, new_ext=None ):\n if base and new_base:\n file_ = file_.replace(base, new_base, 1)\n if new_ext:\n file_ = os.path.splitext(file_)[0] + new_ext\n return file_", "def get_random_filename(ext, prefix=\"\"):\n # hex returns a string with no dashes\n return prefix + str(uuid.uuid4().hex) + ext" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an SSH client to run commands on the host machine from inside a container.
def create_client(): hostname = "localhost" username = "she393" password = os.getenv("PASSWORD") client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(hostname=hostname, username=username, password=password) return client
[ "def ssh_client():\n return paramiko.SSHClient()", "def ssh(self):\n logger.debug('Creating SSH connection to bastion')\n cmd = self._container_run_command() + [\n 'python3',\n os.path.join('/app', 'deploy', 'ssh.py'),\n '--vars-file', self._container_file_realpath(self.vars_file),\n ]\n subprocess.call(cmd)", "def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client", "def sshclient_from_instance(instance, ssh_key_file,\r\n host_key_file='~/.ssh/known_hosts',\r\n user_name='root', ssh_pwd=None):\r\n s = FakeServer(instance, ssh_key_file)\r\n return SSHClient(s, host_key_file, user_name, ssh_pwd)", "def connect_instance(tag, key_name, user_name):\n inst = get_instance(tag)\n cmd = boto.manage.cmdshell.sshclient_from_instance(\n inst,\n SSH_FOLDER + key_name + \".pem\",\n user_name=user_name\n )\n return inst, cmd", "def create_ssh_handle(xcnode):\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy)\n\n try:\n client.connect(\n hostname=xcnode.hostname,\n username=xcnode.username,\n password=xcnode.password,\n port=int(xcnode.port)\n )\n xcnode.fd.write('ssh\\'ed to {} @ {}\\n'.format(\n xcnode.hostname, datetime.now()))\n except Exception as e:\n print e\n client = None\n\n xcnode.client = client\n\n return xcnode", "def _get_ssh_client(self, host):\n ssh = None\n\n global remote_user\n global remote_pwd\n if remote_user is None:\n remote_user = os.getenv('RP_REMOTE_USER', getpass.getuser())\n remote_pwd = os.getenv('RP_REMOTE_PWD') # this should use password-less ssh\n\n try:\n ssh = paramiko.SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.RejectPolicy())\n host_ip = gethostbyname(host)\n if remote_pwd:\n ssh.connect(host_ip, port=ssh_port, username=remote_user, password=remote_pwd)\n else:\n ssh.connect(host_ip, port=ssh_port, username=remote_user)\n except Exception as e:\n current_host = gethostbyname(gethostname())\n error_message = \"Exception '{}' occurred when creating a SSHClient at {} connecting \" \\\n \"to '{}:{}' with user '{}', message='{}'.\". 
\\\n format(type(e).__name__, current_host, host, ssh_port, remote_user, e)\n if e is paramiko.SSHException or paramiko.AuthenticationException:\n error_message_prefix = \"Failed to authenticate SSHClient with password\"\n error_message = error_message_prefix + (\" provided\" if remote_pwd else \"-less SSH\")\n self.log_and_raise(PermissionError(error_message))\n self.log_and_raise(RuntimeError(error_message))\n return ssh", "def init_ssh_client(self): \r\n logger.info(\"Create SSH Client\") \r\n self._client = paramiko.SSHClient() \r\n self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n return True", "def _get_sshclient(host_name, ip, port=22):\n ssh_clt = paramiko.SSHClient()\n # Allow connection not in the known_host\n ssh_clt.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_clt.connect(ip, port, host_name,\n key_filename=conf.SSH_KEY_ARGS['path'])\n return ssh_clt", "def ssh_into_container(container_name, key=\"insecure_key\"):\n container_id = get_container_id(container_name)\n container_ip = get_container_ip(container_id)\n\n command = \"ssh -i config/ssh/%s root@%s\" % (key, container_ip)\n\n try:\n subprocess.call(command, shell=True)\n except:\n sys.exit(\n logging.warning(\"An error occured while issuing the command \"\n \"'ssh'\"))", "def start_sshd(container):\n try:\n subprocess.call(['/usr/local/bin/hyper', 'exec', '-d', container, '/usr/sbin/sshd'], stdout=FNULL, stderr=subprocess.STDOUT)\n except IOError:\n print(\" I/O error \")", "def create_sftp_client(ssh_client):\n sftp_client = ssh_client.open_sftp()\n return sftp_client", "def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):\n cmd = b'%s %s %s' % (\n sshcmd,\n args,\n procutil.shellquote(\n b'%s -R %s serve --stdio'\n % (_serverquote(remotecmd), _serverquote(path))\n ),\n )\n\n ui.debug(b'running %s\\n' % cmd)\n\n # no buffer allow the use of 'select'\n # feel free to remove buffering and select usage when we ultimately\n # move to threading.\n stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)\n\n return proc, stdin, stdout, stderr", "def test_sshd(self):\n\n container = None\n try:\n os.chmod(self.private_key, 0o600)\n\n self.assertEqual(0, self.run_forklift(\n '--driver', 'save_ssh_command_docker',\n DOCKER_BASE_IMAGE, 'sshd',\n '--identity', self.private_key,\n ))\n\n command, available, container = SaveSSHDetailsDocker.last_details()\n\n def in_container(inside_command):\n \"\"\"\n Command line to execute a command inside the container\n via SSH.\n \"\"\"\n\n # TODO: run commands directly when environment is passed\n # properly.\n return \"echo '{0}' | \".format(inside_command) + \\\n command + \\\n ' -T' + \\\n ' -o NoHostAuthenticationForLocalhost=yes' + \\\n ' -o PasswordAuthentication=no'\n\n self.assertTrue(available)\n self.assertEqual(\n subprocess.call(in_container('/bin/true'), shell=True),\n 0\n )\n\n ssh_env = parse_environment(\n subprocess.check_output(in_container('/usr/bin/env -0'),\n shell=True),\n )\n\n self.assertEqual(ssh_env['DEVNAME'], 'myself')\n self.assertEqual(ssh_env['ENVIRONMENT'], 'dev_local')\n self.assertEqual(ssh_env['SITE_PROTOCOL'], 'http')\n self.assertTrue(\n re.match(r'^localhost:\\d+$', ssh_env['SITE_DOMAIN']))\n\n finally:\n # Kill and remove the started container\n if container is not None:\n for action in ('stop', 'rm'):\n subprocess.check_call(\n ('docker', action, container),\n stdout=DEVNULL,\n )", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 
'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export 
GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. -f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "async def run_client(host, command):", "def docker_enter(self, user, host, container):\n import os\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n ssh_host = user+\"@\"+host\n ssh_timeout = \"5\"\n ssh_options = \"-A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=\"+ssh_timeout+\" -o ConnectionAttempts=1 -tt\"\n docker_cmd = \"\\\"/opt/bin/docker-enter \"+container+\"\\\" \"\n cmd = \"ssh \"+ssh_options+\" \"+ssh_host+\" \"+docker_cmd\n logging.debug(\"Executing Command: %s\" % (cmd))\n returned = os.system(cmd)\n logging.debug(\"docker_enter func Exiting with code %i\" % (returned))\n sys.exit(returned)", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper for HTTP responses. The message argument is the content of the successful (200) HTTP response. Returns a Flask HTTP response object whose body is the message from the argument and whose status code is 200.
def response(message): res = Response(json.dumps(message)) res.status_code = 200 res.content_type = "application/json" return res
[ "def success_response( message, code = 200 ):\n return jsonify( { 'success_message' : message } ), code", "def simpleResponse(code, message):\n return response({'code':code, 'message':message})", "def build_response(http_status, message):\n return Response(data={'detail': message},\n status=http_status)", "def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)", "def error_response( message, code = 400 ):\n return jsonify( { 'error_message' : message } ), code", "def ping_response():\n\n return Response(\"ok\", status=200)", "def success(message=\"\"):\n\n success = {\"status\": 0, \"message\": message}\n return HttpResponse(json.dumps(success))", "def http501(message):\n response = HttpResponse(message)\n response.status_code = 501\n return response", "def http_status(code):\n return \"200 OK\" if code == 200 else \"404 Not Found\"", "def make_reponse(p_object=None, status_code=200):\n if p_object is None and status_code == 404:\n p_object = {\"status\": {\"status_content\": [{\"code\": \"404 - Not Found\", \"message\": \"Resource not found\"}]}}\n\n json_response = jsonify(p_object)\n json_response.status_code = status_code\n json_response.content_type = 'application/json;charset=utf-8'\n json_response.headers['Cache-Control'] = 'max-age=3600'\n return json_response", "def custom_response(status, details):\n return app.response_class(status=status,\n mimetype='application/json',\n response=json.dumps({\"status\": status,\n \"details\": details}))", "def test_responseCode(self):\n def application(environ, start_response):\n status = '314'\n response_headers = [('Content-type','text/html')]\n writer = start_response(status, response_headers)\n return []\n\n return self.assertResponse(\n (WSGI(application), 'http://host/'),\n (314, {\"Content-Length\": 0}, ''))", "def send_response(self, code, message=None):\n self.log_request(code)\n if message is None:\n message = code in self.responses and self.responses[code][0] or \"\"\n if self.request_version != \"HTTP/0.9\":\n hdr = \"%s %d %s\\r\\n\" % (self.protocol_version, code, message)\n self.wfile.write(hdr.encode(\"ascii\"))", "def test_ok_response(self):\n res = OkResponse({\"message\": self.message})", "def AddSuccessfulResponse(self, content=None):\n self.AddResponse(content=content)", "def create_response(self, status, statusmsg, body):\n self.response.setStatus(status, statusmsg)\n return body", "def return_payload(status_code: int, message: str):\n return {\n \"statusCode\": status_code,\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"body\": message,\n }", "def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator", "def _send_immediate_response(self, success, message=\"\"):\r\n\r\n # Send the response indicating success/failure\r\n response_str = json.dumps(\r\n {'return_code': 0 if success else 1, 'content': message}\r\n )\r\n\r\n if self._is_grade_request():\r\n self.send_response(\r\n 200, content=response_str, headers={'Content-type': 'text/plain'}\r\n )\r\n self.log_message(\"XQueue: sent response {0}\".format(response_str))\r\n\r\n else:\r\n self.send_response(500)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One-to-one identification of the snapshots.
def snapshot_identification(snapshot): return { 'user_id': snapshot['user_id'], 'timestamp': snapshot['timestamp'], 'snapshot_id': snapshot['snapshot_id']}
[ "def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")", "def ensure_identity_match(self, inventory):\n if 'identity' in self.meta:\n if self.meta['identity'] != inventory.identity:\n raise SnapshotMismatchError(\n expected=inventory.identity,\n found=self.meta['identity']\n )\n\n for snapshot in self.neighbours:\n if snapshot.meta['identity'] != inventory.identity:\n raise SnapshotMismatchError(\n expected=inventory.identity,\n found=snapshot.meta['identity']\n )", "def source_instant_snapshot_id(self) -> str:\n return pulumi.get(self, \"source_instant_snapshot_id\")", "def get_snapshot_uuid(self) -> str:\n return self._snapshot_uuid", "def get_image(self):\n logging.debug(\"%s get_image entered\" % str(self.machine_name))\n snapshots = cs.list_snapshots()\n # find the one for this server\n if self.cloudserver:\n server_id = self.cloudserver.id\n else:\n return self.image_id\n\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print \"XXX:\", img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n return img\n\n print \"Server %s has no snapshots\" % (server_id)\n return None", "def getId():", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def test_listSnapshot_by_id_as_user_own(self):\n\n self.apiclient.connection.apiKey = self.user_d11a_apikey\n self.apiclient.connection.securityKey = self.user_d11a_secretkey\n SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_d11a_snapshot.id)\n\n self.assertNotEqual(SnapshotList1,\n None,\n \"Regular User is not able to list Snapshots that are self-owned\")\n\n self.assertEqual(len(SnapshotList1),\n 1,\n \"Regular User is not able to list Snapshots that are self-owned\")", "def get_primary_id(self):", "def getSnapshot():\n if len(sys.argv) < 1:\n sys.exit(\"ERROR: snapshot ID requrired\")\n\n loadSnapshots()\n\n passed = sys.argv[0]\n\n if passed[0:1] == \"#\":\n sys.argv.pop(0)\n if passed in snapshot.idmap:\n return snapshot.idmap[passed]\n else:\n sys.exit(\"ERROR: Specified nonexistent snapshot {}\".format(passed))\n else:\n place = getPlace()\n snap = getSnap(place)\n return snap", "def l1_id(self):\n return self._l1_id", "def get_snapshot(self, name=None, snapshot_id=None):\n if snapshot_id:\n return self._search_snapshot(key=\"snapshot_id\", value=snapshot_id)\n elif name:\n return self._search_snapshot(key=\"name\", value=name)\n else:\n raise ValueError(\"name or snapshot_id must be provided\")", "def identity(self):\n if self.fingerprint:\n return fromHex(self.fingerprint)", "def snapshot(self, snapshot_id):\r\n return self.connection.create_dbsnapshot(snapshot_id, self.id)", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def snapshot_by_id(self, snapshot_id: int) -> Optional[Snapshot]:\n try:\n return next(snapshot for snapshot in self.metadata.snapshots if snapshot.snapshot_id == snapshot_id)\n except StopIteration:\n return None", "def test_reservation_id_one_instance(self):\n (refs, 
resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)", "def get_instance_snapshot(instanceSnapshotName=None):\n pass", "def get_snapshot(self, snapshot):\n return self._get(_snapshot.Snapshot, snapshot)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The AccountBroker initialize() function before we added the policy stat table. Used by test_policy_table_creation() to make sure that the AccountBroker will correctly add the table for cases where the DB existed before the policy support was added.
def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs): if not self.account: raise ValueError( 'Attempting to create a new database with no account set') self.create_container_table(conn) self.create_account_stat_table(conn, put_timestamp)
[ "def init_tables():\n # drop_table_m_candidates()\n # drop_table_m_qiita_users()\n create_table_m_candidates()\n create_table_m_qiita_users()", "def _InitTables(self):\n \n self.config_table = sa.Table('config', self.metadata,\n sa.Column('name', sa.String(50), primary_key=True),\n sa.Column('url', sa.String, nullable=False),\n sa.Column('type', sa.String(20), nullable=False));\n #fetchall releases any locks\n results = self.config_table.select().execute().fetchall()\n for x in results:\n #Only process this entry if it is basic\n if x['type'] == \"banlist\":\n #Adds the table as a member of this object. The name of the variable\n #is the name of the table, with _table appended to it. \n table = sa.Table(x['name'], self.metadata,\n sa.Column('id', sa.Integer, autoincrement=True, primary_key=True),\n sa.Column('lname', sa.String(50), index=True),\n sa.Column('name', sa.String(50)),\n sa.Column('reason', sa.String, nullable=False),\n sa.Column('source', sa.String(50)),\n sa.Column('deleted', sa.Integer));\n table.create(checkfirst=True)\n setattr(self, \"%s_table\" % (x['name']), table)", "def init_tables():\n logger = logging.getLogger(__name__)\n logger.info(\"Checking if DB tables need to be initialised\")\n if not db.engine.table_names():\n logger.warning(\"Creating DB tables for the first time!\")\n db.create_all()", "def txn_createTables(self):\r\n self.db_create_nonce()\r\n self.db_create_assoc()\r\n self.db_create_settings()", "def _pre_init(self):\n pass", "def __init__(self):\n super(ESEDBPlugin, self).__init__()\n self._tables = {}\n self._tables.update(self.REQUIRED_TABLES)\n self._tables.update(self.OPTIONAL_TABLES)", "def init_post_connection(self):\n\n if self.authorized and not self.post_initiated:\n self.create_tables_and_apply_patches()\n self.post_initiated = True\n\n PyFunceble.INTERN[\"mysql\"] = self.__dict__.copy()", "def init_db(self):\n self.create_db()\n col_rows = self.check_default_settings()\n if col_rows == 0:\n self.set_default_settings()", "def initialize():\n db.connect()\n db.create_tables([TimeSheets, Users], safe=True)", "def init_reset_hash_table(self):\n if ENVIRONMENT == 'devel':\n Base.metadata.drop_all(self.engine)\n Base.metadata.create_all(self.engine)\n else:\n logger.warning('Reset hash table is not recommended '\n 'anywhere else than devel environment')", "def test_db_init(self):\n db.initialize.init_db()\n self.assertTrue(db.retrieve.exists_table('scrape_log'), 'Table \\'scrape_log\\' should exist.')", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def startup_prep(self, stageid, eventlist):\n super(MewloDatabaseManagerSqlA,self).startup_prep(stageid, eventlist)\n if (stageid == mconst.DEF_STARTUPSTAGE_earlycore):\n # this needs to be done at this state so it's ready for database table creation, etc.\n self.setup_sqlahelpers(eventlist)", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: 
disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def init_db(self):\n raise NotImplementedError()", "def dbnd_on_pre_init_context(ctx):", "def init():\n if not table_present('dmigrations_log'):\n _execute(MIGRATION_LOG_SQL)", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copied from AccountBroker before the metadata column was added; used for testing with TestAccountBrokerBeforeMetadata. Create account_stat table which is specific to the account DB.
def premetadata_create_account_stat_table(self, conn, put_timestamp):
    conn.executescript('''
        CREATE TABLE account_stat (
            account TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            container_count INTEGER,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );

        INSERT INTO account_stat (container_count) VALUES (0);
    ''')

    conn.execute('''
        UPDATE account_stat SET account = ?, created_at = ?, id = ?,
            put_timestamp = ?
    ''', (self.account, Timestamp.now().internal, str(uuid4()),
          put_timestamp))
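Because the helper only reads self.account, it can be exercised against a plain in-memory SQLite connection to confirm the legacy layout. A minimal standalone sketch, assuming the usual Timestamp and uuid4 imports are available alongside the function as in the original module:

    import sqlite3
    from types import SimpleNamespace

    conn = sqlite3.connect(':memory:')
    premetadata_create_account_stat_table(SimpleNamespace(account='AUTH_test'),
                                          conn, put_timestamp='0000000001.00000')

    # The legacy table has no `metadata` column.
    cols = [row[1] for row in conn.execute('PRAGMA table_info(account_stat)')]
    print(cols)
    print(conn.execute('SELECT account, put_timestamp '
                       'FROM account_stat').fetchone())
    # -> ('AUTH_test', '0000000001.00000')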
[ "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def create_AccountStat_object(cls, info):\n if 'account' not in info:\n info['account'] = '-1'\n if 'created_at' not in info:\n info['created_at'] = '0'\n if 'put_timestamp' not in info:\n info['put_timestamp'] = '0'\n if 'delete_timestamp' not in info:\n info['delete_timestamp'] = '0'\n if 'container_count' not in info:\n info['container_count'] = 0\n if 'object_count' not in info:\n info['object_count'] = 0\n if 'bytes_used' not in info:\n info['bytes_used'] = 0\n if 'hash' not in info:\n info['hash'] = '-1'\n if 'id' not in info:\n info['id'] = '-1'\n if 'status' not in info:\n info['status'] = '-1'\n if 'status_changed_at' not in info:\n info['status_changed_at'] = '0'\n if 'metadata' not in info:\n info['metadata'] = {}\n return libaccountLib.AccountStat(\n info['account'],\n info['created_at'],\n normalize_timestamp(\\\n info['put_timestamp']),\n normalize_timestamp(\\\n info['delete_timestamp']),\n info['container_count'],\n info['object_count'],\n info['bytes_used'],\n info['hash'],\n info['id'],\n info['status'],\n info['status_changed_at'],\n info['metadata']\n )", "def create_playerstats(conn, curr):\n\n drop_table('PlayerStats', conn, curr)\n cmd = \"\"\"\n CREATE TABLE PlayerStats(\n user_id INTEGER NOT NULL,\n event_id INTEGER NOT NULL,\n team_id INTEGER,\n kills INTEGER,\n damage INTEGER,\n distance INTEGER,\n headshots INTEGER,\n time INTEGER,\n death BIT,\n score INTEGER,\n PRIMARY KEY (user_id, event_id),\n FOREIGN KEY(user_id) REFERENCES Players(user_id),\n FOREIGN KEY(event_id) REFERENCES Events(event_id),\n FOREIGN KEY(team_id) REFERENCES Teams(team_id)\n )\n \"\"\"\n curr.execute(cmd)\n conn.commit()", "def add_statistics(self, stat_col):\n pass", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def _create_tables(self):\n\n sql = f\"\"\"CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (\n rank bigint,\n asn bigint,\n organization varchar (250),\n links bigint[]\n );\"\"\"\n self.cursor.execute(sql)", "def load_status_table():", "def _populate_table_status():\n [db_insert_or_get(Status, name=name) for name in app.config['STATUS_DICT'][1:]]\n db.session.commit()", "def create_table(self):\n pass", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def _InitTables(self):\n 
\n self.config_table = sa.Table('config', self.metadata,\n sa.Column('name', sa.String(50), primary_key=True),\n sa.Column('url', sa.String, nullable=False),\n sa.Column('type', sa.String(20), nullable=False));\n #fetchall releases any locks\n results = self.config_table.select().execute().fetchall()\n for x in results:\n #Only process this entry if it is basic\n if x['type'] == \"banlist\":\n #Adds the table as a member of this object. The name of the variable\n #is the name of the table, with _table appended to it. \n table = sa.Table(x['name'], self.metadata,\n sa.Column('id', sa.Integer, autoincrement=True, primary_key=True),\n sa.Column('lname', sa.String(50), index=True),\n sa.Column('name', sa.String(50)),\n sa.Column('reason', sa.String, nullable=False),\n sa.Column('source', sa.String(50)),\n sa.Column('deleted', sa.Integer));\n table.create(checkfirst=True)\n setattr(self, \"%s_table\" % (x['name']), table)", "def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]", "def get_account_stat(cls, temp_path, account_path, logger):\n info = {}\n resp = libaccountLib.AccountStatWithStatus()\n AccountServiceInterface.__get_account_stat(resp, temp_path, account_path, logger)\n logger.debug(\"get_account_stat responded with return code: %s\" \\\n % resp.get_return_status())\n if resp.get_return_status() == INFO_FILE_OPERATION_SUCCESS:\n info['container_count'] = \\\n resp.get_account_stat().get_container_count()\n info['object_count'] = resp.get_account_stat().get_object_count()\n info['bytes_used'] = resp.get_account_stat().get_bytes_used()\n info['put_timestamp'] = resp.get_account_stat().get_put_timestamp()\n info['account'] = resp.get_account_stat().get_account()\n info['created_at'] = resp.get_account_stat().get_created_at()\n info['delete_timestamp'] = \\\n resp.get_account_stat().get_delete_timestamp()\n info['hash'] = resp.get_account_stat().get_hash()\n info['id'] = resp.get_account_stat().get_id()\n info['status'] = resp.get_account_stat().get_status()\n info['status_changed_at'] = \\\n resp.get_account_stat().get_status_changed_at()\n #OBB-48: Library responding with incorrect value for metadata.\n #Fix: Since no metata support, returning with empty map.\n info['metadata'] = resp.get_account_stat().get_metadata()\n metadata = info['metadata']\n modified_meta = {}\n for key, value in metadata.iteritems():\n if key == 'r-':\n modified_meta.update({'x-account-read' : value})\n elif key == 'w-':\n modified_meta.update({'x-account-write' : value})\n else:\n ser_key = key.split('-')[0]\n if ser_key == 'm':\n key = 'x-account-meta-' + key.split('-', 1)[1]\n else:\n key = 'x-account-sysmeta-' + key.split('-', 1)[1]\n modified_meta.update({key : value})\n info['metadata'] = modified_meta\n logger.debug(\"get_account_stat from library: %s\" % info)\n return resp.get_return_status(), info\n return resp.get_return_status(), None", "def _stats_to_table_writer(sentiment_result):\n cur = get_db().cursor()\n data_tuple = (datetime.now(), sentiment_result)\n cur.execute(DB_OBJ.db_insert_stats_query, data_tuple)\n get_db().commit()", "def 
describe_account_attributes():\n pass", "def addPropstats(status, propstat):", "def create_meta_loan_table(self):\n table_exists = self.check_if_table_exists(\"meta_loan_tables\")\n\n if not table_exists:\n self.read_sql_from_file('create_meta_loan_tables.sql')\n return", "def add_statistics(self, stat_col):\n self.module.add_statistics(stat_col)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copied from AccountBroker before the storage_policy_index column was added; used for testing with TestAccountBrokerBeforeSPI. Create container table which is specific to the account DB.
def prespi_create_container_table(self, conn):
    conn.executescript("""
        CREATE TABLE container (
            ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            put_timestamp TEXT,
            delete_timestamp TEXT,
            object_count INTEGER,
            bytes_used INTEGER,
            deleted INTEGER DEFAULT 0
        );

        CREATE INDEX ix_container_deleted_name ON
            container (deleted, name);

        CREATE TRIGGER container_insert AFTER INSERT ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count + (1 - new.deleted),
                object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used,
                hash = chexor(hash, new.name,
                              new.put_timestamp || '-' ||
                              new.delete_timestamp || '-' ||
                              new.object_count || '-' || new.bytes_used);
        END;

        CREATE TRIGGER container_update BEFORE UPDATE ON container
        BEGIN
            SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
        END;

        CREATE TRIGGER container_delete AFTER DELETE ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count - (1 - old.deleted),
                object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used,
                hash = chexor(hash, old.name,
                              old.put_timestamp || '-' ||
                              old.delete_timestamp || '-' ||
                              old.object_count || '-' || old.bytes_used);
        END;
    """)
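The triggers can be exercised outside the broker by running the script against an in-memory SQLite database. In the real backend chexor() is a custom SQLite function, so the sketch below registers a stand-in just to let the trigger fire, and creates only the account_stat columns the triggers touch:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.create_function('chexor', 3, lambda old_hash, name, ts: old_hash)  # stand-in
    conn.executescript('''
        CREATE TABLE account_stat (
            container_count INTEGER DEFAULT 0,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT DEFAULT '00000000000000000000000000000000'
        );
        INSERT INTO account_stat DEFAULT VALUES;
    ''')
    prespi_create_container_table(None, conn)   # `self` is unused by the helper

    conn.execute('''
        INSERT INTO container (name, put_timestamp, delete_timestamp,
                               object_count, bytes_used)
        VALUES ('c1', '1', '0', 5, 1024)
    ''')
    print(conn.execute('SELECT container_count, object_count, bytes_used '
                       'FROM account_stat').fetchone())
    # -> (1, 5, 1024): the insert trigger rolled the totals up into account_stat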
[ "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)", "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def txn_createTables(self):\r\n self.db_create_nonce()\r\n self.db_create_assoc()\r\n self.db_create_settings()", "def create_table(self):\n pass", "def create_table(self):\n return None", "def _InitTables(self):\n \n self.config_table = sa.Table('config', self.metadata,\n sa.Column('name', sa.String(50), primary_key=True),\n sa.Column('url', sa.String, nullable=False),\n sa.Column('type', sa.String(20), nullable=False));\n #fetchall releases any locks\n results = self.config_table.select().execute().fetchall()\n for x in results:\n #Only process this entry if it is basic\n if x['type'] == \"banlist\":\n #Adds the table as a member of this object. The name of the variable\n #is the name of the table, with _table appended to it. 
\n table = sa.Table(x['name'], self.metadata,\n sa.Column('id', sa.Integer, autoincrement=True, primary_key=True),\n sa.Column('lname', sa.String(50), index=True),\n sa.Column('name', sa.String(50)),\n sa.Column('reason', sa.String, nullable=False),\n sa.Column('source', sa.String(50)),\n sa.Column('deleted', sa.Integer));\n table.create(checkfirst=True)\n setattr(self, \"%s_table\" % (x['name']), table)", "def _create_tables(self):\n\n sql = f\"\"\"CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (\n rank bigint,\n asn bigint,\n organization varchar (250),\n links bigint[]\n );\"\"\"\n self.cursor.execute(sql)", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n 
tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def create_database_table_asa(self) -> None:\r\n\r\n cursor.execute('''CREATE TABLE Routing_ASA (context, prefix, protocol, admin_distance, nexthops, interfaces, \r\n metric, tag)''')\r\n mydb.commit()", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def _createSettingsDB(self):\n logging.debug('Creating tables')\n self._connection.executescript('''\n CREATE TABLE IF NOT EXISTS config(\n config_id INTEGER,\n name TEXT UNIQUE,\n value TEXT,\n PRIMARY KEY(config_id)\n );\n \n CREATE TABLE IF NOT EXISTS images(\n image_id INTEGER,\n uri TEXT UNIQUE,\n image BLOB,\n PRIMARY KEY(image_id)\n );\n ''').close()\n\n logging.debug('Creating index')\n self._connection.execute('''\n CREATE INDEX IF NOT EXISTS idx_image_uri ON images(uri)\n ''').close()\n\n self._connection.execute('''\n CREATE INDEX IF NOT EXISTS idx_config_name ON config(name)\n ''').close()", "def __create_session_id_db():\n \n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS SESSION_IDS\n (\n session_id INT\n )\n \"\"\"\n )", "def create_service_schema(self):\n\t\tself.pg_eng.create_service_schema()", "def __init__(self, db):\n self.c = db.c\n self.connection = db.connection\n self.c.execute(\"CREATE TABLE IF NOT EXISTS 'payment_channel_spend' \"\n \"(payment_txid text unique, payment_tx text, \"\n \"amount integer, is_redeemed integer, \"\n \"deposit_txid text)\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copied from AccountBroker before the container_count column was added. Create policy_stat table which is specific to the account DB. Not a part of Pluggable Backends, internal to the baseline code.
def pre_track_containers_create_policy_stat(self, conn):
    conn.executescript("""
        CREATE TABLE policy_stat (
            storage_policy_index INTEGER PRIMARY KEY,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0
        );
        INSERT OR IGNORE INTO policy_stat (
            storage_policy_index, object_count, bytes_used
        )
        SELECT 0, object_count, bytes_used
        FROM account_stat
        WHERE container_count > 0;
    """)
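A standalone sketch of what the seeding step does: given an account_stat row with a non-zero container_count, the legacy totals are copied into policy_stat under storage policy index 0. Only the columns the script reads are created here:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.executescript('''
        CREATE TABLE account_stat (
            container_count INTEGER DEFAULT 0,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0
        );
        INSERT INTO account_stat (container_count, object_count, bytes_used)
        VALUES (3, 42, 2048);
    ''')
    pre_track_containers_create_policy_stat(None, conn)   # `self` is unused

    print(conn.execute('SELECT * FROM policy_stat').fetchall())
    # -> [(0, 42, 2048)]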
[ "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)", "def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n 
BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")", "def create_policies(data):", "def create_policy_rule_set_precommit(self, context):\n pass", "def create_playerstats(conn, curr):\n\n drop_table('PlayerStats', conn, curr)\n cmd = \"\"\"\n CREATE TABLE PlayerStats(\n user_id INTEGER NOT NULL,\n event_id INTEGER NOT NULL,\n team_id INTEGER,\n kills INTEGER,\n damage INTEGER,\n distance INTEGER,\n headshots INTEGER,\n time INTEGER,\n death BIT,\n score INTEGER,\n PRIMARY KEY (user_id, event_id),\n FOREIGN KEY(user_id) REFERENCES Players(user_id),\n FOREIGN KEY(event_id) REFERENCES Events(event_id),\n FOREIGN KEY(team_id) REFERENCES Teams(team_id)\n )\n \"\"\"\n curr.execute(cmd)\n conn.commit()", "def new(c, namespace, username):\n master_connection = Connection(c.config.master_host, user, config=c.config)\n create_hbase_namespace_hbase(master_connection, namespace)\n ranger.create_ranger_policy(\n c,\n ['{}.*'.format(namespace)],\n username,\n 'hbase_{}'.format(namespace),\n 'Hbase policy for namespace {}'.format(namespace),\n 'table',\n 'hbase')", "def _update_cardinality(self, c):\n if c.type in STRUCT:\n Log.error(\"not supported\")\n try:\n if c.table == \"meta.columns\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.columns, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.columns),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n return\n if c.table == \"meta.tables\":\n with self.meta.columns.locker:\n partitions = jx.sort([g[c.es_column] for g, _ in jx.groupby(self.meta.tables, c.es_column) if g[c.es_column] != None])\n self.meta.columns.update({\n \"set\": {\n \"partitions\": partitions,\n \"count\": len(self.meta.tables),\n \"cardinality\": len(partitions),\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"table\": c.table, \"name\": c.name}}\n })\n return\n\n es_index = c.table.split(\".\")[0]\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data={\n \"aggs\": {c.name: _counting_query(c)},\n \"size\": 0\n })\n r = result.aggregations.values()[0]\n count = result.hits.total\n cardinality = coalesce(r.value, r._nested.value, 0 if r.doc_count==0 else None)\n if cardinality == None:\n Log.error(\"logic error\")\n\n query = Data(size=0)\n if cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):\n Log.note(\"{{table}}.{{field}} has {{num}} parts\", table=c.table, field=c.es_column, num=cardinality)\n with self.meta.columns.locker:\n 
self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:\n Log.note(\"{{field}} has {{num}} parts\", field=c.name, num=cardinality)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"last_updated\": Date.now()\n },\n \"clear\": [\"partitions\"],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n return\n elif len(c.nested_path) != 1:\n query.aggs[literal_field(c.name)] = {\n \"nested\": {\"path\": c.nested_path[0]},\n \"aggs\": {\"_nested\": {\"terms\": {\"field\": c.es_column, \"size\": 0}}}\n }\n else:\n query.aggs[literal_field(c.name)] = {\"terms\": {\"field\": c.es_column, \"size\": 0}}\n\n result = self.default_es.post(\"/\" + es_index + \"/_search\", data=query)\n\n aggs = result.aggregations.values()[0]\n if aggs._nested:\n parts = jx.sort(aggs._nested.buckets.key)\n else:\n parts = jx.sort(aggs.buckets.key)\n\n Log.note(\"{{field}} has {{parts}}\", field=c.name, parts=parts)\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": count,\n \"cardinality\": cardinality,\n \"partitions\": parts,\n \"last_updated\": Date.now()\n },\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n except Exception, e:\n if \"IndexMissingException\" in e and c.table.startswith(TEST_TABLE_PREFIX):\n with self.meta.columns.locker:\n self.meta.columns.update({\n \"set\": {\n \"count\": 0,\n \"cardinality\": 0,\n \"last_updated\": Date.now()\n },\n \"clear\":[\n \"partitions\"\n ],\n \"where\": {\"eq\": {\"es_index\": c.es_index, \"es_column\": c.es_column}}\n })\n else:\n self.meta.columns.update({\n \"set\": {\n \"last_updated\": Date.now()\n },\n \"clear\": [\n \"count\",\n \"cardinality\",\n \"partitions\",\n ],\n \"where\": {\"eq\": {\"table\": c.table, \"es_column\": c.es_column}}\n })\n Log.warning(\"Could not get {{col.table}}.{{col.es_column}} info\", col=c, cause=e)", "def get_traffic_policy_instance_count(self) -> Dict:\n pass", "def create_network_service_policy_postcommit(self, context):\n pass", "def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]", "def _get_resource_count(project_policies, cloudsql_acls):\n resource_counts = {\n ResourceType.PROJECT: len(project_policies),\n ResourceType.CLOUDSQL_ACL: len(cloudsql_acls),\n }\n\n return resource_counts", "def _CreateStatsCollector(self):", "def create_sql_groupby_count(self):\n pass", "def add_policies(self):\n self.policy_collection.append(AlarmQuota())\n self.policy_collection.append(AlarmPeriod(1200))\n self.policy_collection.append(SampleRate(5))", "def test_update_hyperflex_cluster_storage_policy(self):\n pass", "def pg_stat_statements_collect_all(self, request, pk: PostgresqlSettings):\n logging.info(\n f'Getting pg_stat_statements 
statistic from database {pk} started...')\n response = None\n try:\n postgresql_setting: PostgresqlSettings = PostgresqlSettings.objects.get(\n pk=pk)\n logging.info(\n f'postgresql_setting{type(postgresql_setting)} = {postgresql_setting}')\n connection: psycopg2.extensions.connection = psycopg2.connect(user=postgresql_setting.username,\n password=postgresql_setting.password,\n host=postgresql_setting.host,\n port=postgresql_setting.port,\n database=postgresql_setting.dbname)\n\n logging.info(f'connection{type(connection)} = {connection}')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM public.pg_stat_statements;\")\n pg_stat_statements_lines: List[PgStatStatementsLine] = []\n for row in cursor.fetchall():\n logging.info(f'row {type(row)} = {row}')\n pg_stat_statements_line = PgStatStatementsLine(userid=row[0], dbid=row[1], queryid=row[2], query=row[3],\n calls=row[4],\n total_time=row[5], min_time=row[6],\n max_time=row[7], mean_time=row[8], stddev_time=row[9],\n rows=row[10], shared_blks_hit=row[11],\n shared_blks_read=row[12],\n shared_blks_dirtied=row[13], shared_blks_written=row[14],\n local_blks_hit=row[15],\n local_blks_read=row[16],\n local_blks_dirtied=row[17], local_blks_written=row[18],\n temp_blks_read=row[19], temp_blks_written=row[20], blk_read_time=row[21], blk_write_time=row[22])\n logging.info(\n f'pg_stat_statements_line {type(pg_stat_statements_line)} = {pg_stat_statements_line}')\n pg_stat_statements_lines.append(pg_stat_statements_line)\n serializer = PgStatStatementsLineSerializer(\n pg_stat_statements_lines, many=True)\n response = HttpResponse(JSONRenderer().render(serializer.data),\n content_type=\"application/json; charset=utf-8\")\n response.status_code = 200\n except (Exception, psycopg2.Error) as error:\n error_message = f'Error while connecting to PostgreSQL: {error}'\n response = HttpResponse(error_message)\n response.status_code = 503\n logging.error(error_message)\n finally:\n try:\n if(connection):\n cursor.close()\n connection.close()\n except (UnboundLocalError) as error:\n logging.warning('Database connection already closed')\n logging.info(\n f'Getting pg_stat_statements statistic from database {pk} completed')\n return response", "def test_create_tam_advisory_count(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copied from AccountBroker before the container_count column was added (using the old stat trigger script). Create container table which is specific to the account DB.
def pre_track_containers_create_container_table(self, conn):
    # revert to old trigger script to support one of the tests
    OLD_POLICY_STAT_TRIGGER_SCRIPT = """
        CREATE TRIGGER container_insert_ps AFTER INSERT ON container
        BEGIN
            INSERT OR IGNORE INTO policy_stat
                (storage_policy_index, object_count, bytes_used)
                VALUES (new.storage_policy_index, 0, 0);
            UPDATE policy_stat
            SET object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used
            WHERE storage_policy_index = new.storage_policy_index;
        END;
        CREATE TRIGGER container_delete_ps AFTER DELETE ON container
        BEGIN
            UPDATE policy_stat
            SET object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used
            WHERE storage_policy_index = old.storage_policy_index;
        END;
    """

    conn.executescript("""
        CREATE TABLE container (
            ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            put_timestamp TEXT,
            delete_timestamp TEXT,
            object_count INTEGER,
            bytes_used INTEGER,
            deleted INTEGER DEFAULT 0,
            storage_policy_index INTEGER DEFAULT 0
        );

        CREATE INDEX ix_container_deleted_name ON
            container (deleted, name);

        CREATE TRIGGER container_insert AFTER INSERT ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count + (1 - new.deleted),
                object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used,
                hash = chexor(hash, new.name,
                              new.put_timestamp || '-' ||
                              new.delete_timestamp || '-' ||
                              new.object_count || '-' || new.bytes_used);
        END;

        CREATE TRIGGER container_update BEFORE UPDATE ON container
        BEGIN
            SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
        END;

        CREATE TRIGGER container_delete AFTER DELETE ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count - (1 - old.deleted),
                object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used,
                hash = chexor(hash, old.name,
                              old.put_timestamp || '-' ||
                              old.delete_timestamp || '-' ||
                              old.object_count || '-' || old.bytes_used);
        END;
    """ + OLD_POLICY_STAT_TRIGGER_SCRIPT)
[ "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")", "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def create_table(self):\n pass", "def txn_createTables(self):\r\n self.db_create_nonce()\r\n self.db_create_assoc()\r\n self.db_create_settings()", "def _create_tables(self):\n\n sql = f\"\"\"CREATE UNLOGGED TABLE IF NOT EXISTS {self.name} (\n rank bigint,\n asn bigint,\n organization varchar (250),\n links bigint[]\n );\"\"\"\n self.cursor.execute(sql)", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def create(params):\n con_name = params['dbname']\n config_dat = Config.info[params['dbtype']]\n volumes = config_dat['volumes']\n for vol in volumes:\n if vol[0] == 'DBVOL': vol[0] = params['db_vol']\n if vol[0] == 'BAKVOL': vol[0] = Config.backup_vol\n if container_util.container_exists(con_name):\n return \"Container name %s already in use\" % con_name\n used_ports = container_util.get_ports()\n # find two consecutive ports that are not in usebolt_port\n for port in range(Config.base_port, max(used_ports) + 2):\n if port not in used_ports and (\n (port + 1) not in used_ports):\n break\n Ports = {}\n for p in config_dat['pub_ports']:\n Ports[int(p)] = (Config.container_ip, port)\n port += 1\n params['port_bindings'] = Ports\n bolt_port = params['port'] = Ports[7687][1]\n print(\"Ports: \", Ports)\n env = {'NEO4J_dbms_memory_pagecache_size': '4G',\n 'NEO4J_AUTH': 'neo4j/changeme',\n 'DB_USER': params['dbuser']}\n\n # create container\n container_util.make_dirs(con_name, volumes)\n command = config_dat['command']\n (c_id, con) = container_util.create_con(params, env, args=command)\n print('waiting for container to startup...')\n time.sleep(5)\n\n status = 
reset_neo4j_password(params['port'])\n if status:\n stat2 = create_account(params['dbuser'], params['dbuserpass'],\n params['port'])\n badness = 0\n while status is not True and badness < 6:\n print('create_account failed: %d' % badness)\n time.sleep(3)\n status = reset_neo4j_password(params['port'])\n if status:\n stat2 = create_account(params['dbuser'],\n params['dbuserpass'],\n params['port'])\n badness += 1\n if status is not True:\n print('DEBUG very bad')\n return \"Failed: neo4j unable to create accounts\"\n https_port = Ports[7473][1]\n res = \"Your neo4j container %s \" % con_name\n res += \"has been created.\\n\\n\"\n res += \"neo4j HTTPS interface: %d\\n\" % https_port\n res += \"neo4j Bolt interface: %d\\n\" % bolt_port\n res += '\\n'\n res += 'Web access: https://%s:%d' % (Config.container_host, https_port)\n res += '\\n\\n'\n res += 'Note: Web interface will display the default Bolt port of 7687. '\n res += 'Change the Bolt port number from 7687 to %s ' % bolt_port\n res += 'before loginging in.\\n\\n'\n res += 'bolt://%s:%d' % (Config.FQDN_host, bolt_port)\n msg = 'Neo4j created: %s\\n' % params['dbname']\n msg += 'Created by: %s <%s>\\n' % (params['owner'], params['contact'])\n send_mail(\"DB4SCI: created neo4j\", msg)\n return res", "def create_table(self):\n return None", "def premetadata_create_account_stat_table(self, conn, put_timestamp):\n conn.executescript('''\n CREATE TABLE account_stat (\n account TEXT,\n created_at TEXT,\n put_timestamp TEXT DEFAULT '0',\n delete_timestamp TEXT DEFAULT '0',\n container_count INTEGER,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0,\n hash TEXT default '00000000000000000000000000000000',\n id TEXT,\n status TEXT DEFAULT '',\n status_changed_at TEXT DEFAULT '0'\n );\n\n INSERT INTO account_stat (container_count) VALUES (0);\n ''')\n\n conn.execute('''\n UPDATE account_stat SET account = ?, created_at = ?, id = ?,\n put_timestamp = ?\n ''', (self.account, Timestamp.now().internal, str(uuid4()),\n put_timestamp))", "def __set_container_info(self):\n self.container = \"{}_{}_1\".format(self.build, self.service.lower())\n self.mysql_container = \"{}_{}-mysql_1\".format(self.build, self.service.lower())", "def create(self, context):\n values = self.obj_get_changes()\n cpuset_obj = values.pop('cpuset', None)\n if cpuset_obj is not None:\n values['cpuset'] = cpuset_obj._to_dict()\n annotations = values.pop('annotations', None)\n if annotations is not None:\n values['annotations'] = self.fields['annotations'].to_primitive(\n self, 'annotations', self.annotations)\n cni_metadata = values.pop('cni_metadata', None)\n if cni_metadata is not None:\n values['cni_metadata'] = self.fields['cni_metadata'].to_primitive(\n self, 'cni_metadata', self.cni_metadata)\n values['container_type'] = self.container_type\n db_container = dbapi.create_container(context, values)\n self._from_db_object(self, db_container)", "def sql_create(database):\r\n\r\n database.execute('''\r\n CREATE TABLE IF NOT EXISTS executives (\r\n executive TEXT PRIMARY KEY,\r\n name TEXT\r\n ) \r\n ''')", "def make_table(connection):\n sql = \"\"\"\n IF NOT (EXISTS (SELECT *\n FROM INFORMATION_SCHEMA.TABLES\n WHERE TABLE_NAME = '{0}'))\n BEGIN\n CREATE TABLE {0}\n (Folder nvarchar(max), Filename nvarchar(max), Lat float, Lon float, Bytes int,\n Gpsdate datetime2, Exifdate datetime2, Filedate datetime2)\n END\n \"\"\"\n sql = sql.format(Config.table)\n return execute_sql(connection, sql)", "def db_create_table(self):\n try:\n mydb = mysql.connector.connect(\n 
host=Data.db_info[0],\n port=Data.db_info[1],\n user=Data.db_info[2],\n passwd=Data.db_info[3],\n database=Data.db_info[4]\n )\n mycursor = mydb.cursor()\n # Dropping table\n sql = \"DROP TABLE IF EXISTS records\"\n mycursor.execute(sql)\n # Creating new table\n sql = 'CREATE TABLE records ('\n #header info\n x = ['REF_DATE', 'GEO', 'DGUID', 'FOODCATEGORIES', 'COMMODITY', 'UOM', 'UOM_ID',\n 'SCALAR_FACTOR', 'SCALAR_ID', 'VECTOR', 'COORDINATE', 'VALUE', 'STATUS', 'SYMBOL', 'TERMINATED',\n 'DECIMALS']\n count = 0\n for y in x:\n sql = sql+'`'+y+'`'+' VARCHAR(100)'\n # add comma if not the last block\n if count <15:\n sql = sql + ','\n count = count + 1\n sql = sql + ')'\n mycursor.execute(sql)\n mydb.commit()\n return 0\n except (Exception) as error:\n return 1\n finally:\n # Checking variable for existance https://stackoverflow.com/questions/843277/how-do-i-check-if-a-variable-exists\n if 'mycursor' in locals():\n mycursor.close()\n if 'mydb' in locals():\n mydb.close()", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def create_and_insert_dls(connection: DBConnection) -> None:\n print(\"\\n[-] creating table dls\", end=\"\")\n connection.execute(\"\"\"\n CREATE TABLE dls AS\n SELECT did, SUM(tf) AS len FROM tfs GROUP BY did\n \"\"\")\n print(\"\\r[+] creating table dls\")", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n 
is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def create_database_table_nexus(self) -> None:\r\n\r\n cursor.execute('''CREATE TABLE Routing_Nexus (vdc, vrf, prefix, protocol, admin_distance, nexthops, \r\n interfaces, metric, tag)''')\r\n mydb.commit()", "def _InitTables(self):\n \n self.config_table = sa.Table('config', self.metadata,\n sa.Column('name', sa.String(50), primary_key=True),\n sa.Column('url', sa.String, nullable=False),\n sa.Column('type', sa.String(20), nullable=False));\n #fetchall releases any locks\n results = self.config_table.select().execute().fetchall()\n for x in results:\n #Only process this entry if it is basic\n if x['type'] == \"banlist\":\n #Adds the table as a member of this object. The name of the variable\n #is the name of the table, with _table appended to it. \n table = sa.Table(x['name'], self.metadata,\n sa.Column('id', sa.Integer, autoincrement=True, primary_key=True),\n sa.Column('lname', sa.String(50), index=True),\n sa.Column('name', sa.String(50)),\n sa.Column('reason', sa.String, nullable=False),\n sa.Column('source', sa.String(50)),\n sa.Column('deleted', sa.Integer));\n table.create(checkfirst=True)\n setattr(self, \"%s_table\" % (x['name']), table)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the class count of ROIs for each sample.
def count_classes(self, index=None):
    if index is None:
        index = np.arange(self.Samples.shape[0])
    elif isinstance(index, int):
        index = [index]

    count = np.zeros((len(index), len(self._classes)), dtype=np.int)
    for _ind in range(len(index)):
        rois = self.__getrois__(index[_ind])
        count[_ind, :] = np.bincount(rois[:, 4].astype(np.int),
                                     minlength=len(self._classes))
    return count
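The per-sample work reduces to a bincount over the class-id column of each ROI array. A standalone sketch, assuming ROIs are stored as (x1, y1, x2, y2, class_id) rows and three classes exist:

    import numpy as np

    rois = np.array([[0, 0, 10, 10, 0],
                     [5, 5, 20, 20, 2],
                     [1, 2,  8,  9, 2]])
    n_classes = 3
    counts = np.bincount(rois[:, 4].astype(int), minlength=n_classes)
    print(counts)   # -> [1 0 2]: one ROI of class 0, none of class 1, two of class 2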
[ "def class_counts(self) -> (List[int], None):\n if self.isclassifier():\n if self.shadow_tree.get_class_weight() is None:\n return np.array(np.round(self.shadow_tree.get_node_nsamples_by_class(self.id)), dtype=int)\n else:\n return np.round(\n self.shadow_tree.get_node_nsamples_by_class(self.id) / self.shadow_tree.get_class_weights()).astype(\n int)\n return None", "def get_class_counts(self, n_classes, log=True):\n if log:\n self.log(\"Counting class occurrences across {} loaded samples...\"\n .format(self.n_loaded))\n counts = {i: 0 for i in range(n_classes)}\n _, y = self.get_all_periods(stack=True)\n cls_ints, cls_counts = np.unique(y, return_counts=True)\n for cls_int, cls_count in zip(cls_ints, cls_counts):\n counts[cls_int] += cls_count\n cls, counts = zip(*counts.items())\n if log:\n self.log(\"Classes: {}\\n\"\n \"Counts: {}\".format(cls, counts))\n return cls, counts", "def compute_n_img_per_class(self, wanted_classes):\n imagenet_labels_url = \"https://raw.githubusercontent.com/mlperf/mobile_app/master/java/org/mlperf/inference/assets/imagenet_val.txt\"\n imagenet_all_labels = requests.get(imagenet_labels_url).text.split(\"\\n\")[:-1]\n n_img_per_class = defaultdict(int)\n total_number_img = 0\n\n #logging.info(f'******************************wanted classes {wanted_classes}')\n\n for label_id in imagenet_all_labels:\n label = self.classes_reverse[int(label_id)]\n if label in wanted_classes:\n n_img_per_class[label] += 1\n total_number_img += 1\n logging.info(f\"n_img_per_class: {n_img_per_class}, total :{total_number_img}\")\n return n_img_per_class, total_number_img", "def get_roi_counts(self):\n counts = [[roi.counts for roi in group.rois] for group in self.roi_groups]\n return counts", "def trueClassCounts(obs):\n TruePositives = sum(map(lambda x : x[1]==1, obs))\n TrueNegatives = len(obs)-TruePositives\n return TruePositives,TrueNegatives", "def get_roi_counts(self):\r\n nrois = len(self.rois)\r\n for roi in range(nrois):\r\n for pv in self.roi_data_pvs[roi].keys():\r\n pv.array_get()\r\n pvs[pv].pend_io()\r\n total = []\r\n net = []\r\n for i in range(nrois):\r\n pvs = self.roi_data_pvs[i]\r\n total.append(pvs['n'].getValue())\r\n net.append(pvs['nn'].getValue())\r\n return total, net", "def get_sample_counts(output_dir, dataset, class_names):\n df = pd.read_csv(os.path.join(output_dir, dataset + \".csv\"))\n total_count = df.shape[0]\n labels = df[class_names].as_matrix()\n positive_counts = np.sum(labels, axis=0)\n class_positive_counts = dict(zip(class_names, positive_counts))\n return total_count, class_positive_counts", "def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def divide_and_count(examples, labels):\n class_data = divide(examples, labels)\n class_count = count(class_data)\n return class_count, class_data", "def classFreqCounter(data, classes):\n classCounts = [0] * 16\n for key in data.keys():\n for i, classy in enumerate(classes):\n if classy == data[key][0]:\n classCounts[i] += 1\n for i, n in enumerate(classCounts):\n if n == 0:\n classCounts[i] = 1\n CLASSSMOOTHING 
=+ 1\n return classCounts", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def get_number_of_classes(self):\n return self.N", "def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes", "def nr_classes(self):\n return self._nr_classes", "def num_classes(self):\n raise NotImplementedError", "def __computer_number_of_samples__(self):\n\n slice_count = []\n for ii in self.imgs_list:\n with h5py.File(ii, 'r') as f:\n aux = f['data'].shape[0]\n slice_count.append(aux - (self.crop[1] + self.crop[0]))\n\n slice_count = np.array(slice_count)\n return slice_count.sum(),slice_count.cumsum()", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the maximum number of ROIs per batch sample in the dataset.
def get_max_rois(self):
    maxsize = 0
    for index in self.SampleID:
        rois = self.__getrois__(index)
        maxsize = max(maxsize, rois.shape[0])
    return maxsize
[ "def max_batch_size(self):\n\n return int(np.min([self.max_rows, self.N]))", "def _maxNofdataset(self, selpex_results_object):\r\n maxNofdataset = 0\r\n line2write_list = []\r\n for aaseq in selpex_results_object.selpex_results: \r\n pepres = selpex_results_object.selpex_results[aaseq]\r\n numberofNs = len(pepres.get_theomips())\r\n if numberofNs > maxNofdataset:\r\n maxNofdataset = numberofNs\r\n return maxNofdataset", "def __len__(self):\n \n num_batches_per_epoch = np.floor(len(self.training_data_dict)/self.user_args.batch_size)\n return int(num_batches_per_epoch)", "def max_num_batches(self):\n return self._max_num_batches", "def _get_max_batch_size(cols, n_data):\n # Take in account the intermediate data structures used, also the precission\n\n # 2 times X (X and X transpose) + n_data (Y) + XTsXs_gpu (Square cols x cols) + XTsYs (cols x 1) + Invs ( cols, cols) + n (metric)\n # c = cols\n # n = data\n # 2nc + n + c**2 + c + c**2 + n = 2c**2 + 2nc + 2n\n # 90% of free memory in the card\n estimated_scalars_per_regression = 2*(cols**2) + 2*(cols*n_data) + 2*n_data\n free_memory = driver.mem_get_info()[0]\n required_bytes_per_regression = FLOAT_PRECISSION_SIZE * estimated_scalars_per_regression\n # Closest thousand\n max_batch = (int(free_memory/required_bytes_per_regression)/1000)*1000\n return max_batch", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def get_max_rows_per_partition() -> int:\n pass", "def max_samples(self, N = 0) -> int:\n\t\tif N > 0:\n\t\t\tself.scope.write('VBS \"app.Acquisition.Horizontal.MaxSamples='+str(N)+'\"')\n\t\t# find out what happened:\n\t\treturn int(self.scope.query('VBS? \"return=app.Acquisition.Horizontal.NumPoints\"'))", "def max_batch_count(self) -> Optional[float]:\n return pulumi.get(self, \"max_batch_count\")", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def _get_batch_size(*, module: nn.Module, batch_dim: int) -> int:\n max_batch_len = 0\n for out in module.activations:\n # out is typically a tuple of one element (x)\n # for embedding bag, it is a tuple of two elements (x, offsets)\n # where len(offsets) = batch_size\n if out[-1].shape[batch_dim] > max_batch_len:\n max_batch_len = out[-1].shape[batch_dim]\n\n return max_batch_len", "def batch_size(self) -> int:\n ...", "def get_max_batch_size(self):\n raise NotImplementedError()", "def __computer_number_of_samples__(self):\n\n slice_count = []\n for ii in self.imgs_list:\n with h5py.File(ii, 'r') as f:\n aux = f['data'].shape[0]\n slice_count.append(aux - (self.crop[1] + self.crop[0]))\n\n slice_count = np.array(slice_count)\n return slice_count.sum(),slice_count.cumsum()", "def load_max(self):\n return max(self.load_samples)", "def _min_sampled_from_batch(self):\n return min([col._last_batch_size for col in self._profile], default=0)", "def find_max_nr_doc(data):\n queries = list(set(data[:, 1].astype(int)))\n max_nr = 0\n for query in queries:\n n_max = data[data[:,1] == query].shape[0]\n if n_max > max_nr:\n max_nr = n_max\n return max_nr", "def get_max_readings( self ):\n return 2500", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a dictionary of the stations and their number of connections.
def make_station_dict(self):
    self.station_dict = {}

    # iterates over stations and puts the number of connections in the dict
    for station in self.stations:
        length = len(self.stations[station].connections)
        self.station_dict[station] = length

    return self.station_dict
[ "def connection_count(self):\n connlist = qnetwork.connections(self.connection, self.credentials)\n per_node = {}\n for node_data in connlist.data:\n per_node[node_data['id']] = 0\n for _ in node_data['connections']:\n per_node[node_data['id']] += 1\n return per_node", "def get_number_stations(self):\n return len(self.stations)", "def get_online_count():\n return dict(online_user=get_online_users())", "def get_network_stats():\n network_stats = defaultdict(dict)\n for flavor in Network.FLAVORS.keys():\n network_stats[flavor] = defaultdict(int)\n network_stats[flavor][\"active\"] = 0\n network_stats[flavor][\"error\"] = 0\n\n networks = Network.objects.filter(deleted=False)\n for net in networks:\n state = \"error\" if net.state == \"ERROR\" else \"active\"\n network_stats[net.flavor][state] += 1\n\n return network_stats", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def __count_edges(edges: list) -> dict:\n edges_call_count = dict()\n\n for edge in edges:\n service_from = edge[0]\n call_count = edge[2].get('weight')\n if service_from in edges_call_count:\n edges_call_count[service_from] += call_count\n else:\n edges_call_count[service_from] = call_count\n\n return edges_call_count", "def num_stations(self) -> int:\n return self._num_stations", "def _get_networks_dict(self):\n return dict(network=self.network.state_dict())", "def totalStation(analyzer):\n return gr.numVertices(analyzer['graph'])", "def num_stations(self) -> int:\n return len(self.station_ids)", "def count_workstations(self):\n count = 0\n for item in self.workStations.values():\n count += len(item)\n return count", "def load(self):\r\n total = 0\r\n for val in self.connections.values():\r\n total += val\r\n return total", "def get_channel_counts(self):\n counts = {}\n for channel in self.ipc_channels:\n counts[channel.endpoint] = channel.frame_count\n\n return counts", "def get_connection_info(self):\n return {}", "def coordinates(self):\n coordinates = {}\n coordinates_x = []\n coordinates_y = []\n station = self.start_station\n\n coordinates_x.append(station.x)\n coordinates_y.append(station.y)\n\n for connection in self.connections:\n # Check if station is the first or second station, change station\n # and add coordinates coordinates\n if connection.station_1 == station:\n station = connection.station_2\n coordinates_x.append(station.x)\n coordinates_y.append(station.y)\n else:\n station = connection.station_1\n coordinates_x.append(station.x)\n coordinates_y.append(station.y)\n\n coordinates[\"x\"] = coordinates_x\n coordinates[\"y\"] = coordinates_y\n\n return coordinates", "def stats(series):\n\td={}\n\tfor index in series[\"Country Code\"].unique():\n\t\td[index]={\n\t\t\"total servers\" : len(series.loc[series[\"Country Code\"]==index]),\n\t\t\"lat\" : series.loc[series[\"Country Code\"]==index][\"LAT\"].iat[0],\n\t\t\"long\" : series.loc[series[\"Country Code\"]==index][\"LONG\"].iat[0]\n\t\t}\n\treturn d", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def load(self):\n total = sum(self.connections.values())\n return total", "def get_dict_nw(self):\n\n dict_nw = defaultdict(list)\n for i in self._graph:\n for j in list(self._graph[i]):\n dict_nw[i].append(j)\n if j not in dict_nw:\n dict_nw[j] = []\n return dict(dict_nw)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sorts the station dict based on the number of connections (the value).
def create_station_list(self):
    # ascending sort: stations with the fewest connections come first
    sorted_station_list = sorted(self.station_dict, key=self.station_dict.get)
    return sorted_station_list
[ "def sort_by_station(channels):\n stations = collections.defaultdict(list)\n for trace_id, coords in channels.items():\n network, station, location, channel = trace_id.split('.')\n stations[(network, station)] += [(location, channel,) + coords]\n return stations.items()", "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def _get_sorted_by_n_connections(m):\n small = nx.Graph()\n for k, v in m.items():\n small.add_edge(k[0], k[1])\n return sorted(small.adj, key=lambda x: len(small[x])), small", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def sort_by_metric(self, value): \n self._sort_by_metric = value", "def make_station_dict(self):\n self.station_dict = {}\n\n # interates over stations and puts the amount of connections in the dict\n for station in self.stations:\n length = len(self.stations[station].connections)\n self.station_dict[station] = length\n \n return self.station_dict", "def _sort(self):\r\n get_key = lambda map: len(map[0])\r\n return sorted(self._mappings, key = get_key, reverse = True)", "def sort_bike_stations(bike_stations, location):\n\n stations = bike_stations.copy()\n\n for index, station in enumerate(stations):\n station_location = (station[\"lat\"], station[\"lon\"])\n dist = distance.distance(station_location, location).m\n stations[index][\"distance\"] = dist\n\n stations = sorted(stations, key=lambda station: station[\"distance\"])\n stations = list(filter(lambda station: station[\"bikesAvailable\"] > 0, stations))\n\n return stations", "def sortDistance(netlist):\n netlist_dictionary = {}\n for i in range(len(netlist)):\n start = chips[netlist[i][0]]\n end = chips[netlist[i][1]]\n\n delta_x = abs(start[0]-end[0])\n delta_y = abs(start[1]-end[1])\n distance = delta_x + delta_y\n\n netlist_dictionary[(netlist[i][0], netlist[i][1])] = distance\n\n sorted_dictionary = sorted(netlist_dictionary.items(), key=operator.itemgetter(1))\n sorted_netlist = []\n for j in range(len(sorted_dictionary)):\n sorted_netlist.append(sorted_dictionary[j][0])\n\n return sorted_netlist", "def sortAdjacency(self):\r\n\r\n # second argument of tuple 'e' is weight\r\n self.adjacency = {\r\n v: sorted(self.adjacency[v], key=lambda e: e[1])\r\n for v in self.nodes\r\n }", "def _sort_by_socket_and_coreid(self, icpu_dict):\n return (int(icpu_dict['numa_node']), int(icpu_dict['core']), int(icpu_dict['thread']))", "def sort_satellites(self):\r\n sats = self.satellites\r\n SMAs = [sat.orb.a for sat in sats]\r\n idxs = np.argsort(SMAs)\r\n newSatNames = []\r\n newSMAs = []\r\n for idx in idxs:\r\n newSatNames.append(sats[idx].name)\r\n newSMAs.append(SMAs[idx])\r\n \r\n # use name as tiebreaker\r\n ii = 0\r\n while ii < len(newSatNames):\r\n a = newSMAs[ii]\r\n duplicates = [jj for jj, sma in enumerate(newSMAs) if sma==a]\r\n if len(duplicates) > 1:\r\n names = [newSatNames[jj] for jj in duplicates]\r\n metaIdxs = np.argsort(names)\r\n for kk, metaIdx in enumerate(metaIdxs):\r\n newSatNames[ii+kk] = names[metaIdx]\r\n ii = ii+len(duplicates)\r\n \r\n newSats = []\r\n for name in newSatNames:\r\n sat = [bd for bd in sats if bd.name==name][0]\r\n newSats.append(sat)\r\n \r\n \r\n self.satellites = newSats\r\n return", "def _dict_sorting(d):\n CONTOUR_IMAGE_SEQUENCE_TAG = \"30060016\"\n 
REFERENCED_SOP_INSTANCE_UID_TAG = \"00081155\"\n\n referenced_sop_instance = d[CONTOUR_IMAGE_SEQUENCE_TAG][\"Value\"][0][\n REFERENCED_SOP_INSTANCE_UID_TAG\n ][\"Value\"][0]\n\n CONTOUR_DATA_TAG = \"30060050\"\n contour_data = d[CONTOUR_DATA_TAG][\"Value\"]\n\n return f\"{referenced_sop_instance}.{str(len(contour_data)).zfill(6)}\"", "def get_top_station_set(city):\n s = {}\n for file in os.listdir(exp_data_path + os.sep + 'station' + os.sep + city):\n with open(exp_data_path + os.sep + 'station' + os.sep + city + os.sep + file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0] not in s:\n s[row[0]] = 1\n else:\n s[row[0]] = s[row[0]] + 1\n\n sort_s = dict(sorted(s.items(), key=lambda x : x[1], reverse=True))\n first = True\n res = []\n for k, v in sort_s.items():\n if first:\n top = v\n first = False\n if top - v <= 30:\n res.append(k)\n print('before', len(sort_s))\n print('after', len(res))\n\n # restore new map [old_index, new_index]\n list_remap = {}\n new_index = 0\n for index in range(0, data_length[city]):\n if str(index) in res:\n list_remap[index] = new_index\n new_index = new_index + 1\n\n # print(list_remap)\n check_path(exp_data_path + os.sep + 'station_list')\n file_name = exp_data_path + os.sep + 'station_list' + os.sep + 'list_remap_{}'.format(city) + '.npy'\n if os.path.exists(file_name):\n os.remove(file_name)\n np.save(file_name, list_remap)", "def machine_name_sort():\n\n machine_name_dict = dict() # initiate a blank dictionary\n for line in BITLY_DATA:\n long_url = line.rstrip('\\n').split(\"\\t\")[4]\n machine_name = long_url.split('/')[2].replace('www.','')\n if machine_name not in machine_name_dict:\n machine_name_dict[machine_name] = 0\n machine_name_dict[machine_name] = machine_name_dict[machine_name] + 1\n machine_name_sorted = sorted(machine_name_dict, key = machine_name_dict.get)[0:9]\n print(machine_name_sorted)", "def sortPool(self):\n\t\tif not self.sorted:\n\t\t\tself.schedules.sort(key=lambda schedule: schedule.fitness, reverse=True)\n\t\t\tself.sorted = True", "def Sort():\n for i in range(Leishmania.spacesize):\n Leishmania.alive_dict[i] = [leish for leish in Leishmania.alive if leish.coord[0] == i]\n return", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def sort(self, values):\n for level in self:\n for wire1, wire2 in level.iteritems():\n if values[wire1] > values[wire2]:\n values[wire1], values[wire2] = values[wire2], values[wire1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries all possible configurations starting at the first station and only adds the configuration with the best score.
def visit_all_possibilities(self, first_station, track, grid):
    # loops over connections of station
    for connection in first_station.connections:
        # keeps adding until the max length of a track is reached
        if track.add_station(grid, self.stations[connection].name):
            # calculates the quality of adding the station and remembers it if it is the best score yet
            if grid.get_quality() > self.best_score:
                self.best_score = grid.get_quality()
                self.grid = copy.deepcopy(grid)
                print(f"new best score: {self.best_score}:\n{self.grid}\n\n")

            # repeat until there are no more configurations left
            self.visit_all_possibilities(self.stations[connection], track, grid)

            track.remove_last_station()
[ "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_bests(self):\n set_names = [\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = 
int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for run in run_fold_tec_best_conf:\n for fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # 
Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], temp[1][\"hp_selection\"], temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = 
recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert \"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique", "def pick_first_connection(self):\n self.best_connection = []\n stations = list(self.grid.stations.values())\n\n # add a first station to the track \n for station in stations:\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, station.name)\n\n lookahead_1 = station.connections\n\n # calculate quality of all connections and save the best connection\n for la1 in lookahead_1: \n next_station = stations[int(la1)].name\n self.track.add_station(self.grid, next_station)\n lookahead_2 = stations[int(la1)].get_connections()\n \n for la2 in lookahead_2:\n # if adding the connection exceeds the track's max time length \n if self.track.add_station(self.grid, la2[0].name) is False:\n break\n \n quality = self.grid.get_quality()\n self.track.remove_last_station()\n\n # checks if the quality of the track is the best one yet and remembers it\n if quality > self.best_score:\n 
self.best_score = quality \n self.best_connection = [station.name, stations[int(la1)].name, la2[0].name]\n self.track.remove_last_station()\n \n # if adding another track does not lead to a better quality, stop algorithm\n if self.best_connection == []:\n return False\n \n # add best connection to the track\n self.track = Track(f\"greedy_track_{self.count}\", self.grid)\n self.track.add_station(self.grid, self.best_connection[0])\n\n self.count += 1\n\n return station", "def get_best_config(self):\n raise NotImplementedError", "def bestBeginStation(allConnections, allStations):\n\n criticalDestinations = None\n nonCriticalDestinations = None\n bestBeginStation = None\n\n for station in allStations:\n\n # Keep track of local critical- and non-critical connections\n myCriticalDestinations = []\n myNonCriticalDestinations = []\n for destination in allStations[station].destinations:\n for connection in allConnections:\n\n # Check if connection is still available.\n if [station, destination.name] == connection[0] or [destination.name, station] == connection[0]:\n\n # Check if connection is critical.\n if allStations[station].isCritical or destination.isCritical:\n myCriticalDestinations.append(connection)\n else:\n myNonCriticalDestinations.append(connection)\n break\n\n # Check if station is the best beginstation.\n if bestBeginStation == None:\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n\n # Preference is the least amount of critical connections, where that\n # number % 2 == 1.\n elif len(myCriticalDestinations) % 2 == 1:\n if len(criticalDestinations) % 2 == 1:\n if len(myCriticalDestinations) < len(criticalDestinations):\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n\n # The shortest time is preferred.\n elif len(myCriticalDestinations) == len(criticalDestinations):\n minimumTime = None\n for connection in criticalDestinations:\n if minimumTime == None or connection[1] < minimumTime:\n minimumTime = connection[1]\n myMinimumTime = None\n for connection in myCriticalDestinations:\n if myMinimumTime == None or connection[1] < myMinimumTime:\n myMinimumTime = connection[1]\n if myMinimumTime < minimumTime:\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n else:\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n\n # After that, preference is to have the least amount of critical\n # connections.\n elif len(criticalDestinations) % 2 == 0:\n if len(myCriticalDestinations) < len(criticalDestinations):\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n\n # Shortest time is preferred.\n elif len(myCriticalDestinations) == len(criticalDestinations):\n minimumTime = None\n for connection in criticalDestinations:\n if minimumTime == None or connection[1] < minimumTime:\n minimumTime = connection[1]\n myMinimumTime = None\n for connection in myCriticalDestinations:\n if myMinimumTime == None or connection[1] < myMinimumTime:\n myMinimumTime = connection[1]\n if myMinimumTime != None and myMinimumTime < minimumTime:\n bestBeginStation = station\n criticalDestinations = myCriticalDestinations\n nonCriticalDestinations = myNonCriticalDestinations\n\n return bestBeginStation", "def 
next_config(self):\n\n delta = self.max_acc * self.step_time # denotes possible change in velocity\n right_vels = [self.right_vel + x * delta for x in [-1, 0, 1]]\n left_vels = [self.left_vel + x * delta for x in [-1, 0, 1]]\n best_possible = -math.inf\n best_l, best_r = 0, 0\n\n # attempt all velocities possible, check for the best one\n for new_r, new_l in itertools.product(left_vels, right_vels):\n if not self._speed_limit(new_r) or not self._speed_limit(new_l):\n continue # speed limit exceeded\n\n # steer the robot into a possible trajectory in the window to see if it is beneficial\n new_pos, theta = self._predict_trajectory(self.pos, new_l, new_r, self.theta, self.predict_time)\n gain = self.gain_weight * self._position_gain(new_pos) # gain from steering times gain weight\n loss = self.loss_weight * self._position_loss(new_pos) # loss from steering times loss weight\n total_cost = gain - loss\n\n # candidate for best steering option, as it maximizes the total cost\n if total_cost > best_possible:\n best_r, best_l, best_possible = new_r, new_l, total_cost\n\n # next config taken to be the argmax for total cost function\n self.right_vel, self.left_vel = best_r, best_l\n self.pos, self.theta = self._predict_trajectory(self.pos, self.left_vel, self.right_vel,\n self.theta, self.step_time)", "def get_optimum_config(\n self, tested_configs, fold_operation=FoldOperations.MEAN\n ):\n\n list_of_config_vals = []\n list_of_non_failed_configs = [\n conf for conf in tested_configs if not conf.config_failed\n ]\n\n if len(list_of_non_failed_configs) == 0:\n raise Warning(\"No Configs found which did not fail.\")\n try:\n\n if len(list_of_non_failed_configs) == 1:\n best_config_outer_fold = list_of_non_failed_configs[0]\n else:\n for config in list_of_non_failed_configs:\n list_of_config_vals.append(\n MDBHelper.get_metric(\n config,\n fold_operation,\n self.best_config_metric,\n train=False,\n )\n )\n\n if self.maximize_metric:\n # max metric\n best_config_metric_nr = np.argmax(list_of_config_vals)\n else:\n # min metric\n best_config_metric_nr = np.argmin(list_of_config_vals)\n\n best_config_outer_fold = list_of_non_failed_configs[\n best_config_metric_nr\n ]\n\n # inform user\n logger.debug(\n \"Optimizer metric: \"\n + self.best_config_metric\n + \"\\n\"\n + \" --> Maximize metric: \"\n + str(self.maximize_metric)\n )\n\n logger.info(\n \"Number of tested configurations: \" + str(len(tested_configs))\n )\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\"BEST_CONFIG \")\n logger.photon_system_log(\n \"---------------------------------------------------------------------------------------------------------------\"\n )\n logger.photon_system_log(\n json.dumps(\n best_config_outer_fold.human_readable_config,\n indent=4,\n sort_keys=True,\n )\n )\n\n return best_config_outer_fold\n except BaseException as e:\n logger.error(str(e))", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to 
run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def get_best_config(results: pd.DataFrame):\n sorted_res = results.sort_values(\"average_res\", ascending=False)\n sorted_res = sorted_res.reset_index(drop=True)\n top_config = {}\n bottom_config = {}\n last_index = len(results)-1\n for label in ['config.learning_rate', 'config.dropout', 'config.epochs', 'config.batch_size']:\n new_label = label.split('config.')[-1]\n top_config[new_label] = sorted_res[label][0]\n bottom_config[new_label] = sorted_res[label][last_index]\n return (top_config, bottom_config)", "def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs", "def update_best(self):\r\n # The first best solution is a valid solution\r\n if self.eval_count == 1:\r\n self.best_solution = copy.deepcopy(self)\r\n\r\n # Update best solution\r\n if self.total_fitness() >= self.best_solution.total_fitness():\r\n self.best_solution = copy.deepcopy(self)", "def update_best(self, epoch, stats):\n self.best_epoch = epoch\n self.best_ppl = stats.ppl()\n self.best_nll = stats.nll()\n self.best_kl = stats.kl()\n self.bad_counter = 0", "def find_best_server(self):\n print_debug(\"Looking for closest and best server...\\n\")\n best = self.test_latency(closest([self.config['lat'], self.config['lon']], self.server_list, self.best_servers))\n for server in best:\n self.servers.append(server['url'])", "def _get_best_configs(\n self,\n configs: list[Configuration],\n bracket: int,\n stage: int,\n from_keys: list[InstanceSeedBudgetKey],\n ) -> list[Configuration]:\n try:\n n_configs = self._n_configs_in_stage[bracket][stage + 1]\n except IndexError:\n return []\n\n rh = self.runhistory\n configs = configs.copy()\n\n for config in configs:\n isb_keys = rh.get_instance_seed_budget_keys(config)\n if not all(isb_key in isb_keys for isb_key in from_keys):\n raise NotEvaluatedError\n\n selected_configs: list[Configuration] = []\n while len(selected_configs) < n_configs:\n # We calculate the pareto front for the given configs\n # We use the same isb keys for all the configs\n all_keys = [from_keys for _ in configs]\n incumbents = calculate_pareto_front(rh, configs, all_keys)\n\n # Idea: We recursively calculate the pareto front in every iteration\n for incumbent in incumbents:\n configs.remove(incumbent)\n selected_configs.append(incumbent)\n\n # If we have more selected configs, we remove the ones with the smallest crowding distance\n if len(selected_configs) > n_configs:\n all_keys = [from_keys for _ in selected_configs]\n selected_configs = sort_by_crowding_distance(rh, selected_configs, all_keys)[:n_configs]\n logger.debug(\"Found more configs 
than required. Removed configs with smallest crowding distance.\")\n\n return selected_configs", "def get_best_config_from_hyperparamsearch(hyperparam_search_dir, num_folds=5, num_trials=60, num_metrics=6, metric_indx=5, random_seed=42):\n # determine best config from hyperparam search\n fold_num = get_random_fold(num_folds, random_seed=random_seed)\n fold_dir = os.path.join(hyperparam_search_dir, f'fold_{fold_num}')\n\n scores = np.ones((num_trials, num_metrics))*-1\n exist_flag = False\n\n for config_num in range(num_trials):\n score_file = os.path.join(fold_dir, 'config_{}'.format(config_num), 'score_validation.pkl')\n if(os.path.isfile(score_file)):\n try:\n mscore = ReaderWriter.read_data(score_file)\n # print(mscore)\n scores[config_num, 0] = mscore.best_epoch_indx\n scores[config_num, 1] = mscore.s_precision\n scores[config_num, 2] = mscore.s_recall\n scores[config_num, 3] = mscore.s_f1\n scores[config_num, 4] = mscore.s_auc\n scores[config_num, 5] = mscore.s_aupr\n\n exist_flag = True\n except Exception as e:\n print(f'exception occured at config_{config_num}')\n continue\n else:\n print(\"WARNING: hyperparam search dir does not exist: {}\".format(score_file))\n\n if(exist_flag):\n argmax_indx = get_index_argmax(scores, metric_indx)\n mconfig, options = get_saved_config(os.path.join(fold_dir, 'config_{}'.format(argmax_indx), 'config'))\n return mconfig, options, argmax_indx, scores\n \n return None", "def guess_stations(flats_list, constraint, config):\n distance_threshold = config[\"max_distance_housing_station\"]\n opendata = {\n \"postal_codes\": data.load_data(PostalCode, constraint, config),\n \"stations\": data.load_data(PublicTransport, constraint, config),\n }\n\n for flat in flats_list:\n flat_station = flat.get(\"station\", None)\n\n if not flat_station:\n # Skip everything if empty station\n LOGGER.info(\"No stations field for flat %s, skipping stations lookup.\", flat[\"id\"])\n continue\n\n # Woob modules can return several stations in a comma-separated list.\n flat_stations = flat_station.split(\",\")\n # But some stations containing a comma exist, so let's add the initial\n # value to the list of stations to check if there was one.\n if len(flat_stations) > 1:\n flat_stations.append(flat_station)\n\n matched_stations = []\n for tentative_station in flat_stations:\n matched_stations += fuzzy_match(\n tentative_station,\n [x.name for x in opendata[\"stations\"]],\n limit=10,\n threshold=50,\n )\n\n # Keep only one occurrence of each station\n matched_stations = list(set(matched_stations))\n\n # Filter out the stations that are obviously too far and not well\n # guessed\n good_matched_stations = []\n postal_code = flat[\"flatisfy\"].get(\"postal_code\", None)\n if postal_code:\n # If there is a postal code, check that the matched station is\n # closed to it\n postal_code_gps = next((x.lat, x.lng) for x in opendata[\"postal_codes\"] if x.postal_code == postal_code)\n for station in matched_stations:\n # Note that multiple stations with the same name exist in a\n # city, hence the list of stations objects for a given matching\n # station name.\n stations_objects = [x for x in opendata[\"stations\"] if x.name == station[0]]\n for station_data in stations_objects:\n distance = tools.distance((station_data.lat, station_data.lng), postal_code_gps)\n if distance < distance_threshold:\n # If at least one of the coordinates for a given\n # station is close enough, that's ok and we can add\n # the station\n good_matched_stations.append(\n {\n \"key\": station[0],\n \"name\": 
station_data.name,\n \"confidence\": station[1],\n \"gps\": (station_data.lat, station_data.lng),\n }\n )\n break\n LOGGER.info(\n (\"Station %s is too far from flat %s (%dm > %dm), discarding this station.\"),\n station[0],\n flat[\"id\"],\n int(distance),\n int(distance_threshold),\n )\n else:\n LOGGER.info(\"No postal code for flat %s, skipping stations detection.\", flat[\"id\"])\n\n if not good_matched_stations:\n # No stations found, log it and cotninue with next housing\n LOGGER.info(\n \"No stations found for flat %s, matching %s.\",\n flat[\"id\"],\n flat[\"station\"],\n )\n continue\n\n LOGGER.info(\n \"Found stations for flat %s: %s (matching %s).\",\n flat[\"id\"],\n \", \".join(x[\"name\"] for x in good_matched_stations),\n flat[\"station\"],\n )\n\n # If some stations were already filled in and the result is different,\n # display some warning to the user\n if \"matched_stations\" in flat[\"flatisfy\"] and (\n # Do a set comparison, as ordering is not important\n set([station[\"name\"] for station in flat[\"flatisfy\"][\"matched_stations\"]])\n != set([station[\"name\"] for station in good_matched_stations])\n ):\n LOGGER.warning(\n \"Replacing previously fetched stations for flat %s. Found \"\n \"stations differ from the previously found ones.\",\n flat[\"id\"],\n )\n\n flat[\"flatisfy\"][\"matched_stations\"] = good_matched_stations\n\n return flats_list", "def search_station(st):\n\n res = []\n for key, val in _STATIONS.items():\n score = fuzz.token_set_ratio(st, key)\n res.append(\n {\n 'station': key,\n 'score': score,\n 'station_id': val\n }\n )\n if not res:\n return {}\n else:\n res = sorted(res, key=lambda k: k['score'], reverse=True)\n res = res[0]\n return res", "def best_last_option(self):\n \n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block 
\n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move", "def get_best_model(all_val_results, all_configurations, all_models):\r\n # Dictionary with performance measures as keys. Values are lists storing the average results for a particular performance\r\n # measure, for each configuration. So values are lists of average results that are parallel to all_configurations\r\n average_val_results = performance_measures_dict()\r\n for results in all_val_results:\r\n for measure in PERFORMANCE_MEASURES:\r\n average_val_results[measure].append(mean(results[measure]))\r\n\r\n print(\"\\naverage_val_results, for each configuration:\")\r\n print(average_val_results)\r\n\r\n # print(\"\\nAll configurations:\")\r\n # print(all_configurations)\r\n\r\n # Measures we look at for determining scores. 
Scores are the squared rooted, sum of these results\r\n selected_measures = [\"Binary accuracy\", \"AUC\", \"f1-score\"]\r\n scores = []\r\n for i in range(len(all_configurations)):\r\n cur_score = 0\r\n for performance_measure in selected_measures:\r\n # Square rooting gives preference to results that have less 'imbalances' between performance measures\r\n cur_score += math.sqrt(average_val_results[performance_measure][i])\r\n # Scores are normalized so they fall in the range [0,1] - doesn't affect which hyper-params chosen since\r\n # scores all multiplied by the same scalar\r\n scores.append((cur_score / len(selected_measures)))\r\n\r\n print(\"\\nScores:\\n\", scores, \"\\n\")\r\n\r\n best_hyper_params_index = scores.index(max(scores))\r\n\r\n # From the K_INNER models trained using the best hyper parameters, select the one with the best individual val\r\n # results.\r\n candidate_models = all_models[best_hyper_params_index]\r\n best_model = None\r\n best_model_index = 0\r\n best_score = -1\r\n\r\n for i in range(len(candidate_models)):\r\n # Compute individual score for the current model\r\n cur_score = 0\r\n for performance_measure in selected_measures:\r\n cur_score += math.sqrt(all_val_results[best_hyper_params_index][performance_measure][i])\r\n if cur_score > best_score:\r\n best_score = cur_score\r\n best_model = all_models[best_hyper_params_index][i]\r\n best_model_index = i\r\n\r\n # Return index of hyper-parameters corresponding to the highest score (or first highest if there are ties)\r\n return best_model, best_hyper_params_index, best_model_index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a species identified by taxid and containing an empty dictionary of orthologs.
def __init__(self, taxid, species_name=None, lineage=None):
    # empty dictionary that will hold this species' orthologous genes
    self.genes = dict()
    self.taxid = taxid
    self.species = species_name
    self.lineage = lineage
[ "def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')", "def get_species_by_taxid():\n\n with PooledCursor() as cursor:\n\n cursor.execute(\n '''\n SELECT sp_taxid, sp_id\n FROM odestatic.species;\n '''\n )\n\n return associate(cursor)", "def species(self):\n\t\tspecies_dict = {identifier:species for (identifier,species) in zip(self.df.id, self.df.species)}\n\t\treturn(species_dict)", "def writeTaxonomies( self ):\n\n self.logger.info( 'writeTaxonomies: START' )\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): START' )\n\n organisms = self.reader.getAllOrganisms()\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): DONE' )\n\n taxonomies = {} \n\n taxonomyFile = self.openInsertFile( 'taxonomiesInsert.psql' )\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(organisms)) + ' organisms and our insert file is taxonomiesInsert.psql' )\n\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxonomies[ tax['name'] ] = { 'name': tax['name'], 'tax_id': tax['tax_id'], 'type': tax['type'] } \n\n\n self.logger.info( 'writeTaxonomies: We got ' 
+ str(len(taxonomies)) + ' taxonomies.' )\n\n\n for taxonomy,taxData in taxonomies.iteritems():\n taxonomyInserted = self.writeFile( taxonomyFile, 'taxonomies', [ str(taxData['name']), str(taxData['tax_id']), str(taxData['type']) ] )\n self.taxonomiesInserted[ taxData['name'] ] = taxonomyInserted\n\n self.logger.info( 'writeTaxonomies: DONE' )", "def create_dicts(self):\n self.dict_species = {v: i for i, v in enumerate(self.all_species_list)}\n self.dict_qss_species = {\n v: i for i, v in enumerate(self.qssa_species_list)\n }\n self.dict_nonqss_species = {\n v: i for i, v in enumerate(self.nonqssa_species_list)\n }", "def get_genome_species(inputTaxon):\t\r\n \tstart_time = time.time()\r\n \tservice_url = \"http://phylo.cs.nmsu.edu:5004/phylotastic_ws/ts/ncbi/genome_species?taxon=\" + inputTaxon\r\n \t#service_documentation = \"https://github.com/phylotastic/phylo_services_docs/blob/master/ServiceDescription/PhyloServicesDescription.md#web-service-9\"\r\n\r\n \tfinal_result = {}\t\r\n \tg_response = find_genome_ids(inputTaxon)\r\n \t\r\n \tif g_response['status_code'] != 200:\t \t\r\n \t\tfinal_result = g_response\r\n \telse:\r\n \t\tstr_gids = form_cs_ids(g_response['genome_ids'])\r\n \t\ts_response = find_species_ids(str_gids)\r\n \t\tif s_response['status_code'] != 200:\r\n \t\t\tfinal_result = s_response\r\n \t\telse:\r\n \t\t\tstr_sids = form_cs_ids(s_response['species_ids'])\r\n \t\t\tfinal_result = get_species_names(str_sids)\r\n\t\r\n \tend_time = time.time()\r\n \texecution_time = end_time-start_time \r\n #service result creation time\r\n \tcreation_time = datetime.datetime.now().isoformat()\r\n \t\r\n \tfinal_result['creation_time'] = creation_time\r\n \tfinal_result['execution_time'] = \"{:4.2f}\".format(execution_time)\r\n \tif final_result['status_code'] == 200: \r\n \t\tfinal_result['total_names'] = len(final_result['species'])\r\n \telse:\r\n \t\tfinal_result['total_names'] = 0 \r\n \tfinal_result['source_urls'] = [\"https://www.ncbi.nlm.nih.gov/taxonomy\", \"https://www.ncbi.nlm.nih.gov/genome\"]\r\n \t#final_result['source_version'] = \"ott2.9draft12\"\r\n \tfinal_result['service_url'] = service_url\r\n \t#final_result['service_documentation'] = service_documentation\t\r\n \tfinal_result['taxon'] = inputTaxon\r\n\r\n \treturn json.dumps(final_result)", "def load_taxdict():\n tax = {}\n with open(\"../../data/taxonomy/tree_taxid.txt\", 'r') as file:\n for line in file:\n current_line = line.split() \n current_taxid = current_line[0]\n current_name = current_line[1]\n tax[current_taxid] = current_name \n\n return tax", "def add_taxonomy(tax_idx, pkl):\n for tax,v in tax_idx.items():\n for genome_id,genome_len in v.items():\n T = '|'.join(list(tax) + ['t__' + genome_id])\n pkl['taxonomy'][T] = ('', int(genome_len))\n return pkl", "def species_table(self):\n if self.hdf5_data is None:\n return None\n species_section = self.hdf5_data.get('/species', None)\n if species_section is None:\n return None\n return dict(\n (id, dict(name=name, radius=radius, D=D))\n for id, name, radius, D in species_section.value)", "def createTaxDict(taxFile):\n taxonomyDict = {}\n with open(taxFile, 'r') as f:\n for line in f:\n split = line.rstrip().split(\"\\t\")\n key = split[0]\n vals = split[1:]\n if \"Sendai virus\" in key:\n #print(key,flush=True)\n vals = [\"genus:Respirovirus\",\"family:Paramyxoviridae\",\"order:Mononegavirales\",\\\n \"class:Monjiviricetes\",\"phylum:Negarnaviricota\",\"resolution:genus\"]\n elif \"Bastrovirus\" in key:\n #print(key,flush=True)\n vals = 
[\"family:Astroviridae\",\"order:Stellavirales\",\"class:Stelpaviricetes\",\\\n \"phylum:Pisuviricota\",\"resolution:family\"]\n keyDict = {}\n for value in vals:\n splitVal = value.split(\":\")\n keyDict[splitVal[0]] = splitVal[1]\n taxonomyDict[key] = keyDict\n print(len(taxonomyDict))\n return taxonomyDict", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def make_taxon_table(result_together, samples):\n ##get a named list\n ##result = dict(zip(taxon,SB_100)) #continue from here\n pathogens = pd.Series()\n for sample in samples:\n pathogens = pathogens.append(result_together[sample]['species']['species'])\n\n # Get the unique genera \n pathogens = pathogens.unique()\n d = {'pathogens': pathogens}\n taxon_table = pd.DataFrame(d)\n\n # Remove the non detected pathogens\n taxon_table = taxon_table[taxon_table['pathogens'] != 'non_detected']\n # Create a dataframe with the sample names with values set at zero\n zeros_dataframe = pd.DataFrame(data=0, index=np.arange(len(taxon_table.index)),\\\n columns= samples)\n # Set the index of the zeros dataframe\n zeros_dataframe.index = taxon_table.index\n # Create a frame list\n frame = [taxon_table,zeros_dataframe]\n # Concatenate the dataframes along the columns\n taxon_table = pd.concat(frame, axis=1)\n # Set the index of the dataframe to the names of the pathogens\n taxon_table = taxon_table.set_index('pathogens')\n\n # Loop through every sample while getting the frequency for each pathogen\n for sample in samples:\n #print(sample)\n # Get the detect/pathogens for each sample\n detect = result_together[sample]['species']['species']\n # Get the index in a list form\n index = detect.index.tolist()\n # Get all the frequencies for the dtected pathogens\n frequency = result_together[sample]['species']['freq']\n # Loop\n for pathogen in taxon_table.index.tolist():\n for i in index:\n if (pathogen == detect[i]):\n taxon_table.loc[pathogen,sample] = frequency[i]\n \n return(taxon_table)", "def get_all_species(inputTaxon):\r\n \tstart_time = time.time()\r\n\r\n \tservice_url = base_url + \"all_species?taxon=\" + inputTaxon\r\n \t#service_documentation = \"https://github.com/phylotastic/phylo_services_docs/blob/master/ServiceDescription/PhyloServicesDescription.md#web-service-6\"\r\n \tlarge_result = False\r\n \thigh_rank = None\r\n\r\n \tott_id = match_taxon(inputTaxon)\r\n \tif ott_id == -1:\r\n 
\t\tfinal_result = {'taxon': inputTaxon,'species': [], 'message': 'No Taxon matched with %s' %(inputTaxon), 'status_code': 204}\r\n \t\tlen_splist = 0\t\r\n \telse: #taxon name matched\t\r\n \t\tspecies_list = []\r\n \t\t#print ott_id\r\n \t\tdata_json = get_children(ott_id)\r\n \t\t#print data_json\r\n \t\tif data_json['rank'] == 'species' or data_json['rank'] == 'subspecies':\r\n \t\t\tspecies_list.append(data_json['name'])\t\t\r\n \t\telif data_json['rank'] == 'genus':\r\n \t\t\tspecies_list = get_species_from_genus(data_json['children'])\r\n \t\telif data_json['rank'] in['superorder','order','suborder','infraorder','parvorder','class','superclass','subclass','infraclass','parvclass','phylum','kingdom','domain', 'no rank']:\r\n \t\t\tlarge_result = True\r\n \t\t\thigh_rank = data_json['rank']\r\n \t\telse:\r\n \t\t\tresult = get_species_from_highrank(data_json['children'], conn)\r\n \t\t\tif 'status_code' in species_list: #error occured in source web service\r\n \t\t\t\treturn result\r\n \t\t\tspecies_list = result['species']\r\n\r\n \t\tlen_splist = len(species_list)\r\n \t \t\r\n \tend_time = time.time()\r\n \texecution_time = end_time-start_time \r\n #service result creation time\r\n \tcreation_time = datetime.datetime.now().isoformat()\r\n\r\n \tif len_splist > 0:\r\n \t \tfinal_result = {'taxon': inputTaxon,'species': species_list, 'message': 'Success', 'status_code': 200}\r\n \telif len_splist == 0 and ott_id != -1 and not large_result:\t\r\n \t \tfinal_result = {'input_taxon': inputTaxon,'species': species_list, 'message': 'No species found', 'status_code': 200}\r\n \telif len_splist == 0 and ott_id != -1 and large_result:\t\r\n \t \tfinal_result = {'input_taxon': inputTaxon, 'species': [], 'message': \"Currently input taxon with '%s' rank is not supported\"%high_rank, 'status_code': 403}\r\n \t\r\n \tfinal_result['creation_time'] = creation_time\r\n \tfinal_result['execution_time'] = \"{:4.2f}\".format(execution_time)\r\n \tfinal_result['total_names'] = len_splist\r\n \tfinal_result['source_urls'] = [\"https://github.com/OpenTreeOfLife/opentree/wiki/Open-Tree-Taxonomy\"]\r\n\r\n \treturn json.dumps(final_result)", "def writeOrganismTaxonomies( self ):\n\n self.logger.info( 'writeOrganismTaxonomies: START' )\n\n organisms = self.reader.getAllOrganisms()\n\n taxonomies = {} \n\n self.logger.info( 'writeOrganismTaxonomies: insert file will be organismTaxonomiesInsert.psql' )\n\n taxonomyFile = self.openInsertFile( 'organismTaxonomiesInsert.psql' )\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxId = self.taxonomiesInserted[ tax['name'] ] \n organismId = self.importerOrganism.organismsInserted[ organism ] \n\n self.writeFile( taxonomyFile, 'organism_taxonomies', [ str(organismId), str(taxId) ] )\n\n\n self.logger.info( 'writeOrganismTaxonomies: DONE' )", "def get_full_tax(idx):\n logging.info('Compiling the taxonomy for all genomes...')\n tax_idx = collections.defaultdict(dict)\n for cluster_id,v in idx.items():\n for tax,vv in v.items():\n for genome_id,x in vv.items():\n tax_idx[tax][genome_id] = x['genome_len']\n n_genomes = 0\n for tax,v in tax_idx.items():\n n_genomes += len(v.keys())\n logging.info(' Total number of genomes: {}'.format(n_genomes))\n # return\n return tax_idx", "def make_homologues_mirnas(phylogenetic_tree, mirna_seqs):\n species = [leaf.taxon.label for leaf in phylogenetic_tree.leaf_iter()]\n mirhomologues = pd.DataFrame({sp: {mirid: mirna_seqs[mirid][:21]\n for mirid in mirna_seqs.keys()}\n for sp in 
species}).transpose()\n return mirhomologues", "def species_lookup_by_taxonid(self, taxon_id):\n return self.species_name_lookup(taxon_id)", "def store_taxonomy(temp_tabtaxonomy):\n dict_seqid={}\n with open(temp_tabtaxonomy,'r') as read_handle:\n \n next(read_handle) #skip forst header line\n for line in read_handle:\n line = line.rstrip()\n tax_array=re.split('\\s+',line)\n dict_seqid[tax_array[0]]=tax_array[1:]\n \n #--for ends\n #-with ends\n\n return dict_seqid\n #------------------------\n #\n #-----------", "def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an entry in the dict with key "human gene ID" and value "ortholog gene ID"
def add_gene(self, human_gene, ortholog):
    if human_gene not in self.genes:
        self.genes[human_gene] = list()
    self.genes[human_gene].append(ortholog)
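A minimal usage sketch for the method above; the OrthologTable wrapper class, its genes dict, and the gene IDs are all made up for illustration, since the row only shows the method itself:

# Hypothetical wrapper class showing how add_gene builds a one-to-many
# mapping of human gene IDs to ortholog gene IDs; the IDs below are made up.
class OrthologTable:
    def __init__(self):
        self.genes = {}

    def add_gene(self, human_gene, ortholog):
        if human_gene not in self.genes:
            self.genes[human_gene] = list()
        self.genes[human_gene].append(ortholog)

table = OrthologTable()
table.add_gene("HUMAN_GENE_1", "MOUSE_GENE_7")
table.add_gene("HUMAN_GENE_1", "RAT_GENE_3")
print(table.genes["HUMAN_GENE_1"])   # ['MOUSE_GENE_7', 'RAT_GENE_3']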
[ "def addGeneToGeneDict(self, gene, gene_ens):\n self.genes.update({gene_ens : gene})", "def AddGeneHomology(elem1, elem2, DicoHomologyLevel, homologyType):\n\tif elem1 not in DicoHomologyLevel: # if the gene from the specie 1 is not\n\t\t# in the dictionary, we add it and we create the list of homologs genes\n\t\tDicoHomologyLevel[homologyType][elem1] = []\n\t\tDicoHomologyLevel[homologyType][elem1].append(elem2) # add of the \"first\" homologue gene\n\telif elem1 in DicoHomologyLevel[homologyType] and elem2 not in DicoHomologyLevel[homologyType][elem1] :\n\t # the the gene from specie one is already in the dictionary but he \n\t # got a new homologue gene \n\t # (it is possible that the same pairs of gene homologue appear in the file, that's why this step is needed)\n\t\tDicoHomologyLevel[homologyType][elem1].append(elem2)\n\telse :\n\t\tprint \"Patate 6 : problem\"\n\treturn(DicoHomologyLevel)", "def _add_chebi_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding ChEBI identifiers...\\n\")\n all_chebi = [k for k in map_dict if k.lower().startswith('chebi')]\n\n ch = ChEBI()\n\n for chebi_id in tqdm.tqdm(all_chebi, total=len(all_chebi)):\n uid = chebi_id.split(':')[-1]\n\n try:\n # query ChEBI API\n result = ch.getCompleteEntity(uid)\n except Exception as x:\n print(\"%s: %s\" % (chebi_id, x.__class__.__name__))\n continue\n\n to_add = []\n\n if hasattr(result, 'SecondaryChEBIIds'):\n to_add += [str(s) for s in result.SecondaryChEBIIds]\n\n if hasattr(result, 'OntologyChildren'):\n to_add += [str(ent.chebiId) for ent in result.OntologyChildren\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n if hasattr(result, 'OntologyParents'):\n to_add += [str(ent.chebiId) for ent in result.OntologyParents\n if ent.type in ('is conjugate acid of',\n 'is conjugate base of',\n 'is tautomer of')]\n\n for ent_id in to_add:\n new_id = '{}:{}'.format('ChEBI', ent_id.split(':')[-1])\n map_dict[chebi_id].add(new_id)\n\n return map_dict", "def buildAltIdDict(self) :\n altIdDict = dict()\n for GOid in self.GO :\n real_id = GOid[\"id\"][0]\n alt_id = [real_id] + GOid.get(\"alt_id\", [])\n for each in alt_id :\n assert altIdDict.get(each, \"not_recorded\") == \"not_recorded\"\n altIdDict[each] = real_id\n self.altIdDict = altIdDict", "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def init_gene():\n gene_info = dict(\n id = '',\n name = '',\n source = '',\n strand = '',\n chr = '',\n transcripts = [],\n exons = [],\n is_alt_spliced = 0\n )\n return gene_info", "def addDic(dic, elt):\n pass", "def addIdMap (catalog, row):\n movies = catalog['idMap']\n movie = newMovie(row)\n pelicula= {'title': movie['title'], 'vote_average': float(movie['vote_average']), 'director':''}\n map.put(movies, movie['movies_id'], pelicula, compareByKey)", "def addToHistogram(s,d):\n\n if s in d:\n #if the character is in the 
dictionary, the amount of that character record increases \n d[s] += 1\n else:\n #if not in the dictionary, a new key & value will be made\n d[s] =1\n\n #lastly returns the dictionary itself \n return d", "def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict", "def add_species_metadata(self, genome_data):\n \n species = genome_data[\"species\"]\n accession = genome_data[\"assembly\"][\"accession\"]\n \n if not \"taxonomy_id\" in species:\n taxonomy = self.get_taxonomy_from_accession(accession)\n species[\"taxonomy_id\"] = taxonomy[\"taxon_id\"]\n \n if (not \"strain\" in species) and \"strain\" in taxonomy:\n species[\"strain\"] = taxonomy[\"strain\"]\n \n if not\"scientific_name\" in species:\n species[\"scientific_name\"] = taxonomy[\"scientific_name\"]", "def add_neuron_to_genome(self, neuron):\n self.genome.append(neuron.to_dict(self.sensors, self.actuators))\n self.neurons.append(neuron)\n self.possible_parents.append(neuron)\n self.possible_connections.insert(0, neuron)", "def add_to_dictionary(d, key, val):\n if key in d:\n d[key].append(val) # use honeypot = sensor as key\n else:\n elements = [val]\n d.update({key: elements}) # add key if not existing already", "def _add_uniprot_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding UniProt identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n all_uniprot = [k for k in map_dict if k.lower().startswith('uniprot')]\n\n for uniprot_id in tqdm.tqdm(all_uniprot, total=len(all_uniprot)):\n db, uid = uniprot_id.split(':')\n\n try:\n # query UniProt API\n r = r_session.get(\n 'http://www.uniprot.org/uniprot/' + uid + '.xml'\n )\n except Exception as x:\n print(\"%s: %s\" % (uniprot_id, x.__class__.__name__))\n continue\n\n if r.content:\n root = etree.fromstring(r.content)\n if root:\n for s in root[0]:\n if s.tag.endswith('accession'):\n new_id = '{}:{}'.format('UniProt', s.text.split(':')[-1])\n map_dict[uniprot_id].add(new_id)\n else:\n break\n\n return map_dict", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = [],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = 
[],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def add_protogene(self, protogene):\n if protogene.name:\n name = protogene.name\n else:\n name = str(self.num_protogenes())\n self.protogenes[name] = protogene", "def addGenre(catalog, row):\n genres=catalog['genres']\n id=row['id']\n vote=row['vote_average']\n genre= map.get(genres, row['genres'], compareByKey)\n if genre:\n lt.addLast(genre['movies'], id)\n genre['sum_average_rating'] += float(vote)\n else:\n genre = newGenre(row)\n map.put(genres, genre['name'], genre, compareByKey)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute default analysis with baySeq
def run_bayseq(self):
    try:
        res = robjects.r('library("parallel")')
        res = robjects.r('library("stats4")')
        res = robjects.r('library("BiocGenerics")')
        res = robjects.r('library("S4Vectors")')
        res = robjects.r('library("IRanges")')
        res = robjects.r('library("GenomeInfoDb")')
        res = robjects.r('library("abind")')
        # res = robjects.r('library("perm")')
        res = robjects.r('library("GenomicRanges")')
        res = robjects.r('library("baySeq")')
        res = robjects.r('if(require("parallel")) cl <- makeCluster(4) else cl <- NUL')
        ct = 'table <- read.csv("' + self._table_count + '", row.names = 1, header = TRUE, stringsAsFactors = FALSE)'
        res = robjects.r(ct)
        res = robjects.r('m <- as.matrix(table)')
        replicates = ""
        assert isinstance(self._replic, int)
        for ind in iter(self._groups_name):
            aux = "'" + ind + "', "
            replicates = replicates + aux * self._replic
        replicates = replicates[:(len(replicates) - 2)]
        replicates = 'replicates <- c(' + replicates + ')'
        res = robjects.r(replicates)
        groups = 'groups <- list(NDE = c(' + "1," * len(self._groups_name)
        groups = groups[:(len(groups) - 1)] + ')'
        groups = groups + ', DE = c(' + '1,' * self._replic
        groups = groups + '2,' * self._replic
        groups = groups[:(len(groups) - 1)] + "))"
        print(groups)
        res = robjects.r(groups)
        res = robjects.r('CD <- new("countData", data = m, replicates = replicates, groups = groups)')
        res = robjects.r('libsizes(CD) <- getLibsizes(CD)')
        res = robjects.r('CD <- getPriors.NB(CD, samplesize = 1000, estimation = "QL", cl = cl, equalDispersions = TRUE)')
        res = robjects.r('CD <- getLikelihoods(CD, prs=c(0.5, 0.5), pET="BIC", cl=cl)')
        # CD.posteriors.DE < - exp(CD @ posteriors)[, 2]
        res = robjects.r('write.table(topCounts(CD, group = "DE", number = 65000, normaliseData = TRUE), "' + self._output + '", sep="\t", quote = FALSE)')
        self._message.message_9("--- baySeq is completed!")
    except RRuntimeError as rre:
        self._message.message_9("Error in baySeq execution: " + str(rre))
        raise rre
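To make the replicate/group string construction in the function above concrete, here is a small standalone sketch; the condition names and the replicate count are made up, and it reproduces only the string-building part, not the rpy2/baySeq calls:

# Standalone sketch of the R vector strings the loop above produces.
# Two made-up condition names, 3 replicates per condition.
groups_name = ["control", "treated"]
replic = 3

replicates = ""
for ind in iter(groups_name):
    aux = "'" + ind + "', "
    replicates = replicates + aux * replic
replicates = replicates[:(len(replicates) - 2)]
replicates = 'replicates <- c(' + replicates + ')'

groups = 'groups <- list(NDE = c(' + "1," * len(groups_name)
groups = groups[:(len(groups) - 1)] + ')'
groups = groups + ', DE = c(' + '1,' * replic
groups = groups + '2,' * replic
groups = groups[:(len(groups) - 1)] + "))"

print(replicates)
# replicates <- c('control', 'control', 'control', 'treated', 'treated', 'treated')
print(groups)
# groups <- list(NDE = c(1,1), DE = c(1,1,1,2,2,2))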
[ "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def run_analysis(self):\n exe = where.__executable__\n cmd = \"{exe} {rundate:%Y %m %d} --{pipeline}\".format(exe=exe, **self.vars).split()\n if self.vars[\"id\"]:\n cmd.append(f\"--id={self.vars['id']}\")\n if self.vars.get(\"force_rerun\", False):\n cmd.append(\"-F\")\n if self.vars.get(\"fresh_run\", False):\n cmd.append(\"-N\")\n if self.vars.get(\"traceback\", False):\n cmd.append(\"-T\")\n if self.vars[\"pipeline\"] == \"vlbi\": # TODO better solution\n cmd.append(f\"--session_code={self.vars['session_code']}\")\n if self.vars.get(\"user\", None):\n cmd.append(f\"--user={self.vars['user']}\")\n\n description = f\"Running {self.vars['pipeline'].upper()} analysis: {' '.join(cmd)}\"\n self.master.master.run(subprocess.check_call, cmd, description=description)\n\n self.update_all()", "def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)", "def main():\n\tnaiveBayes(\"last80kindlereviews.txt\",\"first20kindlereviews.txt\")", "def test_pandaseq_assembly(self):\n\n # write temp files\n self.writeTmpFastq(self.test_fn1, self.test_fn2)\n\n ### Run with recomended default function params ##\n params = {}\n params['-f'] = self.test_fn1\n params['-r'] = self.test_fn2\n \n pandaseq_app = PandaSeq(params=params,\n WorkingDir=self.temp_dir_string)\n\n pandaseq_app.Parameters['-F'].on()\n\n res = pandaseq_app([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res['StdOut'].read(), expected_default_assembly)\n \n res.cleanUp()\n\n ### Run with altered params ###\n # run with out -F option (output is FASTA format)\n params2 = {}\n params2['-f'] = self.test_fn1\n params2['-r'] = self.test_fn2\n \n pandaseq_app2 = PandaSeq(params=params2,\n WorkingDir=self.temp_dir_string)\n \n res2 = pandaseq_app2([self.test_fn1, self.test_fn2])\n\n # assembly is sent to StdOut, check output\n self.assertEqual(res2['StdOut'].read(), expected_default_assembly_fasta)\n \n res2.cleanUp()\n shutil.rmtree(self.temp_dir_string)", "def run(bmark):\r\n raise Exception(\"Not implemented\")", "def run_analysis(self, query, key=None):\n logger.info(\"Running analysis on query...\")\n core_annotation = Annotation(query, key)\n clf_pipeline = AnalysisPipeline()\n entity_pipeline = AnalysisPipeline()\n clf = self.clf_accessor.get_classification_pipeline('multiclass', 'intent_classifier')\n\n \"\"\" Create the IntentClassificationAnnotator using the pipeline 'clf' \"\"\"\n clf_annotator = IntentClassificationAnnotator('clf', clf)\n clf_pipeline.add_annotator(clf_annotator)\n \"\"\" Run clf_pipeline to obtain intent classification \"\"\"\n core_annotation = clf_pipeline.analyze(core_annotation)\n \"\"\" Ensure classification results exists, otherwise raise AnalyzerError \"\"\"\n if core_annotation.annotations['results']['classification'] is []:\n raise AnalyzerError(\"No intent classification results.\")\n \"\"\" Create annotators based on entity types of intent classification \"\"\"\n entities = core_annotation.annotations['entity_types']\n\n \"\"\" Obtain gazetteers associated with the given key \"\"\"\n gazetteers = self.gaz_accessor.get_gazeteers(key)\n\n logger.debug(\"Core annotation intents: {0}\".format(core_annotation.annotations['results']['classification']))\n logger.debug(\"Core annotation entities: {0}\".format(core_annotation.annotations['entity_types']))\n logger.debug(\"Core 
annotation stopwords: {0}\".format(core_annotation.annotations['stopwords']))\n\n \"\"\" Iterate over entities and create an the appropriate Annotator based on the entity_type \"\"\"\n for entity in entities:\n \"\"\" Access the binary classifier for the appropriate entity types and create BinaryClassifierAnnotator\"\"\"\n if entity['entity_type'] == 'binary_classifier':\n logger.debug(\"Creating BinaryClassificationAnnotator for: {0}\".format(entity['entity_name']))\n clf = self.clf_accessor.get_classification_pipeline('binary_classifier', entity['entity_name'])\n binary_clf_annotator = BinaryClassificationAnnotator(entity['entity_name'], clf)\n entity_pipeline.add_annotator(binary_clf_annotator)\n\n \"\"\" Create a RegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'regex':\n logger.debug(\"Creating RegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = RegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a BinaryRegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'binary_regex':\n logger.debug(\"Creating BinaryRegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = BinaryRegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a NaiveNumberAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'number':\n logger.debug(\"Creating NaiveNumberAnnotator for: {0}\".format(entity['entity_name']))\n number_annotator = NaiveNumberAnnotator(entity['entity_name'], NumberExtractor())\n entity_pipeline.add_annotator(number_annotator)\n\n \"\"\" Create a FuzzyMatchAnnotator for each fuzzy_match entity type\"\"\"\n if entity['entity_type'] == 'fuzzy_match':\n logger.debug(\"Creating FuzzyFindAnnotator for: {0}\".format(entity['entity_name']))\n logger.debug(\"Entity Keywords: {}\".format(entity['keywords']))\n fuzzy_matcher_annotator = FuzzyMatcherAnnotator(entity['entity_name'], FuzzyMatcher(), entity['keywords'])\n entity_pipeline.add_annotator(fuzzy_matcher_annotator)\n\n \"\"\" Create a DatetimeAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'datetime':\n logger.debug(\"Creating DatetimeAnnotator for: {0}\".format(entity['entity_name']))\n duckling_instance = self.duckling_factory.getDucklingInstance()\n parser = DucklingDatetimeParser(duckling_instance)\n datetime_annotator = DatetimeAnnotator(entity['entity_name'], parser)\n entity_pipeline.add_annotator(datetime_annotator)\n\n \"\"\" Access the gazetteer for the appropriate entity types and create an GazetteerAnnotator \"\"\"\n if entity['entity_type'] == 'gazetteer' or entity['entity_type'] == 'simple_gazetteer':\n if gazetteers is not None:\n logger.debug(\"Creating GazetteerAnnotator for: {0}\".format(entity['entity_name']))\n \"\"\" Check to make sure gazetteers contains the gazetteer type to avoid key error \"\"\"\n if entity['entity_name'] in gazetteers.keys():\n gaz_annotator = GazetteerAnnotator(entity['entity_name'], gazetteers[entity['entity_name']])\n entity_pipeline.add_annotator(gaz_annotator)\n\n core_annotation = entity_pipeline.analyze(core_annotation)\n return core_annotation.annotations['results']", "def BayesA(df_use, n, save_name, SCORES):\n \n cwd = os.getcwd()\n temp_name = 'temp_' + save_name\n df_use.to_csv(temp_name)\n\n # Write temp Rscript \n tmpR=open(\"%s_BayA.R\" % temp_name,\"w\")\n 
tmpR.write('library(BGLR)\\n')\n tmpR.write(\"setwd('%s')\\n\" % cwd)\n tmpR.write(\"df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\\n\" % temp_name)\n tmpR.write(\"Y <- df[, 'Class']\\n\")\n tmpR.write(\"X <- df[, !colnames(df) %in% c('Class')]\\n\")\n tmpR.write(\"X=scale(X)\\n\")\n tmpR.write(\"ETA=list(list(X=X,model='BayesA'))\\n\")\n tmpR.write(\"fm=BGLR(y=Y,ETA=ETA,verbose=FALSE, nIter=12000,burnIn=2000)\\n\")\n tmpR.write(\"coef <- abs(fm$ETA[[1]]$b)\\n\")\n tmpR.write(\"coef_df <- as.data.frame(coef)\\n\")\n tmpR.write(\"write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\\n\" % (temp_name + '_BayAScores.txt'))\n tmpR.close()\n \n print('Running bayesA model from BGLR inplemented in R.')\n\n process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_BayA.R' % temp_name, shell=True)\n process.wait()\n\n coefs = pd.read_csv(temp_name + '_BayAScores.txt', sep = ',')\n coefs['coef_abs'] = coefs.coef.abs()\n coefs_top = coefs.sort_values(by='coef_abs', ascending=False)\n\n if SCORES.lower() == 'f':\n \tos.system(\"rm %s_BayAScores.txt\" % temp_name)\n else:\n os.system(\"mv %s_BayAScores.txt %s_BayAScores.txt\" % (temp_name, save_name))\n os.system(\"rm %s %s_BayA.R varE.dat mu.dat ETA_1_ScaleBayesA.dat ETA_1_lambda.dat\" % (temp_name, temp_name))\n \n for n_size in n:\n keep = coefs_top.index.values[0:int(n_size)]\n print(\"Top %s features selected using BayesA from BGLR: %s\" % (str(n_size), str(keep)))\n save_name2 = save_name + \"_\" + str(n_size)\n SaveTopFeats(keep, save_name2)", "def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!", "def bwa(self) -> None:\n self.analysis.logger.info(\"Running alignment with BWA\")\n self.chdir()\n config = self.analysis.config\n executor = Executor(self.analysis)\n executor(\n f\"{config.bwa} mem -t 6 -L 5,10 -v 1 {{genome_ref}} \"\n f\"{{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n self.analysis.logger.info(\"Alignment finished. Aligner used: BWA\")", "def skesa_assemble(self):\n with progressbar(self.metadata) as bar:\n for sample in bar:\n # Initialise the assembly command\n sample.commands.assemble = str()\n try:\n if sample.general.trimmedcorrectedfastqfiles:\n # If the sample is a pure isolate, assemble it. 
Otherwise, run the pre-metagenome pipeline\n try:\n status = sample.run.Description\n except AttributeError:\n status = 'unknown'\n if status == 'metagenome':\n self.merge(sample)\n else:\n # Set the output directory\n sample.general.assembly_output = os.path.join(sample.general.outputdirectory,\n 'assembly_output')\n make_path(sample.general.assembly_output)\n sample.general.assemblyfile = os.path.join(sample.general.assembly_output,\n '{name}_unfiltered.fasta'\n .format(name=sample.name))\n sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,\n '{name}.fasta'\n .format(name=sample.name))\n fastqfiles = sample.general.trimmedcorrectedfastqfiles\n\n # Set the the forward fastq files\n sample.general.assemblyfastq = fastqfiles\n forward = fastqfiles[0]\n gz = True if '.gz' in forward else False\n # If there are two fastq files\n if len(fastqfiles) == 2:\n # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--use_paired_ends --vector_percent 1 ' \\\n '--contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Same as above, but use single read settings for the assembler\n else:\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--vector_percent 1 --contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Specify that the files are gzipped\n if gz:\n sample.commands.assemble += ' --gz'\n # If there are no fastq files, populate the metadata appropriately\n else:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.bestassemblyfile = 'NA'\n except AttributeError:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.trimmedcorrectedfastqfiles = 'NA'\n sample.general.bestassemblyfile = 'NA'\n if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):\n # Run the assembly\n out, err = run_subprocess(sample.commands.assemble)\n write_to_logfile(sample.commands.assemble,\n sample.commands.assemble,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)\n write_to_logfile(out,\n err,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)", "def run_bwa_backtrack_single(fastq_file, genome_fasta_file, genome_index_file, mark_duplicates, logger):\n\n fastq_file = dxpy.DXFile(fastq_file)\n genome_fasta_file = dxpy.DXFile(genome_fasta_file)\n genome_index_file = dxpy.DXFile(genome_index_file)\n\n dxpy.download_dxfile(fastq_file.get_id(), \"sample.fastq.gz\")\n dxpy.download_dxfile(genome_fasta_file.get_id(), \"genome.fa.gz\")\n dxpy.download_dxfile(genome_index_file.get_id(), \"genome.tar.gz\")\n\n subprocess.check_call(\"tar xzvf genome.tar.gz\", shell=True)\n num_cores = str(cpu_count())\n\n run_cmd(\"bwa-0.6.2 aln -t \" + num_cores + \" genome.fa.gz sample.fastq.gz > sample.sai\", logger)\n run_cmd(\"bwa-0.6.2 samse genome.fa.gz sample.sai sample.fastq.gz > sample0.sam\", logger)\n run_cmd(\"java -jar /CleanSam.jar INPUT=sample0.sam OUTPUT=sample1.bam\", logger)\n run_cmd(\"samtools sort -@ \" + num_cores + \" sample1.bam sample\", logger)\n\n if mark_duplicates:\n run_cmd(\"java -jar /MarkDuplicates.jar \" +\n \"INPUT=sample.bam OUTPUT=sample_deduped.bam METRICS_FILE=/dev/null\", logger)\n subprocess.check_call(\"mv sample_deduped.bam 
sample.bam\", shell=True)", "def run_experiment(self):", "def run(self):\n\n if self.analysis == None or self.histointerval == None:\n self._ws.put(['error', \"Missing analysis or histointerval property - nothing done\"])\n else:\n p = self.read_cmnd_file()\n p['_analysis'] = self.analysis\n db = PythiaDB()\n # If this analysis has already been run successfully with the supplied\n # parameters, just retrieve and display the stored results.\n if db.exists(p):\n yodafile = db.get_yoda(p)\n ref_histos = None\n try:\n ref_file = os.path.join(config.get('paths', 'refdata'), \"{}.yoda\".format(self.analysis))\n ref_histos = convert_histos(yoda.readYODA(ref_file))\n except IOError:\n print \"No refdata for {}\".format(self.analysis)\n try:\n histos = convert_histos(yoda.readYODA(os.path.join(config.get('paths', 'rivet_output'), yodafile)))\n self._ws.put(['histos', histos])\n if ref_histos:\n self._ws.put(['histos', ref_histos])\n self._ws.put(['yoda', yodafile.partition('.yoda')[0]])\n self._ws.put(['signal', SIM_END])\n except IOError:\n self._ws.put(['error', \"Unable to retrieve saved histograms\"])\n self._ws.put(['signal', SIM_ERR])\n else:\n with FIFOFile(self.fifo) as fifofile:\n # Generate events with PYTHIA\n self._generate(fifofile)\n\n # Small sleep time to allow PYTHIA subprocess to start\n time.sleep(0.5)\n\n # Analyse events with Rivet\n self._analyse(fifofile)\n\n self.rivet.join()\n\n # If Rivet does not terminate correctly (because of an exception\n # in the C code that cannot be caught in Python), kill PYTHIA,\n # otherwise a lot of \"setting badbit\" errors would appear in the\n # console, and PYTHIA wouldn't stop.\n if self.rivet.exitcode != 0:\n self._ws.put(['rivet', \"Rivet process terminated... Killing PYTHIA\\n\"])\n self.pythia.terminate()\n self._ws.put(['error', \"Rivet error - see console for details\"])\n self._ws.put(['signal', SIM_ERR])\n self.error = True\n self.pythia.join()\n\n # Back to the initial directory (of `main.py`)\n os.chdir(sys.path[0])\n\n if not self.error:\n if self.stopped:\n self._ws.put(['signal', SIM_STP])\n else:\n # Store successful analyses\n yodafile = self._y.get(True)\n p['_yoda'] = yodafile\n db.add(p)\n self._ws.put(['signal', SIM_END])\n\n with open(os.path.join(sys.path[0], 'rivet_out.log'), 'r') as f:\n for line in f.readlines():\n self._ws.put(['rivet_out', line])\n\n with open(os.path.join(sys.path[0], 'rivet_err.log'), 'r') as f:\n for line in f.readlines():\n self._ws.put(['rivet_err', line])", "def abort_analysis(self) -> None:\n\t\tcore.BNAbortAnalysis(self.handle)", "def abort_analysis(self):\n\t\tcore.BNAbortAnalysis(self.handle)", "def _analyze(self):\n self._pre_analysis()\n\n # normal analysis execution\n if self._graph_visitor is None:\n # There is no base graph that we can rely on. The analysis itself should generate successors for the\n # current job.\n # An example is the CFG recovery.\n\n self._analysis_core_baremetal()\n\n else:\n # We have a base graph to follow. 
Just handle the current job.\n\n self._analysis_core_graph()\n\n self._post_analysis()", "def main():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-G', '--gold_captions', action='store_const', const='gold', default='gold', dest='captions',\n help='Use GOLD captions from COCO.')\n group.add_argument('-P', '--pred_captions', action='store_const', const='pred', dest='captions',\n help='use PRED captions extracted with NeuralBabyTalk.')\n parser.add_argument('-m', '--max_labels', type=int, default=20,\n help='The maximum number of labeled data-points to use for each sense.')\n parser.add_argument('-i', '--iterations', type=int, help='The number of Replicator Dynamics iterations to be run.')\n parser.add_argument('-a', '--all_senses', action='store_true',\n help='Ignore input verb, run inference on the senses of all verbs for each data point.')\n\n args = parser.parse_args()\n use_gold = args.captions == 'gold'\n use_all_senses = args.all_senses\n max_labels = args.max_labels\n\n if args.iterations:\n max_it = args.iterations\n else:\n max_it = 100\n\n features, senses, sense_labels = load_data(use_gold)\n run_gtg_experiment(senses, sense_labels, features, max_labels, max_it, use_all_senses)\n aggregate_stats('experiments.csv')", "def run(self):\n dprint_func_ir(self.func_ir, \"starting array analysis\")\n if config.DEBUG_ARRAY_OPT==1:\n print(\"variable types: \",sorted(self.typemap.items()))\n print(\"call types: \", self.calltypes)\n topo_order = find_topo_order(self.func_ir.blocks)\n for label in topo_order:\n self._analyze_block(self.func_ir.blocks[label])\n\n self._merge_equivalent_classes()\n self._cleanup_analysis_data()\n\n if config.DEBUG_ARRAY_OPT==1:\n self.dump()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the requested income range view in full detail.
def GetIncomeRangeView(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
[ "def getRange(self):\n \n pass", "def GetAgeRangeView(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_range(self, start, end):", "def get_display_trange(self) -> float:\n return float(self.query(':timebase:range?'))", "def getRange(self):\n return self.__range", "def getSoftRange(self):\n \n pass", "def get_endpoint_range(self, range):\n service = '/insight/endpoint/ip-range/' + range\n endpoint = self.api_get(service)\n return endpoint", "def _get_sight_range(self):\n raise NotImplementedError", "def test_get_range(self):\n pass", "def get_featureRange(self):\n\n return self.featureRange", "def income_report_gen(start, end):\n payments = get_income(start, end)\n row_title = [\"Name\", \"Boat\", \"Rent Day\", \"Pay Day\", \"Amount\"]\n data = []\n for payment in payments:\n temp = []\n for title, value in payment.items():\n temp.append(str(value))\n data.append(temp)\n row_format = \"{:>15}\" * (len(row_title)+1)\n print(row_format.format(\"\", *row_title))\n total_income = 0\n for i in range(len(data)):\n print(row_format.format(i+1, *data[i]))\n total_income += int(data[i][4])\n print(row_format.format(\"SUM\", *([\"--------------\"] * 4), str(total_income)))", "def range_(self):\n return self.bset.range_", "def getRange(self, index: int) -> ghidra.program.model.address.AddressRange:\n ...", "def show_total(request):\n user_id = request.user\n end_date = datetime.datetime.utcnow()\n start_date = end_date.replace(day=1,\n hour=datetime.time(0, 0, 0).hour,\n minute=datetime.time(0, 0, 0).minute,\n second=datetime.time(0, 0, 0).second)\n total = 0\n incomes_to_date = IncomeHistory.objects.filter(date__range=(start_date, end_date),\n income_id__owner_id=user_id)\n if not incomes_to_date:\n return HttpResponse(0, status=200)\n\n for income in incomes_to_date:\n if income.is_active:\n total = total + income.value\n return HttpResponse(total, status=200)", "def get_range(self):\r\n\t\tif self.battery_size == 70:\r\n\t\t\trange = 240\r\n\t\telif self.battery_size == 85:\r\n\t\t\trange = 270\r\n\t\t\t\r\n\t\tmessage = \"This car can go approx. \" + str(range)\r\n\t\tmessage += \" miles on a full charge.\"\r\n\t\tprint(message)", "def __call__(self, *args):\n return self.range_", "def range_table(self):\n raise NotImplementedError('Abstract method.')", "def page_custom_range(request):\n data = {\n \"title\": \"Тональність за обраний період часу\"\n }\n template_name = \"page-custom_range.html\"\n return render(\n request,\n template_name,\n data\n )", "def get_model_range(self):\n (x_min, x_max, y_min, y_max, z_min, z_max) = self.get_model_extent()\n from numpy import abs\n self.range_x = abs(x_max - x_min)\n self.range_y = abs(y_max - y_min)\n self.range_z = abs(z_max - z_min)\n return (self.range_x, self.range_y, self.range_z)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a single byte through the packet parsing state machine. Returns NOT_DONE if the packet is incomplete. Returns SUCCESS if the packet was received successfully. Returns CHECKSUM if a checksum error is detected.
def process_byte(self, byte):
    if self.index == -1:
        if byte == 0xff:
            self.index = 0
            self.checksum = 0
    elif self.index == 0:
        if byte != 0xff:
            self.checksum += byte
            self.pkt_bytes[0] = byte
            self.index += 1
    else:
        self.checksum += byte
        self.pkt_bytes[self.index] = byte
        self.index += 1
        if self.index == 7:   # packet complete
            self.index = -1
            if self.checksum & 0xff != 0xff:
                return CommanderRx.CHECKSUM
            self.lookv = self.pkt_bytes[0] - 128   # 0 - 255 ==> -128 - 127
            self.lookh = self.pkt_bytes[1] - 128
            self.walkv = self.pkt_bytes[2] - 128
            self.walkh = self.pkt_bytes[3] - 128
            self.button = self.pkt_bytes[4]
            self.ext = self.pkt_bytes[5]
            return CommanderRx.SUCCESS
    return CommanderRx.NOT_DONE
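A small self-contained sketch of the checksum arithmetic the parser above relies on; the payload bytes are made up, and the driver-loop behaviour is described in the trailing comment rather than executed, since the row shows only the process_byte method and not the rest of the CommanderRx class:

# Checksum arithmetic used by the parser above, on a made-up payload.
# A transmitter picks the final byte so that the sum of the seven bytes
# following the 0xff start byte is 0xff modulo 256, which is exactly the
# condition process_byte tests once index reaches 7.
payload = [128, 128, 200, 128, 1, 0]     # lookv, lookh, walkv, walkh, button, ext (raw)
chk = 0xff - (sum(payload) & 0xff)       # 182 for this payload
frame = [0xff] + payload + [chk]         # what would arrive on the serial port

assert (sum(payload) + chk) & 0xff == 0xff
# Feeding `frame` byte by byte into process_byte would return NOT_DONE for the
# first seven bytes and SUCCESS on the eighth, with walkv == 200 - 128 == 72.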
[ "def process(self, next_byte) -> bool:\r\n\r\n # Received Message Structures:\r\n # '#B25500' + payload bytes + '\\r\\n'\r\n # '#U00' + payload bytes + '\\r\\n'\r\n # Or for some NM3 firmware versions:\r\n # '#B25500' + payload bytes + 'T' + timestamp + '\\r\\n'\r\n # '#U00' + payload bytes + 'T' + timestamp + '\\r\\n'\r\n # Where timestamp is a 10 digit (fixed width) number representing a 32-bit counter value \r\n # on a 24 MHz clock which is latched when the synch waveform arrives\r\n # Or for future release firmware versions (v1.1.0+)\r\n # '#B25500' + payload bytes + 'Q' + lqi + 'D' + doppler + '\\r\\n'\r\n # '#U00' + payload bytes + 'Q' + lqi + 'D' + doppler + '\\r\\n'\r\n # And if we end up with mix and match addendums/addenda\r\n # '#B25500' + payload bytes + 'Q' + lqi + 'D' + doppler + 'T' + timestamp + '\\r\\n'\r\n # '#U00' + payload bytes + 'T' + timestamp + 'Q' + lqi + 'D' + doppler + '\\r\\n'\r\n\r\n return_flag = False\r\n\r\n # decoding_str = 'utf-8' # If payload bytes are 0xaa etc they're invalid in utf-8\r\n decoding_str = 'iso-8859-1' # Shouldn't balk in the same way that utf-8 does\r\n\r\n # print('next_byte: ' + bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._parser_state != self.PARSERSTATE_IDLE:\r\n # Store bytes\r\n self._current_serial_string = self._current_serial_string + bytes([next_byte]).decode(decoding_str)\r\n\r\n\r\n if self._parser_state == self.PARSERSTATE_IDLE:\r\n\r\n if bytes([next_byte]).decode(decoding_str) == '#':\r\n self._current_serial_string = '#'\r\n # Next state\r\n self._parser_state = self.PARSERSTATE_TYPE\r\n\r\n elif self._parser_state == self.PARSERSTATE_TYPE:\r\n\r\n if bytes([next_byte]).decode(decoding_str) == 'B':\r\n self._current_message_packet = MessagePacket()\r\n self._current_message_packet.source_address = 0\r\n self._current_message_packet.destination_address = None\r\n self._current_message_packet.packet_type = MessagePacket.PACKETTYPE_BROADCAST\r\n self._current_message_packet.packet_payload = []\r\n self._current_message_packet.packet_timestamp_count = 0\r\n\r\n self._current_byte_counter = 3\r\n self._current_integer = 0\r\n self._parser_state = self.PARSERSTATE_ADDRESS\r\n\r\n elif bytes([next_byte]).decode(decoding_str) == 'U':\r\n self._current_message_packet = MessagePacket()\r\n self._current_message_packet.source_address = None\r\n self._current_message_packet.destination_address = None\r\n self._current_message_packet.packet_type = MessagePacket.PACKETTYPE_UNICAST\r\n self._current_message_packet.packet_payload = []\r\n self._current_message_packet.packet_timestamp_count = 0\r\n\r\n self._current_byte_counter = 2\r\n self._current_integer = 0\r\n self._parser_state = self.PARSERSTATE_LENGTH\r\n\r\n else:\r\n # Unknown packet type\r\n self._parser_state = self.PARSERSTATE_IDLE\r\n\r\n elif self._parser_state == self.PARSERSTATE_ADDRESS:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n # Append the next ascii string integer digit\r\n self._current_integer = (self._current_integer * 10) + int(bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._current_byte_counter == 0:\r\n self._current_message_packet.source_address = self._current_integer\r\n self._current_integer = 0\r\n self._current_byte_counter = 2\r\n self._parser_state = self.PARSERSTATE_LENGTH\r\n\r\n elif self._parser_state == self.PARSERSTATE_LENGTH:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n # Append the next ascii string integer digit\r\n self._current_integer = (self._current_integer * 
10) + int(bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._current_byte_counter == 0:\r\n self._current_byte_counter = self._current_integer\r\n self._parser_state = self.PARSERSTATE_PAYLOAD\r\n\r\n elif self._parser_state == self.PARSERSTATE_PAYLOAD:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n self._current_message_packet.packet_payload.append(next_byte)\r\n\r\n if self._current_byte_counter == 0:\r\n # Completed this packet\r\n self._parser_state = self.PARSERSTATE_ADDENDUMFLAG\r\n\r\n elif self._parser_state == self.PARSERSTATE_ADDENDUMFLAG:\r\n\r\n # Timestamp Addendum\r\n if bytes([next_byte]).decode(decoding_str) == 'T':\r\n self._current_byte_counter = 14\r\n self._current_integer = 0\r\n self._current_integer_sign = 1\r\n self._parser_state = self.PARSERSTATE_TIMESTAMP\r\n\r\n # LQI Addendum\r\n elif bytes([next_byte]).decode(decoding_str) == 'Q':\r\n self._current_byte_counter = 2\r\n self._current_integer = 0\r\n self._current_integer_sign = 1\r\n self._parser_state = self.PARSERSTATE_LQI\r\n\r\n # Doppler Addendum\r\n elif bytes([next_byte]).decode(decoding_str) == 'D':\r\n self._current_byte_counter = 4\r\n self._current_integer = 0\r\n self._current_integer_sign = 1\r\n self._parser_state = self.PARSERSTATE_DOPPLER\r\n\r\n # Unrecognised or no addendum\r\n else:\r\n # No recognised addendum on this message. Completed Packet\r\n self._current_message_packet.serial_string = self._current_serial_string\r\n self._current_serial_string = None\r\n self._packet_queue.append(self._current_message_packet)\r\n self._current_message_packet = None\r\n return_flag = True\r\n self._parser_state = self.PARSERSTATE_IDLE\r\n\r\n elif self._parser_state == self.PARSERSTATE_TIMESTAMP:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n # Append the next ascii string integer digit\r\n self._current_integer = (self._current_integer * 10) + int(bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._current_byte_counter == 0:\r\n # Completed this addendum\r\n self._current_message_packet.packet_timestamp_count = self._current_integer\r\n # Back to checking for further addendums/addenda\r\n self._parser_state = self.PARSERSTATE_ADDENDUMFLAG\r\n\r\n elif self._parser_state == self.PARSERSTATE_LQI:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n # Append the next ascii string integer digit\r\n self._current_integer = (self._current_integer * 10) + int(bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._current_byte_counter == 0:\r\n # Completed this addendum\r\n self._current_message_packet.packet_lqi = self._current_integer\r\n # Back to checking for further addendums/addenda\r\n self._parser_state = self.PARSERSTATE_ADDENDUMFLAG\r\n\r\n elif self._parser_state == self.PARSERSTATE_DOPPLER:\r\n self._current_byte_counter = self._current_byte_counter - 1\r\n\r\n # Check for + or -\r\n if bytes([next_byte]).decode(decoding_str) == '+':\r\n self._current_integer_sign = 1\r\n elif bytes([next_byte]).decode(decoding_str) == '-':\r\n self._current_integer_sign = -1\r\n else:\r\n # Append the next ascii string integer digit\r\n self._current_integer = (self._current_integer * 10) + (self._current_integer_sign * int(bytes([next_byte]).decode(decoding_str)))\r\n\r\n if self._current_byte_counter == 0:\r\n # Completed this addendum\r\n self._current_message_packet.packet_doppler = self._current_integer\r\n # Back to checking for further addendums/addenda\r\n self._parser_state = self.PARSERSTATE_ADDENDUMFLAG\r\n\r\n 
else:\r\n # Unknown state\r\n self._parser_state = self.PARSERSTATE_IDLE\r\n\r\n return return_flag", "def is_ok_packet(self):\n return self.data[0] == 0x00", "def char_handler(self, char):\n if self.state == self.AWAITING_CHECKSUM:\n # If we are awaiting on a checksum, this is that byte. So,\n # don't add it to the buffer but tell checksum awaiter that we\n # are ready to process it.\n self.checksum = char\n self.checksum_ready.set()\n logger.debug(\"RX CHECKSUM: {0}\".format(char))\n return None\n else:\n # Otherwise, return it so it gets added to standard output buffer.\n return char", "def process(self, next_byte) -> bool:\r\n\r\n return_flag = False\r\n\r\n # decoding_str = 'utf-8' # If payload bytes are 0xaa etc they're invalid in utf-8\r\n decoding_str = 'iso-8859-1' # Shouldn't balk in the same way that utf-8 does\r\n\r\n # print('next_byte: ' + bytes([next_byte]).decode(decoding_str))\r\n\r\n if self._parser_state == self.PARSERSTATE_IDLE:\r\n\r\n if (bytes([next_byte]).decode(decoding_str) == '#') or (bytes([next_byte]).decode(decoding_str) == '$'):\r\n # Next state\r\n self._current_bytes = [next_byte] # Include the '#' or '$' character.\r\n self._current_byte_counter = 1\r\n self._parser_state = self.PARSERSTATE_STRING\r\n\r\n elif self._parser_state == self.PARSERSTATE_STRING:\r\n self._current_bytes.append(next_byte)\r\n self._current_byte_counter = self._current_byte_counter + 1\r\n\r\n # Check delimiter\r\n if next_byte == self._delimiter_byte:\r\n self._has_response_flag = True\r\n return_flag = True\r\n self._parser_state = self.PARSERSTATE_IDLE\r\n\r\n else:\r\n # Unknown\r\n self._parser_state = self.PARSERSTATE_IDLE\r\n\r\n return return_flag", "def process_buffer(self, buffer):\n\n \"\"\"\n This is an integer that indicates the state the message reading is in.\n 0: Starting a package read\n 1: Reading payload length - long version\n 2: Reading payload length - both versions\n 3: Reading payload\n 4: Reading CRC checksum - first byte\n 5: Reading CRC checksum - second byte\n 6: Checking checksum\n \"\"\"\n\n for x in range(len(buffer)):\n\n curr_byte = buffer[x]\n if self.phase == 0:\n self.payload = bytearray()\n self.length = 0\n self.crc = 0\n if curr_byte == 2:\n self.phase += 2\n elif curr_byte == 3:\n self.phase += 1\n elif self.phase == 1:\n self.length = curr_byte << 8\n self.phase += 1\n elif self.phase == 2:\n self.length |= curr_byte\n self.phase += 1\n elif self.phase == 3:\n self.payload.append(curr_byte)\n if len(self.payload) == self.length:\n self.phase += 1\n elif self.phase == 4:\n self.crc = curr_byte << 8\n self.phase += 1\n elif self.phase == 5:\n self.crc |= curr_byte\n self.phase += 1\n elif self.phase == 6:\n self.phase = 0\n if curr_byte == 3 and calc_crc(self.payload) == self.crc:\n self.goodpacket = Packet(self.payload)\n return True\n else:\n return False\n else:\n self.phase = 0", "def checksum_valid(packet):\n\n calculated_checksum = (0x01 + (~(sum(packet[:-1]) - 0x3A))) & 0xFF\n logging.info(\"Checksum was {}, expecting {}\".format(hex(calculated_checksum), hex(packet[-1])))\n logging.info(packet)\n return calculated_checksum == packet[-1]", "def screw_up_checksum(valid_packet):\n if valid_packet[-2] < 255:\n valid_packet[-2] += 1\n else:\n valid_packet[-2] -= 1\n return valid_packet", "async def wait_for_checksum(self):\n await asyncio.wait_for(self.checksum_ready.wait(), timeout=0.5)\n result = self.checksum\n self.prepare_checksum()\n self.set_state(self.IDLE)\n return result", "def validateChecksum(self) -> None:\r\n\r\n # Extract 
ICMP Echo Reply and zero out checksum.\r\n reply: bytes = self.__recvPacket[20:22] + struct.pack(\"!H\", 0) + self.__recvPacket[24:]\r\n\r\n # Re-Calculate checksum.\r\n self.__recalculateChecksum(reply)\r\n\r\n # Compare with original checksum.\r\n if self.getIcmpHeaderChecksum() == self.__computedChecksum:\r\n self.__isValidChecksum = True", "def recv(self, packet: DhcpPacket) -> Tuple[bool, Optional[DhcpPacket]]:\n\n try:\n if self.__transaction is None:\n self.__transaction = self.__gen(packet)\n return False, next(self.__transaction)\n else:\n return False, self.__transaction.send(packet)\n except StopIteration as ex:\n return True, ex.value", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def __read_cmd_pkt(self):\n\n if self.__s_comm.isOpen() is True:\n # Read and discard results until a START_BYTE is found\n cntr = 0\n while((self.__read_byte_int() != sig_cmds.START_BYTE) and\n (cntr < 24)):\n cntr = cntr + 1\n\n # Read data from FIFO and return results\n buffer = self.__s_comm.read(11)\n\n if 11 != len(buffer):\n self.__throw_exception('SerialReadTimeout')\n\n s = struct.Struct('=BIIH')\n cmd, param1, param2, crc = s.unpack(buffer)\n\n # Reconstruct packet to validate CRC\n unpack_s = struct.Struct('=BBIIH')\n pkt = unpack_s.pack(sig_cmds.START_BYTE,\n cmd,\n param1,\n param2,\n crc)\n if(0 != ctrl_crc.crc16xmodem(pkt)):\n self.__throw_exception('Checksum invalid')\n\n return cmd, param1, param2, crc\n else:\n return False", "def assert_parse_idempotent(valid_packet):\n parse_result = hibike_message.parse_bytes(valid_packet)\n encoded = self.encode_packet(parse_result)\n self.assertEqual(valid_packet, encoded,\n \"parse not idempotent for {}\".format(valid_packet))", "def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF", "def process(self, data):\n\n\t\t# Check if the 802.15.4 packet is valid\n\t\tif makeFCS(data[:-2]) != data[-2:]:\n\t\t\tprint(hue.bad(\"Received invalid packet\"))\n\t\t\treturn\n\n\t\tpacket = Dot15d4FCS(data)\n\n\t\tif packet.fcf_frametype == 2: # ACK\n\t\t\tself.last_ack = packet.seqnum", "def test_checksum(self):\n test_data = \"This tests the checksum algorithm.\"\n test_length = len(test_data)\n self.pap.attach_data(test_data)\n\n # Now build a header\n 
variable_tuple = (0xa3, 0x9d, 0x7a, self.pap.DATA_FROM_DRIVER,\n test_length + HEADER_SIZE, 0x0000,\n 0)\n self.pap.set_data_length(test_length)\n\n header_format = '>BBBBHHd'\n size = struct.calcsize(header_format)\n temp_header = ctypes.create_string_buffer(size)\n struct.pack_into(header_format, temp_header, 0, *variable_tuple)\n\n # Now set the header member in PortAgentPacket to the header\n # we built\n self.pap.set_header(temp_header.raw)\n\n # Now get the checksum and verify it is what we expect it to be.\n checksum = self.pap.calculate_checksum()\n self.assertEqual(checksum, 2)", "def is_eof_packet(self):\n return self.data[0] == 0xfe", "def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, 
True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a function to run before each request.
def before_request(self, f): self.before_request_funcs.append(f) return f
[ "def before_request(self, f):\n self.before_request_funcs.setdefault(None, []).append(f)\n return f", "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def before_request(self, f):\n self.before_request_funcs[None].append(f)\n return f", "def before_first_request(self, fn=None):\n if fn is None:\n return self.before_first_request\n\n self._defer(lambda app: app.before_first_request(fn))\n return fn", "def before_processing(self, func):\n if not callable(func):\n raise ValueError(\"A before processing hook must be callable\")\n\n self._before_hooks.append(func)", "def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n return func", "def on_pre_run(self, func: Callable[[Trial], Any]):\n\n self._register_handler(\"on_pre_run\", func)", "def configure_before_request_funcs(app):\n @app.before_request\n def conf_set_user_cookie_id():\n return set_user_cookie_id()\n \n @app.before_request\n def check_for_maintenance():\n if config.DOWN_FOR_MAINTENANCE:\n return 'Sorry, we\\'re down momentarily for a teensey bit of maintenance!', 503\n \n @app.before_request\n def count_uniques():\n return\n statsd.set('unique_users', g.user_cookie_id)\n statsd.set('unique_ips', request.remote_addr)\n \n @app.before_request\n def set_statsd_context():\n g.statsd_context = \"%s.%s\" % (request.endpoint, request.method)\n g.total_request_timer = statsd.timer(g.statsd_context + \".response_time\")\n g.total_request_timer.start()", "def add_before_response(self, fun: Callable):\n if self.__before.count(fun):\n raise ValueError(\"%s is in list yet\" % str(fun))\n self.__before.append(fun)", "def add_before_start_func(self, func):\n if func not in self._start_funcs:\n self._start_funcs.append(func)", "def hook_client_before_request(self, event):\r\n for functor in self._hooks['client_before_request']:\r\n functor(event)", "def add_before_processing_hook(self, func):\n if not callable(func):\n raise ValueError(\"A before processing hook must be callable\")\n\n self.__before_processors.append(func)", "def _execute_pre_hook(self, context, func_name, *args, **kwargs):", "def after_request(self, f):\n self.after_request_funcs.setdefault(None, []).insert(0, f)\n return f", "def before_response(self):\n def wrapper(fun):\n self.add_before_response(fun)\n return fun\n return wrapper", "def pre_request_hook(self):", "def request(self, func):\n self.add( Request(func) )", "def configure_hook(app):\n\n @app.before_request\n def before_request():\n pass", "def _register(fn):\n context.context().add_function(fn)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a function to be run after each request.
def after_request(self, f): self.after_request_funcs.append(f) return f
[ "def after_request(self, f):\n self.after_request_funcs.setdefault(None, []).insert(0, f)\n return f", "def after_request_handle(self, func):\n self.after_request.append(func)\n return func", "def after_request(self, f):\n self.after_request_handlers.append(f)\n return f", "def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)", "def after_request(self, func):\n return self.add_hook(type_=\"post\", hook=func)", "def after_response(self, request, fn, *args, **kwargs):\n\n self._requests[id(request)][\"callbacks\"].append((fn, args, kwargs))", "def after_response(self):\n def wrapper(fun):\n self.add_after_response(fun)\n return fun\n return wrapper", "def add_after_response(self, fun: Callable):\n if self.__after.count(fun):\n raise ValueError(\"%s is in list yet\" % str(fun))\n self.__after.append(fun)", "def pop_after_request(self, fun: Callable):\n self.pop_after_response(fun)", "def teardown_request(self, f):\n self.teardown_request_funcs.setdefault(None, []).insert(0, f)\n return f", "def _register(fn):\n context.context().add_function(fn)", "def request(self, func):\n self.add( Request(func) )", "def before_request(self, f):\n self.before_request_funcs[None].append(f)\n return f", "def after_worker_start(func):\n _func_only(func)\n worker_methods_db.register_after_start(func)\n return func", "def before_request(self, f):\n self.before_request_funcs.setdefault(None, []).append(f)\n return f", "def before_request(self, f):\n self.before_request_funcs.append(f)\n return f", "def after_error_request(self, f):\n self.after_error_request_handlers.append(f)\n return f", "def register_method_after(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_after[phase].append(fn)", "def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers a template context processor function.
def context_processor(self, f): self.template_context_processors.append(f) return f
[ "def _register(fn):\n context.context().add_function(fn)", "def context_processor(self, f):\n self.template_context_processors[None].append(f)\n return f", "def context_processor(self, fn=None):\n if fn is None:\n return self.context_processor\n\n self._defer(lambda app: app.context_processor(fn))\n return fn", "def context_add(self, *args, **kwargs):\n\n if len(args):\n # TODO insert test for frame support\n caller_context = sys._getframe(1).f_locals\n for arg in args:\n if type(arg) in [str, unicode]:\n try:\n self.template_context[arg] = caller_context[arg]\n except:\n raise Exception('CTemplateView: \"%s\" variable not found in view context.' % arg)\n else:\n raise Exception('CTemplateView: Could not map template context variables.')\n\n if len(kwargs):\n for k,v in kwargs.iteritems():\n self.template_context[k] = v", "def register(self):\n REGISTERED_FUNCTIONS[self.path] = self", "def register_processor(self, processor):\n self._processors.append(processor)", "def register_process(name):\n def wrapper(func):\n REGISTRY[name] = func\n\n return func\n\n return wrapper", "def inclusion_tag(self, template):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(*args, **kw):\n context = f(*args, **kw)\n t = env.get_template(template).render(context)\n return jinja2.Markup(t)\n return self.function(wrapper)\n return decorator", "def test_enable_extension_registers_context_processors(self):\n class TestExtension(Extension):\n context_processors = ['my_custom_processor']\n\n # Back up the list, so we can replace it later.\n if hasattr(settings, 'TEMPLATES'):\n orig_context_processors_list = \\\n list(settings.TEMPLATES[0]['OPTIONS']['context_processors'])\n else:\n orig_context_processors_list = \\\n list(settings.TEMPLATE_CONTEXT_PROCESSORS)\n\n # Sanity-check that the context processor didn't wind up in here.\n self.assertNotIn('my_custom_processor', orig_context_processors_list)\n\n try:\n extension = self.setup_extension(TestExtension)\n\n # We have to re-fetch these lists now, since they may have\n # been normalized to lists.\n if hasattr(settings, 'TEMPLATES'):\n context_processors_list = \\\n settings.TEMPLATES[0]['OPTIONS']['context_processors']\n else:\n context_processors_list = \\\n settings.TEMPLATE_CONTEXT_PROCESSORS\n\n # This should have been added, since the extension was enabled.\n self.assertIn('my_custom_processor', context_processors_list)\n\n # Shutting down the extension should remove the context\n # processor.\n self.manager.disable_extension(extension.id)\n self.assertNotIn('my_custom_processor',\n context_processors_list)\n finally:\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['OPTIONS']['context_processors'] = \\\n orig_context_processors_list\n else:\n settings.TEMPLATE_CONTEXT_PROCESSORS = \\\n orig_context_processors_list", "def shell_context_processor(self, fn=None):\n if fn is None:\n return self.shell_context_processor\n\n self._defer(lambda app: app.shell_context_processor(fn))\n return fn", "def wrap_loader_context(function, context):\n if 'loader_context' in get_func_args(function):\n return partial(function, loader_context=context)\n else:\n return function", "def make_processor(cls, fnc):\n #def wrapper(**kw):\n # name = fnc.__name__\n # fnc.__name__ = 'run'\n # return type()\n # pass", "def tag_processor(fn, name):\n if hasattr(fn, '__call__'):\n return _tag_processor(fn, name)\n\n priority = fn\n\n def wrapper(fn):\n return _tag_processor(fn, name, priority)\n return wrapper", "def register_template_callback_before(cls):\n\n return 
register_callback(cls, \"TEMPLATE_BEFORE\")", "def template_func(cls, name):\n def helper(func):\n if cls.template_funcs is None:\n cls.template_funcs = {}\n cls.template_funcs[name] = func\n return func\n return helper", "def register_key_function(function):\n key_functions.register(function)", "def register_template_renderer(\n self, plugin, template_name, context=default_context\n ):\n self._renderers[plugin] = (template_name, context)", "def register_task(self) -> None:\n if __debug__:\n logger.debug(\n \"[@TASK] Registering the function %s in module %s\",\n self.decorated_function.function_name,\n self.decorated_function.module_name,\n )\n binding.register_ce(self.core_element)", "def template_global(self,\n arg: Optional[Callable] = None,\n *,\n name: Optional[str] = None,\n pass_context: bool = False,\n inject: Optional[Union[bool, Iterable[str]]] = None,\n safe: bool = False,\n ) -> Callable:\n def wrapper(fn):\n fn = _inject(fn, inject)\n if safe:\n fn = _make_safe(fn)\n if pass_context:\n fn = jinja2.contextfunction(fn)\n self._defer(lambda app: app.add_template_global(fn, name=name))\n return fn\n\n if callable(arg):\n return wrapper(arg)\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enroll a new profile with Azure Speaker ID.
def enroll_profile(region, subscription_key, wav_path): fs, audio_data = _check_and_load_wav_file_length(wav_path) profile_id = _add_profile(region, subscription_key) url = "%s/speaker/identification/v2.0/text-independent/profiles/%s/enrollments" % ( _get_azure_endpoint(region), profile_id) headers = { "Ocp-apim-subscription-key": subscription_key, "Content-Type": "audio/wav; codecs=audio/pcm; samplerate=%s" % fs, } session = requests.Session() resp = session.post(url, headers=headers, data=audio_data) print("Enrollment response status code: %s\n" % resp.status_code) print(json.dumps(json.loads(resp.content), indent=2))
[ "def attach_security_profile(securityProfileName=None, securityProfileTargetArn=None):\n pass", "def _createSpeakerByName(self, name):\n data = DEFAULT_SPEAKER\n data['name'] = name\n return Speaker(**data).put()", "def test_speaker(self):\n self.course.speakers.create(\n name='Fabricio Nogueira',\n slug='fabricio-nogueira',\n website='http://fabricionogueira.me'\n )\n self.assertEqual(1, self.course.speakers.count())", "def enroll(self, request, *args, **kwargs):\n course = self.get_object()\n course.students.add(request.user)\n return Response({'enrolled': True})", "def set_speaker(self, identifier):\n self.up_to_date = False\n self._speaker = identifier", "def add_profile(self, profile):\r\n self.profiles.append(profile)", "def test_add_profile(self):\n # Create a profile for the current user.\n\n my_profile = apiTestHelpers.create_profile()\n\n # Calculate a global ID and account secret for the other user. Note\n # that the user won't have a profile yet.\n\n other_user_global_id = utils.calc_unique_global_id()\n other_user_account_secret = utils.random_string()\n\n # Create a conversation between these two users.\n\n conversation = \\\n apiTestHelpers.create_conversation(my_profile.global_id,\n other_user_global_id)\n\n # Ask the \"/changes\" endpoint for the current anchor value.\n\n headers = utils.calc_hmac_headers(\n method=\"GET\",\n url=\"/api/changes\",\n body=\"\",\n account_secret=my_profile.account_secret\n )\n\n url = \"/api/changes?my_global_id=\" + my_profile.global_id\n response = self.client.get(url, \"\", content_type=\"application/json\",\n **headers)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], \"application/json\")\n data = json.loads(response.content)\n self.assertItemsEqual(data.keys(), [\"next_anchor\"])\n\n anchor = data['next_anchor']\n\n # Create a new profile for the second user.\n\n request = json.dumps({\n 'profile' : {\n 'global_id' : other_user_global_id,\n 'name' : utils.random_string(),\n 'name_visible' : True,\n 'email' : utils.random_string(),\n 'picture_id' : utils.random_string(),\n 'picture_id_visible' : True,\n },\n 'account_secret' : other_user_account_secret,\n })\n\n headers = utils.calc_hmac_headers(\n method=\"POST\",\n url=\"/api/profile/\" + other_user_global_id,\n body=request,\n account_secret=other_user_account_secret\n )\n\n response = self.client.post(\"/api/profile/\" + other_user_global_id,\n request,\n content_type=\"application/json\",\n **headers)\n\n self.assertEqual(response.status_code, 201)\n\n # Now ask the \"/changes\" endpoint for the things that have changed\n # since the anchor was calculated.\n\n headers = utils.calc_hmac_headers(\n method=\"GET\",\n url=\"/api/changes\",\n body=\"\",\n account_secret=my_profile.account_secret\n )\n\n url = \"/api/changes?my_global_id=\" + my_profile.global_id \\\n + \"&anchor=\" + anchor\n\n response = self.client.get(url, \"\", content_type=\"application/json\",\n **headers)\n if response.status_code != 200:\n print response.content\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], \"application/json\")\n data = json.loads(response.content)\n self.assertItemsEqual(data.keys(), [\"changes\", \"next_anchor\"])\n\n # Check that the changes includes the updated profile.\n\n found = False\n for change in data['changes']:\n if change['type'] == \"profile\":\n if change['data']['global_id'] == other_user_global_id:\n found = True\n\n self.assertTrue(found)", "def profile(self, profile):\n\n self._profile 
= profile", "def enroll_new_user(args):\n establish_sqlite_db(args.db_table)\n validate_id(args.id)\n mfcc = sample_from_mfcc(read_mfcc(args.audio_path, SAMPLE_RATE), NUM_FRAMES)\n insert_db_row(args.db_table, args.id, mfcc)", "def update_profile(ProfileArn=None, ProfileName=None, IsDefault=None, Timezone=None, Address=None, DistanceUnit=None, TemperatureUnit=None, WakeWord=None, Locale=None, SetupModeDisabled=None, MaxVolumeLimit=None, PSTNEnabled=None, MeetingRoomConfiguration=None):\n pass", "def enable(self,\n profile_id=None):\n if profile_id is None:\n self._enabled = True\n else:\n self._profiles[profile_id] = True", "def create(self, validated_data):\n return Speaker.objects.create(**validated_data)", "def execute(profile_name=\"default\"):\n session = Session(profile_name=profile_name) \n polly = session.client(\"polly\")\n try:\n response = polly.synthesize_speech(Text=\"Hello World!\", OutputFormat=\"mp3\", VoiceId=\"Joanna\")\n\n except(BotoCoreError, ClientError) as error:\n print(error)\n sys.exit(-1)\n \n if \"AudioStream\" in response:\n return response[\"AudioStream\"]\n \n else:\n print(\"Could not stream audio\")\n sys.exit(-1)", "def assign_profile(self, objProf):\n self.Profile = objProf\n pass", "def CreateProfile(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _add_new_profile(cls, user_id, email):\n #ehiller - if for some reason we can't share student profile, I don't think a teacher would care, we probably\n # don't even need this function\n #if not CAN_SHARE_STUDENT_PROFILE.value:\n # return None\n\n old_namespace = namespace_manager.get_namespace()\n try:\n namespace_manager.set_namespace(cls.TARGET_NAMESPACE)\n\n profile = models.PersonalProfile(key_name=user_id)\n profile.email = email\n profile.enrollment_info = '{}'\n profile.put()\n return profile\n finally:\n namespace_manager.set_namespace(old_namespace)", "def populate_profile(sender, user, request, **kwargs):\n modified = False\n profile,created = UserProfile.objects.get_or_create(user=user)\n if not profile.uhash:\n\n profile.uhash = id_encoder.from_decimal(random.getrandbits(32)) #hashlib.sha1(user.username).hexdigest()\n modified = True\n \n #ToDo get idp from request and set\n idp = request.META.get('Shib_Identity_Provider',None)\n if idp is not None:\n profile.idp = idp\n modified = True\n\n #auto-populate idp table\n idp_object,created = IdentityProvider.objects.get_or_create(uri=idp)\n\n if modified:\n profile.save()\n \n levels = []\n authn_context_class = request.META.get('Shib_AuthnContext_Class',None)\n if authn_context_class is not None:\n for uri in authn_context_class.split():\n try:\n level = Assurance.objects.get(uri=uri,assignable=False) #only look for \"real\" LoAs\n levels.append(level)\n except ObjectDoesNotExist:\n pass\n \n if levels is None and idp is not None:\n # fall back to default for the IdP if exists\n try:\n identity_provider = IdentityProvider.objects.get_or_create(uri=idp)\n if identity_provider.default_assurance != None:\n levels.append(identity_provider.default_assurance)\n except ObjectDoesNotExist:\n pass\n\n if user.username in get_custom_setting('AUTO_REMOTE_SUPERUSERS',[]):\n user.is_superuser = True\n user.is_staff = True\n user.password = User.objects.make_random_password(length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789')\n with open(\"/tmp/%s\" % user.username,'w') as pwf:\n pwf.write(user.password)\n os.chmod(\"/tmp/%s\" % user.username,0600)\n user.save()\n \n 
request.session['assurance_levels'] = levels\n return", "async def on_speaking(self, speaking, uid):\n pass", "def enroll(request, id):\n\tif request.user.is_authenticated() and RegStudent.objects.get(user=request.user).active:\n\t\tcourse = Course.objects.get(id=id)\n\t\treg_student = RegStudent.objects.get(user=request.user)\n\t\tstudent = Student.objects.get(name=reg_student)\n\t\tstudent.courses.add(course)\n\t\tmessages.success(request, 'You are enrolled in %s' %(course))\n\t\treturn HttpResponseRedirect('/student/courses/')\n\telse:\n\t\traise Http404" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the number of suicides for a given agent type, game mode, observability setting, and game seed. If the game seed passed is -1, then all game seeds are aggregated.
def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1): event_id = "death" # Keep only those games within given configuration if game_seed != -1: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) & (data['game_seed'] == game_seed)] else: selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)] if agent != -1: for index, row in selection.iterrows(): if agent not in row["agents"]: selection.drop(index, inplace=True) # print(selection.size) team_kill_count = [] ngames = 0 # Number of games in which this agent dies suicides = 0 # Number of games in which this agent commits suicide events_per_sample = [] team_kills = 0 # Iterate through selected game data for index, row in selection.iterrows(): if agent in row["agents"] and row['event_id'] == event_id: # This agent played in the game # Find its agent ID depending on its position in the agent list. There may be more than 1 agent of this # type in the game, so iterate over all and check individually. ll = row["agents"] indices = [i for i, el in enumerate(ll) if el == agent] for agent_id in indices: # teammate = (agent_id + 2) % 4 sample_event_counter = 0 for event in row["event_data"]: if event["agent_id"] == agent_id: # This agent dies if event["killer"] == agent_id: # Suicide sample_event_counter += 1 # if event["killer"] == teammate: # Killed by teammate # team_kills += 1 # if event["agent_id"] == teammate: # Teammate dies # if event["killer"] == agent_id: # Killed by this agent # team_kill_count += 1 ngames += 1 events_per_sample.append(sample_event_counter) suicides += sample_event_counter # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides # team_kill_count.append(100*team_kills/games) # percentage = 100 * suicides / ngames # mean = ngames * (percentage / 100) # variance = mean * (1 - (percentage / 100)) # std_dev = math.sqrt(variance) # std_err = std_dev / math.sqrt(ngames) # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval # return percentage, h # print(events_per_sample) mean = suicides/ngames variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample) std_dev = math.sqrt(variance) std_err = std_dev/math.sqrt(len(events_per_sample)) h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval return mean * 100, h * 100 # , team_kill_count
[ "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def get_num_coop_agents(model):\n\n agent_cooperations = [a.number_of_c for a in model.schedule.agents]\n # print(\"hey\", agent_cooperations)\n agent_cooperations = np.sum(agent_cooperations)\n # print(\"lol\", agent_cooperations.item())\n return agent_cooperations.item()", "def get_unique_heroes_count(data):\n\n unique_set = set()\n for i in range(1, 6):\n dire = data['r{0}_hero'.format(i)].unique()\n radiant = data['d{0}_hero'.format(i)].unique()\n unique_set = unique_set | set(dire) | set(radiant)\n return len(unique_set)", "def num_deviation_profiles(game, rest):\n rest = np.asarray(rest, bool)\n utils.check(game.is_restriction(rest), \"restriction must be valid\")\n num_role_strats = np.add.reduceat(rest, game.role_starts)\n num_devs = game.num_role_strats - num_role_strats\n dev_players = game.num_role_players - np.eye(game.num_roles, dtype=int)\n return np.sum(utils.game_size(dev_players, num_role_strats).prod(1) * num_devs)", "def sixes(player_kept: []) -> int:\n return SIX() * player_kept.count(SIX())", "def agent_count(city):\n agents = 0\n empty = 0\n landmarks = 0\n for (x,y), house in np.ndenumerate(city):\n if x > w or y > h:\n continue\n if not (house.empty or house.landmark):\n agents += 1\n elif not house.empty:\n landmarks += 1\n else:\n empty += 1", "def test_winners_per_type_sum(self):\n sim = ss.Simulation()\n sim.run_simulation(14)\n winners = sim.winners_per_type()\n assert sum(winners.values()) == 14", "def get_num_suits(self):\n return self._data_holder.numSuits", "def diversity(population):\n damages = defaultdict(int)\n for ch in population:\n damages[ch.total_damage] += 1\n return len(damages)", "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def evaluate_agent(agent: Agent, num_runs=100, num_steps_per_run=4000):\n\n names = ['obstacles', 'foodhunt', 'combination', 'fixed_random']\n counts = {\n 'food' : {\n n: [] for n in names\n },\n 'obs' : {\n n: [] for n in names\n }\n }\n funcs = [SingleAgentSimulation.obstacle_avoidance_level, SingleAgentSimulation.food_finding_level, SingleAgentSimulation.combination_hard_coded_eval_level, SingleAgentSimulation.fixed_random_eval_level]\n for index, (name, func) in enumerate(zip(names, funcs)):\n print(f\"Doing Eval Level {name}\")\n for run_no in range(num_runs):\n print(f\"\\r\\t\\tEpisode {run_no} / {num_runs}\", end='')\n if name == 'fixed_random':\n level = funcs[index](run_no + 1234567)\n else:\n level = funcs[index]()\n \n # ensure no dodgy things will happen regarding modifying the agent.\n tmp = copy.deepcopy(agent)\n s = SimulationRunner(level, tmp)\n for j in range(num_steps_per_run):\n s.update()\n num_obs_hit = (level.total_rewards[0] - level.score[0] * level.rewards[EntityType.FOOD]) / SingleAgentSimulation.OOB_REWARD\n num_food_gotten = level.score[0]\n counts['food'][name].append(num_food_gotten)\n counts['obs'][name].append(num_obs_hit)\n print(f\"\\n\\tRewards for this step: {get_means(counts, name)}\")\n return counts", "def number_of_suits(self):\r\n suits = []\r\n for i in self.cards:\r\n if i.suit not in suits:\r\n suits.append(i.suit)\r\n return len(suits)", "def get_number_of_investors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Investor'])\n return n_agents", "def get_agent_distribution(self):\n logger.info('Finding 
agent distribution among model statements.')\n agent_count = defaultdict(int)\n for stmt in self.statements:\n for agent in stmt.agent_list():\n if agent is not None:\n agent_count[agent.name] += 1\n return sorted(agent_count.items(), key=lambda x: x[1], reverse=True)", "def _niched_count_calculation(self, cadidate):\n\n niche_count = 0\n for individual in self.population:\n \"\"\"\n The formula increments the niche count by \n (1 - (neighbor_distance / niched_radius)) \n if the neighbor distance is less then niched radius.\n \"\"\"\n\n neighbor_distance = _Utility._euclidean_distance(\n cadidate.fitness, individual.fitness)\n \n if neighbor_distance <= self._niched_radius:\n sh = 1.0 - (neighbor_distance / self._niched_radius)\n else:\n sh = 0\n\n niche_count = niche_count + sh\n\n return niche_count", "def number_of_neurites(morph, neurite_type=NeuriteType.all):\n return sum(1 for _ in iter_neurites(morph, filt=is_type(neurite_type)))", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def _generate_suit_histogram(cards):\n return Counter(card.suit for card in cards)", "def n_neurites(neurites, neurite_type=NeuriteType.all):\n return sum(1 for _ in iter_neurites(neurites, filt=is_type(neurite_type)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a module item.
def create_module_item(self, module_item, **kwargs): unrequired_types = ["ExternalUrl", "Page", "SubHeader"] if isinstance(module_item, dict) and "type" in module_item: # content_id is not required for unrequired_types if module_item["type"] in unrequired_types or "content_id" in module_item: kwargs["module_item"] = module_item else: raise RequiredFieldMissing( "Dictionary with key 'content_id' is required." ) else: raise RequiredFieldMissing("Dictionary with key 'type' is required.") response = self._requester.request( "POST", "courses/{}/modules/{}/items".format(self.course_id, self.id), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
[ "def create_module(self, information):\n self.module = self.get_module(activate=True, information=information)\n self.module.create()\n self.change_status(color=\"yellow\")", "def load_new_item(self, item_name, item_type):\n\n new_item = item_type(item_name, self.IDgen.new_id())\n # create object of class item_type, assigning a unique id number & name\n\n self.libsys.add_new_item(new_item)\n # tell libsys to add object to given itemlist", "def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj", "def createItem(*args):\n\n itemName = cmds.textFieldGrp('create_customname', q=True, text=True)\n itemCommand = cmds.textFieldGrp('create_customCommand', q=True, text=True)\n value = cmds.radioButtonGrp('create_customSourceRadioButton', q=True, select=True)\n if not itemName or not itemCommand or not value:\n raise UserWarning('You need to fill all the fields!')\n if value == 1:\n sourceType = 'python'\n else:\n sourceType = 'mel'\n cmds.deleteUI(WINDOWNAME, window=True)\n # create the custom item\n item = cmds.menuItem(parent=PARENT, label=itemName, command=itemCommand, sourceType=sourceType)\n SETTINGS.add(item, [itemName, itemCommand, sourceType])", "async def item_create(item_in: ItemCreate, db: Session = Depends(get_db)):\n return create_item(db=db, item=item_in)", "def new_item(source_name, item):\n\t# id is required\n\tif 'id' not in item:\n\t\traise Exception(f'Cannot create item with no id. Value = {item}')\n\n\t# source must be filled in, so if it is absent it is auto-populated with\n\t# source_name. Note: this allows sources to fill in a different source.\n\tif 'source' not in item:\n\t\titem['source'] = source_name\n\n\t# active is forced to True for new items\n\titem['active'] = True\n\n\t# created is forced to the current timestamp\n\titem['created'] = timestamp.now()\n\n\t# title is auto-populated with the id if missing\n\tif 'title' not in item:\n\t\titem['title'] = item['id']\n\n\t# tags is auto-populated if missing (not if empty!)\n\tif 'tags' not in item:\n\t\titem['tags'] = [source_name]\n\n\t# All other fields are optional.\n\titem_path = os.path.join(DUNGEON_PATH, item['source'], f'{item[\"id\"]}.item')\n\treturn WritethroughDict.create(item_path, item)", "def add_module(self, *args):\n module_name = args[0]\n information = {}\n\n if len(args) > 1:\n information = args[1]\n\n item = QtWidgets.QListWidgetItem()\n widget = ModuleWidget(module_name=module_name,\n list_widget=self.module_form.list,\n item=item,\n parent=self,\n information=information)\n\n # makes sure the modules are shown correctly in the list widget\n item.setSizeHint(widget.sizeHint())\n\n # add a widget to the list\n self.module_form.list.addItem(item)\n self.module_form.list.setItemWidget(item, widget)\n return widget", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_item(self, user: User, **kwargs) -> None:", "def create_module(cls, *args, **kwargs): # real signature unknown\r\n pass", "def add_item(self):\n new_item = LibraryItemGenerator.generate_item()\n # new_item = ItemFactory.generate_item()\n found_item = self._retrieve_item_by_call_number(\n new_item.call_number)\n if found_item:\n print(f\"Could not add item with call number \"\n f\"{new_item.call_number}. It already exists. \")\n else:\n self._item_list.append(new_item)\n print(\"Item added successfully! 
Item details:\")\n print(new_item)", "def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)", "def add_item(self):\n item = LibGen.create_item()\n if not self.item_exists(item.call_number):\n self.item_list[item.call_number] = item\n print(f\"Item({item.call_number}) bas been added.\")\n else:\n print(\"This item already exists.\")", "def add_item(self):\n new_item = LibraryItemGenerator.generate_item(self._type)\n found_item = self._retrieve_item_by_call_number(\n new_item.call_number)\n if found_item:\n print(f\"Could not add item with call number \"\n f\"{new_item.call_number}. It already exists. \")\n else:\n self._item_list.append(new_item)\n print(\"item added successfully! item details:\")\n print(new_item)", "def Create(settings, args, folder, module_name=None):\n if module_name is None: # if the module name was not specified separately then take it from the settings\n module_name = settings[\"type\"].GetString()\n full_module_path = \".\".join([folder, module_name])\n\n try:\n imported_module = import_module(full_module_path)\n except ImportError:\n try:\n imported_module = import_module(module_name)\n except ImportError:\n raise ImportError('Module \"{}\" could neither be imported from CoSimulation nor from PYTHONPATH'.format(module_name))\n\n return imported_module.Create(settings, *args)", "def new_item(self, ui_info):\n new_item = ToDoItem()\n self.model.items.append(new_item)", "def create_work_item(self):", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def create_module(self, mid, paras):\n class_name = self.modules[mid]['class']\n names = class_name.split('.')\n # Module path in python environment\n python_module_path = '.'.join(names[0:-1])\n # Import python module\n python_module = __import__(python_module_path)\n # Fetch target class\n module_class = getattr(python_module, names[-1])\n\n new_paras = self.modules[mid].copy()\n new_paras.update(paras)\n\n return module_class(paras=new_paras)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete this module item.
def delete(self, **kwargs): response = self._requester.request( "DELETE", "courses/{}/modules/{}/items/{}".format( self.course_id, self.module_id, self.id ), _kwargs=combine_kwargs(**kwargs), ) module_item_json = response.json() module_item_json.update({"course_id": self.course_id}) return ModuleItem(self._requester, module_item_json)
[ "def delete_item(self, item_id):\n pass", "def module_delete(self):\n self.test_runner.run_module_delete_auto_by_non_admin()", "def delete_item(self, item):\r\n item.delete_item_from_room(self)", "def module_delete_existing(self):\n self.test_runner.run_module_delete_existing()", "def deleteItem(self, parentPath: unicode, itemName: unicode, version: int) -> None:\n ...", "def delete_mod(self):\n return self.mod_in", "def DeleteItem(*args, **kwargs):\n return _core_.Menu_DeleteItem(*args, **kwargs)", "def removeItemInManager(self):\n\n modelIndex = self.noteManager.selectedIndexes()[0] #get the model index of the selected item\n item = modelIndex.internalPointer() #get the item\n itemFilePath = item.getFilePath() #get the path to the item\n message = \"You are about to remove:\\n{0}\\n\\nAre you sure?\".format(itemFilePath)\n buttonSelected = QMessageBox.warning(self, \"Removing an Item - Quark Note Taker\", message, QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Cancel)\n if buttonSelected == QMessageBox.Ok:\n if type(item) is QuarkNoteModel:\n os.remove(itemFilePath)\n elif type(item) is QuarkNotebookModel:\n shutil.rmtree(itemFilePath)\n\n self.noteManager.model().updateModel()", "def delete_module(id):\n check_admin()\n verify_module_access('Modules')\n \n module = Module.query.get_or_404(id)\n db.session.delete(module)\n db.session.commit()\n flash('You have successfully deleted the module.')\n\n # redirect to the modules page\n return redirect(url_for('admin.list_modules'))\n\n return render_template(title=\"Delete module\")", "def delete(self, item):\n # eg. node=item to attrs, telling item type to Graphviz._setattr\n self.graph._del(self.parent.handle, **{self.type: item})", "def storage_del(self, key=None):\n if not self._module:\n return\n self._storage_init()\n module_name = self._module.module_full_name\n return self._storage.storage_del(module_name, key=key)", "def delete_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n item.data().set_enabled(False)\n self.sync_tree_cache()", "def __delitem__(self,item):\n if item == self.lastKey: return\n installer = self.data[item]\n apath = self.dir.join(item)\n if isinstance(installer,InstallerProject):\n apath.rmtree(safety='Installers')\n else:\n apath.remove()\n del self.data[item]", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def delete(self):\n if not self.game_id:\n raise modioException(\n \"This endpoint cannot be used for ModFile object recuperated through the me/modfiles endpoint\"\n )\n\n resp = self.connection.delete_request(f\"/games/{self.game_id}/mods/{self.mod}/files/{self.id}\")\n return resp", "def delete(self):\n\n\t\tpprint (self.request.arguments)\n\t\tuser_id = self.request.arguments.get(\"user_id\")[0].decode(\"utf-8\")\n\t\tmodule_id = self.request.arguments.get(\"module_id\")[0].decode(\"utf-8\")\n\t\ttry:\n\t\t\t#post_arguments = json.loads(self.request.body.decode(\"utf-8\"))\n\t\t\t#user_id = post_arguments.get(\"user_id\", None) ##who created this category\n\t\t\tif not module_id:\n\t\t\t\traise Exception(\"Please send the module id\")\n\t\t\n\t\t\tmodule = yield self.module_collection.find_one({\"module_id\": module_id})\n\t\t\tuser = yield self.user_collection.find_one({\"user_id\": user_id})\n\t\t\tif user[\"user_type\"] == \"superadmin\":\n\t\t\t\tprint (\"Ohh fuck the user is superadmin\")\n\t\t\t\tyield DeleteModule.delete_module(self.db, module, self.module_collection, self.child_collection_name, 
\n\t\t\t\t\t\tself.parent_collection, self.permission_collection)\n\n\t\t\t\tmessage = \"Module %s with module_id %s and module_name %s has been deleted\"%(self.module_type, module_id, module[\"module_name\"])\n\t\t\t\tself.write({\"error\": False, \"success\": True, \"data\": {\"module_id\": module_id, \"message\": message}})\n\t\t\t\tself.finish()\n\t\t\t\treturn\n\t\t\t\t\n\n\t\t\tresult = yield Permissions.get_permission_rest_parameter(user, module, \"delete\", self.permission_collection)\n\t\t\tpprint(result)\n\t\t\tif result:\n\t\t\t##mark domain and its children under \"deletion_approval\": pending\n\t\t\t\tyield DeleteModule.mark_module(self.db, module, self.module_collection, self.child_collection_name, \n\t\t\t\t\t\tself.parent_collection, self.permission_collection)\n\n\t\t\t\n\t\t\t\tmessage = \"Module %s with module_id %s and module_name %s submitted for deletion and requires superadmin approval\\\n\t\t\t\t\t\t\t\t\t\"%(self.module_type, module_id, module[\"module_name\"])\n\t\t\t\tself.write({\"error\": False, \"success\": True, \"data\": {\"module_id\": module_id, \"message\": message}})\n\t\t\t\tself.finish()\n\t\t\t\treturn\n\n\t\t\telse:\n\t\t\t\tmessage = \"Insufficient permissions\"\n\t\t\t\tself.set_status(400)\n\t\t\t\tself.write(message)\n\t\t\t\tself.finish()\n\t\t\t\treturn \n\t\texcept Exception as e:\n\t\t\tprint (e)\n\t\t\tself.set_status(403)\n\t\t\tself.write(str(e))\n\t\t\tself.finish()\n\t\t\treturn \n\n\t\tpprint (module_id)\n\t\tself.write({\"data\": module_id})\n\t\tself.finish()\n\t\treturn", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def module_delete_non_existent(self):\n self.test_runner.run_module_delete_non_existent()", "def delete(self, kwargs=None):\n\n conn, cur = get_conn_cur()\n item_composition_delete_template = \"DELETE FROM \\\"ItemComposition\\\" WHERE id=%s;\"\n id = None\n\n if kwargs:\n if 'id' not in kwargs.keys():\n if self.fields['id'] is not None:\n id = self.fields['id']\n else:\n id = int(kwargs['id'])\n if id:\n try:\n cur.execute(item_composition_delete_template, (id,))\n conn.commit()\n except psycopg2.DatabaseError as e:\n conn.rollback()\n raise e\n finally:\n for name in self.fields.keys():\n self.fields[name] = None\n close(conn, cur)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
One sample/paired sample permutation test based on a t-statistic. This function can perform the test on one variable or simultaneously on multiple variables. When applying the test to multiple variables, the "tmax" method is used for adjusting the p-values of each variable for multiple comparisons. Like Bonferroni correction, this method adjusts p-values in a way that controls the family-wise error rate. However, the permutation method will be more powerful than Bonferroni correction when different variables in the test are correlated.
def permutation_t_test( X, n_permutations=10000, tail=0, n_jobs=None, seed=None, verbose=None ): from .cluster_level import _get_1samp_orders n_samples, n_tests = X.shape X2 = np.mean(X**2, axis=0) # precompute moments mu0 = np.mean(X, axis=0) dof_scaling = sqrt(n_samples / (n_samples - 1.0)) std0 = np.sqrt(X2 - mu0**2) * dof_scaling # get std with var splitting T_obs = np.mean(X, axis=0) / (std0 / sqrt(n_samples)) rng = check_random_state(seed) orders, _, extra = _get_1samp_orders(n_samples, n_permutations, tail, rng) perms = 2 * np.array(orders) - 1 # from 0, 1 -> 1, -1 logger.info("Permuting %d times%s..." % (len(orders), extra)) parallel, my_max_stat, n_jobs = parallel_func(_max_stat, n_jobs) max_abs = np.concatenate( parallel( my_max_stat(X, X2, p, dof_scaling) for p in np.array_split(perms, n_jobs) ) ) max_abs = np.concatenate((max_abs, [np.abs(T_obs).max()])) H0 = np.sort(max_abs) if tail == 0: p_values = (H0 >= np.abs(T_obs[:, np.newaxis])).mean(-1) elif tail == 1: p_values = (H0 >= T_obs[:, np.newaxis]).mean(-1) elif tail == -1: p_values = (-H0 <= T_obs[:, np.newaxis]).mean(-1) return T_obs, p_values, H0
[ "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... {:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)", "def post_hoc_perm(conditions, n_shuffles, dataframe, method = scipy.stats.ttest_rel, seed = 1010):\n \n np.random.seed(seed)\n\n pairs = [pair for pair in itertools.combinations(conditions, 2)]\n n_pairs = len(pairs)\n\n t = np.floor(n_pairs * 0.25)\n\n obs_cond = {}\n perm_cond = {}\n p_cond = {}\n p_ph = {}\n\n maxT = np.zeros(n_shuffles)\n\n #First loop: Generate permutations\n for n, pair in enumerate(pairs):\n\n if n % t == 0:\n print((n / n_pairs) * 100)\n\n term = pair[0] + '_vs_' + pair[1]\n obs, perm, p = t_perm(dataframe[pair[0]], dataframe[pair[1]], n_shuffles, term)\n obs_cond.update(obs)\n perm_cond.update(perm)\n p_cond.update(p)\n\n\n\n for n in range(0, n_shuffles):\n shuffle = np.array([shuffles[n] for shuffles in perm_cond.values()])\n maxT[n] = shuffle[np.squeeze(np.where(abs(shuffle) == np.max(np.abs(shuffle))))]\n\n p_ph = {cond: sum(abs(maxT) >= abs(obs_cond[cond])) / n_shuffles for cond in obs_cond.keys()}\n \n print('Complete')\n return(obs_cond, perm_cond, maxT, p_ph)", "def _pvalue_half_permutation(self, num_permutations=1000):\n perm_stats = [self.test_stat]\n yhat = self.classifier.predict_proba(self._test)[1]\n for _ in range(num_permutations):\n perm_yhat = np.random.permutation(yhat)\n perm_test_stat = self.eval_metric(self._test[self.sample_label], perm_yhat) # type: ignore\n perm_stats.append(perm_test_stat)\n pval = (self.test_stat <= np.array(perm_stats)).mean()\n return pval", "def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob", "def pairedTTest(sample1, sample2):\n tStatistic, pValue = scipy.stats.ttest_rel(sample1, sample2)\n # NOTE: for unpaired, two-sample t-test use ttest_ind()\n return pValue", "def permutation_test(x1, x2, alternative='two-sided', times=1000):\n x1 = x1[x1==x1]\n x2 = x2[x2==x2]\n\n L1 = len(x1)\n xx = np.append(x1, x2)\n d0 = np.mean(x1) - np.mean(x2)\n \n N_success = 0\n for i in range(times):\n yy = np.random.permutation(xx)\n d1 = np.mean(yy[:L1]) - np.mean(yy[L1:])\n if alternative == \"two-sided\" and abs(d1) >= abs(d0):\n N_success += 1\n if alternative == \"greater\" and d1 >= d0:\n N_success += 
1\n if alternative == \"less\" and d1 <= d0:\n N_success += 1\n p_value = N_success / (times+0.0)\n return p_value", "def manual_perm_test(model: 'Fitted sklearn estimator',\n X: 'Pandas df',\n y: 'Pandas series',\n true_score: float,\n n_permutations: int=10000,\n plot: bool=True,\n clf: bool=False) -> 'p-value, null_counts':\n\n scores = [] # Empty list for null distribution scores\n n_perms = range(1, n_permutations, 1) # Range of values to permute\n for n in tqdm(n_perms, desc='Permutation test'): # tqdm for progress bar\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.90, random_state=n\n )\n model.fit(X_train, y_train)\n y_test_perm = np.random.permutation(y_test) # Permuting class labels\n chance_scores = round(model.score(X=X_test, y=y_test_perm), 4)\n scores.append(chance_scores)\n\n # Converting to a pandas dataframe\n perm_scores_df = pd.DataFrame(data=scores, columns=['null_dist'])\n perm_scores_df['null_dist'] *= 100\n null_counts = (\n perm_scores_df # Counts greater than or equal to our test set score\n .loc[(perm_scores_df['null_dist']) >= true_score]\n .count()\n .iloc[0]\n )\n p_value = (null_counts + 1) / (n_permutations + 1)\n p_value = np.round(p_value, decimals=5)\n\n if plot is True: # Plotting a histogram of permutation scores\n plt.figure(figsize=(10, 10))\n sns.distplot(a=perm_scores_df['null_dist'],\n hist=True,\n label='Permutation scores')\n ylim = plt.ylim()\n if clf is False:\n # True classifier score and p-value\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='R2 score %s (pvalue : %s)' %\n (true_score, p_value))\n else:\n plt.plot(2 * [true_score],\n ylim,\n '--g',\n linewidth=3,\n label='Multimodal AUC score: %s (pvalue = %s)' %\n (true_score, p_value))\n n_classes = np.unique(y).size\n chance = 2 * [100. 
/ n_classes]\n plt.plot(chance,\n ylim,\n '--k',\n linewidth=3,\n label='Null model mean AUC score: %s' % 50.00)\n \n plt.ylim(ylim)\n plt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.38))\n plt.tight_layout()\n\n if clf is False:\n plt.xlabel(xlabel='R2 Scores')\n else:\n plt.xlabel(xlabel='AUC Scores')\n plt.title(label='Null Distribution')\n plt.savefig('quadratic_null_dist.png', dpi=300, bbox_inches='tight')\n plt.show()\n\n return p_value, null_counts", "def ttest_1samp(a, popmean):\n x = mean(a)\n v = var(a)\n n = len(a)\n df = n - 1\n svar = ((n - 1) * v) / float(df)\n t = (x - popmean) / math.sqrt(svar * (1.0 / n))\n prob = betai(0.5 * df, 0.5, float(df) / (df + t * t))\n\n return t, prob", "def pairedTTest(sample1, sample2):\n mean1 = np.mean(sample1)\n mean2 = np.mean(sample2)\n stDev1 = np.std(sample1)\n stDev2 = np.std(sample2)\n stdErr1 = stDev1/np.sqrt(len(sample1))\n stdErr2 = stDev2/np.sqrt(len(sample2))\n tStatistic, pValue = scipy.stats.ttest_rel(sample1, sample2)\n # NOTE: for unpaired, two-sample t-test use ttest_ind()\n return pValue", "def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n if type(a) != N.ndarray:\r\n a = N.array(a)\r\n x = amean(a)\r\n v = avar(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v) / float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n statname,t,prob)\r\n return t,prob", "def welchs_t_test(data, alternative):\n\n if len(data) != 2:\n raise ValueError(\"2 groups are needed\")\n\n a = data[0]\n b = data[1]\n\n n_a = a.shape[0]\n n_b = b.shape[0]\n\n var_a = np.var(a, ddof=1)\n var_b = np.var(b, ddof=1)\n\n nu_a = n_a - 1\n nu_b = n_b - 1\n\n sd = np.sqrt(var_a/n_a + var_b/n_b)\n t = (np.mean(a) - np.mean(b)) / sd\n\n df = (sd ** 4) / ((var_a ** 2)/(nu_a * (n_a ** 2)) + (var_b ** 2)/(nu_b * (n_b ** 2)))\n\n if alternative == \"<>\":\n p = stats.t.cdf(np.fabs(t)*-1, df=df) * 2\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, p2))\n elif alternative == \">\":\n # t should be negative\n p = 1.0 - stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, p2))\n elif alternative == \"<\":\n # t should be positive\n p = stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, 1-p2))\n return p", "def plain_t_test(data, alternative):\n\n if len(data) != 2:\n raise ValueError(\"2 groups are needed\")\n\n a = data[0]\n b = data[1]\n N = a.shape[0]\n\n if a.shape != b.shape:\n raise ValueError(\"The 2 groups must have the same number of observations\")\n\n var_a = np.var(a, ddof=1)\n var_b = np.var(b, ddof=1)\n\n sp = np.sqrt((var_a + var_b)/2.0)\n t = (np.mean(a) - np.mean(b)) / (sp * np.sqrt(2.0/N))\n df = 2*N - 2\n\n if alternative == \"<>\":\n p = stats.t.cdf(np.fabs(t)*-1, df=df) * 2\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, p2))\n elif alternative == \">\":\n # t should be negative\n p = 1.0 - stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, p2))\n elif alternative == \"<\":\n # t should be positive\n p = stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, 1-p2))\n\n return p", "def test_permutations(experiment, verbose=False):\n topics = experiment.topics\n no_topics = 
len(topics) # The total number of topics used for the given experiment.\n no_permutations = experiment.n # The total number of possible permutations.\n\n if verbose:\n print \"Topics: {0} (total of {1})\".format(topics, no_topics)\n print \"Total permutations: {0}\".format(no_permutations)\n print\n\n for i in range(0, no_permutations):\n rotations = experiment.get_rotations(i)\n\n if verbose:\n print \"Permutation {0} ({1})\".format(i, rotations)\n\n for k in range(0, no_topics):\n rotation_topic = experiment.get_rotation_topic(i, k)\n\n if verbose:\n print \"\\tTopic {0} at permutation list position {1}\".format(rotation_topic, k)\n\n if experiment.get_rotations(i)[k] == experiment.get_rotation_topic(i, k):\n if verbose:\n print \"\\t\\tPASS\"\n else:\n if verbose:\n print \"\\t\\tFAIL\"\n return False\n\n if verbose:\n print \"Permutation check PASSED\"\n\n return True", "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def probability_for_t(cls, t_statistic, dir, df):\n # pval is the probability that any samples will have\n # EQUAL OR MORE THAN abs(t_statistic).\n #\n # FWIW sf = survival function\n pval = t.sf(abs(t_statistic), df)\n return pval * 2 if dir == StatTool.TWO_TAILED_TEST else pval", "def permutation_test(overlap_bins, nonoverlap_bins, thresh, ntrials):\n X = num_top_snps(I(overlap_bins.values()), thresh)\n if X == 0:\n return thresh, 0, 0, 0, 1, 0, 0\n overlap_counts = {k: len(overlap_bins[k]) for k in overlap_bins}\n Y = [num_top_snps(match(overlap_counts, nonoverlap_bins), thresh) for _ in range(ntrials)]\n mean, variance = moments(Y)\n anderson, critical_values, _ = scipy.stats.anderson(Y)\n exact_p = (1 + len([y for y in Y if y >= X])) / (1 + ntrials)\n return thresh, X, mean, variance, exact_p, anderson, critical_values[2]", "def paired_permutation_test(D1, a, b, tradeoff, threshold=0.05, R=10000, verbose=1):\n\n # extract the scores by example for each system\n A = D1[D1.policy == a]\n B = D1[D1.policy == b]\n assert (A.example == B.example).all()\n assert (A.index == B.index).all()\n\n W = B.want.sum() # number of thing we want is constant among permutations\n n = len(A.index)\n\n AC = np.array(A.want_and_got) * 1.0\n AG = np.array(A.got) * 1.0\n A_runtime = np.array(A.pushes) * 1.0\n\n BC = np.array(B.want_and_got) * 1.0\n BG = np.array(B.got) * 1.0\n B_runtime = np.array(B.pushes) * 1.0\n\n # observed value of test statistic -- the difference of rewards.\n T_observed = test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n np.zeros(n, dtype=np.int32), W, tradeoff)\n\n r = 0.0\n for _ in iterview(range(R), msg='perm test'):\n # randomly generate a vector of zeros and ones (uniformly).\n # Note: endpoint not included in np.random.randit (that's why theres a 2).\n flip = np.random.randint(0, 2, size=n).astype(np.int32)\n if test_statistic(AC, AG, A_runtime,\n BC, BG, B_runtime,\n flip, W, tradeoff) >= T_observed:\n r += 1\n s = (r+1)/(R+1)\n\n # observed rewards\n ra = cgw_f(AC.sum(), AG.sum(), W) - tradeoff*A_runtime.mean()\n rb = cgw_f(BC.sum(), BG.sum(), W) - tradeoff*B_runtime.mean()\n\n if verbose:\n # which system has higher reward? 
is it significant?\n asig = (red % bold) if ra > rb and s <= 0.05 else '%s'\n bsig = (blue % bold) if rb > ra and s <= 0.05 else '%s'\n any_sig = bold if s <= threshold else yellow\n\n print asig % 'R(A) = %g (%s)' % (ra, a)\n print bsig % 'R(B) = %g (%s)' % (rb, b)\n print any_sig % 'confidence = %g' % (1-s)\n print\n\n if s <= threshold:\n return s, -1 if ra > rb else +1\n else:\n return s, 0 # \"statistical tie\"", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def run_paired_t(data_generator):\r\n test_stats, pvals = [], []\r\n for b_data, a_data in data_generator:\r\n test_stat, pval = t_paired(b_data, a_data)\r\n test_stats.append(test_stat)\r\n pvals.append(pval)\r\n return test_stats, pvals" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get confidence intervals from nonparametric bootstrap.
def bootstrap_confidence_interval( arr, ci=0.95, n_bootstraps=2000, stat_fun="mean", random_state=None ): if stat_fun == "mean": def stat_fun(x): return x.mean(axis=0) elif stat_fun == "median": def stat_fun(x): return np.median(x, axis=0) elif not callable(stat_fun): raise ValueError("stat_fun must be 'mean', 'median' or callable.") n_trials = arr.shape[0] indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too rng = check_random_state(random_state) boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices))) stat = np.array([stat_fun(arr[inds]) for inds in boot_indices]) ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100) ci_low, ci_up = np.percentile(stat, ci, axis=0) return np.array([ci_low, ci_up])
[ "def get_bootstrap_ci(cons: np.ndarray, n_trials: int = 100):\n cons_trials = np.zeros((n_trials, cons.shape[0]))\n\n for t in range(n_trials):\n\n shuff_scores = np.random.choice(cons, cons.shape[0], replace=True)\n cons_trials[t, :] = shuff_scores\n\n conf_ints = np.zeros((2, n_trials - 1))\n\n # sort shuffled conservation scores by calculating difference\n # between empirical and true means\n cons_trials_diffs = np.mean(cons_trials, axis=1) - np.mean(cons)\n\n sorted_idxs = cons_trials_diffs.argsort()\n sorted_vals = cons_trials[sorted_idxs]\n\n critval = int(0.95 * n_trials)\n lo_crit, hi_crit = n_trials - critval - 1, critval - 1\n\n lo_bound = sorted_vals[lo_crit]\n hi_bound = sorted_vals[hi_crit]\n\n conf_ints[0] = np.histogram(lo_bound, bins=99)[0]\n conf_ints[1] = np.histogram(hi_bound, bins=99)[0]\n\n conf_ints_fracs = conf_ints / np.sum(conf_ints, axis=1)[:, None]\n\n return conf_ints_fracs", "def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range", "def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])", "def eeg_bootstrapCI(array,alpha):\t\n\t\n\tif len(array.shape) == 3:\n\t\tprint \"Only works on 2D bootstrapped data (ntpts x nboot)\"\n\t\tarray_low = []\n\t\tarray_high = []\n\telse:\n\t\tntpts, nboot = array.shape\n\t\t#sort along last (bootstrap) dimension\n\t\tarray_srt = np.sort(array,axis=1)\n\t\tarray_low = array_srt[:,np.round(nboot*alpha/2)-1]\n\t\tarray_high = array_srt[:,np.round(nboot*(1-alpha/2))-1]\n\t\treturn array_low,array_high", "def calc_conf_intervals(self, bootstrap_vals: np.ndarray) -> np.ndarray:\n mtf_vals = np.expand_dims(self._mtf_vals, axis=1)\n\n diff_conf_int = np.quantile(\n a=bootstrap_vals - mtf_vals, q=self._crit_points_inds, axis=1\n ).T\n\n mtf_conf_int = -diff_conf_int + mtf_vals\n\n return mtf_conf_int", "def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment", "def boot_conf_intervals(indep,\n dep,\n estimator,\n display_name=None,\n resample_cases=False,\n significance=0.05,\n num_sims=10000,\n verbose=True,\n seed=None,\n precision=4):\n if display_name is None:\n display_name = \"\"\n\n est_params = estimator.fit(indep, dep)\n est_params = 
np.array(est_params)\n\n params_arr = resampling.boot_param_dist(indep=indep,\n dep=dep,\n estimator=estimator,\n num_sims=num_sims,\n resample_cases=resample_cases,\n seed=seed,\n include_fixed_params=False,\n verbose=verbose)\n\n if estimator.has_restricted_params:\n est_params = est_params[estimator.estimated_params_indices]\n\n (bca_ci_df,\n percentile_ci_df,\n basic_ci_df) = _confidence_intervals(params_arr=params_arr,\n est_params=est_params,\n significance=significance,\n estimator=estimator,\n indep=indep,\n dep=dep)\n\n if verbose:\n def my_formatter(x):\n format_str = '.' + str(precision) + 'f'\n return format(x, format_str)\n\n formatters = [my_formatter for dummy in range(len(bca_ci_df.columns))]\n\n print()\n print(\"confidence level: \", 100.0 * (1.0 - significance), \"%\")\n print()\n print(\"bootstrap bca confidence intervals\")\n print()\n print(bca_ci_df.to_string(formatters=formatters))\n# if latex:\n# print(bca_ci_df.to_latex(escape=False, formatters=formatters))\n# else:\n print(\"bootstrap percentile confidence intervals\")\n print()\n print(percentile_ci_df.to_string(formatters=formatters))\n print()\n print(\"bootstrap basic confidence intervals\")\n print()\n print(basic_ci_df.to_string(formatters=formatters))\n print()\n\n return bca_ci_df, percentile_ci_df, basic_ci_df", "def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm", "def bootstrap_ci(x, B=1000, alpha=0.05, seed=42):\n\n x_arr = np.ravel(x)\n\n if B < 2:\n raise ValueError(\"B must be >= 2\")\n\n if alpha < 0 or alpha > 1:\n raise ValueError(\"alpha must be in [0, 1]\")\n\n np.random.seed(seed)\n\n bmean = np.empty(B, dtype=np.float)\n for b in range(B):\n idx = np.random.random_integers(0, x_arr.shape[0] - 1, x_arr.shape[0])\n bmean[b] = np.mean(x_arr[idx])\n\n bmean.sort()\n lower = int(B * (alpha * 0.5))\n upper = int(B * (1 - (alpha * 0.5)))\n\n return (bmean[lower], bmean[upper])", "def extract_with_confidence(\n self,\n ) -> t.Tuple[\n t.Sequence[str], t.Sequence[float], t.Sequence[float], np.ndarray\n ]:\n if not self._fit:\n raise TypeError(\n \"Please call BootstrapExtractor.fit() method before \"\n \"BootstrapExtractor.extract_with_confidence()\"\n )\n\n if self.verbose > 0:\n print(\n \"Started data resampling with bootstrap with the following \"\n \"configurations:\"\n )\n\n print(\n \"{} Total data resamples: {}\".format(\n _internal.VERBOSE_BLOCK_MID_SYMBOL, self.sample_num\n )\n )\n print(\n \"{} Confidence levels used: {} (total of {}).\".format(\n _internal.VERBOSE_BLOCK_MID_SYMBOL,\n self.confidence,\n len(self.confidence),\n )\n )\n print(\n \"{} Random seeds:\".format(_internal.VERBOSE_BLOCK_END_SYMBOL)\n )\n print(\n \" {} For extractor model: {}\".format(\n _internal.VERBOSE_BLOCK_MID_SYMBOL,\n self._extractor.random_state,\n )\n )\n\n print(\n \" {} For bootstrapping: {}\".format(\n _internal.VERBOSE_BLOCK_END_SYMBOL, self.random_state\n )\n )\n\n bootstrap_vals = self._extract_with_bootstrap(\n mtf_num=len(self._mtf_names)\n )\n\n if self.verbose > 0:\n print(\"Finished data resampling with bootstrap.\")\n print(\"Now calculating confidence intervals...\", end=\" \")\n\n mtf_conf_int = self.calc_conf_intervals(bootstrap_vals)\n\n if self.verbose > 0:\n print(\"Done.\")\n\n return self._mtf_names, self._mtf_vals, self._mtf_time, mtf_conf_int", "def bootstrap_sample(test_x, test_y, model, n):\n aucs = []\n for sample in range(n):\n ind_pos = np.where(test_y.values > 0)\n ind_neg = 
np.where(test_y.values <= 0)\n pos_x = test_x[ind_pos[0], ]\n neg_x = test_x[ind_neg[0], ]\n pos_y = test_y.iloc[ind_pos[0]]\n neg_y = test_y.iloc[ind_neg[0]]\n resampled_pos_x, resampled_pos_y = resample(pos_x, pos_y)\n resampled_neg_x, resampled_neg_y = resample(neg_x, neg_y)\n resampled_x = scipy.sparse.vstack((resampled_pos_x, resampled_neg_x))\n resampled_y = pd.concat((resampled_pos_y, resampled_neg_y), axis=0)\n probs = model.predict_proba(resampled_x)\n aucs.append(roc_auc_score(resampled_y.replace(\n to_replace=-1, value=0), probs[:, 1]))\n # Return 95% confidence interval\n CI = (np.percentile(aucs, 2.5), np.percentile(aucs, 97.5))\n return CI", "def confidenceIntervalForAllPairs(payoffs_mean, payoffs_std, nb_sim, confidence):\n n = len(payoffs_mean)\n confidence_intervals = np.zeros((n,n,2))\n for i in range (n):\n for j in range (n):\n h = payoffs_std[i, j] * t.ppf((1 + confidence) / 2, nb_sim - 1)\n confidence_intervals[i, j] = [payoffs_mean[i, j] - h, payoffs_mean[i, j] + h]\n return confidence_intervals", "def ci_using_normal(data):\n n = len(data)\n\n mu = np.mean(data)\n sigma = np.sqrt(1/n)*np.std(data)\n\n lower = scipy.stats.norm.ppf(\n 0.025, \n loc=mu, \n scale=sigma\n )\n upper = scipy.stats.norm.ppf(\n 0.975, \n loc=mu, \n scale=sigma\n )\n\n return (lower, upper)", "def confidence_intervals(x, y, z_flat, model, degree, alpha = 0, noise = 0):\n X = create_design_matrix(x, y, degree)\n resample = Resampling(X, z_flat)\n betas, variance = resample.bootstrap(model, get_beta_var=True)\n\n CI = 1.96*np.sqrt(variance)\n\n\n #plotting\n plt.xticks(np.arange(0, len(betas), step=1))\n plt.errorbar(range(len(betas)), betas, CI, fmt=\"b.\", capsize=3, label=r'$\\beta_j \\pm 1.96 \\sigma$')\n plt.legend()\n plt.xlabel(r'index $j$')\n plt.ylabel(r'$\\beta_j$')\n plt.grid()\n plt.show()", "def plot_confidence (\r\n data = None, \r\n *, \r\n y=None, \r\n x=None, \r\n ci =.95 , \r\n kind ='line', \r\n b_samples = 1000, \r\n **sns_kws\r\n ): \r\n #y = np.array (y) \r\n #x= x or ( np.arange (len(y)) if \r\n ax=None \r\n if 'lin' in str(kind).lower(): \r\n ax = sns.lineplot(data= data, x=x, y=y, ci=ci, **sns_kws)\r\n elif 'reg' in str(kind).lower(): \r\n ax = sns.regplot(data = data, x=x, y=y, ci=ci, **sns_kws ) \r\n else: \r\n if not y: \r\n raise ValueError(\"y should not be None when using the boostrapping\"\r\n \" for plotting the confidence interval.\")\r\n b_samples = _assert_all_types(\r\n b_samples, int, float, objname=\"Bootstrap samples `b_samples`\")\r\n \r\n from sklearn.metrics import resample \r\n # configure bootstrap\r\n n_iterations = 1000 # here k=no. 
of bootstrapped samples\r\n n_size = int(len(y))\r\n \r\n # run bootstrap\r\n medians = list()\r\n for i in range(n_iterations):\r\n s = resample(y, n_samples=n_size);\r\n m = np.median(s);\r\n medians.append(m)\r\n \r\n # plot scores\r\n plt.hist(medians)\r\n plt.show()\r\n \r\n # confidence intervals\r\n p = ((1.0-ci)/2.0) * 100\r\n lower = np.percentile(medians, p)\r\n p = (ci+((1.0-ci)/2.0)) * 100\r\n upper = np.percentile(medians, p)\r\n \r\n print(f\"\\n{ci*100} confidence interval {lower} and {upper}\")\r\n \r\n return ax", "def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc", "def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high", "def calculate_ci(ci_value, data):\n df = len(data) - 1 # degrees of freedom\n ci = stats.t.interval(ci_value, df, loc=np.mean(data),\n scale=stats.sem(data))\n return ci", "def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether a word is provided in the slot values. Send the word to the URL builder and return the JSON data. Give the user the definition information.
def my_word_definition_handler(handler_input):
    # type: (HandlerInput) -> Response
    slots = handler_input.request_envelope.request.intent.slots

    if word_slot in slots:
        curr_word = slots[word_slot].value
        handler_input.attributes_manager.session_attributes[
            word_slot_key] = curr_word

        try:
            response = http_get(curr_word, False)

            if response:
                speech = ("The definition of {} with part of speech {} "
                          "is: {}".format(curr_word, response[0]['fl'],
                                          response[0]['shortdef'][0]))
                reprompt = ("What word would you like me to look up?")
            else:
                speech = ("I am sorry I could not find the word {}").format(curr_word)
                reprompt = ("What word would you like me to look up?")
        except:
            speech = ("I am sorry I could not find the word {}. "
                      "Can I look up another word?").format(curr_word)
            reprompt = ("What word would you like me to look up?")
    else:
        speech = "I'm not sure what word to look up, please try again"
        reprompt = ("I didn't catch that. What word would you like me "
                    "me to look up?")

    handler_input.attributes_manager.session_attributes[previous_key] = speech

    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
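The handler above calls a module-level helper http_get(word, flag) that is not included in this row. A hypothetical sketch of such a helper; the endpoint, the key handling, and the meaning of the second argument are assumptions for illustration, not the skill's actual code:

import requests

# Placeholder endpoint and key. The 'fl'/'shortdef' fields used above resemble a
# Merriam-Webster-style response, but the real URL builder is not shown.
DICTIONARY_URL = "https://www.dictionaryapi.com/api/v3/references/collegiate/json/{}"
API_KEY = "YOUR-API-KEY"

def http_get(word, _unused_flag):
    # The second argument mirrors the call sites above; its role in the real skill is unknown.
    resp = requests.get(DICTIONARY_URL.format(word), params={"key": API_KEY}, timeout=10)
    resp.raise_for_status()
    return resp.json()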
[ "def my_word_example_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if example_slot in slots:\n curr_word = slots[example_slot].value\n handler_input.attributes_manager.session_attributes[\n example_slot_key] = curr_word\n\n try:\n response = http_get(curr_word, False)\n\n if response:\n example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]\n if example == \"vis\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n elif example == \"wsgram\":\n vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])\n speech = (\"An example with {} (part of speech {}) \"\n \"is: {}\".format(curr_word, response[0]['fl'],\n vis))\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = (\"No example is available for {}\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n except Exception as e:\n speech = (\"No example is available for {}. \"\n \"Can I look up another word?\").format(curr_word)\n reprompt = (\"What word would you like me to look up?\")\n else:\n speech = \"I'm not sure what word to look up, please try again\"\n reprompt = (\"I didn't catch that. What word would you like me \"\n \"me to look up?\")\n\n handler_input.attributes_manager.session_attributes[previous_key] = speech\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def guess():\n\n if request.data:\n req = json.loads(request.data)\n user_input = req['guess']\n print(\"user_input:\", user_input)\n\n board = session['board']\n response = boggle_game.check_valid_word(board=board, word=user_input)\n print(response)\n my_result = {\"user_input\": user_input,\n \"result\": response}\n return my_result\n else:\n return \"NOT OK\", 400", "def lookup_word(word):\n\n return API.get_response(word)", "def validate_word(word: str) -> bool:\n if word:\n url = f'{OXFORD_DICT_BASE_URL}{OXFORD_DICT_ENTRY_URL}/en-us/{word.lower()}'\n headers = {\n 'app_id': settings.OXFORD_APP_ID,\n 'app_key': settings.OXFORD_API_KEY,\n }\n\n logger.info(f'validating {word} against oxford dictionary...')\n response = requests.get(\n url,\n headers=headers,\n )\n\n if response.status_code == status.HTTP_200_OK:\n return True\n else:\n return False\n\n return False", "def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])", "async def submit(self, ctx, word : str = None):\r\n if word is None:\r\n await ctx.reply((f\"**Lost on a word you think should be valid? Submit it here!**\\n\\n\"\r\n f\"Unfortunately, getting every little word in the English language \"\r\n f\"is difficult. We apologize if you lost your potentially \"\r\n f\"record-winning game due to using a word that wasn't in our \"\r\n f\"database.\\n\\nIf you feel a word should be added in the database, \"\r\n f\"please do `%wc add <word>` to add it to a list for review. \"\r\n f\"There are, however, some guidelines to follow: \"\r\n f\"1. All words are __alphabetic__, meaning each character in that \"\r\n f\"word is one of the 26 letters. 
Hyphenated words such as 'x-ray' \"\r\n f\"have been shortened to xray. Likewise, words with spaces like \"\r\n f\"'a cappella' are simply 'acappella'.\\n\"\r\n f\"2. All words, even proper nouns, will become lowercase. Common \"\r\n f\"first names such as 'John' are permissible to be added, but not \"\r\n f\"last names or company names. Place names are also permissible.\\n\"\r\n f\"3. Commonly used slang terms are currently not included in the \"\r\n f\"database. They can also be submitted for review.\\n\"\r\n f\"4. The character limit is 32 characters.\"))\r\n return\r\n\r\n # Make sure word could possibly be valid\r\n word = word.lower()\r\n\r\n if not word.isalpha():\r\n return await ctx.reply('Every character in this word must be one of 26 letters.')\r\n\r\n if len(word) > 32:\r\n return await ctx.reply('The character limit is 32 characters.')\r\n\r\n # See if word already exists in database\r\n psql = \"\"\"SELECT id FROM word_list WHERE word = $1\"\"\"\r\n async with self.client.en_dict.acquire() as conn:\r\n if await conn.fetchval(psql, word) is not None:\r\n return await ctx.reply('The word you submitted is already in our database.')\r\n\r\n # Add it to list of words to be reviewed\r\n psql = \"\"\"INSERT INTO word_review (word) VALUES ($1)\"\"\"\r\n await conn.execute(psql, word)\r\n await ctx.reply('Your word has been submitted for review.')", "def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n channel = tracker.get_latest_input_channel().strip() #Tracking the Current channel name\n channel = \"rest\" # Making the channel rest for now\n intent = tracker.get_intent_of_latest_message().strip() # Tracking the Intent\n intent_org = copy(intent)\n if intent in [\"location\", \"contact\"] and tracker.get_slot(\"form_intent\") is None:\n return []\n intent = intent if intent not in [\"location\", \"contact\"] else tracker.get_slot(\"form_intent\")\n slot_value = tracker.get_slot(intent) # Getting the slot value\n response_number = tracker.get_slot(f\"{intent}_count\") # Getting the Index number\n if slot_value is None:\n \"\"\"\n If the Slot values are None. That is if the user is intitating a specific intent for the 1st time.\n Since there are no slots are filled. 
The bot prompts the Entities that are extracted from the JSON file.\n \"\"\"\n slot_values = basics.giveEntity(intent = intent) # Getting the entities based on the intent\n buttons = Utilities.createButtons(intent = intent, entity = intent, slot_values = slot_values) # Creating the buttons from the list of slot values\n dispatcher.utter_message(text = \"Select one\", buttons = buttons) # Dispatching the text and buttons\n return [SlotSet(\"form_intent\", intent)]\n else:\n \"\"\"\n If the user is already in the conversation with the same intent.\n The bot looks for the responses\n \"\"\"\n slot_value = slot_value.lower().strip() # Converting the slot value to lower inorder to maintain equaity for matching\n if response_number is not None:\n response_number = response_number.strip()\n else:\n \"\"\"If the user is in his 1st conversation, setting the response number to str(0)\"\"\"\n response_number = \"0\"\n response, next_response = basics.giveResponse(intent = intent, entity = slot_value, response_number = response_number, channel = channel) # Getting the Current and next response from the JSON file\n str_next_response = \" \".join(next_response)\n if next_response == \"\":\n \"\"\"\n If the next_response is None that is the current message is End of the conversation.\n Dispatching the message, Setting the final slot values.\n \"\"\"\n current_slots = tracker.get_slot(\"data\")\n if current_slots is None:\n current_slots = []\n current_slots.append(tracker.get_slot(intent_org))\n if \"location\" not in str_next_response.lower() and \"contact\" not in str_next_response.lower() and \"location\" not in response.lower() and \"contact\" not in response.lower():\n dispatcher.utter_message(text = response)\n\n dummy_slot_value = tracker.get_slot(\"dummy\")\n if dummy_slot_value is not None: current_slots.append(dummy_slot_value)\n current_slots.insert(0, tracker.get_slot(intent))\n\n if \"location\" in str_next_response.lower() or \"contact\" in str_next_response.lower() or \"location\" in response.lower() or \"contact\" in response.lower():\n current_slots = [i for i in current_slots if i is not None]\n for index, values in enumerate(current_slots[:-1], 0):\n if values == current_slots[index+1]:\n current_slots.pop(index)\n string = Utilities.BeautigyString(current_slots)\n dispatcher.utter_message(text = f\"Please confirm that {string}\", buttons = Utilities.createButtonsWithDiff(intent = \"general_form\", entity = \"dummy\", slot_values = [\"Yes\"]) )\n return [SlotSet(\"data\", current_slots)]\n else:\n for index, values in enumerate(current_slots[:-1], 0):\n if values == current_slots[index+1]:\n current_slots.pop(index)\n # print(\"The Final slot values that are needed to send to the server are: \", current_slots)\n string = Utilities.convertListToRequiredFormat(data = current_slots)\n auction_response = Auction.writeScipt(description = string)\n dispatcher.utter_message(text = auction_response)\n return [SlotSet(\"data\", current_slots), AllSlotsReset()]\n \n else:\n \"\"\"\n If there is still a next response.\n The entity is changed to dummy inorder to preserve the entity name.\n \"\"\"\n current_slots = tracker.get_slot(\"data\")\n if current_slots is None:\n current_slots = []\n dummy_slot_value = tracker.get_slot(\"dummy\")\n if dummy_slot_value is not None:\n current_slots.append(tracker.get_slot(\"dummy\"))\n for i in [\"location\", \"contact\"]:\n temp = tracker.get_slot(i)\n if temp is not None:\n current_slots.append(temp)\n str_next_response = \" \".join(next_response)\n if not 
\"location\" or \"contact\" in str_next_response.lower():\n entity = \"dummy\"\n buttons = Utilities.createButtons(intent = intent, entity = entity, slot_values = next_response[1:]) \n elif \"location\" in str_next_response.lower() or \"contact\" in str_next_response.lower() or \"location\" in response or \"contact\" in response:\n if current_slots[0] != tracker.get_slot(intent_org): current_slots.append(tracker.get_slot(intent_org))\n current_slots.append(tracker.get_slot(\"dummy\"))\n current_slots = [i for i in current_slots if i is not None]\n for index, values in enumerate(current_slots[:-1], 0):\n if values == current_slots[index+1]:\n current_slots.pop(index)\n string = Utilities.BeautigyString(current_slots)\n dispatcher.utter_message(text = f\"Please confirm that {string}\", buttons = Utilities.createButtonsWithDiff(intent = \"general_form\", entity = \"dummy\", slot_values = [\"Yes\"]) )\n else:\n entity = \"dummy\"\n if \"select\" in next_response[0].lower():\n next_response = next_response[1:]\n buttons = Utilities.createButtons(intent = intent_org, entity = entity, slot_values = next_response)\n buttons = buttons if Utilities.checkButtons(buttons) else None\n dispatcher.utter_message(text = response, buttons = buttons)\n return [SlotSet(f\"{intent}_count\", str(int(response_number)+1)), SlotSet(\"data\", current_slots)]", "def wordInfo(self, input_word):\n return self.app.get('/words/1.0/info/' + input_word, follow_redirects=True, headers=self.headers)", "def get_phrase(self):\n if self.skye.config.get(\"urban_dictionary\", \"api_key\") == \"\":\n self.skye.speak(\"No API key has been set in the config file.\")\n return\n self.skye.speak(\"What phrase should I define?\")\n word = self.skye.active_listen()\n if (word != -1 and\n word != -2 and\n word != -3):\n self.get_definition(word)\n else:\n self.skye.speak(\"Sorry, I didn't understand you.\")", "def test_word_info_bad_request(self):\n word = \"defination of vitality \"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n \"code\": 400,\n \"message\": \"A Term must be only a single word\"\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 400)\n self.assertEquals(response_data[\"code\"], expected_output[\"code\"])\n self.assertEquals(response_data[\"message\"], expected_output[\"message\"])", "def urbandictionary(self, irc, msg, args, optlist, optterm):\n \n # default args for output.\n args = {'showExamples': True, 'numberOfDefinitions':'10', 'showVotes': False }\n \n # optlist to change args.\n if optlist:\n for (key, value) in optlist:\n if key == 'disableexamples':\n args['showExamples'] = False\n if key == 'showvotes':\n args['showVotes'] = True\n if key == 'num': # number of definitions. max 10 default but also is enforced \n if value > self.registryValue('maxNumberOfDefinitions') or value <= 0:\n args['numberOfDefinitions'] = '10'\n else:\n args['numberOfDefinitions'] = value\n \n # url \n url = 'http://api.urbandictionary.com/v0/define?term=%s' % (urllib.parse.quote(optterm))\n\n # try fetching url.\n try:\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n except Exception as e:\n irc.reply(\"{0} fetching {1}\".format(self._red(\"Error\"), url))\n self.log.debug(\"Error fetching: {0} :: {1}\".format(url, e))\n return\n\n # try parsing json. 
\n try:\n jsondata = response.read().decode('utf-8')\n jsondata = json.loads(jsondata.replace(r'\\r','').replace(r'\\n','')) # odds chars in UD.\n except:\n irc.reply(\"Failed to parse json data. Check logs for error\")\n return\n \n # handle output based on what comes back. 2 different results, fallback on else.\n # asshole - \"has_related_words\": true, \"result_type\": \"exact\"\n # assmole - \"has_related_words\": true, \"result_type\": \"exact\", total: 1\n # asswole - \"has_related_words\": false, \"result_type\": \"no_results\" - list->term\n definitions = jsondata.get('list', None) \n result_type = jsondata.get('result_type', None)\n total = jsondata.get('total', None)\n \n if result_type == \"exact\" and len(jsondata['list']) > 0: # exact, means we found, and we have definitions.\n output = [] # container to put all def/ex in.\n for item in jsondata['list'][0:int(args['numberOfDefinitions'])]: \n outputstring = \"{0}\".format(item['definition'].strip()) # start outputstring.\n if args['showExamples']: # if we're showing examples\n try:\n if self.registryValue('disableANSI'):\n outputstring += \" {0} {1} {2}\".format(\"[ex:]\", item['example'].strip(), \"[/ex]\")\n else:\n outputstring += \" {0} {1} {2}\".format(self._bu(\"[ex:]\"), item['example'].strip(), self._bu(\"[/ex]\"))\n except:\n self.log.warning(\"UrbanDictionary: Failed to find exampple for query '\" + optterm + \"'\")\n if args['showVotes']: # if we're showing votes\n outputstring += \" (+{0}/-{1})\".format(item['thumbs_up'], item['thumbs_down'])\n \n output.append(outputstring) # finally add to output\n \n #output.\n if self.registryValue('disableANSI'):\n irc.reply(\"{0} ({1}): {2}\".format(optterm, total, \" | \" .join([item for item in output])))\n else:\n irc.reply(\"{0} ({1}): {2}\".format(self._red(optterm), total, \" | \" .join([item for item in output])))\n\n elif result_type == \"no_results\" and len(jsondata['list']) > 0:\n outrelated = \" | \".join([item['term'] for item in jsondata['list']])\n \n if self.registryValue('disableANSI'):\n irc.reply(\"{0}: {1} not found. {2}: {3}\".format(\"ERROR\", optterm, \"Related terms\", outrelated))\n else:\n irc.reply(\"{0}: {1} not found. 
{2}: {3}\".format(self._red(\"ERROR\"), optterm, self._bu(\"Related terms\"), outrelated))\n\n else:\n if self.registryValue('disableANSI'):\n irc.reply(\"{0} nothing found in output looking up: {1}\".format(\"ERROR\", optterm))\n else:\n irc.reply(\"{0} nothing found in output looking up: {1}\".format(self._red(\"ERROR\"), optterm))", "async def validate_title(\n self,\n slot_value: Any,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: DomainDict,\n ) -> Dict[Text, Any]:\n if type(slot_value) is str:\n if slot_value.lower() == \"skip\":\n return {\"title\": \"skip\"}\n else:\n return {\"title\": slot_value} \n \n dispatcher.utter_message(response=\"utter_wrong_type\")\n return {\"title\": None}", "def test_word_info_bad_word(self):\n word = \"hdiasudhisuahdiasushdiaushdiaushdiasuhdisauh\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": None,\n \"defination\": None,\n \"antonyms\": None,\n \"examples\": None,\n \"pronounciation\": None,\n \"synonyms\": None\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def makeRequestForWord(self, word):\n\n\t\turl = 'https://od-api-2445581300291.apicast.io:443/api/v1/entries/en/'\\\n\t\t+ word + '?include=lexicalCategory'\n\t\theader = { \"Accept\": \"application/json\",\n\t \t\t\t\"app_id\": self.parser.get('api_keys', 'oxford_app_id'),\n\t \t\t\t\"app_key\": self.parser.get('api_keys', 'oxford_key')}\n\t\tresponse = requests.get(url, headers = header)\n\t\tif response.status_code == 200:\n\t\t\ttempDict = response.json()\n\t\t\treturn self.extractOxfordSamples(tempDict, word)\n\t\tprint('Status code: {}'.format(response.status_code))\n\t\treturn None", "def urban_dict(word):\n\n url = \"https://mashape-community-urban-dictionary.p.rapidapi.com/define\"\n\n querystring = {}\n\n querystring[\"term\"] = word\n\n headers = config.headers\n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n print(response.text)", "async def _check(self, ctx, word : str):\r\n word = word.lower()\r\n psql = \"\"\"SELECT id FROM word_list WHERE word = $1\"\"\"\r\n\r\n async with self.client.en_dict.acquire() as conn:\r\n word_id = await conn.fetchval(psql, word)\r\n\r\n if word_id is None:\r\n return await ctx.reply((f'**{word}** is not currently in our database.\\nThink it '\r\n f'should be? 
Submit it for review with `%wc add`!'))\r\n else:\r\n word_score = sum([point_conversion[char] for char in word])\r\n await ctx.reply((f\"**{word}** is a valid term for use in Word Chain!\\nUsing it in \"\r\n f\"Scrabble Mode would net you **{word_score}** points!\"))", "async def validate_title(\n self,\n slot_value: Any,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: DomainDict,\n ) -> Dict[Text, Any]:\n if type(slot_value) is str:\n if slot_value.lower() == \"skip\":\n return {\"title\": \"skip\"}\n else:\n return {\"title\": slot_value}\n \n dispatcher.utter_message(response=\"utter_wrong_type\")\n return {\"title\": title}", "def create_validation_function(name_of_slot):\n def validate_slot(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n \"\"\"Validate user input.\"\"\"\n\n if value.lower() in self.answers_db()[name_of_slot]:\n # validation succeeded, set the value of the slot to \n # user-provided value\n return {name_of_slot: value}\n else:\n # find the closest answer by some measure (edit distance?)\n choices = self.answers_db()[name_of_slot]\n answer = process.extractOne(value.lower(), choices)\n\n # check to see if distnace is greater than some threshold\n if answer[1] < 45:\n # if so, set slot to \"other\"\n return {name_of_slot: \"other\"}\n else:\n return {name_of_slot: answer[0]}\n \n return(validate_slot)", "def submit_definition():\n if request.method == \"POST\":\n game = mongo.db.games.find_one(\n {\"game_name\": request.form.get(\"game_name\")})\n user = mongo.db.users.find_one({\"username\": session[\"user\"]})\n today = date.today()\n submission_date = today.strftime(\"%Y/%m/%d\")\n definition = {\n \"term_header\": request.form.get(\"term_header\").upper(),\n \"game_fk\": game['_id'],\n \"short_definition\": request.form.get(\"short_definition\"),\n \"long_description\": request.form.get(\"long_description\", False),\n \"youtube_link\": request.form.get(\"youtube_link\", False),\n \"submitted_by\": user[\"_id\"],\n \"submission_date\": submission_date,\n \"rating\": 1,\n \"upvoted_by\": [user[\"_id\"]],\n \"downvoted_by\": []\n }\n mongo.db.terms.insert_one(definition)\n updateUserRating(definition, 1)\n flash(f\"Thank you, {session['user']}, for your submission\",\n category=\"success\")\n return redirect(url_for(\"get_terms\"))\n try:\n # Ensure that user is logged in before displaying page\n if session[\"user\"]:\n games = mongo.db.games.find().sort(\"game_name\", 1)\n return render_template(\"add_term.html\", games=games)\n except KeyError:\n # Redirect user to homepage if not logged in\n flash(Markup(\"Please <a href='login'>\"\n \"login</a> or <a href='register'>\"\n \"register</a> to add a new definition\"), category=\"error\")\n return redirect(url_for(\"get_terms\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function handles the example sentence intent
def my_word_example_handler(handler_input):
    # type: (HandlerInput) -> Response
    slots = handler_input.request_envelope.request.intent.slots

    if example_slot in slots:
        curr_word = slots[example_slot].value
        handler_input.attributes_manager.session_attributes[
            example_slot_key] = curr_word

        try:
            response = http_get(curr_word, False)

            if response:
                example = response[0]['def'][0]['sseq'][0][0][1]['dt'][1][0]
                if example == "vis":
                    vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][1][1][0]['t'])
                    speech = ("An example with {} (part of speech {}) "
                              "is: {}".format(curr_word, response[0]['fl'],
                                              vis))
                elif example == "wsgram":
                    vis = remove_italics(response[0]['def'][0]['sseq'][0][0][1]['dt'][2][1][0]['t'])
                    speech = ("An example with {} (part of speech {}) "
                              "is: {}".format(curr_word, response[0]['fl'],
                                              vis))
                else:
                    speech = ("No example is available for {}").format(curr_word)
                reprompt = ("What word would you like me to look up?")
            else:
                speech = ("No example is available for {}").format(curr_word)
                reprompt = ("What word would you like me to look up?")
        except Exception as e:
            speech = ("No example is available for {}. "
                      "Can I look up another word?").format(curr_word)
            reprompt = ("What word would you like me to look up?")
    else:
        speech = "I'm not sure what word to look up, please try again"
        reprompt = ("I didn't catch that. What word would you like me "
                    "me to look up?")

    handler_input.attributes_manager.session_attributes[previous_key] = speech

    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
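remove_italics is another helper this row does not include. A hypothetical one-liner, assuming it strips Merriam-Webster-style {it}...{/it} markup from the example sentence (an assumption based on the field names used above):

def remove_italics(text):
    # Strip assumed {it}...{/it} italics tokens from an example sentence.
    return text.replace("{it}", "").replace("{/it}", "")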
[ "def get_sentence(self):", "def get_suggested_text(): \n # for now, just print the sentences\n pass", "def motivation_letter(mistake_word, true_word):", "def hook(self, sentence, words):\n pass", "def sentence():\r\n return nounPhrase() + \" \" + verbPhrase()", "def test_make_sentences():\n long_comment = ['I think this movie was really good and will go and see it again. '\n 'This movie really sucked and I hated it']\n new_sentences = make_sentences(long_comment[0])\n print(new_sentences)", "def test_extract_sentence(self) -> None:\n\n def selection_func(_sen_id: int, _pos: int, _example: Example) -> bool:\n return True\n\n extracted_activations, extracted_labels = self._base_extract(selection_func)\n\n self.assertTrue(\n (extracted_activations == self.all_activations).all(),\n \"Selection function didn't extract all activations\",\n )\n self.assertTrue(\n (extracted_labels == self.all_labels).all(),\n \"Selection function didn't extract all labels\",\n )", "def _reason(self, sentences, features):\n raise NotImplementedError", "def test_get_sentence_sentiments():\n long_comment = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n\n sentence_score_list = get_sentence_sentiments(long_comment[0])\n print(long_comment[0])\n print('per sentence sentiment:', sentence_score_list)\n print()", "def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))", "def main(words, s):\n if words:\n words = int(words)\n click.echo(lorem.words(words))\n\n # Returns a lorem ipsum sentence\n elif s:\n click.echo(lorem.sentence())\n\n # Returns a lorem ipsum paragraph by default\n else:\n click.echo(lorem.paragraph())", "def respondToEmotion(self, words, sentiment):\r\n self.askAboutEmotion += 1\r\n noRec = \" By the way, I did not recognize any movie or genre title in your response.\"\r\n word = random.choice(\r\n [w for w in words if not w.lower() in self.genres])\r\n wordSentiment = self.sentiment.get(word)\r\n if word in [\"like\", \"love\", \"hate\", \"dislike\", \"enjoy\"]:\r\n return random.choice([\"What do you %s about it?\",\r\n \"Why do you %s it?\",\r\n \"What makes you %s it?\",\r\n ]) % word + noRec + self.addendumMild()\r\n if word in [\"liked\", \"loved\", \"hated\", \"disliked\", \"enjoyed\"]:\r\n return random.choice([\"What did you %s about it?\",\r\n \"Why did you %s it?\",\r\n \"What made you %s it?\",\r\n ]) % word + noRec+ self.addendumMild()\r\n else:\r\n newWord = word\r\n if sentiment > 0:\r\n if wordSentiment == 'neg':\r\n newWord = 'not ' + word\r\n if word.lower() in ['great', 'awesome', 'fun', 'cool', 'amazing', 'interesting', 'fascinating', 'good']:\r\n sent = random.choice([\"I am not sure what is %s in your opinion. Please explain.\",\r\n \"What makes you feel it is %s?\",\r\n \"And what makes it so %s?\",\r\n \"Is there any particular reason you feel it is %s?\",\r\n ]) % newWord + noRec + self.addendumMild() \r\n elif word.lower() in ['happy', 'glad', 'satisfied', 'curious']:\r\n sent = random.choice([\"I am not sure what you are %s about. 
Please explain.\",\r\n \"What makes you feel %s?\",\r\n \"And how does it feel to be %s?\",\r\n \"Is there any particular reason you feel %s?\",\r\n ]) % newWord + noRec + self.addendumMild()\r\n else:\r\n sent = random.choice([\"I am happy to hear you feel that way.\",\r\n \"I am glad you feel that way.\",\r\n \"That is good to hear.\",\r\n \"Ok, that's good.\",\r\n \"I appreciate you sharing it with me.\"]) + noRec + self.addendum()\r\n\r\n return sent\r\n if sentiment < 0:\r\n if wordSentiment == 'pos':\r\n newWord = 'not ' + newWord\r\n\r\n if word.lower() in ['bad', 'annoying', 'boring', 'miserable', 'stupid', 'ridiculous', 'uninteresting', 'pointless']:\r\n sent = random.choice([\"I am not sure what is %s in your opinion. Please explain.\",\r\n \"What makes you feel it is %s?\",\r\n \"And what makes it so %s?\",\r\n \"Is there any particular reason you feel it is %s?\",\r\n ]) % newWord + noRec + self.addendumMild()\r\n elif word.lower() in ['sad', 'angry', 'unhappy', 'upset', 'bored']:\r\n #\"Is there anything I can do to make you not %s?\"\r\n sent = random.choice([\"I am not sure what you are %s about. Please explain.\",\r\n \"What makes you feel %s?\",\r\n \"Thank you for sharing this with me. We can continue this discussion if you want.\",\r\n \"I am sorry to hear you are %s! Feel free to tell me more about it.\",\r\n \"And how does it feel to be %s?\",\r\n \"Is there any particular reason you feel %s?\",\r\n\r\n ]) % newWord + noRec + self.addendumMild()\r\n else:\r\n sent = random.choice([\"I am sorry to hear you feel that way.\",\r\n \"That is not so good to hear.\",\r\n \"I am sorry to hear that. Would you like to elaborate?\",\r\n \"Ok, gotcha.\",\r\n \"I appreciate you sharing it with me.\"]) + noRec + ' '+self.addendum()\r\n return sent", "def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")", "def test_extend_to_sentence(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year, Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000 tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly weather impacts every American. 
Communities can now rely on the National Weather Service’s StormReady program to help them guard against the ravages of Mother Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading to around 500 deaths per year and nearly $14 billion in damage. StormReady, a program started in 1999 in Tulsa, OK, helps arm America's communities with the communication and safety skills needed to save lives and property– before and during the event. StormReady helps community leaders and emergency managers strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of severe weather through better planning, education, and awareness. No community is storm proof, but StormReady can help communities save lives. Does StormReady make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.5\", \"3.5\"),\n after_sel=(\"1.395\", \"3.142\"),\n command_name=\"extend-to-sentence\",\n )", "def test_one_noun(self):\n cc = self.setup()\n create = cc.create_sentence\n res1 = create('noun !')\n self.assertEqual(res1, 'koer !')", "def response(sentence, model, user_id='123', context={}, show_details=False):\n # Load intents\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n with open(data_path) as json_data:\n intents = json.load(json_data)\n\n # Classify sentence\n results = classify(sentence, model)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # set context for this intent if necessary\n if 'context_set' in i:\n if show_details: print('context:', i['context_set'])\n context[user_id] = i['context_set']\n\n # check if this intent is contextual and applies to this user's conversation\n if not 'context_filter' in i or \\\n (user_id in context and 'context_filter' in i and i['context_filter'] == context[user_id]):\n if show_details: print ('tag:', i['tag'])\n # a random response from the intent\n if i[\"tag\"] == \"goodbye\":\n print(random.choice(i['responses']))\n sys.exit()\n else:\n return print(random.choice(i['responses']))\n\n results.pop(0)", "def intent_feedback(ner_result):\n reply_list = []\n\n for ner in ner_result:\n intent, value = ner.split('\\t')\n if intent == \"景点-名称\":\n reply_list.append(f\"{value}附近有什么酒店?\")\n reply_list.append(f\"{value}在哪里?\")\n reply_list.append(f\"{value}周围有什么好吃的?\")\n\n elif intent == \"地铁-出发地\":\n reply_list.append(f\"{value}在哪里?\")\n\n elif intent == \"餐馆-名称\":\n reply_list.append(f\"{value}有什么推荐菜?\")\n reply_list.append(f\"{value}在哪里?\")\n reply_list.append(f\"{value}评分怎么样?\")\n\n\n elif intent == \"餐馆-推荐菜\":\n reply_list.append(f\"帮我找一家有{value}的餐馆。\")\n\n elif intent == \"餐馆-营业时间\":\n reply_list.append(f\"{value}的时候,故宫附近有什么餐馆在营业?\")\n\n elif intent == \"景点-门票\":\n reply_list.append(f\"帮我找个免费的景点。\")\n reply_list.append(f\"帮我找个我买不起门票的景点。\")\n\n elif intent == \"景点-评分\":\n reply_list.append(f\"帮我找个评分为{value}的景点。\")\n\n reply = \"抱歉,小七没听懂。您是不是想问:\"\n\n if len(reply_list) > 4:\n random.shuffle(reply_list)\n\n count = 0\n for sentence in reply_list:\n reply += f\"<br>{sentence}\"\n count += 1\n if count >= 4:\n break\n\n return reply, len(reply_list)", "def subject_info(intent, extra_info=[]):\n\n text = intent['inputTranscript'].lower()\n utterances = AS.load_file('sample_utterances.txt')\n\n # add \"book\" and \"books\" to every utterance\n 
for line in list(utterances):\n utterances.insert(0, line + \" book\")\n utterances.insert(0, line + \" books\")\n\n # tells how many characters needs to be dropped before the subject starts\n to_drop = 0\n\n for line in utterances:\n if text.startswith(line):\n to_drop = len(line)\n break\n\n # drops the characters and makes a list from the strings that are left\n text = text[to_drop:].strip()\n text_list = text.split(' ', len(text))\n\n subject_list = []\n keywords = [\"books\", \"book\", \"by\", \"published\", \"written\"]\n keyword = \"\"\n\n # Find out when the book name ends\n for word in text_list:\n if word not in keywords:\n subject_list.append(word)\n else:\n break\n\n subject = \" \".join(subject_list)\n\n # Get all the keywords in the middle, so they can be\n # all be dropped at once, eg written by, books by\n text_list = text_list[len(subject_list):]\n if text_list:\n word = text_list[0]\n while word in keywords:\n keyword += word + \" \"\n text_list = text_list[1:]\n if text_list:\n word = text_list[0]\n else:\n break\n\n # search for an author from the rest of the characters\n author_text = text[len(keyword):].strip()\n author = AS.search(author_text, False)\n if author is \"\":\n author = None\n\n # There might be old info in the extra_info (author), so \n # we need to clear it\n extra_info.clear()\n\n # add the author to extra info so it can be used in the Finna API call\n if author:\n extra_info += [\"author:\\\"\" + author + \"\\\"\"]\n elif intent['sessionAttributes'].get('author'):\n extra_info += [\n \"author:\\\"\" + intent['sessionAttributes']['author'] + \"\\\"\"\n ]\n\n # The Finna API call\n request = lookfor(term=subject, filter=extra_info)['json']\n\n return parse_subject(request, subject, {'author': author})", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: 
{'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AMAZON.FallbackIntent is only available in the en-US locale. This handler will not be triggered except in that locale, so it is safe to deploy it in any locale.
def fallback_handler(handler_input):
    # type: (HandlerInput) -> Response
    speech = (
        "The {} skill can't help you with that. "
        "I can look up a word in the dictionary for you").format(skill_name)
    reprompt = ("I can look up a word in the dictionary, "
                "Just say any word in English")
    handler_input.response_builder.speak(speech).ask(reprompt)
    return handler_input.response_builder.response
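The document shows only the handler body. In the decorator-based ASK SDK for Python style, such a handler is typically registered against a SkillBuilder, roughly as sketched below; the registration code is an assumption, since it is not part of the original snippet:

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_intent_name

sb = SkillBuilder()

@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_handler(handler_input):
    # Body shortened here; see the document above for the full speech/reprompt text.
    speech = "The skill can't help you with that."
    return handler_input.response_builder.speak(speech).response

lambda_handler = sb.lambda_handler()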
[ "def _fallback_range(self, utterances, lang, message, fb_range):\n msg = message.reply(\n 'mycroft.skills.fallback',\n data={'utterance': utterances[0][0],\n 'lang': lang,\n 'fallback_range': (fb_range.start, fb_range.stop)}\n )\n response = self.bus.wait_for_response(msg, timeout=10)\n if response and response.data['handled']:\n ret = IntentMatch('Fallback', None, {}, None)\n else:\n ret = None\n return ret", "def get_fallback_language():\n return settings.DEFAULT_LANGUAGE", "def test_language_fallback(self):\n create_page(\"page\", \"nav_playground.html\", \"en\", published=True)\n request = self.get_request('/', 'de')\n response = details(request, '')\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response['Location'], '/en/')\n with SettingsOverride(CMS_LANGUAGE_FALLBACK=False):\n self.assertRaises(Http404, details, request, '')", "def is_fallback(self):\n return True", "def localization_intercepter(handler_input):\n skill_locale = handler_input.request_envelope.request.locale\n\n # language_strings.jsonから言語用データを取得\n with open(\"language_strings.json\") as language_prompts:\n language_data = json.load(language_prompts)\n # set default translation data to broader translation\n data = language_data[skill_locale[:2]]\n \n # if a more specialized translation exists, then select it instead\n # example: \"fr-CA\" will pick \"fr\" translations first, but if \"fr-CA\" translation exists,\n # then pick that instead\n if skill_locale in language_data:\n data.update(language_data[skill_locale])\n handler_input.attributes_manager.request_attributes[\"_\"] = data\n\n # configure the runtime to treat time according to the skill locale\n skill_locale = skill_locale.replace('-','_')\n \n locale.setlocale(locale.LC_TIME, skill_locale)", "def _load_transliterated_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(\\\n self._transliterate_text(self.lblFallback['text'][start:]))\n pass", "def initialize(self):\n # Warning:\n # This sets the fallback priority to 1, making it happen before\n # all other fallbacks, even padatious intents.\n # This is good for this example but BAD in general,\n # a fallback prio between 11 and 89 is good for most skills.\n\n self.register_fallback(self.handle_fallback, 1)\n\n # Any other initialize code goes here", "def auto_english_property_fallback(self, auto_english_property_fallback):\n\n self._auto_english_property_fallback = auto_english_property_fallback", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def notify_osd_fallback(title, message, sound, fallback):\n\n # Fallback to wxPython notification\n fallback(title, message, sound)", "def disable_fallback(self):\n return self._disable_fallback", "def testAudioFallback(self):\n if self.audioFallback in tools.AUDIO_FALLBACKS:\n self.assertEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n else:\n self.assertNotEqual(\n self.audioFallback,\n self.config.audioFallback\n )\n self.assertEqual(\n tools.AUDIO_FALLBACK_DEFAULT,\n self.config.audioFallback\n )", "def fallbackFont(self, font):\n if font:\n font = _tryInstallFontFromFontName(font)\n font = font.encode(\"ascii\", \"ignore\")\n testFont = AppKit.NSFont.fontWithName_size_(font, self._fontSize)\n if testFont is None:\n raise DrawBotError(\"Fallback font 
'%s' is not available\" % font)\n self._fallbackFont = font", "def use_en(self):\n pass", "def is_fallback_only(self, language):\n lang_status = self.language_status.get(language, {})\n if lang_status:\n if (not lang_status.get('lsp', {}) and\n not lang_status.get('kite', {})):\n return True\n return False", "def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass", "def disable_fallback(self, disable_fallback):\n self._disable_fallback = disable_fallback", "def test_fallback_language_no_current(self):\n x = SimpleModel()\n x.set_current_language(self.conf_fallback)\n x.tr_title = \"TITLE_FALLBACK\"\n\n self.assertEqual(\n x.safe_translation_getter(\"tr_title\", language_code=self.other_lang1), \"TITLE_FALLBACK\"\n )", "def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of snapshot_ids associated with the given image
def getSnapshotsOf(image):
    snapshotIds = []
    deviceMapping = image.block_device_mapping  # dict of devices
    devices = deviceMapping.keys()
    for d in devices:
        snapshotId = deviceMapping[d].snapshot_id
        if snapshotId is not None:
            snapshotIds.append(snapshotId.encode())
    return snapshotIds
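A short usage sketch. The attribute names used above (block_device_mapping, snapshot_id) match the legacy boto 2 EC2 API, so the example assumes that library and a hard-coded region; neither assumption comes from the snippet itself:

import boto.ec2  # legacy boto 2, inferred from the attribute names used above

conn = boto.ec2.connect_to_region("us-east-1")
for image in conn.get_all_images(owners=["self"]):
    print(image.id, getSnapshotsOf(image))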
[ "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis", "def all_image_ids(self):\n return list(self._image_id_to_pattern)", "def cmd_account_image_ids(client, args):\n account_image_ids = client.get_account_image_ids(args.username, args.page)\n generate_output({'account_image_ids': account_image_ids})", "def get_image_ids():\n # build url query\n url = \"http://api.brain-map.org/api/v2/data/query.xml?criteria=model::AtlasImage,\"\n url += \"rma::criteria,\"\n url += \"[annotated$eqtrue],\"\n url += \"atlas_data_set(atlases[id$eq265297125]),\"\n url += \"alternate_images[image_type$eq'Atlas+-+Human'],\"\n url += \"rma::options[order$eq'sub_images.section_number'][num_rows$eqall]\"\n\n # create request object\n r = requests.get(url)\n\n # parse XML\n root = ET.fromstring(r.text)\n image_ids = []\n for image in root.iter('atlas-image'):\n image_id = image.find('id').text\n image_ids.append(image_id)\n\n return image_ids", "def get_images_id(images):\n images_id = []\n for item in images:\n id = item[\"id\"]\n images_id.append(id)\n return images_id", "def snapshot(self) -> List[Token]:\n return self.process_image(self.capture_image())", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def get_roi_ids(conn: BlitzGateway, image_id: int,\n across_groups: Optional[bool] = True) -> List[int]:\n if not isinstance(image_id, int):\n raise TypeError('Image ID must be an integer')\n roi_ids = []\n roi_svc = conn.getRoiService()\n roi_list = roi_svc.findByImage(image_id, None)\n for roi in roi_list.rois:\n roi_ids.append(roi.id.val)\n return roi_ids", "def get_img_ids(self):\n return tuple(self.data.keys())", "def get_image_ids(gateway, dataset_id):\n\tbrowse = gateway.getFacility(BrowseFacility)\n\texperimenter = gateway.getLoggedInUser()\n\tctx = SecurityContext(experimenter.getGroupId())\n\timages = []\n\tids = ArrayList(1)\n\tids.add(Long(dataset_id))\n\tj = browse.getImagesForDatasets(ctx, ids).iterator()\n\twhile j.hasNext():\n\t image = j.next()\n\t images.append({\n\t 'Image 
Id': String.valueOf(image.getId()),\n\t 'Image Name': image.getName(),\n\t 'Dataset Id': dataset_id,\n\t })\n\treturn images", "def image_ids_from_names(image_names):\n ec2 = boto3.resource('ec2')\n images = ec2.images.filter(Filters=[{\n 'Name': 'name',\n 'Values': image_names\n }])\n return [i.image_id for i in images]", "def __get_picture_id_list(new):\n id_list = []\n\n if new.image1:\n id_list.append(1)\n if new.image2:\n id_list.append(2)\n if new.image3:\n id_list.append(3)\n if new.image4:\n id_list.append(4)\n\n return id_list", "def get_annotations_list_by_image_id(self, annotations, ifile):\n return annotations['RELEASE'][0][0][0][0][ifile][3][0]", "def get_my_image_ids(self) -> Union[List[int], None]:\n if self.imported is not True:\n logging.error(f'File {self.file_path} has not been imported')\n return None\n else:\n q = self.conn.getQueryService()\n params = Parameters()\n path_query = self.make_substitutions()\n path_query = path_query.strip('/')\n params.map = {\"cpath\": rstring(path_query)}\n results = q.projection(\n \"SELECT i.id FROM Image i\"\n \" JOIN i.fileset fs\"\n \" JOIN fs.usedFiles u\"\n \" WHERE u.clientPath=:cpath\",\n params,\n self.conn.SERVICE_OPTS\n )\n self.image_ids = [r[0].val for r in results]\n return self.image_ids", "def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def get_image_sources(conn, image_id):\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n cur.execute('SELECT * FROM detected_source WHERE image_id = %s',\n (image_id, ))\n rows = cur.fetchall()\n\n conn.commit()\n\n detected_sources = []\n for row in rows:\n detected_sources.append(dbclasses.DetectedSource())\n dbclasses.dict2attr(detected_sources[-1], row)\n cur.execute('SELECT * FROM detected_island WHERE isl_id = %s AND image_id = %s', (detected_sources[-1].isl_id,detected_sources[-1].image_id))\n row2=cur.fetchone()\n detected_sources[-1].rms_isl = row2['rms']\n detected_sources[-1].mean_isl = row2['mean']\n detected_sources[-1].resid_rms = row2['resid_rms']\n detected_sources[-1].resid_mean = row2['resid_mean']\n detected_sources[-1].total_flux_isl = row2['total_flux']\n detected_sources[-1].total_flux_islE = row2['e_total_flux']\n\n cur.close()\n return detected_sources" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use dictionaries because we'll have to cross-reference the snapshots with the AMIs they belong to. Returns a list of dictionaries representing the images from one region.
def getImagesD(region):
    images = getImages(region)
    imageDicts = []
    for im in images:
        imageDict = {"name": im.name,
                     "id": im.id,
                     "region": im.region.name,
                     "state": im.state,
                     "created": im.creationDate,
                     "type": im.type,
                     "KEEP": getKeepTag(im),
                     "name_tag": get_name_tag(im),
                     "snapshots": getSnapshotsOf(im),
                     "description": im.description,
                     "PROD": isProduction(im)
                     }
        imageDicts.append(imageDict)
    return imageDicts
[ "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def ListImages(region):\r\n all_images = GetImages(region, GetOwnerIDs(region))\r\n running_images = set([i.image_id for i in ListInstances(region)])\r\n\r\n if len(all_images) == 0:\r\n print 'No images in region %s' % region\r\n return\r\n print '# %-14s %-8s %-40s %-40s' % ('ID', 'Active', 'Name', 'Description')\r\n for i in all_images:\r\n active_str = 'ACTIVE' if i.id in running_images else ''\r\n print '%-16s %-8s %-40s %-40s' % (i.id, active_str, i.name, i.description)", "def get_imagelist(self, regionobj):\n\n return _getimagelist(regionobj)", "def getAmisOf(snapshot, images):\n amis = []\n for im in images:\n snapshotsOfThisIm = getSnapshotsOf(im)\n for soti in snapshotsOfThisIm:\n if soti == snapshot.id:\n amis.append(im)\n return amis", "def get_images(self):\n data = {}\n \n # Get current Sol\n sol = self.tosol()\n \n # Go back one Mars fortnight through tosol\n for sol in range(sol-14, sol+1):\n print \"getting sol %d\" % sol\n # get on sol of images\n sol_data = self.get_images_from_sol(sol)\n if not sol_data:\n print \"sol %d not retrieved\" % sol\n else:\n now = datetime.datetime.now()\n now = int(time.mktime(now.timetuple()))\n data[\"%d\"%sol] = {\"images\": sol_data, \"time\": now}\n time.sleep(1)\n \n \n return data", "def _getimagelist(regionobj):\n\n try:\n target = regionobj.target\n auth = identity.Password(\n auth_url=target['keystone_url'], username=target['user'],\n password=target['password'], tenant_name=target['tenant'],\n )\n sess = session.Session(auth=auth)\n token = auth.get_token(sess)\n endpoint = auth.get_endpoint(\n session, 'image', region_name=regionobj.region)\n glance_client = GlanceClient('1', endpoint=endpoint, token=token)\n glance_client.images.client.timeout = timeout\n # images = glance_client.images.list()\n pool = Pool(1)\n result = pool.apply_async(_getrawimagelist, (glance_client,))\n images = result.get(timeout=20)\n image_list = list()\n for image in images:\n i = GlanceSyncImage(\n image['name'], image['id'], regionobj.fullname,\n image['owner'], image['is_public'], 
image['checksum'],\n image['size'], image['status'], image['properties'], image)\n\n image_list.append(i)\n\n except Exception, e:\n cause = str(e)\n if not cause:\n cause = repr(e)\n msg = regionobj.fullname + \\\n ': Error retrieving image list. Cause: ' + cause\n logger.error(msg)\n raise Exception(msg)\n\n return image_list", "def _images_dict():\n import os\n\n store_path = os.path.dirname(os.path.abspath(__file__))\n image_list_file = \"/images.json\"\n images_location = \"{}{}\".format(store_path, image_list_file)\n images = dict()\n with open(images_location, 'r') as file:\n images.update(json.load(file))\n return images", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def get_image_information(client):\n\n pipeline = [{\"$match\": {\"camera_views\": {\"$exists\": 1}}}, {\"$unwind\": {\"path\": \"$camera_views\"}}, {\"$addFields\": {\n \"camera_views.average_linear_distance\": {\n \"$divide\": [\n \"$camera_views.total_linear_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.average_angular_distance\": {\n \"$divide\": [\n \"$camera_views.total_angular_distance\",\n \"$camera_views.num_entities\"\n ]\n },\n \"camera_views.timestamp\": \"$timestamp\",\n \"camera_views._id\": \"$_id\",\n \"camera_views.database\": client.database.name,\n \"camera_views.collection\": client.name,\n 'camera_views.file_id':\"$camera_views.images.file_id\", #Add the Color image id for downloading and testing\n }}, {\"$replaceRoot\": {\"newRoot\": \"$camera_views\"}}, {\"$project\": {\n \"_id\": 1,\n \"num_entities\": 1,\n \"average_linear_distance\": 1,\n \"average_angular_distance\": 1,\n \"timestamp\": 1,\n \"duplicate\": 1,\n \"database\":1,\n \"collection\":1,\n \"file_id\":{\"$arrayElemAt\":[\"$images.file_id\",0]}, # Only keep the first file id (The Color image)\n }}]\n pprint.pprint(pipeline)\n result = list(client.aggregate(pipeline))\n return result", "def get_public_images(self):\n return dict((i.id, i) for i in self.glance.images.list()\n if i.is_public)", "def _get_all_images(self):\n\t\tq = ndb.gql(\"SELECT answer FROM Image_db\")\n\t\tdb_data = []\n\t\tfor item in q:\n\t\t\tdb_data.append({'img_id': item.key.id(), 'answer': item.answer}) \n\t\treturn json.dumps(db_data)", "def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary", "def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))", "def returnSnapshots(self):\n tempDict = {}\n for i in self.trials.keys():\n tempDict[i] = {}\n for j in self.trials[i].trajectories.keys():\n tempDict[i][j] = self.trials[i].trajectories[j].snapshots.keys()\n return 
tempDict", "def get_images(self):\r\n if self.images is None:\r\n self.images = {}\r\n for name, img_num in self.images.iteritems():\r\n if isinstance(img_num, int):\r\n yield (name, img_num)", "def get_imgid_dict(ann):\n return {item[1][\"file_name\"]: item[0] for item in ann.imgs.items()}", "def _vmware_get_snapshots(self, content, inventory):\n snapshots_count_table = []\n snapshots_age_table = []\n virtual_machines = self._vmware_get_obj(content, [vim.VirtualMachine])\n for virtual_machine in virtual_machines:\n if not virtual_machine or virtual_machine.snapshot is None:\n continue\n else:\n self.threader.thread_it(self._vmware_get_snapshot_details,\n [snapshots_count_table, snapshots_age_table, virtual_machine, inventory])\n return snapshots_count_table, snapshots_age_table" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a list of dictionaries representing snapshots from one region
def getSnapshotsD(region):
    # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
    snapshots = getSnapshots(region)
    snapshotsDicts = []
    ims = getImages(region)
    for s in snapshots:
        amis = getAmisOf(s, ims)
        amiIds = []
        amiKeeps = []

        if len(amis) == 1:
            amiIds = amis[0].id.encode()
            amiKeeps = getKeepTag(amis[0])
        elif len(amis) == 0:
            amiIds = "-------no-AMI-found"
            amiKeeps = "-------no-AMI-found"
        else:
            for a in amis:
                amiIds.append(a.id.encode())
                amiKeeps.append(getKeepTag(a))

        snapshotsDict = {"id": s.id,
                         "status": s.status,
                         "region": s.region.name,
                         "progress": s.progress,
                         "start_time": s.start_time,
                         "volume_id": s.volume_id,
                         "volume_size": s.volume_size,
                         "KEEP-tag": getKeepTag(s),
                         "Name": get_name_tag(s),
                         "AMI(s)": amiIds,
                         "AMI_KEEP-tags": amiKeeps,
                         "PROD": isProduction(s),
                         "Description": s.description
                         }
        snapshotsDicts.append(snapshotsDict)
    return snapshotsDicts
[ "def get_snapshots():\n #client = boto_ec2_client(region)\n paginator = ec2.get_paginator('describe_snapshots')\n response_iterator = paginator.paginate(OwnerIds=[accountId])\n snapshots = list()\n for page in response_iterator:\n for obj in page['Snapshots']:\n snapshots.append(obj)\n\n return(snapshots)", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def list_snapshots(self):\n snapshots_raw = self.__compute_client.snapshots.list()\n\n snapshots = []\n for snapshot in snapshots_raw:\n snapshot_dict = {}\n snapshot_dict['name'] = snapshot.name\n snapshot_dict['snapshot_type'] = snapshot.account_type.value\n snapshot_dict['location'] = snapshot.location\n snapshot_dict['snapshot_time'] = snapshot.time_created.isoformat()\n snapshot_dict['tags'] = snapshot.tags\n snapshot_dict['resource_group'] = snapshot.id.split('/')[4].lower()\n\n snapshots.append(snapshot_dict)\n\n return snapshots", "def snapshots(self):\n command = [\"/sbin/zfs\", \"list\", \"-H\", \"-p\", \"-o\", \"name,creation,receive_resume_token\",\n \"-r\", \"-d\", \"1\", \"-t\", \"snapshot\", \"-s\", \"creation\",\n self.target]\n try:\n output = subprocess.check_output(command).split(\"\\n\")\n except subprocess.CalledProcessError:\n # We'll assume this is because there are no snapshots\n return []\n snapshots = []\n for snapshot in output:\n if not snapshot:\n continue\n (name, ctime, resume_token) = snapshot.rstrip().split()\n d = {\"Name\" : name, \"CreationTime\" : int(ctime) }\n if resume_token != \"-\":\n d[\"ResumeToken\"] = resume_token\n snapshots.append(d)\n \n return snapshots", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def _vmware_get_snapshots(self, content, inventory):\n snapshots_count_table = []\n snapshots_age_table = []\n virtual_machines = self._vmware_get_obj(content, [vim.VirtualMachine])\n for virtual_machine in virtual_machines:\n if not virtual_machine or virtual_machine.snapshot is None:\n continue\n else:\n self.threader.thread_it(self._vmware_get_snapshot_details,\n [snapshots_count_table, snapshots_age_table, virtual_machine, inventory])\n return snapshots_count_table, snapshots_age_table", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()", "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if 
isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def target_snapshots(self):\n m = self.mapfile\n if debug:\n print(\"mapfile = {}\".format(m), file=sys.stderr)\n if self.source in m:\n return m[self.source][\"snapshots\"]\n else:\n return []", "def get_instance_snapshots(pageToken=None):\n pass", "def returnSnapshots(self):\n tempDict = {}\n for i in self.trials.keys():\n tempDict[i] = {}\n for j in self.trials[i].trajectories.keys():\n tempDict[i][j] = self.trials[i].trajectories[j].snapshots.keys()\n return tempDict", "def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps", "def _get_snapshots(ds):\n command = [\"/sbin/zfs\", \"list\", \"-H\", \"-p\", \"-o\", \"name,creation,receive_resume_token\",\n \"-r\", \"-d\", \"1\", \"-t\", \"snapshot\", \"-s\", \"creation\",\n ds]\n if debug:\n print(\"get_snapshots: {}\".format(\" \".join(command)), file=sys.stderr)\n try:\n output = CHECK_OUTPUT(command).decode('utf-8').split(\"\\n\")\n except subprocess.CalledProcessError:\n # We'll assume this is because there are no snapshots\n return []\n snapshots = []\n for snapshot in output:\n snapshot = snapshot.rstrip()\n if not snapshot:\n continue\n if debug:\n print(\"Output line: {}\".format(snapshot), file=sys.stderr)\n (name, ctime, resume_token) = snapshot.split(\"\\t\")\n name = name.split('@')[1]\n d = { \"Name\" : name, \"CreationTime\" : int(ctime) }\n if resume_token != \"-\":\n d[\"ResumeToken\"] = resume_token\n snapshots.append(d)\n return snapshots", "def list_snapshots(args):\n html_doc = document.Document(get_code(args.file))\n edition, region, snapshots = html_doc.list(date=args.edition, region=args.region)\n print('Snapshots for {:s} {:%B %d, %Y}'.format(region.capitalize(), edition))\n for i in range(len(snapshots)):\n print('({:2d}) {!r:} -'.format(i, snapshots[i][1]) +\n ' {0:%B} {0.day:2}, {0:%Y %l:%M:%S.%f %p}'.format(snapshots[i][0]))", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def get_regions_from_audit_resources(audit_resources: list) -> list:\n audited_regions = []\n for resource in audit_resources:\n region = resource.split(\":\")[3]\n if region and region not in audited_regions: # Check if arn has a region\n audited_regions.append(region)\n if audited_regions:\n return audited_regions\n return None", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' 
here?\n return snapshot_ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a list of dictionaries representing volumes from one region
def getVolumesD(region):
    volumes = getVolumes(region)
    instances = getInstancesD(region)

    volumesDicts = []
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
[ "def get_volumes(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_volumes = conn.get_all_volumes()\n except boto.exception.EC2ResponseError:\n return [] # This better not fail silently or I'll cut a person.\n return region_volumes", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print json.dumps({'data': lst})", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def volumes(self):\n return VolumesAsDictionaryWrapper(self.spec.volumes, self._messages.Volume)", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def get_volumes(vol_filter=\"all\"):\n outobj = subprocess.run(\n \"localcli --formatter json storage filesystem list\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n )\n file_list = json.loads(outobj.stdout)\n if vol_filter is not None:\n vol_filter = vol_filter.lower()\n if vol_filter == 'vmfs':\n file_list = [i['Mount Point'] for i in file_list if i['Type'].lower() == 'VFFS' or\n 'vmfs' in i['Type'].lower()]\n elif vol_filter == 'vfat':\n file_list = [i['Mount Point'] for i in file_list if 'vfat' in i['Type'].lower()]\n else:\n file_list = [i['Mount Point'] for i in file_list]\n return file_list", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def generateInfoVolumes(regions):\n print \"\\nWriting volumes info to output file %s\" % volumes_data_output_file\n with open(volumes_data_output_file, 'w') as f1:\n f1.write(\"VOLUMES\\n\")\n f1.write(\n 
\"Name\\tvolume_ID\\tKEEP-tag_of_volume\\tKEEP-tag_of_instance\\tproduction?\\tvolume_attachment_state\\tassociated_instance\\tinstance_state\\tsize\\tcreate_time\\tregion\\tzone\\tassociated_snapshot\\n\\n\")\n for r in regions:\n volumes = getVolumes(r)\n print \".\" # give some feedback to the user\n for v in volumes:\n f1.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\"\n % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,\n v.create_time, v.region.name, v.zone, v.snapshot_id))", "def getVolumes(self):\n response, body = self.http.get('/volumes')\n return body", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def get_volumes(container):\n mounts = container.get('Mounts')\n return [dict(source=mount['Source'], destination=mount['Destination']) for mount in mounts]", "def variants_in_region(region):\n\n url = (\"http://{}/overlap/region/human/{}?feature=variation\"\n \"&species=homo_sapiens\"\n \"&content-type=application/json\")\n\n url = url.format(ensembl_url, region)\n logging.debug(\"Queried url: \" + url)\n\n with urllib.request.urlopen(url) as stream:\n res = json.loads(stream.read().decode(\"utf-8\"))\n\n return res", "def get_volumes(self, vserver_name=None, vfiler_name=None):\n\n volumes = []\n if self.cluster_mode:\n with self.vserver_context(vserver_name):\n api_vol_attrib = NaElement(\"volume-attributes\")\n api_vol_attrib.child_add(NaElement(\"volume-id-attributes\"))\n api_vol_attrib.child_add(NaElement(\"volume-space-attributes\"))\n api_vol_attrib.child_add(NaElement(\"volume-language-attributes\"))\n api_desired_attributes = NaElement(\"desired-attributes\")\n 
api_desired_attributes.child_add(api_vol_attrib)\n\n for volume in self._invoke_cmode_iterator('volume-get-iter', desired_attributes_el=api_desired_attributes):\n name = volume.child_get('volume-id-attributes').child_get_string('name')\n uuid = volume.child_get('volume-id-attributes').child_get_string('instance-uuid')\n aggr = Aggr(self, volume.child_get('volume-id-attributes').child_get_string('containing-aggregate-name'))\n space_attrs = volume.child_get('volume-space-attributes')\n if space_attrs is not None:\n size_used = space_attrs.child_get_int('size-used') if space_attrs.child_get_string('size-used') is not None else 0\n size_available = space_attrs.child_get_int('size-available') if space_attrs.child_get_string('size-available') is not None else 0\n size_total = space_attrs.child_get_int('size-total') if space_attrs.child_get_string('size-total') is not None else 0\n else:\n size_used = None\n size_available = None\n size_total = None\n lang_attrs = volume.child_get('volume-language-attributes')\n if lang_attrs is not None:\n language_code = lang_attrs.child_get_string('language-code')\n else:\n language_code = None\n volumes.append(FlexVol(self, name, vserver_name=vserver_name, size_used=size_used, size_available=size_available, size_total=size_total, uuid=uuid, containing_aggregate=aggr, language_code=language_code))\n else:\n out = self.invoke('volume-list-info')\n for volume in out.child_get('volumes').children_get():\n name = volume.child_get_string('name')\n uuid = volume.child_get_string('uuid')\n aggr = Aggr(self, volume.child_get_string('containing-aggregate'))\n vfiler = volume.child_get_string('owning-vfiler')\n size_used = volume.child_get_int('size-used') if volume.child_get_string('size-used') is not None else 0\n size_available = volume.child_get_int('size-available') if volume.child_get_string('size-available') is not None else 0\n size_total = volume.child_get_int('size-total') if volume.child_get_string('size-total') is not None else 0\n if vfiler_name is None or vfiler_name == vfiler:\n volumes.append(FlexVol(self, name, vfiler_name=vfiler, size_used=size_used, size_available=size_available, size_total=size_total, uuid=uuid, containing_aggregate=aggr))\n return volumes", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def list_volumes(self, ex_zone=None):\n list_volumes = []\n zone = ex_zone or self.zone\n if zone == 'all':\n zone = None\n if zone is None:\n request = '/aggregated/disks'\n elif hasattr(zone, 'name'):\n request = '/zones/%s/disks' % zone.name\n else:\n request = '/zones/%s/disks' % zone\n\n response = self.connection.request(request, method='GET').object\n if 'items' in response:\n # The aggregated response returns a dict for each zone\n if zone is None:\n for v in response['items'].values():\n zone_volumes = [self._to_storage_volume(d) for d in\n v.get('disks', [])]\n list_volumes.extend(zone_volumes)\n else:\n list_volumes = [self._to_storage_volume(d) for d in\n response['items']]\n return list_volumes", "def get_cg_volumes(self, group_id):\r\n for volume in self.xcli_client.cmd.vol_list(cg=group_id):\r\n if volume.snapshot_of == '':\r\n yield volume.name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a list of dictionaries representing instances for one region; this will help with the volume -> instance -> KEEP-tag lookup. Maybe.
def getInstancesD(region):
    instances = getInstances(region)
    instancesDicts = []
    for i in instances:
        instancesDicts.append({"id": i.id,
                               "KEEP-tag": getKeepTag(i),
                               "instance_type": i.instance_type,
                               "state": i.state,
                               "launch_time": i.launch_time,
                               "security_groups": getGroups(i),
                               "region": i.region.name,
                               "PROD": isProduction(i)
                               })
    return instancesDicts
[ "def get_instances(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_instances = []\n reservations = conn.get_all_reservations()\n for reservation in reservations:\n for instance in reservation.instances:\n region_instances.append(instance)\n except boto.exception.EC2ResponseError:\n return []\n return region_instances", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def get_rds_instances():\n return {\n 'DBInstances': [\n {\n 'MultiAZ': True,\n 'DBInstanceClass': 'db.t2.medium',\n 'DBInstanceIdentifier': 'test1'\n },\n {\n 'MultiAZ': False,\n 'DBInstanceClass': 'db.m3.medium',\n 'DBInstanceIdentifier': 'test2'\n }\n ]\n }", "def listInstancesRegionZone(region,zone):\n\tprint \"-\"*80\n\tprint \"# Region :\",region,\" Zone\", zone\t\n\tprint \"-\"*80\n\tinstances = getInstancesRegionZone(region,zone)\n\tif instances:\n\t\tfor instance in instances:\n\t\t\tprint \"[\",instance.ami_launch_index,\"]\",instance.ip_address,\" (\",instance.private_ip_address,\") \",instance.instance_type,\" key=\",instance.key_name", "def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }", "def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})", "def get_region_instances(region, tables, auth):\n table = PrettyTable([\n \"Name\", \"Key-Name\", \"Type\", \"Placement\", \"Public-DNS\",\n \"Private-IP\", \"Instance-ID\", \"State\", \"Launch Time\"\n ])\n table.padding_width = 1\n ec2 = boto.ec2.connect_to_region(\n region.name, aws_access_key_id=auth.aws_access_key_id,\n aws_secret_access_key=auth.aws_secret_access_key)\n if ec2:\n reservations = ec2.get_all_instances()\n if reservations:\n for reservation in reservations:\n for i in reservation.instances:\n try:\n instance_name = i.tags['Name']\n except KeyError:\n instance_name = \"N/A\"\n if i.public_dns_name:\n accessname = i.public_dns_name\n elif i.ip_address:\n accessname = i.ip_address\n else:\n accessname = \"n/a\"\n table.add_row([\n instance_name,\n i.key_name,\n i.instance_type,\n i.placement,\n accessname,\n i.private_ip_address,\n i.id,\n i.state,\n i.launch_time\n ])\n tables[region.name] = table\n return", "def instances_by_region(aws_region=config.AWS_AWS_REGION):\n instances = _get_instances(aws_region)\n formatter = InstanceFormatter(instances)\n formatter.display()", "def getImagesD(region):\n images = getImages(region)\n imageDicts = []\n for im in images:\n imageDict = {\"name\": im.name,\n \"id\": im.id,\n \"region\": im.region.name,\n \"state\": im.state,\n \"created\": im.creationDate,\n \"type\": im.type,\n \"KEEP\": getKeepTag(im),\n \"name_tag\": get_name_tag(im),\n \"snapshots\": getSnapshotsOf(im),\n \"description\": 
im.description,\n \"PROD\": isProduction(im)\n }\n imageDicts.append(imageDict)\n return imageDicts", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def get_instances_by_name(client, instance_name):\n response = client.describe_instances()\n instances = []\n # response[\"Reservations\"][0][\"Instances\"][0][\"Tags\"]\n try:\n for r in response[\"Reservations\"]:\n for i in r[\"Instances\"]:\n for t in i[\"Tags\"]:\n if t.get(\"Key\", None) == \"Name\" and t.get(\"Value\", None) == instance_name:\n instances.append(i[\"InstanceId\"])\n except Exception as e:\n print(str(e))\n\n return instances", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? 
Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_all_instances(self):\n final_result = list()\n\n for region in AWSAccount.get_aws_account().regions.values():\n AWSAccount.set_aws_region(region)\n for instance in self.execute(self.client.describe_instances, \"Reservations\"):\n final_result.extend(instance['Instances'])\n return [EC2Instance(instance) for instance in final_result]", "def list_instances():\n return get_workfile_metadata(SECTION_NAME_INSTANCES)", "def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])", "def get_ec2(l_session: boto3.session, l_regions: []):\n l_ec2s = []\n for region in l_regions:\n ec2 = l_session.resource('ec2', region_name=region)\n print(\"Checking region: {}\".format(region))\n for instance in ec2.instances.all():\n line = defaultdict(lambda: \"-\")\n line['id'] = instance.id\n line['instance_type'] = instance.instance_type\n line['cpus'] = instance.cpu_options['CoreCount'] * \\\n instance.cpu_options['ThreadsPerCore']\n line['memory'] = get_instance_type_memory(l_session, line['instance_type'])\n size = 0\n for volume in instance.volumes.all():\n size += volume.size\n line['storage'] = size\n try:\n line['platform'] = str(instance.image.platform_details).replace(',',';')\n line['image_description'] = str(instance.image.description).replace(',',';')\n except:\n pass\n\n line['state'] = instance.state['Name']\n line['region'] = region\n l_ec2s.append(line)\n return l_ec2s", "def get_aws_instances(self, connection):\n try:\n all_reservations_list = connection.get_all_instances()\n all_instances_list = [instances for reservations in all_reservations_list for instances in reservations.instances]\n return all_instances_list\n except Exception as exp_object:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n self.ah_obj.print_exception(\"aws_helper.py\", \"get_aws_instances()\", exp_object, exc_type, exc_obj, exc_tb)\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
retrieve list of AMIs that refer to a given snapshot
def getAmisOf(snapshot, images):
    amis = []
    for im in images:
        snapshotsOfThisIm = getSnapshotsOf(im)
        for soti in snapshotsOfThisIm:
            if soti == snapshot.id:
                amis.append(im)
    return amis
[ "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts", "def get_snaps_for_instance(client, rds_instance, snapshot_type=''):\n snapshots = []\n\n resp = client.describe_db_snapshots(\n DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],\n SnapshotType=snapshot_type\n )\n while 'Marker' in resp:\n snapshots.extend(resp['DBSnapshots'])\n resp = client.describe_db_snapshots(\n DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],\n SnapshotType=snapshot_type,\n Marker=resp['Marker']\n )\n snapshots.extend(resp['DBSnapshots'])\n return snapshots", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def snapshot_access_get_all(self, context, snapshot):\n rules = self.db.share_snapshot_access_get_all_for_share_snapshot(\n context, snapshot['id'], {})\n return rules", "def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. 
\"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list", "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def find_asg_using_amis(ami_ids):\n # ref: return = { ami_id : \"lc_arns\":[]}\n ami_ids = listify(ami_ids)\n result = {id: [] for id in ami_ids}\n\n client_asg = boto3.client('autoscaling')\n lc = client_asg.describe_launch_configurations()\n\n for a_lc in lc['LaunchConfigurations']:\n if a_lc['ImageId'] in ami_ids:\n result[a_lc['ImageId']].append(a_lc['LaunchConfigurationARN'])\n return result", "def snapshots(self):\n command = [\"/sbin/zfs\", \"list\", \"-H\", \"-p\", \"-o\", \"name,creation,receive_resume_token\",\n \"-r\", \"-d\", \"1\", \"-t\", \"snapshot\", \"-s\", \"creation\",\n self.target]\n try:\n output = subprocess.check_output(command).split(\"\\n\")\n except subprocess.CalledProcessError:\n # We'll assume this is because there are no snapshots\n return []\n snapshots = []\n for snapshot in output:\n if not snapshot:\n continue\n (name, ctime, resume_token) = snapshot.rstrip().split()\n d = {\"Name\" : name, \"CreationTime\" : int(ctime) }\n if resume_token != \"-\":\n d[\"ResumeToken\"] = resume_token\n snapshots.append(d)\n \n return snapshots", "def jail_snapshot_list(jnid = ''):\n jname = jnid\n if 'BASE-' in jnid:\n jnid = '/BASE-RW/%s@' % jnid\n else:\n jnid = '/%s@' % jnid\n \n try:\n jsnap = subprocess.check_output(\"zfs list -t snapshot |grep \"+jnid, shell=True)\n except:\n msg = \" ERROR: No zfs snapshots found for '%s'\" % (jnid)\n log(msg)\n return False\n\n jsnap = jsnap.split('\\n')\n jsnapn = []\n for i in jsnap:\n i = i.split(' ')\n while True:\n try:\n i.remove(\"\")\n except ValueError:\n break\n jsnapn.append(i)\n\n lmen = ['Number', \"'%s' current snapshots\" % jname, 'Size']\n del jsnapn[-1]\n jsn = 0\n jsnn = []\n for i in jsnapn:\n jsnn.append([jsn, i[0], i[3]])\n jsn = jsn + 1\n\n return [jsnn, lmen]", "def _find_snapshots_for_instance(self, conn, instance, name):\n filters = {\"tag:Name\": name,\n \"tag:Original_instance\": instance.id}\n snapshots = conn.get_all_snapshots(filters=filters)\n if snapshots:\n # Sort by date so that the latest snapshot is at the head\n # of the list.\n snapshots.sort(reverse=True, key=lambda sn: sn.start_time)\n return snapshots", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def get_snapshots():\n #client = boto_ec2_client(region)\n paginator = ec2.get_paginator('describe_snapshots')\n response_iterator = paginator.paginate(OwnerIds=[accountId])\n snapshots = list()\n for page in response_iterator:\n for obj in page['Snapshots']:\n snapshots.append(obj)\n\n return(snapshots)", 
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def RetrieveACISA():\n\tdb = DBConnector()\n\tcur = db.cursor()\n\n\tSQLcmd = \"SELECT * FROM snaps.SNAPsLocation\"\n\tcur.execute(SQLcmd)\n\treturnList = []\n\tcount = 0\n\tfor item in cur.fetchall():\n\t\tcount += 1\n\t\ttmplist = [item[1], item[2], count, str(item[0])]\n\t\treturnList.append(tmplist)\n\treturn returnList", "def find_ec2_using_amis(ami_ids):\n # ref: return = { \"ami_id\" : \"ec2\":[]}\n ami_ids = listify(ami_ids)\n result = {id: [] for id in ami_ids}\n\n client = boto3.client('ec2')\n image_id_filter = {'Name': 'image-id', 'Values': ami_ids}\n filter = [image_id_filter]\n\n response = client.describe_instances(Filters=filter)\n for res in response['Reservations']:\n for instance in res['Instances']:\n result[instance['ImageId']].append(instance['InstanceId'])\n\n return result", "def snapshot_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"snapshot_arns\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a tag with key='KEEP' exists, return its value (which can be an empty string); otherwise return the '-------no-tag' placeholder.
def getKeepTag(obj):
    if 'KEEP' in obj.tags:
        return obj.tags['KEEP']
    else:
        return "-------no-tag"
    # try:
    #     tag = obj.tags['KEEP']
    # except:
    #     # Note: some with empty KEEP-tags, through web console they look the same as those untagged
    #     return "-----"
    # return tag
[ "def get_keep_tag(obj):\n if 'KEEP' in obj.tags and len(obj.tags['KEEP'].strip()) != 0:\n return obj.tags['KEEP']\n else:\n return \"-------no-tag\"", "def get_tag(key: str) -> Optional[str]:\n _check_active_model_version()\n return _active_model_version.get_tag(key) # type: ignore", "def keep_tag_by_default(self, tag):\n result = self.__default_tag_map.get(tag)\n if result is not None:\n return result\n for regex in self.__exclude_tag_regexes:\n if regex.match(tag):\n self.__default_tag_map[tag] = False\n return False\n for regex in self.__include_tag_regexes:\n if regex.match(tag):\n self.__default_tag_map[tag] = True\n return True\n self.__default_tag_map[tag] = self.__default_keep_tag\n return self.__default_keep_tag", "def handleKeepFileTag(tag):\n infoString = tag.getAttribute('info')\n displayColor = tag.getAttribute('displayColor')\n\n keepString = getText(tag.childNodes)\n\n return {'keepFileString':keepString,\n 'infoString':infoString,\n 'displayColor':displayColor}", "def get_ignore_value(self, topic):\n return self._get_value('ignore', topic)", "def _get_tag(self, key):\n # type: (_TagNameType) -> Optional[Text]\n return self._dd_span.get_tag(key)", "def salvage_tag_data(tag_text):\n data = process_start_tag(tag_text)\n tag = data[0]\n attributes = data[1]\n # Jloggraph applet data\n if tag == \"param\" and \"name\" in attributes:\n if attributes[\"name\"] == \"table\" and \"value\" in attributes:\n return attributes[\"value\"]\n # Spacegroup\n if tag_is_spacegroup(tag_text):\n return tag_text\n\n # Return an empty string by default\n return \"\"", "def extract_strict(container, tag):\n if tag in container:\n return container.get(tag)\n else:\n raise Exception(\"{} is not in the container\".format(tag))", "def get_tag(self, key, defaultvalue=None):\n if key not in self.tags:\n return defaultvalue\n return self.tags[key]", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"", "def value_by_tag(self, tag = \"\"):\n if tag == \"\":\n return None\n ante = ('<' , tag, '>')\n post = ('</', tag, '>')\n anteStr = ''.join(ante)\n postStr = ''.join(post)\n index = self.string.find(anteStr) + len(anteStr)\n outdex = self.string.find(postStr)\n if index == -1 or outdex == -1:\n return None\n assert outdex >= index\n value = self.string[index:outdex]\n print('successful <{0}> tag extract: {1}'.format(tag, value))\n return value", "def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None", "def find_tag_value(tag):\n for elem in tags:\n if elem['key'] == tag:\n return elem['value']\n return None", "def cypher_unknownTag_keyword(self, variable_tagUnknown=\"tag_unknown\"):\n\n if not self.keyword:\n return \"\"\n return f'({variable_tagUnknown}{self.label}'+ \\\n \"{\" + f'{self.databaseInfoTag[\"properties\"][\"keyword\"]}:\\'{self.keyword}\\'' + \"})\"", "def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if 
len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute', 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict", "def get_tag_argument() -> Optional[str]:", "def _tag_or_branch(self, library):\n if self.config.has_option(library, \"tag\"):\n return self.config.get(library, \"tag\")\n else:\n return self.config.get(library, \"branch\")", "def tagify(x):\n\t\tm = tag_match.match(x)\n\t\tif m:\n\t\t\tg = m.groups()\n\n\t\t\tword = (g[0] if g[0] is not None else \"NA\")\n\t\t\ttag = (g[1] if g[1] is not None else \"NA\")\n\t\t\treturn (word,tag)\n\t\t\t#if g[1] is None: return (g[0], \"NA\")\n\t\t\t#else: return g\n\t\telse: return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the object (instance, volume, snapshot, or AMI) has a tag whose key is 'PROD'.
def isProduction(obj):
    # obj.tags.has_key('PROD') is deprecated, so use the 'in' operator instead
    return 'PROD' in obj.tags
[ "def tag_key_exists(self, key):\n return key in self.map", "def hasKey(self, key):\n for t in self.getTags():\n if t.getKey().getName() == key:\n return True\n return False", "def is_object_metadata(data):\n for key in ['app_commit', 'app_release']:\n if key in data.keys():\n return True\n return False", "def has_key(self, key):\n return self._instances.has_key(KeyAsAttr(key))", "def is_virus(self):\n for key in list(self.tags['virus'].keys()):\n val = self.tags['virus'][key]\n if val:\n return True\n return False", "def _is_incex_key(self, key, value):\n key_out = ((self.included_attributes and\n (key not in self.included_attributes)) or\n (key in self.excluded_attributes))\n value_out = True\n if isinstance(value, dict):\n for change_key in value:\n if isinstance(value[change_key], dict):\n for key in value[change_key]:\n if ((self.included_attributes and\n (key in self.included_attributes)) or\n (key not in self.excluded_attributes)):\n value_out = False\n return key_out and value_out", "def is_tagged(node: Dict, key: str, value: str) -> bool:\n if tags := node.get(\"tag\"):\n if isinstance(tags, dict):\n tags = [tags]\n\n for tag in tags:\n if (tag.get(\"k\"), tag.get(\"v\")) == (key, value):\n return True\n\n return False", "def check_tag(self, nvr, tag):\n\n tags = self.session.listTags(build=nvr)\n for build_tag in tags:\n if tag == build_tag['name']:\n return True\n\n return False", "def __contains__(self, key):\r\n return key in self.get_attributes()", "def does_contain(self, key):\n print \"Iterating over components...\"\n for key in self.componentDict:\n print \"Checking keys...\"\n if key in self.componentDict:\n return True\n\n else:\n print \"No key found\"", "def is_valid_product_update_key(self, key):\n return key in self._PRODUCT_KEY_MAP", "def IsSetup (self, key: str) -> bool:\n\n\t\treturn key in self._storage", "def available_key(cls, key : int) -> bool:\r\n\r\n return key in cls.__description_structure.keys()", "def has_attribute(self, key):\n return key in self.__dict", "def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True", "def __contains__(self, key: str) -> bool:\n\t\treturn self.hasAttribute(key)", "def available(cls, tag_id : int) -> bool:\r\n\r\n return tag_id in cls.__tag_ids", "def __contains__(self, key):\n return key in self._group._opts", "def _is_approved_tags(self) -> bool:\n non_approved_tags = set()\n try:\n approved_tags = tools.get_approved_tags()\n pack_meta_file_content = json.loads(self._read_file_content(self.pack_meta_file))\n non_approved_tags = set(pack_meta_file_content[PACK_METADATA_TAGS]) - set(approved_tags)\n if non_approved_tags:\n if self._add_error(Errors.pack_metadata_non_approved_tags(non_approved_tags), self.pack_meta_file):\n return False\n except (ValueError, TypeError):\n if self._add_error(Errors.pack_metadata_non_approved_tags(non_approved_tags), self.pack_meta_file):\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write volumes to file
def generateInfoVolumes(regions):
    print "\nWriting volumes info to output file %s" % volumes_data_output_file
    with open(volumes_data_output_file, 'w') as f1:
        f1.write("VOLUMES\n")
        f1.write("Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\t"
                 "associated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
        for r in regions:
            volumes = getVolumes(r)
            print "."  # give some feedback to the user
            for v in volumes:
                f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                         % (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v),
                            v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
                            v.create_time, v.region.name, v.zone, v.snapshot_id))
[ "def write_volume(self):\n volfile = open(self.basedir + \"/volume\", 'w')\n volume = self.get_volume()\n volfile.write(volume + \"\\n\")\n volfile.close()", "def write_inventory_file(inventory_item):\n try:\n with open('inventory', 'w') as file:\n file.write(inventory_item)\n except OSError:\n pass", "def vf_write(self, vf_tmp):\n if type(vf_tmp) is list:\n vf_tmp = self.eol.join(vf_tmp)\n\n with open(self.vagrantfile, 'w', newline=self.eol) as f:\n f.write(vf_tmp)\n\n self.validate()", "def write(self):\n for abs_path, metadata in self._abs_path2metadata.items():\n say(f'writing {abs_path} back to disk')\n metadata.adapter.write(abs_path, metadata.file_obj)", "def write(self, filename: Union[str, Path]):\n ret_code = lib.vec_fst_write_file(self.ptr, str(filename).encode(\"utf-8\"))\n err_msg = f\"Write failed. file: {filename}\"\n check_ffi_error(ret_code, err_msg)", "def write(self, device=None, char=0, bs=None, count=None):\n volume = self.get_volume(device)\n block_size = bs or BLOCK_SIZE\n\n # Calculate the number of blocks that are in the volume\n count = count or (volume['size'] / block_size)\n\n data = \"\".join([chr(int(char)) for i in range(0, block_size)])\n\n print(\"Writing: '%c'\" % data[0])\n total = 0\n with directio.open(volume['path'], buffered=block_size) as file:\n for i in range(0, count):\n self.dot()\n total += file.write(data)\n print(\"\\nWrote: \", total)\n return 0", "def write(self, filename):\n pass", "def test_write_volume(self):\n pcxml.write_volume(\n new_values=EXPECTED_VOLUME_WRITE,\n tree=self.tree,\n path=\"cell_definitions/cell_definition[@name='default']/phenotype/volume\",\n )\n self.tree.write(WRITE_PATH)\n\n new_tree = ElementTree.parse(WRITE_PATH)\n volume_data = pcxml.parse_volume(\n tree=new_tree,\n path=\"cell_definitions/cell_definition[@name='default']/phenotype/volume\",\n )\n self.assertEqual(EXPECTED_VOLUME_WRITE, volume_data)", "def write(self, f):\n f = Path(f)\n if f.suffix != \".pvd\":\n f = f.with_suffix(\".pvd\")\n\n with f.open(\"w\") as foo:\n foo.writelines(self.__data)\n foo.write(\"</Collection>\\n\")\n foo.write(\"</VTKFile>\")", "def add_writable_file_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if host_outdir_tgt:\n # shortcut, just copy to the output directory\n # which is already going to be mounted\n log.debug('shutil.copy({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copy(volume.resolved, host_outdir_tgt)\n else:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n file_copy = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copy({}, {})'.format(volume.resolved, file_copy))\n shutil.copy(volume.resolved, file_copy)\n self._add_volume_binding(file_copy, volume.target, writable=True)\n ensure_writable(host_outdir_tgt or file_copy)", "def volumes(self, volumes):\n self._volumes = volumes", "def write_buf_to_disk(self, filename):\n path = './blk_data/{name}.txt'.format(name=filename)\n with open(path, 'w') as f:\n for i in range(len(self.data)):\n f.write(str(self.data[i]))\n\n # Record the IO changes\n self.num_IO += 8\n self.free_buffer()", "def write_to_file(self, filename: str) -> None:", "def write_partition(self):\n \n# np_pv = np.array(self.part_vert,dtype=np.int32);\n# fn = 'parts.lbm';\n# 
np_pv.astype('int32').tofile(fn)\n parts = open('parts.lbm','w')\n for p in self.part_vert:\n parts.write('%d \\n'% p)\n\n parts.close()", "def _write(self, *args, **kwargs):\n raise NotImplementedError('Writing OUTCAR files is not supported.')", "def write(self, filename, **kwargs):\n filename = str(make_path(filename))\n self.hdu_list.writeto(filename, **kwargs)", "def save_vcard(out, vcard):\n\n if os.path.exists(out):\n print(\"Append to file %s\" % out)\n else:\n print(\"Create file: %s\" % out)\n\n with codecs.open(out, \"a\", \"utf-8\") as fobj:\n fobj.write(vcard)\n fobj.write(\"\\n\")", "def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")", "def clientapp_write_file(blocks_info): \n file_name = blocks_info['file_name']\n block_size = blocks_info['block_size']\n blocks_to_dns = blocks_info['data_blocks']\n # Sort the blocks \n block_l = []\n for block_id in blocks_to_dns.keys():\n block_l.append( int( block_id ))\n block_l.sort()\n # Read the file in chunks and send to datanodes\n read_blocks_and_send_to_dns( file_name, block_size, block_l , blocks_to_dns )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the application directory.
def get_appdir():
    return APP_PATH
[ "def app_dir(self):\n return os.path.dirname(self.config_file)", "def app_dir(self):\n return self._app_dir", "def get_global_app_dir(self):\n globalapps = self.choose_path(Installer.global_app_dirs)\n if globalapps:\n return globalapps\n self.report_error('Unable to determine global app directory!')", "def app_folder(self):\n if getattr(self, \"_app_folder\", None):\n return self._app_folder\n return os.path.expanduser(\"~/Applications/.appify\")", "def get_app_path():\n return config.get(\"app\", \"path\")", "def application_path():\n\n if frozen_executable():\n return os.path.dirname(sys.executable)\n\n return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))", "def get_app_dir(app_dir=None):\n app_dir = app_dir or os.environ.get('JUPYTERLAB_DIR')\n app_dir = app_dir or pjoin(ENV_JUPYTER_PATH[0], 'lab')\n return os.path.realpath(app_dir)", "def get_app_dir():\n # Default to the override environment variable.\n if os.environ.get(\"JUPYTERLAB_DIR\"):\n # We must resolve the path to get the canonical case of the path for\n # case-sensitive systems\n return str(Path(os.environ[\"JUPYTERLAB_DIR\"]).resolve())\n\n # Use the default locations for data_files.\n app_dir = pjoin(sys.prefix, \"share\", \"jupyter\", \"lab\")\n\n # Check for a user level install.\n # Ensure that USER_BASE is defined\n if hasattr(site, \"getuserbase\"):\n site.getuserbase()\n userbase = getattr(site, \"USER_BASE\", None)\n if HERE.startswith(userbase) and not app_dir.startswith(userbase):\n app_dir = pjoin(userbase, \"share\", \"jupyter\", \"lab\")\n\n # Check for a system install in '/usr/local/share'.\n elif (\n sys.prefix.startswith(\"/usr\")\n and not osp.exists(app_dir)\n and osp.exists(\"/usr/local/share/jupyter/lab\")\n ):\n app_dir = \"/usr/local/share/jupyter/lab\"\n\n # We must resolve the path to get the canonical case of the path for\n # case-sensitive systems\n return str(Path(app_dir).resolve())", "def _app_folder():\n directory = os.path.expanduser('~/.LostPhotosFound')\n if not os.path.isdir(directory):\n os.makedirs(directory, 0o700)\n return directory", "def app_path(self):\n return self._app_path", "def get_base_dir(self):\n\n if self.config['paths'].get('install_base', None):\n return self.parse_user_path(self.config['paths']['install_base'])\n\n if self.use_global:\n return self.get_global_app_dir()\n\n # Default local install.\n return self.get_home_app_dir()", "def get_app_dirs():\n return appdirs.AppDirs(\"BioSimulatorsUtils\", \"BioSimulatorsTeam\")", "def af_directory():\n return os.path.dirname(os.path.abspath(__file__))", "def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))", "def API_DEFAULTDIR() -> str:\n return user_cache_dir(appname=__name__.split('.')[0])", "def AppPath(self):\n\t\treturn self.acad.Path", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def getConfDir():\n\t\n\tmodulePath=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n\tpath=modulePath+\"/configurations\"+\"/\"\n\treturn path", "def root_directory(self):\n return self._context_root_directory" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the TSV file corresponding to the current annotation level.
def tsv_name():
    if PAR['level'] == 1:
        return 'col.tsv'
    else:
        return 'myc.tsv'
[ "def _get_traj_file(self):\n return self._traj_file", "def saveTSV(self, filename=None, **kargs):\n self.saveCSV(filename, tsv=True, **kargs)", "def open_tsv(tsv_name):\n\t\n\ttsv_file = pd.read_csv(tsv_name, sep = '\\t')\n\n\treturn tsv_file", "def to_tsv_files(self, enclosing_folder):\n # Save raw eeg\n raw_eeg_filename = os.path.join(enclosing_folder, EegData.RAW_EEG_TSV)\n raw_eeg = open(raw_eeg_filename, 'w')\n self.print_raw_eeg_tsv(raw_eeg)\n raw_eeg.close()\n # Save power levels\n levels_filename = os.path.join(enclosing_folder,\n EegData.POWER_LEVELS_TSV)\n levels = open(levels_filename, 'w')\n self.print_power_levels_tsv(levels)\n levels.close()", "def write_basic_kaggle_file(tsvfile, outfname):\n f = open(outfname, 'w')\n f.write(\"ID,LABEL\\n\")\n i = 0\n with open(tsvfile, 'r') as tf:\n for line in tf:\n (label,review) = line.strip().split(\"\\t\")\n i += 1\n f.write(str(i))\n f.write(\",\")\n f.write(\"POSITIVE\")\n f.write(\"\\n\")\n f.close()", "def save(self, path):\n tsv_copy = subset_tsv_cols(self)\n ts = str(time.time()).replace(\".\",\"\")\n filename = self.value.replace(\" \", \"_\")+'_'+ts+'.tsv'\n f = open(path+'/'+filename, 'w')\n f.write('\\t'.join(tsv_copy.colnames())+'\\n')\n for idx in range(tsv_copy.nrow()):\n f.write('\\t'.join(str(x) for x in tsv_copy.row(idx))+'\\n')\n return filename", "def path_tsv_sheet_germline():\n return os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"data\", \"example_germline_variants.tsv\"\n )", "def create_tsv(self):\n self.update_progress('Converting data source: ' + self.get_local_bed_path())\n temp_path = self.get_local_tsv_path()\n with open(self.get_local_bed_path(), 'r') as bedfile:\n with open(temp_path, 'w') as outfile:\n self.convert_tsv_data(bedfile, outfile)\n while True:\n line = bedfile.readline()\n if line:\n parts = line.split('\\t')\n included_values = parts[:4]\n (chorm, start_range, end_range, value) = included_values\n result_values = [chorm, start_range, end_range, value, self.name, \"[{},{}]\".format(start_range, end_range)]\n outfile.write('\\t'.join(result_values) + '\\n')\n else:\n break", "def _current_vlog_fn(level):\n return getattr(vlog.Vlog, _LOG_MAPPING[level].__name__)", "def _get_tsv_version(tsv_file_name: str) -> str:\n\n version_pattern = re.compile(r\"_v(\\d+)_\")\n if match := re.search(version_pattern, tsv_file_name):\n return match.group(1)\n return \"000\"", "def get_time_trace_file(root_dir, exp_name, plane_num):\n exp_dir = os.path.join(root_dir, exp_name)\n time_trace_dir = os.path.join(exp_dir, 'time_trace')\n plane_string = 'plane{0:02d}'.format(plane_num+1)\n plane_dir = os.path.join(time_trace_dir, plane_string)\n trace_file = os.path.join(plane_dir, 'timetrace_roi.mat')\n return trace_file", "def gencode_gtf(self):\n return op.join(self.root_dir, \"gencode.annotation.gtf\")", "def to_tsv(path):\n new_path = path.replace('txt', 'tsv')\n if not os.path.exists(new_path):\n with open(path) as data_file, open(new_path, 'w') as tsv_file:\n tsv_file.write(data_file.read().replace('|||', '\\t'))\n return new_path", "def tsv_header(self):\n return self.tsv_lines[0]", "def get_mode_file() -> Path:\n return ARENA_ROOT / 'robot_mode.txt'", "def path_tsv_sheet_cancer():\n return os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"data\", \"example_cancer_matched.tsv\"\n )", "def fileNameByLevel(level):\n fname = \"{}-{}.log\".format(logConfig[\"logFilePrefix\"], level)\n return os.path.join(logConfig[\"logFolder\"], fname)", "def annotation_filename(self):\n return 
self._annotation_filename", "def get_activity_annotation_of_file(self, annotations, ifile):\n return annotations['RELEASE'][0][0][4][ifile][0][0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicate whether the current level is level 1 (colonization).
def colonization():
    return get('level') == 1
[ "def is_single_level(self):\n return self.fragments_tree.height <= 2", "def is_top_level(self) -> bool:\n return self._indent == ''", "def is_mainline(self) -> bool:\n node = self\n\n while node.parent:\n parent = node.parent\n\n if not parent.variations or parent.variations[0] != node:\n return False\n\n node = parent\n\n return True", "def top_left_dot(self) -> bool:\n return bool(self._getindicator(1))", "def unnecessary_colon(self):\n if self.line.endswith(':'):\n return True", "def first_level_text_is_displayed(self):\n first_level_text = self.driver.find_element_by_name(self.FIRST_LEVEL_TEXT_NAME)\n return first_level_text.is_displayed()", "def is_leaf(self):\n return len(self.links) == 1", "def is_toplevel(self):\n return self.srcnode == self.path", "def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)", "def is_1DNN(self):\n if not self.dims == 1:\n return False\n if not set(self.__m__.keys()) <= set(((0,),(1,),(-1,))):\n return False\n\n return True", "def is_toplevel(self):\n return self.srcnode == self.path", "def isSetInitialLevel(self):\n return _libsbml.QualitativeSpecies_isSetInitialLevel(self)", "def is_colonne_empty(damier, case):\n for i in range(0, damier.get_taille()):\n case_tmp = Coordonnées(i, case.colonne)\n if damier.get_case(case_tmp) == 1 \\\n and not (case_tmp.ligne == case.ligne):\n return False\n return True", "def is_single_path(self) -> bool:\n if not self.children:\n return_value = True\n elif len(self.children) > 1:\n return_value = False\n else:\n child = self.children[0] # guaranteed to have exactly 1 by if and elif clauses above\n return_value = child.is_single_path()\n\n return return_value", "def IsRoot(self):\n\t\treturn self.root_prefix == \"/\"", "def is_simple(self):\n return self._hypergraph.is_simple()", "def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)", "def isRoot(self):\n\t\tif self.parent == None:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def is_one(self):\n return self == self.parent().one()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicate whether the current level is level 2 (AM fungal structures).
def intra_struct():
    return get('level') == 2
[ "def support_level2_data(self) -> bool:\n return self._support_level2_data", "def is_subgroup(self, right):\n if right.level() == 1:\n return True\n if is_Gamma0(right):\n return self.level() % right.level() == 0\n if is_Gamma1(right):\n if right.level() >= 3:\n return False\n elif right.level() == 2:\n return self.level() == 2\n # case level 1 dealt with above\n else:\n return GammaH_class.is_subgroup(self, right)", "def is_single_level(self):\n return self.fragments_tree.height <= 2", "def is_multi_level(self):\n return self.is_flag_set(StatefulParser.FLAGS.MULTI_LEVEL)", "def is_2d(self) -> bool:\n return self.layers == 1 and self.times == 1", "def EnableLayer2(self):\n\t\treturn self._get_attribute('enableLayer2')", "def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False", "def is_py2():\n if sys.version_info.major == 2:\n return True\n else:\n return False", "def out2(self) -> bool:\n return self.__device.query(\":OUTP2:STATE?\") == \"ON\"", "def is_cfv2() -> bool:\n\n return True", "def pFlopIsTwoConnected(self):\n\t\tif self._pFlopIsTwoConnected is None:\n\t\t\tself._pFlopIsTwoConnected = prob(6, 51)*(1-prob(6, 50) )*4\n\t\treturn self._pFlopIsTwoConnected", "def cat_l2_supported():\n return common.CAT_L2_CAP in SYSTEM_CAPS", "def isTwoOfAKind(self):\n if self.card1[0] == self.card2[0]:\n return True\n cards = self.tableCards('number')\n for card in cards.keys():\n if card == int(self.card1[0]) or card == int(self.card2[0]):\n return True\n return False", "def colonization():\n\n return get('level') == 1", "def has_action2(self, feature):\n return feature in self._action2", "def is_if_active(level: int):\n return level >= len(if_stack) or if_stack[-1-level][0]", "def second_weapon_set(self) -> bool:\n return self.xml.find(\"Items\").get(\"useSecondWeaponSet\") == \"true\"", "def isHigherLevelPairPossible(self, (side1, side2)):\n if side1.startswith(side1FeaPrefix):\n side1Group = side1\n side1Glyph = None\n else:\n side1Group = self.flatSide1Groups.get(side1)\n side1Glyph = side1\n if side2.startswith(side2FeaPrefix):\n side2Group = side2\n side2Glyph = None\n else:\n side2Group = self.flatSide2Groups.get(side2)\n side2Glyph = side2\n\n havePotentialHigherLevelPair = False\n if side1.startswith(side1FeaPrefix) and side2.startswith(side2FeaPrefix):\n pass\n elif side1.startswith(side1FeaPrefix):\n if side2Group is not None:\n if (side1, side2) in self.pairs:\n havePotentialHigherLevelPair = True\n elif side2.startswith(side2FeaPrefix):\n if side1Group is not None:\n if (side1, side2) in self.pairs:\n havePotentialHigherLevelPair = True\n else:\n if side1Group is not None and side2Group is not None:\n if (side1Glyph, side2Glyph) in self.pairs:\n havePotentialHigherLevelPair = True\n elif (side1Group, side2Glyph) in self.pairs:\n havePotentialHigherLevelPair = True\n elif (side1Glyph, side2Group) in self.pairs:\n havePotentialHigherLevelPair = True\n elif side1Group is not None:\n if (side1Glyph, side2Glyph) in self.pairs:\n havePotentialHigherLevelPair = True\n elif side2Group is not None:\n if (side1Glyph, side2Glyph) in self.pairs:\n havePotentialHigherLevelPair = True\n return havePotentialHigherLevelPair", "def is_python2():\n\n return sys.version_info.major == 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds AMFinder commandline parser.
def build_arg_parser():
    main = ArgumentParser(description='AMFinder command-line arguments.',
                          allow_abbrev=False,
                          formatter_class=RawTextHelpFormatter)

    subparsers = main.add_subparsers(dest='run_mode', required=True,
                                     help='action to be performed.')

    _ = training_subparser(subparsers)
    _ = prediction_subparser(subparsers)
    _ = diagnostic_subparser(subparsers)

    return main
[ "def build_parser(self, parser: ArgumentParser) -> None:", "def build_parser(self):\n parser = argparse.ArgumentParser(\n description=\"Run Crystal Matching algorithm attempting to translate co-ordinates \"\n \"on an input image to the coordinate-space of an output image while \"\n \"accounting for possible movement of crystals in the sample.\")\n\n if sys.version_info[0] < 3:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=file,\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n else:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=argparse.FileType('r'),\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n parser.add_argument('beamline_stack_path',\n metavar=\"beamline_stack_path\",\n help=\"A path pointing at a directory which stores images to be stacked or a path to a stacked image.\")\n parser.add_argument('selected_points',\n metavar=\"x,y\",\n nargs='*',\n help=\"Comma-separated co-ordinates of selected points to be translated from the marked image \"\n \"to the target image.\")\n parser.add_argument('-o','--output',\n metavar=\"focused_image_path\",\n help=\"Specify directory for the stacked image. \"\n \"A file called 'processed.tif' will be created in the directory.\"\n \"'processed.tif' will be created in log directory if this is not set.\")\n parser.add_argument('--config',\n metavar=\"path\",\n action=ReadableConfigDir,\n default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME),\n help=\"Sets the configuration directory.\")\n parser.add_argument('--scale',\n metavar=\"scale\",\n help=\"The scale between the Formulatrix and beamline image given as the resolution of each \"\n \"image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value \"\n \"must be specified for each image using the format \"\n \"'[Formulatrix_image_resolution]:[beamline_image_resolution]'.\")\n parser.add_argument('-j', '--job',\n metavar=\"job_id\",\n help=\"Specify a job_id - this will be reported in the output to help identify this run.\")\n parser.add_argument('--to_json',\n action='store_true',\n help=\"Output a JSON object.\")\n parser.add_argument('--version',\n action='version',\n version=VersionHandler.version_string())\n parser.add_argument('--log',\n metavar=\"path\",\n help=\"Write log files to the directory specified by path.\")\n self.parser = parser", "def build_parser():\r\n parser = ArgumentParser()\r\n \r\n parser.add_argument('algorithm_name', type=str,\r\n help='Name of algorithm to look up input file.', metavar='D')\r\n \r\n return parser", "def build_argparser(self):\n firstletters = ''\n for name, (categ, rest) in self.data.items():\n firstletters += name[0]\n\n self.argparser = argparse.ArgumentParser(\n usage='m3 x {} [arguments]'.format(self.name))\n\n for name, (categ, rest) in self.data.items():\n argargs = {}\n if rest.get('help'):\n argargs['help'] = rest['help']\n if rest.get('type') == 'flag':\n argargs['action'] = 'store_true'\n argargs['required'] = False\n elif 'default' not in rest:\n argargs['required'] = True\n if firstletters.count(name[0]) == 1:\n self.argparser.add_argument('-' + name[0],\n '--' + name, **argargs) # noqa: T484\n else:\n self.argparser.add_argument('--' + name, **argargs) # noqa:T484", "def build_parser(self):\n parser = argparse.ArgumentParser(self.name)\n\n subparsers = parser.add_subparsers(dest='run_type', help='sub-command help')\n\n parser_training = subparsers.add_parser('training', help='Training help')\n parser_training.add_argument('csv_paths', help='List of csv paths for training e.g. 
straights and corners', type=string_list)\n parser_training.add_argument('output_dir', help='For outputting the weights and logs of the training')\n parser_training.add_argument('--model_path', help='Path of the model to keep training', type=str, default=None)\n parser_training.add_argument('--crop_to', help='Area of the image to crop', type=int, default=0)\n parser_training.add_argument('--split_test_size', help='Size of training-test split', default=TRAINING_TEST_SPLIT)\n parser_training.add_argument('--batch_size', help='Batch size', type=int, default=BATCH_SIZE)\n parser_training.add_argument('--steps_per_epoch', help='Samples per epoch', type=int, default=STEPS_PER_EPOCH) # number of iterations?\n parser_training.add_argument('--epochs', help='Number of epochs', type=int, default=EPOCHS)\n parser_training.add_argument('--learning_rate', help='Learning rate', default=LEARNING_RATE, type=float)\n\n parser_testing = subparsers.add_parser('testing', help='Testing help')\n parser_testing.add_argument('model_path', help='Path of trained model')\n parser_testing.add_argument('groundtruth_path', help='Groundtruth angles in a CSV file')\n parser_testing.add_argument('output_dir', help='Directory to save video/image prediction')\n parser_testing.add_argument('--video_path', help='Path of video to test')\n parser_testing.add_argument('--image_path', help='Path of the image to test')\n parser_testing.add_argument('--crop_to', help='Area of the image to crop', type=int, default=0)\n\n parser_evaluate = subparsers.add_parser('evaluate', help='Evaluate help')\n parser_evaluate.add_argument('model_path', help='Path of trained model')\n parser_evaluate.add_argument('csv_paths', help='List of csv paths for training e.g. straights and corners', type=string_list)\n parser_evaluate.add_argument('output_dir', help='Directory to save results to')\n parser_evaluate.add_argument('--crop_to', help='Aread of the iamge to crop', type=int, default=0)\n parser_evaluate.add_argument('--split_test_size', help='Size of training-test split', default=TRAINING_TEST_SPLIT)\n\n parser_debug = subparsers.add_parser('debug', help='Debug help')\n parser_debug.add_argument('csv_paths', help='List of csv paths e.g. straights and corners', type=string_list)\n parser_debug.add_argument('output_dir', help='For outputting debug graphs and images')\n parser_debug.add_argument('--image_path', help='Image to show neural network debug information on', type=str)\n parser_debug.add_argument('--crop_to', help='Area of the image to crop', type=int, default=0)\n parser_debug.add_argument('--model_path', help='Path of trained model')\n parser_debug.add_argument('--split_test_size', help='Size of training-test split', default=TRAINING_TEST_SPLIT)\n\n return parser", "def create_parser():\n\n # Create the initial parser\n parser = argparse.ArgumentParser(\n description=\"Build project files. Copyright by Rebecca Ann Heineman. 
\"\n \"Builds *.sln, *.mcp, *.cbp, *.wmk, *.rezscript, *.slicerscript, \"\n \"doxyfile, makefile and *.xcodeproj files\")\n\n parser.add_argument(\"--version\", action=\"version\",\n version=\"%(prog)s \" + __version__)\n\n parser.add_argument(\"-r\", \"-all\", dest=\"recursive\", action=\"store_true\",\n default=False, help=\"Perform a recursive build\")\n\n parser.add_argument(\"-v\", \"-verbose\", dest=\"verbose\", action=\"store_true\",\n default=False, help=\"Verbose output.\")\n\n parser.add_argument(\"-n\", \"-preview\", dest=\"preview\", action=\"store_true\",\n default=False, help=\"Preview build commands.\")\n\n parser.add_argument(\"--generate-rules\", dest=\"generate_build_rules\",\n action=\"store_true\", default=False,\n help=\"Generate a sample configuration file and exit.\")\n\n parser.add_argument(\n \"--rules-file\",\n dest=\"rules_file\",\n metavar=\"<file>\",\n default=BUILD_RULES_PY,\n help=\"Specify a configuration file.\")\n\n parser.add_argument(\"-q\", dest=\"fatal\", action=\"store_true\",\n default=False, help=\"Quit immediately on any error.\")\n\n parser.add_argument(\"-f\", dest=\"files\", action=\"append\",\n metavar=\"<filename>\",\n help=\"Project file to process.\")\n\n parser.add_argument(\"-d\", dest=\"directories\", action=\"append\",\n metavar=\"<directory>\",\n help=\"Directory to process.\")\n\n parser.add_argument(\"-c\", dest=\"configurations\", action=\"append\",\n metavar=\"<configuration>\",\n help=\"Configuration to process.\")\n\n parser.add_argument(\"-docs\", dest=\"documentation\", action=\"store_true\",\n default=False, help=\"Compile Doxyfile files.\")\n\n parser.add_argument(\"args\", nargs=argparse.REMAINDER,\n help=\"project filenames\")\n\n return parser", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def generate_main_parser() -> ArgumentParser:\n # Create parser\n parser = ArgumentParser(\n description=\"Command line interface tool for iic2343.\",\n )\n\n # Add version flag\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"iic2343 version {iic2343.__version__}\",\n )\n\n # Create subparsers\n subparsers = parser.add_subparsers(help=\"Action to be executed.\")\n\n # Serial ports subparser\n generate_serial_ports_subparser(subparsers)\n\n return parser", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. 
Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(description='Watching for files containing magictext')\n parser.add_argument('--ext', help='File extensions to filter on, default=.txt', default='.txt')\n parser.add_argument('--poll', help=\"Polling interval in seconds, default=1.0\", type=float, default=1.0)\n parser.add_argument('directory', help='Directory to watch.')\n parser.add_argument('magictext', help='Text to search for within matching files.')\n return parser", "def build_cli(self):\n parser = argparse.ArgumentParser(\"xsgen\",\n conflict_handler='resolve', argument_default=NotSpecified)\n for plugin in self.plugins:\n plugin.update_argparser(parser)\n self.parser = parser\n return parser", "def setup_parser(cls, option_group, args, mkflag):", "def create_parser():\n parser = argparse.ArgumentParser()\n # parser.add_argument(\n # '-s', '--scrpfrom', help='url you will be scraping')\n parser.add_argument('url', help='url')\n\n return parser", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. 
using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser", "def get_parser():\n\n parser = ArgumentParser()\n\n req_argument = parser.add_argument_group('required arguments')\n\n parser.add_argument(\"-o\", \"--outdir\", type=str, default='result',\n help=\"Path for results\")\n parser.add_argument(\"-fname\", \"--file_name\", type=str, default=\"try1\",\n help=\"The name the output file should have within the output directory\")\n parser.add_argument(\"-freq\", \"--frequency\", type=str,\n help=\"File to read the haplotype frequencies from\")\n parser.add_argument(\"-over\", \"--overlap\", type=str,\n help=\"File to read the peptide vs alleles or peptide vs haplotype data\")\n parser.add_argument(\"-o_a\", \"--overlap_allele\", type=int, default=0,\n help=\"1 if the --overlap file passed in is peptide vs alleles and 0 if it is peptide vs haplotypes and has already been binarized\")\n # parser.add_argument(\"-n\", \"--ntarget\", type=int, default=5,\n # help=\"The ntarget for max n-times coverage\")\n parser.add_argument(\"-maxpep\", \"--max_number_of_pepts\", type=int, default=30,\n help=\"The maximum number of peptides allowed in a vaccine\")\n parser.add_argument(\"-c\", \"--cut\", type=int, default=3,\n help=\"The cut value for ommitting peptides that are too similar; a value of 0 should be provided if similar peptides are not to be excluded from a vaccine design.\")\n\n\n \n return parser", "def setParser():\n parser = argparse.ArgumentParser(\n prog=\"Nussinov Algorithm Solver\",\n description=\"A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings.\"\n )\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"-f\", \"--filepath\", help=\"the path to a text file with a sequence\")\n group.add_argument(\"-s\", \"--sequence\", help=\"the RNA sequence to evaluate\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"More verbose output\")\n parser.add_argument(\"-u\", \"--uncommon\", action=\"store_true\", help=\"Use Uncommon RNA matches (G,U)\")\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(description='The classic FizzBuzz game in programmatic form.', add_help=False)\n parser.add_argument('-h', '--help', default=argparse.SUPPRESS, action='help',\n help='Show this help message and exit.')\n parser.add_argument('-s', '--start', default=1, type=int, action='store', metavar='START',\n help='The number to start FizzBuzzing at (inclusive).')\n parser.add_argument('stop', type=int, action='store', metavar='STOP',\n help='The number to end FizzBuzzing at (exclusive).')\n return parser", "def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n 
parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser", "def initialize_parser():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\"-root\", \"--root\", help=r\"Root destination -> C:\\...\\\")\r\n return parser.parse_args()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns absolute paths to input files.
def abspath(files):
    files = sum([glob.glob(x) for x in files], [])
    return [os.path.abspath(x) for x in files]
[ "def input_files(self) -> List[str]:\n raise NotImplementedError # P0", "def input_file_paths(base_path):\n\tpaths = []\n\tfor dirpath, dirnames, filenames in os.walk(base_path):\n\t\tfor onefile in filenames:\n\t\t\t# the following if statement is due to OS X .DsStore bullshit...\n\t\t\tif not (onefile.startswith('.DS') or onefile.endswith('.log')):\n\t\t\t\t#paths.append(dirpath+\"/\"+onefile) \n\t\t\t\tpaths.append(os.path.join(os.getcwd(), dirpath, onefile))\n\treturn paths", "def get_input_filepaths(dirpath):\n return [os.path.join(dirpath, filename) for filename in\n os.listdir(dirpath) if '.xml' in filename]", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def sources_absolute_paths(self):\r\n abs_target_base = os.path.join(get_buildroot(), self.target_base)\r\n for src in self.sources:\r\n yield os.path.join(abs_target_base, src)", "def get_filenames(args, logger):\n infilenames = sorted([os.path.join(args.indirname, fname) for\n fname in os.listdir(args.indirname) if\n fname.startswith(args.prefix)])\n logger.info(\"Input files: %s\", infilenames)\n for fname in infilenames:\n if ' ' in os.path.relpath(fname):\n logger.error(\"Relative path to file or directory \" +\n \"'%s' contains whitespace (exiting)\", fname)\n return 1\n return infilenames", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def input_output_paths(test_file_suffix):\n i = os.path.join(_BASELINE_DIR, 'test_query_inputs', 'input_%s' % test_file_suffix)\n o = os.path.join(_BASELINE_DIR, 'test_query_baseline', 'baseline_%s' % test_file_suffix)\n return (i, o)", "def get_input_files(self):\n return self.__input_files", "def get_file_paths():\n audio_folder = MY_DIR + '/assets'\n\n audio_addy_list = []\n for file in os.scandir(audio_folder):\n audio_addy_list.append(file.path)\n\n return audio_addy_list", "def get_files(self):\n for root, _, files in walk(self.input_root, followlinks=True):\n for filename in files: # for all files\n # Match input_regex only -- do not match read regex for input\n # files in this case, since we need to find single end\n if search(self.input_regex, filename):\n if self.extension is not None:\n if search(self.extension, filename):\n abs_path = join(root, filename)\n self.files += [abs_path]\n\n if self.verbose:\n print(abs_path, file=stderr)\n else:\n abs_path = join(root, filename)\n self.files += [abs_path]\n\n if self.verbose:\n print(abs_path, file=stderr)", "def find_input_documents(self):\n paths = []\n itr = chain(texutils.input_pattern.finditer(self.text),\n texutils.input_ifexists_pattern.finditer(self.text))\n for match in itr:\n fname = match.group(1)\n if not fname.endswith('.tex'):\n full_fname = \".\".join((fname, 'tex'))\n else:\n full_fname = fname\n paths.append(full_fname)\n return paths", "def get_paths(file_path):\n return glob(path.join(file_path, '*'))", "def input_path(self, filename):\n\n return self.filename_path_join(self.input_dir, filename)", "def get_file_paths():\n print(os.getcwd())\n\n filepath = os.getcwd() + '/event_data'\n file_path_list = glob.glob(os.path.join(filepath, '*'))\n\n return file_path_list", "def getpaths_fromfile(input_prefix_, file_handle_):\n\n input_paths = []\n\n for line in file_handle_:\n line = line.strip()\n if line != \"\":\n dirname = line\n path = os.path.join(input_prefix_, \"%s*\" % dirname)\n input_paths.append(tuple([dirname, 
path]))\n\n return input_paths", "def handle_files_args(*paths_args):\n paths = []\n\n for paths_arg in paths_args:\n # Handle paths implicitly rooted at user home dir\n paths_arg = os.path.expanduser(paths_arg)\n\n # Expand wildcards\n paths_arg = glob.glob(paths_arg)\n\n # Create list of pathlib.Path objects\n paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])\n\n return paths", "def __return_file_paths(self, barcode_parent):\n\n # this will be returned\n file_paths = []\n\n # iterate through each directory, and collect files that contain \"fastq_runid\" in the name\n for root, directory, files in os.walk(barcode_parent):\n for name in files:\n file_paths.append( os.path.join(root, name) ) # append file to file_paths\n return file_paths", "def file_paths(self) -> Sequence[Any]:\n return pulumi.get(self, \"file_paths\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a custom time entry; at a minimum, an hour duration and a project parameter must be supplied.
def createTimeEntry(self, hourduration, description=None, projectid=None,
                    projectname=None, taskid=None, clientname=None, year=None,
                    month=None, day=None, hour=None, billable=False, hourdiff=-2):
    data = {
        "time_entry": {}
    }

    if not projectid:
        if projectname and clientname:
            projectid = (self.getClientProject(clientname, projectname))['data']['id']
        elif projectname:
            projectid = (self.searchClientProject(projectname))['data']['id']
        else:
            print('Too many missing parameters for query')
            exit(1)

    if description:
        data['time_entry']['description'] = description

    if taskid:
        data['time_entry']['tid'] = taskid

    year = datetime.now().year if not year else year
    month = datetime.now().month if not month else month
    day = datetime.now().day if not day else day
    hour = datetime.now().hour if not hour else hour

    timestruct = datetime(year, month, day, hour + hourdiff).isoformat() + '.000Z'
    data['time_entry']['start'] = timestruct
    data['time_entry']['duration'] = hourduration * 3600
    data['time_entry']['pid'] = projectid
    data['time_entry']['created_with'] = 'NAME'
    data['time_entry']['billable'] = billable

    response = self.postRequest(Endpoints.TIME_ENTRIES, parameters=data)
    return self.decodeJSON(response)
[ "def _create_time_entry(duration, description, projectid, start):\n data = {\n \"time_entry\": {\n \"description\": description,\n \"start\": start.astimezone().isoformat(),\n \"duration\": duration, # duration in seconds\n \"pid\": projectid,\n \"created_with\": \"gtimelog2toggl\",\n }\n }\n response = toggl.postRequest(Endpoints.TIME_ENTRIES, parameters=data)\n return toggl.decodeJSON(response)", "def create_entry(username, task, project, date, duration, notes):\n return TimeSheets.create(username=username, project=project, task=task, date=date, duration=duration, notes=notes)", "def submit_new_timeslot(submission):\n\n\tstart = submission['start_hour'] + \":\" + submission['start_minute'] + \" \" + submission['start_AM_PM']\n\tend = submission['end_hour'] + \":\" + submission['end_minute'] + \" \" + submission['end_AM_PM']\n\n\tnew_time = Project_Times(project_id=submission['project_id'], start_time=start, end_time=end)\n\n\tdb.session.add(new_time)\n\tdb.session.commit()", "def timeField(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dragCommand:\n Script=None, dropCallback: Script=None, editable: bool=True, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, enterCommand:\n Script=None, exists: bool=True, fullPathName: bool=True, height: Union[int,\n bool]=0, highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, manage: bool=True, noBackground: bool=True, numberOfPopupMenus:\n bool=True, parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, precision:\n Union[int, bool]=0, preventOverride: bool=True, receiveFocusCommand: Script=None,\n statusBarMessage: AnyStr=\"\", step: Union[time, bool]=None, useTemplate: AnyStr=\"\",\n value: Union[time, bool]=None, visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_issue_add_time(self):\n pass", "def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event", "def __init__(self, hrs=0, mins=0, secs=0):\n totalsecs = hrs*3600 + mins*60 + secs\n self.hours = totalsecs // 3600\n leftoversecs = totalsecs % 3600\n self.minutes = leftoversecs // 60\n self.seconds = leftoversecs % 60", "def __init__(self,\n label=None,\n validators=None,\n format='%I:%M%p', # 1:45PM\n **kwargs):\n super(TimeField, self).__init__(label, validators, **kwargs)\n self.format = format", "def __init__(self, name=\"\", time=None):\n super().__init__(\"time\", name)\n self.time = time", "def gen_build_time(**kwargs):\n ti = kwargs['ti']\n 
ti.xcom_push(key='date_start', value='1325347200000')\n ti.xcom_push(key='date_end', value='1325433600000')", "def create(cls, time, obsid_start, task_name, event):\n if not isinstance(time, Time):\n raise ValueError(\"time must be an astropy Time object\")\n time = floor(time.gps)\n\n return cls(time=time, obsid_start=obsid_start, task_name=task_name, event=event)", "def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)", "def _make_time(t):\n string = f'{t:04d}'\n return datetime.strptime(string, '%H%M').time()", "def _add_time(self):\n\n self.params[self.EventParams.TIME] = int(round(time.time() * 1000))", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def __init__(self, hrs: int = 0, mins: int = 0, secs: int = 0) -> object:\n # Calculate total seconds to represent\n totalsecs = hrs * 3600 + mins * 60 + secs\n self.hours = totalsecs // 3600 # Split in h, m, s\n leftoversecs = totalsecs % 3600\n self.minutes = leftoversecs // 60\n self.seconds = leftoversecs % 60", "def _add_time(self):\n\n self.params[self.EventParams.TIME] = int(time.time())", "def __init__(self, dt=60*60*24):\n pass", "def setSubmitTime(t):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fast query given the Client's name and Project's name
def getClientProject(self, clientName, projectName):
    for client in self.getClients():
        if client['name'] == clientName:
            cid = client['id']

    if not cid:
        print('Could not find such client name')
        return None

    for projct in self.getClientProjects(cid):
        if projct['name'] == projectName:
            pid = projct['id']

    if not pid:
        print('Could not find such project name')
        return None

    return self.getProject(pid)
[ "def _project_search(self, cr, uid, obj, name, args, context=None):\n cr.execute(\"\"\"\n SELECT pp.id,*\n FROM (\n Select\n node.id, node.name AS short_name,\n --cast ((count(parent.name)) as int) as nivel\n replace( array_to_string( array_agg( parent.name order by parent.nivel asc), ' / ' ), '\\n', ' ') as full_name\n from account_analytic_account as node, ( SELECT vw.nivel, account_analytic_account.*\n FROM (\n Select\n node.id, node.name AS short_name,\n cast ((count(parent.name)) as int) as nivel\n --array_to_string( array_agg( distinct parent.name ), ' / ' ) as full_name\n from account_analytic_account as node,account_analytic_account as parent\n where node.parent_left between parent.parent_left and parent.parent_right\n group by node.name,node.parent_left,node.id\n order by node.parent_left\n ) vw\n inner join account_analytic_account\n ON vw.id = account_analytic_account.id) as parent\n where node.parent_left between parent.parent_left and parent.parent_right\n group by node.name,node.parent_left,node.id\n order by node.parent_left\n ) vw join project_project pp\n on pp.analytic_account_id = vw.id\n WHERE vw.full_name \"\"\" + tools.ustr( args[0][1] ) + \"\"\" '%%%s%%' \"\"\" % ( tools.ustr( args[0][2] ),) )\n datas = cr.dictfetchall()\n ids = [('id', 'in', [data['id'] for data in datas])]\n return ids", "def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]", "def client_project_list(request, client_id):\n client = Client.objects.get(id=client_id)\n projects = Project.objects.filter(client=client.harvest_id)\n return HttpResponse(dumps(projects,indent=2, ensure_ascii=False),mimetype='application/json')", "def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def projects(lancet, query):\n projects = lancet.timer.projects()\n\n if query:\n regexp = re.compile(query, flags=re.IGNORECASE)\n\n def match(project):\n match = regexp.search(project[\"name\"])\n if match is None:\n return False\n project[\"match\"] = match\n return True\n\n projects = (p for p in projects if match(p))\n\n for project in sorted(projects, key=lambda p: p[\"name\"].lower()):\n name = project[\"name\"]\n\n if \"match\" in project:\n m = project[\"match\"]\n s, e = m.start(), m.end()\n match = click.style(name[s:e], fg=\"green\")\n name = name[:s] + match + name[e:]\n\n click.echo(\n \"{:>9d} {} {}\".format(\n project[\"id\"], click.style(\"‣\", fg=\"yellow\"), name\n )\n )", "def project_by_name(self,project_name=''):\n logger.debug(f'project_by_name project_name={project_name}')\n return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))", "def findProject(self):\n\n # 
check that we actually have json\n if hasattr(cherrypy.request, 'json'):\n data = cherrypy.request.json\n else:\n data = dict()\n\n # TODO validate projectNumbers; verify projectNumbers is list of ints\n\n validNum = []\n result = []\n if 'projectNumbers' in data:\n # if not admin, find only authorized projects\n if cherrypy.session['role'] == 'admin':\n validNum = data['projectNumbers']\n else:\n for pNum in data['projectNumbers']:\n if pNum in cherrypy.session['projectNumbers']:\n validNum.append(pNum)\n\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else:\n if cherrypy.session['role'] != 'admin':\n validNum = cherrypy.session['projectNumbers']\n for project in validNum:\n for res in self.colProjects.find({'projectNumber': project, 'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n else: # is admin\n for res in self.colProjects.find({'status': 'active'}):\n res['_id'] = str(res['_id'])\n result.append(res)\n #~ return result\n\n for res in result:\n res = self.calculateBudget(res[\"projectNumber\"])\n return result", "def select(project_id):\n select_project(project_id)", "def get_objects_in_project(project, ccm=None, server=None, database=None, ccmpool=None, use_cache=False):\n start = time.time()\n if ccmpool:\n if ccmpool.nr_sessions == 1:\n result = get_objects_in_project_serial(project, ccm=ccmpool[0], server=server, database=database, use_cache=use_cache)\n else:\n result = get_objects_in_project_parallel(project, ccmpool=ccmpool, use_cache=use_cache)\n else:\n result = get_objects_in_project_serial(project, ccm=ccm, database=database, use_cache=use_cache)\n logger.debug(\"Time used fetching all objects and paths in %s: %d s.\",\n project, time.time() - start)\n if use_cache:\n ccm_object = ccm_cache.get_object(project)\n ccm_object.set_members(result)\n ccm_cache.force_cache_update_for_object(ccm_object)\n return result", "def gmf_project():\n \n # A constant that defines the record fields that we wish to retrieve.\n FIELDS = {\n 'Country': True,\n 'Type': True,\n 'Count': True,\n '_id': False\n }\n \n # Open a connection to MongoDB using a with statement such that the\n # connection will be closed as soon as we exit the with statement\n with MongoClient(MONGODB_URI) as conn:\n # Define which collection we wish to access\n collection = conn[DBS_NAME][COLLECTION_NAME]\n # Retrieve a result set only with the fields defined in FIELDS\n # and limit the the results to 55000\n projects = collection.find(projection=FIELDS, limit=55000)\n # Convert projects to a list in a JSON object and return the JSON data\n return json.dumps(list(projects))", "def get_projects(self):\n self.cur.execute('SELECT * FROM projects;')\n projects = [Projects(row) for row in self.cur.fetchall()]\n return projects", "def test_list_project_request(self):\n pass", "def search(self, client_id=\"\", client_name=\"\", client_cnp=\"\"):\n clients = self.get_all()\n found_clients = []\n\n # recursive search\n def recursive_search(clients, index=0):\n # base case\n if index == len(clients):\n return\n\n appended = False\n if client_id and not appended:\n if client_id == clients[index].id:\n found_clients.append(clients[index])\n appended = True\n\n if client_name and not appended:\n if client_name.lower() in clients[index].name.lower():\n found_clients.append(clients[index])\n appended = True\n\n if client_cnp and not appended:\n if 
client_cnp == clients[index].cnp:\n found_clients.append(clients[index])\n appended = True\n\n recursive_search(clients, index + 1)\n\n recursive_search(clients, 0)\n\n # iterative search\n # appended = False # append guard\n # for client in clients:\n # if client_id and not appended:\n # if client_id == client.id:\n # found_clients.append(client)\n # appended = True\n #\n # if client_name and not appended:\n # if client_name.lower() in client.name.lower():\n # found_clients.append(client)\n # appended = True\n #\n # if client_cnp and not appended:\n # if client_cnp == client.cnp:\n # found_clients.append(client)\n # appended = True\n #\n # appended = False\n\n return found_clients", "def find(project_id):\n return find_entity(\"projects\", project_id)", "def searchClient(self, name, surname):\n list = []\n allClients = self.repository.getAll()\n for client in allClients:\n if client.getName() == name and client.getSurname() == surname:\n list.append(client)\n if len(list) != 0:\n return list\n else:\n return None", "def list_projects():\n projects = get_client_instance().project.all()\n for idx, pro in enumerate(projects):\n print(\" {}. {}\".format(idx+1, pro.name))", "def search_project_ctf(json: Dict, project: str) -> List[Dict]:\n list_projects = list()\n for ctf in get_all_ctf(json):\n if (\"name\" in ctf and project.lower() in ctf[\"name\"].lower()) or (\n \"description\" in ctf\n and project.lower() in ctf[\"description\"].lower()\n ):\n list_projects.append(ctf)\n return list_projects", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def get_all_project_details() -> list:\n\n list_project = db_connector.collection(Collections.PROJECTS).find(\n {}, {\"_id\": 0}).sort(\"project_name\", pymongo.ASCENDING)\n list_projects_to_be_send = []\n for project in list_project:\n list_projects_to_be_send.append(project)\n return list_projects_to_be_send" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update data for an existing client. If the name or notes parameter is not supplied, the existing data on the Toggl server will not be changed.
def updateClient(self, id, name=None, notes=None):
    data = {}
    data['client'] = {}
    data['client']['name'] = name
    data['client']['notes'] = notes

    response = self.postRequest(Endpoints.CLIENTS + '/{0}'.format(id), parameters=data, method='PUT')
    return self.decodeJSON(response)
[ "def update_client(client_name, updated_client_name): # Operacion modificar\n global clients\n\n if client_name in clients:\n index = clients.index(client_name)\n clients[index] = updated_client_name\n else:\n print(\"Client isn\\'t in the client list\")", "def client_notes(self, client_notes):\n\n self._client_notes = client_notes", "def update_client(\n body: ClientmodelClientUpdateRequest,\n client_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = UpdateClient.create(\n body=body,\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def update_clients(self):\n pass", "def save(self):\n # Get information.\n name = self.ids['name'].text.strip()\n phone = self.ids['phone'].text.strip()\n\n # Check if a name was specified.\n if not name:\n ErrorPopup('É necessário especificar um novo nome para o cliente!').open()\n return\n\n # Update the client.\n self.client.name = name\n self.client.phone = phone\n app = App.get_running_app()\n app.session.commit()\n\n # Update information of the current screen.\n app.consult.sm.client = self.client\n app.consult.sm.current_screen.on_pre_enter()\n\n # Show that the operation succeeded.\n SuccessPopup(\"Cliente editado com sucesso!\").open()\n\n # Close the popup.\n self.dismiss()", "def update_client(self, display_name, player=PLAYER_IDENTIFIER):\n self.state.update_client(display_name, player)", "def set_edit_form(self, client_dict):\n logger.debug('Setting Client Edit Form')\n logger.debug(json.dumps(client_dict, indent=2))\n self.client_obj = api.Client(**client_dict)\n self.msgvar.set(\n '%s: %s' % (\n client_dict['id'], client_dict['name']))\n self.name.set(client_dict['name'])\n self.street1.set(client_dict['street1'])\n self.street2.set(client_dict['street2'])\n self.city.set(client_dict['city'])\n self.state.set(client_dict['state'])\n self.zip.set(client_dict['zip'])\n self.active.set(client_dict['active'])\n self.terms.set(client_dict['terms'])", "def test_update_client(self):\n pass", "def test_client_update(self):\n pass", "def test_single_async_updateClient(self):\n self.try_function(\n 'updateClient',\n 'post',\n argumentNames=['clientId', 'payload', ],\n )", "def put(self, id_cliente):\n data = request.json\n cliente = update_cliente(id_cliente, data)\n if not cliente:\n api.abort(404)\n else:\n return cliente", "def update(self, cid, data):\n return requests.put(get_api_url(cid), data=json.dumps(data), headers=self.api_headers)", "def document_update(self, client):\n obj = self.documentcloud_object(client)\n for key, value in self.documentcloud_fields.items():\n setattr(obj, key, value)\n # self.documentcloud_url = obj.canonical_url\n obj.save()", "def saveClient():\n # Extract data from the request.\n building = request.form['building'].strip().title()\n firstName = request.form['firstName'].strip().title()\n lastName = request.form['lastName'].strip().title()\n locality = request.form['locality'].strip().title()\n postCode = request.form['postCode'].strip().title()\n province = request.form['province'].strip().title()\n street = request.form['street'].strip().title()\n town = request.form['town'].strip().title()\n\n # If a client with the specified name already exists, display an error\n # message.\n if (Client.count(Client.firstName == firstName, Client.lastName == lastName) > 0):\n return showClients(error='Unable to add client because a client with the specified name already exists.')\n\n # Add a new client with the specified 
details.\n Client(firstName = firstName\n , lastName = lastName\n , building = building\n , street = street\n , locality = locality\n , town = town\n , province = province\n , postCode = postCode).save()\n\n return showClients()", "def upsert_client_rate():\n print(request)\n new_client_dict = request.json\n new_client_dict_keys = new_client_dict.keys()\n new_client_dict_values = new_client_dict.values()\n # We want to update if the client exist in the client_rate.json data\n for i in range(1, len(new_client_dict)+1):\n if new_client_dict_keys[i] is None or new_client_dict_values[i] is None:\n continue\n else:\n update_client_rates(new_client_dict_keys[i], new_client_dict_values[i])\n # Or insert a new client-rate pair into client_rate.json data\n # After getting post request - how to update json file?\n return request.get_json()", "def update_client(self, old_client=None, new_client=None):\n old_is_client = type(old_client) is Client\n new_is_client = type(new_client) is Client\n\n # cancel if these are no clients\n if not old_is_client and not new_is_client:\n return False\n\n # try to change the id (and its files) first\n old_index = self.get_client_index(old_client)\n id_available = self.set_client_id(\n client=old_client,\n client_id=new_client.client_id\n )\n\n # only go on, if the ID is possible\n if id_available:\n self.client_list[old_index] = new_client\n return True\n else:\n return False", "def update(self, **kwargs):\n context = self.to_dict().copy()\n data = {**context, **kwargs}\n self.client.update_strain(data)", "async def update_entity_details(cls, entity, client):\n raise NotImplemented", "def fusion_api_update_client_certificate(self, aliasname, body, api=None, headers=None):\n return self.client_certificate.put(aliasname, body, api, headers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method runs all the episodes using an epsilon-greedy strategy
def run_epsilon(env, num_of_bandits, iterations, episodes):
    # Initialize total mean rewards array per episode by zero
    epsilon_rewards = np.zeros(iterations)
    for i in range(episodes):
        print(f"Running Epsilon episode:{i}")
        n = 1
        action_count_per_bandit = np.ones(num_of_bandits)
        mean_reward = 0
        total_rewards = np.zeros(iterations)
        mean_reward_per_bandit = np.zeros(num_of_bandits)
        env.reset()
        epsilon = 0.5
        for j in range(iterations):
            a = get_epsilon_action(epsilon, env, mean_reward_per_bandit)
            observation, reward, done, info = env.step(a)
            # Update counts
            n += 1
            action_count_per_bandit[a] += 1
            # Update mean rewards
            mean_reward = mean_reward + (reward - mean_reward) / n
            # Update mean rewards per bandit
            mean_reward_per_bandit[a] = mean_reward_per_bandit[a] + (
                reward - mean_reward_per_bandit[a]) / action_count_per_bandit[a]
            # Capture mean rewards per iteration
            total_rewards[j] = mean_reward
        # Update mean episode rewards once all the iterations of the episode are done
        epsilon_rewards = epsilon_rewards + (total_rewards - epsilon_rewards) / (i + 1)
    return epsilon_rewards
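The function above calls get_epsilon_action, which is not included in this row. A minimal sketch of what such a selector could look like, assuming a gym-style environment with a discrete action_space (an assumption, not taken from the row):

import numpy as np

def get_epsilon_action(epsilon, env, mean_reward_per_bandit):
    # Explore a random arm with probability epsilon; otherwise exploit the
    # arm with the highest running mean reward observed so far.
    # env.action_space.sample() assumes a gym-style discrete action space.
    if np.random.rand() < epsilon:
        return env.action_space.sample()
    return int(np.argmax(mean_reward_per_bandit))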
[ "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def run_episode(Q, max_steps=100, discount=0.9, alpha=0.4, epsilon=0.05):\n start_state = START_STATE\n episode_reward = 0\n while max_steps > 0:\n # print(max_steps)\n action = epsilon_greedy(Q[start_state], epsilon)\n next_state = sample_state(start_state, action)\n # print(next_state)\n #val_temp = Q[start_state][ACTION_IDX[action]]\n TD_backup(Q, start_state, next_state, action, discount, alpha)\n #episode_reward += Q[start_state][ACTION_IDX[action]] - val_temp\n start_state = next_state\n if MAZE[start_state] == GOAL:\n episode_reward += 10\n break\n max_steps -= 1\n return episode_reward", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 1\n noProgress = 0\n pCount = 0\n jump_valids = [0]*self.game.getActionSize()\n jump_valids[-1] = 1\n # track the three stages\n stage1 = 0\n stage2 = 0\n stage3 = 0\n\n while True:\n canonicalBoard = self.game.getCanonicalForm(board,self.curPlayer)\n stage = self.game.getStages(canonicalBoard)\n valids = self.game.getValidMoves(canonicalBoard,1) if jump_valids[-1] == 1 else jump_valids\n action = -1\n if sum(valids) == 1:\n action = np.where(valids==1)[0][0]\n else:\n if stage == 1:\n stage1 += 2\n stage2 += 2\n stage3 += 2\n elif stage == 2:\n stage2 += 2\n stage3 += 2\n elif stage == 3:\n stage3 += 2\n temp = int(episodeStep < self.args.tempThreshold)\n # call the MCTS\n pi = self.mcts.getActionProb(canonicalBoard, self.curPlayer, episodeStep, jump_valids, noProgress, temp=temp)\n action = np.random.choice(len(pi), p=pi)\n # flip the color, double examples\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b,p,flip in sym:\n trainExamples.append([b, self.curPlayer, flip, p, episodeStep, noProgress , None])\n\n board, _ = self.game.getNextState(canonicalBoard, 1, action)\n # track the number of no progress states\n if pCount == self.curPlayer*self.game.getScore(board,1):\n noProgress += 1\n else:\n noProgress = 0\n pCount = self.curPlayer*self.game.getScore(board,1)\n # flip board back\n board = self.game.getCanonicalForm(board,self.curPlayer)\n\n 
jump_valids = self.game.check_valid_jump(canonicalBoard, 1, action)\n if jump_valids[-1] == 1:\n self.curPlayer *= -1\n episodeStep += 1\n r = self.game.getGameEnded(self.game.getCanonicalForm(board, self.curPlayer), 1, episodeStep, noProgress)\n if r != 0: \n if abs(r) == 2:\n # draw\n r = 0\n print(r)\n return ([(self.game.getFeatureBoard(x[0],x[2]*x[1],x[4],x[5]),x[3],x[2]*r*((-1)**(x[1]!=self.curPlayer))) for x in trainExamples], stage1, stage2, stage3)", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board,self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b,p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r!=0:\n return [(x[0],x[2],r*((-1)**(x[1]!=self.curPlayer))) for x in trainExamples]", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def run_game(nb_episodes, agent, verbose=False):\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n discount_factor = 0.1\n start_time = time.time()\n\n\n #reward_values = {\"positive\": 1.0, \"negative\": 0.0, \"tick\": 0.0, \"loss\": 0.0, \"win\": 0.0}\n # TODO: when training use the following instead:\n reward_values = agent.reward_values()\n \n env = PLE(FlappyBird(), fps=30, display_screen=False, force_fps=False, rng=None,\n reward_values = reward_values)\n # TODO: to speed up training change parameters of PLE as follows:\n # display_screen=False, force_fps=True \n env.init()\n\n n_episode_list = []\n score_list = []\n episode_list = []\n episode_count = 0\n score = 0\n while nb_episodes > episode_count:\n # pick an action\n # TODO: for training using agent.training_policy instead\n state = env.game.getGameState()\n # TODO: Cleanup\n state = agent.construct_state((state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player']))\n action = agent.training_policy(state)#policy(state)\n #print action\n #if action == 1:\n # print \"TICK\"\n\n # step the environment\n reward = env.act(env.getActionSet()[action])\n #print(\"reward=%d\" % reward)\n\n # TODO: for training let the agent observe the current state transition\n\n episode_list.append((state, action, reward))\n\n score += reward\n \n # reset the environment if the game is over\n if env.game_over():\n if verbose:\n print(\"score for this episode: %d\" % score)\n\n env.reset_game()\n\n # Find all (state, action) pairs we've visited in this episode\n # We convert each 
state to a tuple so that we can use it as a dict key\n sa_in_episode = set([(tuple(x[0]), x[1]) for x in episode_list])\n for state, action in sa_in_episode:\n #if Q[state][1] > 0.0:\n # print Q[state]\n sa_pair = (state, action)\n # Find the first occurance of the (state, action) pair in the episode\n first_occurence_idx = next(i for i, x in enumerate(episode_list)\n if x[0] == state and x[1] == action)\n # Sum up all rewards since the first occurance\n G = sum([x[2] * (discount_factor ** i) for i, x in enumerate(episode_list[first_occurence_idx:])])\n # Calculate average return for this state over all sampled episodes\n returns_sum[sa_pair] += G\n returns_count[sa_pair] += 1.0\n agent.Q[state][action] = returns_sum[sa_pair] / returns_count[sa_pair]\n\n score = 0\n env.init()\n\n # Do a test run every 10 episodes\n if episode_count % 10 == 0:\n game_on = True\n if verbose:\n print('========= Starting trained game =========')\n while game_on:\n state = env.game.getGameState()\n state = (\n state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player'])\n state = agent.construct_state(state)\n action = agent.policy(state)\n reward = env.act(env.getActionSet()[action])\n score += reward\n\n if env.game_over():\n if verbose:\n print(\"Score for the Game: \", score)\n n_episode_list.append(episode_count)\n score_list.append(score)\n env.reset_game()\n score = 0\n game_on = False\n\n episode_count += 1\n score = 0\n\n print 'Time Training: {} min'.format((time.time() - start_time)/60)\n\n print('========= Starting Final Test Game =========')\n\n # Play 1 games after training\n score = 0\n env.init()\n game_on = True\n while game_on:\n state = env.game.getGameState()\n state = (state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player'])\n state = agent.construct_state(state)\n action = agent.policy(state)\n reward = env.act(env.getActionSet()[action])\n score += reward\n\n if env.game_over():\n print(\"Score for the Game: \", score)\n env.reset_game()\n game_on = False\n\n\n agent.save_Q()\n plot_results(n_episode_list, score_list, save=True)\n save_results(n_episode_list, score_list)", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def episodes(self, num_episodes, num_steps_per_episode):\n for ep in range(num_episodes):\n self.start_episode()\n for step in range(num_steps_per_episode):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n break", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in 
range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def run_all_episodes(self, episode_count):\n # Holds final result\n step_arr = []\n reward_arr = []\n new_abstr = {}\n detached_states = []\n\n if 'abstraction_type' in self.params.keys() and self.params['abstraction_type'] == 'discretization':\n self.agent.make_abstraction()\n\n while self.episode_count < episode_count:\n # Run episode, record results\n steps, reward = self.run_episode()\n step_arr.append(steps)\n reward_arr.append(reward)\n self.episode_count += 1\n if self.episode_count % 1 == 0:\n print('Episode {} finished with step count {}'.format(self.episode_count, steps))\n\n # Create temporal abstraction if applicable\n if 'make_abstraction' in self.params.keys() and self.episode_count in self.params['make_abstraction']:\n self.agent.make_abstraction()\n new_abstr = self.agent.params['s_a'].abstr_dict\n\n # Detach states if applicable\n if 'refine_abstraction' in self.params.keys() and self.episode_count in self.params['refine_abstraction']:\n newly_detached = self.agent.refine_abstraction()\n detached_states.extend(newly_detached)\n print('final abstraction')\n for i in range(len(self.agent.params['s_a'].cell_to_abstract_cell)):\n for key, value in self.agent.params['s_a'].cell_to_abstract_cell[i].items():\n print(key, value)\n\n return step_arr, reward_arr, new_abstr, detached_states", "def step(self):\n num_episodes, num_timesteps = 0, 0\n all_results = []\n\n while num_episodes < self.config['min_episodes_per_batch'] or \\\n num_timesteps < self.config['min_steps_per_batch']:\n # setting the latest params to the actor, getting the fitness and the noise seed sync.\n future_object_ids = [remote_actor.sample(self.latest_flat_weights) \\\n for remote_actor in self.remote_actors]\n results = [\n future_object.get() for future_object in future_object_ids\n ]\n\n for result in results:\n num_episodes += sum(\n len(pair) for pair in result['noisy_lengths'])\n num_timesteps += sum(\n sum(pair) for pair in result['noisy_lengths'])\n # each step we need to get the fitness, but there is no so many actor, so it needs to run\n # many times. 
The total results are in the 'all_results'.\n all_results.extend(results)\n\n all_noise_indices = []\n all_training_rewards = []\n all_training_lengths = []\n all_eval_rewards = []\n all_eval_lengths = []\n\n for result in all_results:\n all_eval_rewards.extend(result['eval_rewards'])\n all_eval_lengths.extend(result['eval_lengths'])\n\n all_noise_indices.extend(result['noise_indices'])\n all_training_rewards.extend(result['noisy_rewards'])\n all_training_lengths.extend(result['noisy_lengths'])\n\n assert len(all_eval_rewards) == len(all_eval_lengths)\n assert (len(all_noise_indices) == len(all_training_rewards) ==\n len(all_training_lengths))\n\n self.sample_total_episodes += num_episodes\n self.sample_total_steps += num_timesteps\n\n eval_rewards = np.array(all_eval_rewards)\n eval_lengths = np.array(all_eval_lengths)\n noise_indices = np.array(all_noise_indices)\n noisy_rewards = np.array(all_training_rewards)\n noisy_lengths = np.array(all_training_lengths)\n\n # normalize rewards to (-0.5, 0.5), shape:[batch_size, 2]\n proc_noisy_rewards = utils.compute_centered_ranks(noisy_rewards)\n # noise shape:[batch_size, weight_total_size]\n noises = [\n self.noise.get(index, self.agent.weights_total_size)\n for index in noise_indices\n ]\n\n # Update the parameters of the model.\n self.agent.learn(proc_noisy_rewards, noises)\n self.train_steps += 1\n self.latest_flat_weights = self.agent.get_flat_weights()\n\n # Update obs filter to all the actor sync.\n self._update_filter()\n\n # Store the evaluate rewards\n if len(all_eval_rewards) > 0:\n self.eval_rewards_stat.add(np.mean(eval_rewards))\n self.eval_lengths_stat.add(np.mean(eval_lengths))\n\n metrics = {\n \"episodes_this_iter\": noisy_lengths.size,\n \"sample_total_episodes\": self.sample_total_episodes,\n 'sample_total_steps': self.sample_total_steps,\n \"evaluate_rewards_mean\": self.eval_rewards_stat.mean,\n \"evaluate_steps_mean\": self.eval_lengths_stat.mean,\n \"timesteps_this_iter\": noisy_lengths.sum(),\n }\n\n self.log_metrics(metrics)\n return metrics", "def run_versus_greedy_bot(self):\n for i in range(len(self.individuals)):\n self.individuals[i].evaluate_versus_greedy_bot()\n # print(self.individuals[i].score)", "def train_model(self, model, n_episodes=100, epsilon=.1, alpha=.1, discount=.9, save_results=True, save_every=25):\n self.on_init()\n # Training variables\n exp_replay = ExperienceReplay(max_memory=10_000, alpha=alpha, discount=discount)\n history = []\n moves_per_apple_per_episode = []\n loss = float('inf')\n apple_count = 0\n total_moves = 0\n apples_per_epsiode = []\n t0 = time.time()\n\n if save_results:\n if not os.path.exists('./results/deep_q_results'):\n os.makedirs('./results/deep_q_results')\n\n for e in range(n_episodes):\n apple_episode_count = 0\n move_count = 0\n # The next new episode.\n self.agent.reset_game()\n pygame.event.pump()\n self.on_render()\n over = self.agent.lost_game()\n score = len(self.agent.x)\n pygame.display.set_caption(f'{self.caption[:-2]} {score}')\n while not over:\n pygame.event.pump()\n self.on_render()\n # Get initial input\n current_state = self.agent.get_state()\n q = model.predict(current_state)\n if (np.random.rand() <= epsilon) and np.sum(current_state[:2] < 2):\n new_direction = np.random.randint(0, 3, size=1)[0]\n if (e % save_every) == 0:\n print(self.agent.print_board())\n print('moving randomly')\n else:\n new_direction = np.argmax(q[0])\n if (e % save_every) == 0:\n print(self.agent.print_board())\n print(q)\n\n # Apply action, get rewards and new state.\n 
future_state, reward = self.agent.move_snake(new_direction)\n pygame.event.pump()\n self.on_render()\n move_count = move_count + 1\n if reward == .5:\n score = score + 1\n pygame.display.set_caption(f'{self.caption[:-2]} {score}')\n apple_episode_count = apple_episode_count + 1\n\n # Store experience.\n over = self.agent.lost_game()\n exp_replay.remember([current_state, new_direction, reward, future_state], over)\n\n # Get collected data to train model.\n inputs, targets = exp_replay.get_batch(model, batch_size=50)\n\n # Train model on experiences.\n loss = model.train_on_batch(inputs, targets)\n history.append(loss)\n total_moves = total_moves + 1\n\n if apple_episode_count == 0: \n moves_per_apple_per_episode.append(0) \n else:\n moves_per_apple_per_episode.append(move_count / apple_episode_count)\n\n if (e > 0) and (e % save_every == 0):\n t1 = time.time()\n time_delta = t1 - t0\n\n print(f\"{time_delta/60:.2f} minutes\")\n print(f\"Epoch {e:,}/{n_episodes:,} | Loss {loss:.3f} | Moves per Game: {total_moves/e:.2f} | Apple count {apple_count}\")\n apple_count = 0\n\n if save_results:\n # save model and and plot various results to local directory\n # serialize model to JSON\n model_json = model.to_json()\n with open(f\"./results/deep_q_results/model-{e}.json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(f\"./results/deep_q_results/model-{e}.h5\")\n print(f\"Saved model to disk\")\n\n # plot results\n fig = plt.figure(figsize=(12, 7))\n plt.plot(history)\n plt.savefig(f'./results/deep_q_results/loss_per_move-{e}.png')\n plt.close()\n fig = plt.figure(figsize=(12, 7))\n plt.plot(apples_per_epsiode, '.')\n plt.savefig(f'./results/deep_q_results/score_per_episode-{e}.png')\n plt.close()\n apples_per_epsiode.append(apple_episode_count)", "def random_episodes(self, nep=6, render=False):\n for ep in range(nep):\n done = False\n self.env.reset()\n i = 0\n reward = 0.0\n while not done:\n if render:\n self.env.render()\n action = self.random_action()\n image, r, done, _ = self.env.step(action)\n if i % self.d == 0:\n reward += r\n self.memory_add_im(to_Ychannel(image, data_format='channels_first'))\n self.memory_add_r(reward)\n self.memory_add_a(action)\n elif i % self.d == 1:\n reward = r\n else:\n reward += r\n i += 1", "def adapt(self):\n\n for game in self.games:\n losses = []\n \n if not self.args['skipSelfPlay']:\n # bookkeeping\n log.info(f'Self-playing game {type(game).__name__} ...')\n joatwinrates = []\n rwinrates = []\n gwinrates = []\n\n for _ in tqdm(range(self.args['adaptationIterations']), desc = 'Adapting'):\n\n if not self.args['skipSelfPlay']:\n # bookkeeping\n log.info(f'Self-playing game {type(game).__name__} ...')\n\n # run self play on game variante\n variationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n\n for _ in tqdm(range(self.args['numEps']), desc=\"Self Play\"):\n self.mcts = MCTS(game, self.joat, self.args) # reset search tree\n variationTrainExamples += self.executeEpisode(game)\n\n # shuffle examples before training\n trainExamples = []\n for e in variationTrainExamples:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n self.trainExamplesHistory[game.__class__] = trainExamples\n\n if len(self.trainExamplesHistory[game.__class__]) > self.args['numItersForTrainExamplesHistory']:\n log.warning(\n f\"Removing the oldest entry in trainExamples for game. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(game.__class__)\n \n log.info(f'Training/Adapting network...')\n # training new network\n joatmcts = MCTS(game, self.joat, self.args)\n \n pi_v_losses = self.adapt_joat.train(self.trainExamplesHistory)\n adapt_joatmcts = MCTS(game, self.adapt_joat, self.args)\n\n for pi,v in pi_v_losses:\n losses.append((pi, v, type(game).__name__))\n\n self.plot_current_progress(losses)\n\n # ARENA\n\n log.info('PITTING ADAPTED AGAINST ORIGINAL JOAT')\n arena = Arena(lambda x: np.argmax(joatmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(adapt_joatmcts.getActionProb(x, temp=0)), [game])\n pwins, nwins, draws = arena.playGames(self.args['arenaComparePerGame'])\n joatwinrate = float(nwins) / float(pwins + nwins + draws)\n log.info('Joat Win Rate vs. Original JOAT : %d' % (joatwinrate))\n\n log.info('ADAPTED/ORIGINAL WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n\n if self.args['evalOnBaselines']:\n log.info('PITTING ADAPTED AGAINST RANDOM POLICY')\n arena = Arena('random',\n lambda x: np.argmax(adapt_joatmcts.getActionProb(x, temp=0)), [game])\n pwins, nwins, draws = arena.playGames(self.args['arenaComparePerGame'])\n rwinrates = float(nwins) / float(pwins + nwins + draws)\n log.info('Joat Win Rate vs. Random : %d' % (rwinrates))\n log.info('ADAPTED/RANDOM WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n \n log.info('PITTING ADAPTED AGAINST GREEDY POLICY')\n arena = Arena('greedy',\n lambda x: np.argmax(adapt_joatmcts.getActionProb(x, temp=0)), [game])\n pwins, nwins, draws = arena.playGames(self.args['arenaComparePerGame'])\n gwinrates = float(nwins) / float(pwins + nwins + draws)\n log.info('Joat Win Rate vs. Greedy : %d' % (gwinrates))\n log.info('ADAPTED/GREEDY WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))", "def evalRun_one_episode(env, agent, agent_name=\"random\", max_steps_per_episode=100):\n observation = env.reset()\n action = agent.begin_episode(observation)\n step_number = 0\n total_watch = 0.\n q_videos = []\n w_videos = []\n total_qual = 0\n start_time = time.time()\n total_length_videos = 0\n while True:\n observation, reward, done, info, _ = env.step(action)\n\n for j in range(len(observation['response'])):\n if observation['response'][j]['click'] == 1:\n index = action[j]\n total_length_videos += list(observation['doc'].values())[index][-1]\n total_watch += reward[1]\n total_qual += reward[0]\n q_videos += [reward[0]]\n w_videos += [reward[1]]\n\n # Update environment-specific metrics with responses to the slate.\n env.update_metrics(observation['response'], info)\n step_number += 1\n\n if done:\n break\n elif step_number == max_steps_per_episode:\n # Stop the run loop once we reach the true end of episode.\n break\n else:\n if agent_name == \"random\":\n action = agent.step(observation)\n elif agent_name == \"fsq\":\n action = agent.step(reward[1], observation)\n else:\n print(\"agent name is not correct, please select (random || fsq))\")\n\n agent.end_episode(reward[1], observation)\n time_diff = time.time() - start_time\n\n return step_number, total_watch, time_diff, total_qual/step_number, q_videos, w_videos" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train the agent over a given number of iterations. Each iteration consists of self-play over n_episodes, followed by a learn step in which the agent updates its network from a random sample of the replay buffer
def train(self, iters, n_episodes):
    for i in range(iters):
        self.self_play(n_episodes)
        self.learn()
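The two-line loop above leans entirely on self_play and learn, neither of which is shown in this row. A hedged sketch of their likely shape, in which every attribute name (env, act, replay_buffer, network, batch_size) is an assumption made purely for illustration:

def self_play(self, n_episodes):
    # Collect n_episodes of experience and push each transition into the buffer.
    for _ in range(n_episodes):
        state, done = self.env.reset(), False
        while not done:
            action = self.act(state)
            next_state, reward, done, info = self.env.step(action)
            self.replay_buffer.add(state, action, reward, next_state, done)
            state = next_state

def learn(self):
    # Update the network from a random sample drawn from the replay buffer.
    batch = self.replay_buffer.sample(self.batch_size)
    self.network.update(batch)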
[ "def train(self, num_episodes: int) -> List:\n reward_per_episode: List[float] = []\n num_steps_sampled: int = 0\n\n for episode_idx in range(1, num_episodes + 1):\n # Log progress\n if episode_idx % self.config.LOG_EVERY == 0:\n window_rewards = reward_per_episode[-self.config.LOG_EVERY :]\n print(\n f\"episode {episode_idx}/{num_episodes}, \"\n f\"avg. episode reward: {sum(window_rewards) / len(window_rewards)}, \"\n f\"num steps sampled: {num_steps_sampled}\"\n )\n\n # Sample one episode\n observation = self.env.reset()\n episode_length: int = 0\n episode_reward: float = 0.0\n while True:\n epsilon = self.epsilon_scheduler.get_value(num_steps_sampled)\n action = self.compute_action(observation, epsilon)\n next_obs, reward, done, _ = self.env.step(action)\n self.replay_buffer.add(observation, action, reward, done, next_obs)\n observation = next_obs\n episode_length += 1\n episode_reward += reward\n if (\n episode_length % self.config.UPDATE_EVERY == 0\n and num_steps_sampled > self.config.LEARNING_STARTS\n ):\n self.update_once()\n if done is True:\n break\n\n reward_per_episode.append(episode_reward)\n num_steps_sampled += episode_length\n return reward_per_episode", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def train_model(self, model, n_episodes=100, epsilon=.1, alpha=.1, discount=.9, save_results=True, save_every=25):\n self.on_init()\n # Training variables\n exp_replay = ExperienceReplay(max_memory=10_000, alpha=alpha, discount=discount)\n history = []\n moves_per_apple_per_episode = []\n loss = float('inf')\n apple_count = 0\n total_moves = 0\n apples_per_epsiode = []\n t0 = time.time()\n\n if save_results:\n if not os.path.exists('./results/deep_q_results'):\n os.makedirs('./results/deep_q_results')\n\n for e in range(n_episodes):\n apple_episode_count = 0\n move_count = 0\n # The next new episode.\n self.agent.reset_game()\n pygame.event.pump()\n self.on_render()\n over = self.agent.lost_game()\n score = len(self.agent.x)\n pygame.display.set_caption(f'{self.caption[:-2]} {score}')\n while not over:\n pygame.event.pump()\n self.on_render()\n # Get initial input\n current_state = self.agent.get_state()\n q = model.predict(current_state)\n if (np.random.rand() <= epsilon) and np.sum(current_state[:2] < 2):\n new_direction 
= np.random.randint(0, 3, size=1)[0]\n if (e % save_every) == 0:\n print(self.agent.print_board())\n print('moving randomly')\n else:\n new_direction = np.argmax(q[0])\n if (e % save_every) == 0:\n print(self.agent.print_board())\n print(q)\n\n # Apply action, get rewards and new state.\n future_state, reward = self.agent.move_snake(new_direction)\n pygame.event.pump()\n self.on_render()\n move_count = move_count + 1\n if reward == .5:\n score = score + 1\n pygame.display.set_caption(f'{self.caption[:-2]} {score}')\n apple_episode_count = apple_episode_count + 1\n\n # Store experience.\n over = self.agent.lost_game()\n exp_replay.remember([current_state, new_direction, reward, future_state], over)\n\n # Get collected data to train model.\n inputs, targets = exp_replay.get_batch(model, batch_size=50)\n\n # Train model on experiences.\n loss = model.train_on_batch(inputs, targets)\n history.append(loss)\n total_moves = total_moves + 1\n\n if apple_episode_count == 0: \n moves_per_apple_per_episode.append(0) \n else:\n moves_per_apple_per_episode.append(move_count / apple_episode_count)\n\n if (e > 0) and (e % save_every == 0):\n t1 = time.time()\n time_delta = t1 - t0\n\n print(f\"{time_delta/60:.2f} minutes\")\n print(f\"Epoch {e:,}/{n_episodes:,} | Loss {loss:.3f} | Moves per Game: {total_moves/e:.2f} | Apple count {apple_count}\")\n apple_count = 0\n\n if save_results:\n # save model and and plot various results to local directory\n # serialize model to JSON\n model_json = model.to_json()\n with open(f\"./results/deep_q_results/model-{e}.json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(f\"./results/deep_q_results/model-{e}.h5\")\n print(f\"Saved model to disk\")\n\n # plot results\n fig = plt.figure(figsize=(12, 7))\n plt.plot(history)\n plt.savefig(f'./results/deep_q_results/loss_per_move-{e}.png')\n plt.close()\n fig = plt.figure(figsize=(12, 7))\n plt.plot(apples_per_epsiode, '.')\n plt.savefig(f'./results/deep_q_results/score_per_episode-{e}.png')\n plt.close()\n apples_per_epsiode.append(apple_episode_count)", "def train_MDP_agent(game, number_of_training_for_session=100):\n game.players[1][PLAYER].init_repeat_counter()\n print(\"Weights before current training: \\n\", game.players[1][PLAYER].get_weights_with_names())\n\n for i in range(number_of_training_for_session):\n game.run_single_game(True)\n\n print(\"Weights after current training: \\n\", game.players[1][PLAYER].get_weights_with_names())\n game.players[1][PLAYER].decay_alpha()", "def train(\n env: DiscreteEnvironment[TState, TAction],\n agent: DiscreteAgent[TState, TAction],\n n_episodes: int,\n on_action: Callable[[TState, TAction, float, int], None] = None,\n on_episode_end: Callable[[int], None] = None,\n) -> None:\n for ep in range(n_episodes):\n t = 0\n while not env.terminated:\n s, a, r = agent.act_and_train(t) # returns (S_t, A_t, R_t)\n if on_action:\n on_action(s, a, r, t)\n t += 1\n agent.episode_end()\n if on_episode_end:\n on_episode_end(t)\n env.reset()", "def train(self):\n if self.train_time <= 0:\n # train on episodes alone\n time = []\n b = datetime.datetime.utcnow()\n for ep in range(self.episodes+1):\n begin = datetime.datetime.utcnow()\n self.train_episode()\n end = datetime.datetime.utcnow()\n\n if self.verbose:\n delta = end - begin\n delta = delta.total_seconds()\n time.append(delta)\n eta = np.mean(time)*(self.episodes - ep)\n print(\"Ep %d done in %.3f seconds, ETA %.2f seconds\" %(ep+1, delta, eta))\n \n e = datetime.datetime.utcnow()\n 
delta = (e-b).total_seconds()\n self.pprint(\"Done training, took %.2f seconds\" %(delta))\n else:\n begin = datetime.datetime.utcnow()\n ep = 0\n while datetime.datetime.utcnow() - begin < self.calculation_time:\n ep_begin = datetime.datetime.utcnow()\n self.train_episode()\n ep_end = datetime.datetime.utcnow()\n\n ep += 1\n if self.verbose:\n delta = ep_end - ep_begin\n delta = delta.total_seconds()\n total = datetime.datetime.utcnow() - begin\n total = total.total_seconds()\n print(\"Ep %d done in %.3f seconds, TOTAL: %.2f\" %(ep, delta, total))\n\n self.episodes = ep\n end = datetime.datetime.utcnow()\n delta = end - begin\n delta = delta.total_seconds()\n self.pprint(\"Done training, took %.2f seconds\" %(delta)) \n\n self.trained = True", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n 
self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def train(self, n, *_, render_every=None):\n for i in range(1, n + 1):\n render = render_every and i % render_every == 0\n self.train_on_episode(render=render)", "def learn(self):\n iter_arr = []\n rp_arr = []\n loss_arr = []\n elo_scores_rp = 0\n start_i = self.loadLog()\n # examples of the iteration\n self.loadTrainExamples(start_i)\n for i in range(start_i, self.args.numIters+1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n if not self.skipFirstSelfPlay or i>1:\n iterationTrainExamples1 = []\n iterationTrainExamples2 = []\n iterationTrainExamples3 = []\n iterationTrainExamples = []\n \n for eps in range(self.args.numEps):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n e, stage1, stage2, stage3 = self.executeEpisode()\n iterationTrainExamples.extend(e)\n # slice stages into example histories\n for board, pi, r in e[0:stage1+1]:\n compressed_pi = self.game.compressActions(pi)\n iterationTrainExamples1.append((board, compressed_pi, r))\n if stage2 > stage1:\n for board, pi, r in e[stage1+1:stage2+1]:\n compressed_pi = self.game.compressActions(pi)\n iterationTrainExamples2.append((board, compressed_pi, r))\n if stage3 > stage2:\n iterationTrainExamples3.extend(e[stage2+1:stage3+1])\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append((iterationTrainExamples, iterationTrainExamples1, iterationTrainExamples2, iterationTrainExamples3))\n \n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory), \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n \n # shuffle examples before training\n trainExamples = []\n trainExamples_stage1 = []\n for es0, es1, es2, es3 in self.trainExamplesHistory:\n trainExamples.extend(es0)\n trainExamples_stage1.extend(es1)\n trainExamples_stage1.extend(es2)\n shuffle(trainExamples)\n shuffle(trainExamples_stage1)\n\n # training new network, keeping a copy of the old one\n self.nnet['n1'].save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet['n1'].load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n if self.args.budget_ratio > 1:\n 
self.nnet['s1'].save_checkpoint(folder=self.args.checkpoint, filename='temps.pth.tar')\n self.pnet['s1'].load_checkpoint(folder=self.args.checkpoint, filename='temps.pth.tar')\n if self.args.three_stages:\n self.nnet['s2'].save_checkpoint(folder=self.args.checkpoint, filename='temps2.pth.tar')\n self.pnet['s2'].load_checkpoint(folder=self.args.checkpoint, filename='temps2.pth.tar')\n if self.args.three_stages and self.args.budget_ratio > 1 and self.args.large:\n self.nnet['s3'].save_checkpoint(folder=self.args.checkpoint, filename='temps3.pth.tar')\n self.pnet['s3'].load_checkpoint(folder=self.args.checkpoint, filename='temps3.pth.tar')\n nmcts = MCTS(self.game, self.nnet, self.args)\n pmcts = MCTS(self.game, self.pnet, self.args)\n loss_dict = {}\n if self.args.three_stages:\n loss_dict['total_loss'] = self.nnet['n1'].train(trainExamples)\n loss_dict['s2total_loss'] = self.nnet['s2'].train(trainExamples_stage1)\n if self.args.budget_ratio > 1:\n loss_dict['stotal_loss'] = self.nnet['s1'].train(trainExamples)\n if self.args.budget_ratio > 1 and self.args.large:\n loss_dict['s3total_loss'] = self.nnet['s3'].train(trainExamples_stage1)\n else:\n loss_dict['total_loss'] = self.nnet['n1'].train(trainExamples)\n if self.args.budget_ratio > 1:\n loss_dict['stotal_loss'] = self.nnet['s1'].train(trainExamples)\n self.loss_dicts.append(loss_dict)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i-1)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda v, w,x,y,z: np.argmax(pmcts.getActionProb(v, w, x, y, z, temp=0)),\n lambda v, w,x,y,z: np.argmax(nmcts.getActionProb(v, w, x, y, z, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins+nwins == 0 or float(nwins)/(pwins+nwins) < self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n self.nnet['n1'].load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n if self.args.budget_ratio > 1:\n self.nnet['s1'].load_checkpoint(folder=self.args.checkpoint, filename='temps.pth.tar')\n if self.args.three_stages:\n self.nnet['s2'].load_checkpoint(folder=self.args.checkpoint, filename='temps2.pth.tar')\n if self.args.budget_ratio > 1 and self.args.large:\n self.nnet['s3'].load_checkpoint(folder=self.args.checkpoint, filename='temps3.pth.tar')\n else:\n print('ACCEPTING NEW MODEL')\n self.saveLog(i)\n\n loss_arr.append(loss_dict['total_loss'])\n if self.args.plot and i%5 == 0:\n plt.plot([x for x in loss_arr])\n plt.title(\"Total loss\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"pi_loss+v_loss\")\n plt.savefig(self.args.checkpoint +\"/\"+str(i) + \"loss.jpg\")\n plt.close()", "def learn(self) -> None:\n for i in range(1, self.args.num_selfplay_iterations + 1):\n print(f'------ITER {i}------')\n if not self.update_on_checkpoint or i > 1: # else: go directly to backpropagation\n\n # Self-play/ Gather training data.\n iteration_train_examples = list()\n for _ in trange(self.args.num_episodes, desc=\"Self Play\", file=sys.stdout):\n self.mcts.clear_tree()\n iteration_train_examples.append(self.executeEpisode())\n\n if sum(map(len, iteration_train_examples)) > self.args.max_buffer_size:\n iteration_train_examples.pop(0)\n\n # Store data from previous self-play iterations into the history.\n self.trainExamplesHistory.append(iteration_train_examples)\n\n # Print out statistics about the replay buffer, and back-up the data 
history to a file (can be slow).\n GameHistory.print_statistics(self.trainExamplesHistory)\n self.saveTrainExamples(i - 1)\n\n # Flatten examples over self-play episodes and sample a training batch.\n complete_history = GameHistory.flatten(self.trainExamplesHistory)\n\n # Training new network, keeping a copy of the old one\n self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n\n # Backpropagation\n for _ in trange(self.args.num_gradient_steps, desc=\"Backpropagation\", file=sys.stdout):\n batch = self.sampleBatch(complete_history)\n\n self.neural_net.train(batch)\n self.neural_net.monitor.log_batch(batch)\n\n # Pitting\n accept = True\n if self.args.pitting:\n # Load in the old network.\n self.opponent_net.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n\n # Perform trials with the new network against the old network.\n arena = Arena(self.game, self.arena_player, self.arena_opponent, self.args.max_trial_moves)\n accept = arena.pitting(self.args, self.neural_net.monitor)\n\n if accept:\n print('ACCEPTING NEW MODEL')\n self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.neural_net.save_checkpoint(folder=self.args.checkpoint, filename=self.args.load_folder_file[-1])\n else:\n print('REJECTING NEW MODEL')\n self.neural_net.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')", "def train_agent(\n self,\n env: UnityEnvironment,\n agent: AgentEnsemble,\n verbose: bool = True,\n exit_when_solved: bool = True,\n number_episodes: int = 1000,\n **kwargs,\n ) -> None:\n agent.set_train_mode(True)\n brain_name = env.brain_names[0]\n number_of_agents = agent.number_of_agents\n\n # training loop over the number of episodes\n for _ in range(number_episodes):\n episode_number = len(self.scores) + 1\n\n # reset the environment and agent\n env_info = env.reset(train_mode=True)[brain_name]\n agent.reset()\n\n # get initial state and reset scores\n states = env_info.vector_observations\n scores = np.zeros(number_of_agents)\n while True:\n # select actions\n actions = agent.act(states, True)\n # take actions\n env_info = env.step(actions)[brain_name]\n # observe actions\n rewards = env_info.rewards\n # observe next states\n next_states = env_info.vector_observations\n # see if the episode is completed\n dones = env_info.local_done\n # give the experiences to the agent to store and potentially learn from\n agent.step(states, actions, rewards, next_states, dones)\n # update score\n scores += rewards\n states = next_states\n # if the episode is over, break\n if np.all(dones):\n break\n\n self.scores.append(np.max(scores))\n average_score_window = np.mean(self.scores[-self.score_window_size :])\n\n if verbose:\n print(\n f\"\\rEpisode {episode_number}\\tAverage Score: {average_score_window:.2f}\",\n end=\"\",\n )\n if episode_number % 100 == 0:\n print(\n f\"\\rEpisode {episode_number}\\tAverage Score: {average_score_window:.2f}\",\n )\n\n if (\n exit_when_solved\n and episode_number >= self.score_window_size\n and average_score_window >= self.score_threshold\n ):\n if verbose:\n print(\n f\"\\rEnvironment solved in {len(self.scores)} episodes! 
\",\n )\n break", "def train(self):\n n_itr = self.startup()\n # with logger.prefix(f\"itr #0 \"):\n # eval_traj_infos, eval_time = self.evaluate_agent(0)\n # self.log_diagnostics(0, eval_traj_infos, eval_traj_infos, eval_time)\n for itr in range(n_itr):\n logger.set_iteration(itr)\n with logger.prefix(f\"itr #{itr} \"):\n self.agent.sample_mode(itr)\n samples, traj_infos = self.sampler.obtain_samples(itr)\n self.agent.train_mode(itr)\n opt_info = self.algo.optimize_agent(itr, samples)\n self.store_diagnostics(itr, traj_infos, opt_info)\n if (itr + 1) % self.log_interval_itrs == 0:\n eval_traj_infos, eval_time = self.evaluate_agent(itr)\n self.log_diagnostics(itr, traj_infos, eval_traj_infos, eval_time)\n avg_return = np.average([e['Return'] for e in eval_traj_infos])\n if self._best_avg_return is None:\n self._best_avg_return = avg_return\n self._eval_returns.append(avg_return)\n if itr > self._warm_up_itr and self.ran_out_of_patience(n=30):\n logger.log(f\"Early stopping on itr #{itr}\")\n break\n self.shutdown()", "def training(self):\r\n\r\n self.agent.train = common.function(self.agent.train)\r\n #Counter of steps\r\n self.agent.train_step_counter.assign(0)\r\n #Compute the average return (network without training)\r\n avg_return = compute_avg_return(self.eval_env, self.agent.policy, self.num_eval_episodes)\r\n returns = [avg_return]\r\n losses = []\r\n\r\n\r\n for _ in range(self.num_iterations):\r\n\r\n for _ in range(self.collect_steps_per_iteration):\r\n #Save the step make it with the actual neural network\r\n collect_step(self.train_env, self.agent.collect_policy, self.replay_buffer)\r\n\r\n #Previous states for replay experience\r\n experience, unused_info = next(self.iterator)\r\n #Loss of the network \r\n train_loss = self.agent.train(experience).loss\r\n #Actual step\r\n step = self.agent.train_step_counter.numpy()\r\n\r\n if step % self.log_interval == 0:\r\n print('step = {}: loss = {}'.format(step, train_loss))\r\n losses.append([train_loss.numpy()])\r\n \r\n\r\n if step % self.eval_interval == 0:\r\n #Evaluates the behaviour while the network is training\r\n avg_return = compute_avg_return(self.eval_env, self.agent.policy, self.num_eval_episodes)\r\n print('step = {}: Average Return = {}'.format(step, avg_return))\r\n returns.append(avg_return) \r\n \r\n return returns, losses", "def train(self, n):\n\t\tfor i in range(n):\n\t\t\tself.train_one_time(i)", "def run_game(nb_episodes, agent, verbose=False):\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n discount_factor = 0.1\n start_time = time.time()\n\n\n #reward_values = {\"positive\": 1.0, \"negative\": 0.0, \"tick\": 0.0, \"loss\": 0.0, \"win\": 0.0}\n # TODO: when training use the following instead:\n reward_values = agent.reward_values()\n \n env = PLE(FlappyBird(), fps=30, display_screen=False, force_fps=False, rng=None,\n reward_values = reward_values)\n # TODO: to speed up training change parameters of PLE as follows:\n # display_screen=False, force_fps=True \n env.init()\n\n n_episode_list = []\n score_list = []\n episode_list = []\n episode_count = 0\n score = 0\n while nb_episodes > episode_count:\n # pick an action\n # TODO: for training using agent.training_policy instead\n state = env.game.getGameState()\n # TODO: Cleanup\n state = agent.construct_state((state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player']))\n action = agent.training_policy(state)#policy(state)\n #print action\n #if action == 1:\n # print \"TICK\"\n\n # step the 
environment\n reward = env.act(env.getActionSet()[action])\n #print(\"reward=%d\" % reward)\n\n # TODO: for training let the agent observe the current state transition\n\n episode_list.append((state, action, reward))\n\n score += reward\n \n # reset the environment if the game is over\n if env.game_over():\n if verbose:\n print(\"score for this episode: %d\" % score)\n\n env.reset_game()\n\n # Find all (state, action) pairs we've visited in this episode\n # We convert each state to a tuple so that we can use it as a dict key\n sa_in_episode = set([(tuple(x[0]), x[1]) for x in episode_list])\n for state, action in sa_in_episode:\n #if Q[state][1] > 0.0:\n # print Q[state]\n sa_pair = (state, action)\n # Find the first occurance of the (state, action) pair in the episode\n first_occurence_idx = next(i for i, x in enumerate(episode_list)\n if x[0] == state and x[1] == action)\n # Sum up all rewards since the first occurance\n G = sum([x[2] * (discount_factor ** i) for i, x in enumerate(episode_list[first_occurence_idx:])])\n # Calculate average return for this state over all sampled episodes\n returns_sum[sa_pair] += G\n returns_count[sa_pair] += 1.0\n agent.Q[state][action] = returns_sum[sa_pair] / returns_count[sa_pair]\n\n score = 0\n env.init()\n\n # Do a test run every 10 episodes\n if episode_count % 10 == 0:\n game_on = True\n if verbose:\n print('========= Starting trained game =========')\n while game_on:\n state = env.game.getGameState()\n state = (\n state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player'])\n state = agent.construct_state(state)\n action = agent.policy(state)\n reward = env.act(env.getActionSet()[action])\n score += reward\n\n if env.game_over():\n if verbose:\n print(\"Score for the Game: \", score)\n n_episode_list.append(episode_count)\n score_list.append(score)\n env.reset_game()\n score = 0\n game_on = False\n\n episode_count += 1\n score = 0\n\n print 'Time Training: {} min'.format((time.time() - start_time)/60)\n\n print('========= Starting Final Test Game =========')\n\n # Play 1 games after training\n score = 0\n env.init()\n game_on = True\n while game_on:\n state = env.game.getGameState()\n state = (state['player_vel'], state['player_y'], state['next_pipe_top_y'], state['next_pipe_dist_to_player'])\n state = agent.construct_state(state)\n action = agent.policy(state)\n reward = env.act(env.getActionSet()[action])\n score += reward\n\n if env.game_over():\n print(\"Score for the Game: \", score)\n env.reset_game()\n game_on = False\n\n\n agent.save_Q()\n plot_results(n_episode_list, score_list, save=True)\n save_results(n_episode_list, score_list)", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def step(self):\n num_episodes, num_timesteps = 0, 0\n all_results = []\n\n while num_episodes < self.config['min_episodes_per_batch'] or \\\n num_timesteps < self.config['min_steps_per_batch']:\n # setting the latest params to the actor, getting the fitness and the noise seed sync.\n future_object_ids = [remote_actor.sample(self.latest_flat_weights) \\\n for remote_actor in self.remote_actors]\n results = [\n future_object.get() for future_object in 
future_object_ids\n ]\n\n for result in results:\n num_episodes += sum(\n len(pair) for pair in result['noisy_lengths'])\n num_timesteps += sum(\n sum(pair) for pair in result['noisy_lengths'])\n # each step we need to get the fitness, but there is no so many actor, so it needs to run\n # many times. The total results are in the 'all_results'.\n all_results.extend(results)\n\n all_noise_indices = []\n all_training_rewards = []\n all_training_lengths = []\n all_eval_rewards = []\n all_eval_lengths = []\n\n for result in all_results:\n all_eval_rewards.extend(result['eval_rewards'])\n all_eval_lengths.extend(result['eval_lengths'])\n\n all_noise_indices.extend(result['noise_indices'])\n all_training_rewards.extend(result['noisy_rewards'])\n all_training_lengths.extend(result['noisy_lengths'])\n\n assert len(all_eval_rewards) == len(all_eval_lengths)\n assert (len(all_noise_indices) == len(all_training_rewards) ==\n len(all_training_lengths))\n\n self.sample_total_episodes += num_episodes\n self.sample_total_steps += num_timesteps\n\n eval_rewards = np.array(all_eval_rewards)\n eval_lengths = np.array(all_eval_lengths)\n noise_indices = np.array(all_noise_indices)\n noisy_rewards = np.array(all_training_rewards)\n noisy_lengths = np.array(all_training_lengths)\n\n # normalize rewards to (-0.5, 0.5), shape:[batch_size, 2]\n proc_noisy_rewards = utils.compute_centered_ranks(noisy_rewards)\n # noise shape:[batch_size, weight_total_size]\n noises = [\n self.noise.get(index, self.agent.weights_total_size)\n for index in noise_indices\n ]\n\n # Update the parameters of the model.\n self.agent.learn(proc_noisy_rewards, noises)\n self.train_steps += 1\n self.latest_flat_weights = self.agent.get_flat_weights()\n\n # Update obs filter to all the actor sync.\n self._update_filter()\n\n # Store the evaluate rewards\n if len(all_eval_rewards) > 0:\n self.eval_rewards_stat.add(np.mean(eval_rewards))\n self.eval_lengths_stat.add(np.mean(eval_lengths))\n\n metrics = {\n \"episodes_this_iter\": noisy_lengths.size,\n \"sample_total_episodes\": self.sample_total_episodes,\n 'sample_total_steps': self.sample_total_steps,\n \"evaluate_rewards_mean\": self.eval_rewards_stat.mean,\n \"evaluate_steps_mean\": self.eval_lengths_stat.mean,\n \"timesteps_this_iter\": noisy_lengths.sum(),\n }\n\n self.log_metrics(metrics)\n return metrics" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a pair message between nodes Hv and Hw. Because the inputs are concatenated (cat), the message from hv to hw differs from the message from hw to hv.
def __init__(self, dim_hv, dim_hw, msg_dim):
    super(PairMessageGenerator, self).__init__()
    self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
    self.in_dim = dim_hv + dim_hw  # row * feature_dim, 2048
    self.mlp = nn.Sequential(
        nn.LayerNorm(self.in_dim),  # this layer norm is important to create diversity
        nn.Linear(self.in_dim, self.msg_dim),
        nn.LeakyReLU(0.2)
    )
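As an aside for readers of this entry: a minimal, self-contained forward-pass sketch consistent with the __init__ above. The forward method is not part of the original snippet, so the concatenation order and the demo at the end are assumptions, not the source implementation.

import torch
import torch.nn as nn

class PairMessageGenerator(nn.Module):
    def __init__(self, dim_hv, dim_hw, msg_dim):
        super(PairMessageGenerator, self).__init__()
        self.in_dim = dim_hv + dim_hw
        self.mlp = nn.Sequential(
            nn.LayerNorm(self.in_dim),
            nn.Linear(self.in_dim, msg_dim),
            nn.LeakyReLU(0.2)
        )

    def forward(self, hv, hw):
        # cat is order-sensitive: [hv, hw] and [hw, hv] feed different inputs to the
        # shared MLP, so the hv->hw message differs from the hw->hv message.
        return self.mlp(torch.cat([hv, hw], dim=-1))

gen = PairMessageGenerator(dim_hv=4, dim_hw=4, msg_dim=8)
hv, hw = torch.randn(2, 4), torch.randn(2, 4)
msg_vw = gen(hv, hw)  # message sent from node v to node w
msg_wv = gen(hw, hv)  # message sent from node w to node v (generally different)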
[ "def __mk_output_msg(filter):\n msg = Sonar()\n msg.header.stamp = rospy.Time.now()\n best = filter.get_best_particle()\n cov = filter.get_covariance()\n msg.x = best[0]\n msg.y = best[1]\n msg.covariance = cov.flatten().tolist()\n return msg", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def create_ising_wishbone(h, w, **kwargs):\n assert h == 2 # Only works for 2 branches\n G = nx.empty_graph(h * w)\n n = w\n G.add_edges_from([(v, v+1) for v in range(n-1)])\n G.add_edges_from([(v, v+1) for v in range(n,2*n-1)])\n G.add_edges_from([(v, v+n) for v in range(n // 2)]) # Connect first half of nodes\n return nx.to_numpy_matrix(G)", "def write_message(mat, gen_coo, gen_bin_msg):\n for (h, w, c), target in merge_gen(gen_coo, gen_bin_msg):\n n = mat[h][w][c]\n n = n - n % 2 + target # (n >> 2 << 2)+j\n mat[h][w][c] = n", "def link_cervix_hpv(self, mention, labels, sim_thr=0.7): # @smarchesin TODO: too hardcoded? too simplistic?\n\n\t\tif mention.text == 'hpv': # mention contains 'hpv' only\n\t\t\treturn [[mention.text, 'human papilloma virus infection']]\n\t\telif mention.text == 'hpv infection': # mention contains 'hpv infection'\n\t\t\treturn [[mention.text, 'human papilloma virus infection']]\n\t\telif mention[:2].text == 'hpv infection': # 'hpv infection' as first term - match rest of mention w/ similarity-based linking\n\t\t\treturn [[mention[:2].text, 'human papilloma virus infection']] + self.associate_mention2candidate(mention[2:], labels, sim_thr)\n\t\telif mention[-2:].text == 'hpv infection': # 'hpv infection' as last term - match rest of mention w/ similarity-based linking\n\t\t\treturn [[mention[-2:].text, 'human papilloma virus infection']] + self.associate_mention2candidate(mention[:-2], labels, sim_thr)\n\t\telif mention[0].text == 'hpv': # 'hpv' as first term - match rest of mention w/ similarity-based linking\n\t\t\treturn [[mention[0].text, 'human papilloma virus infection']] + self.associate_mention2candidate(mention[1:], labels, sim_thr)\n\t\telif mention[-1].text == 'hpv': # 'hpv' as last term - match rest of mention w/ similarity-based linking\n\t\t\treturn [[mention[-1].text, 'human papilloma virus infection']] + self.associate_mention2candidate(mention[:-1], labels, sim_thr)\n\t\telse: # biopsy not BOS or EOS\n\t\t\thpv_idx = [idx for idx, term in enumerate(mention) if 'hpv' in term.text][0] # get 'hpv' mention index \n\t\t\tpre_anatomical_location = [['', '']]\n\t\t\tpost_anatomical_location = [['', '']]\n\t\t\tif mention[:hpv_idx]: # link mention before 'hpv'\n\t\t\t\tpre_anatomical_location = self.associate_mention2candidate(mention[:hpv_idx], labels, sim_thr)\n\t\t\tif mention[hpv_idx+1:]: # link mention after 'hpv'\n\t\t\t\tpost_anatomical_location = self.associate_mention2candidate(mention[hpv_idx+1:], labels, sim_thr)\n\t\t\tif pre_anatomical_location[0][1] and post_anatomical_location[0][1]: # both mentions matched\n\t\t\t\treturn [[mention[hpv_idx].text, 'human papilloma virus infection']] + pre_anatomical_location + post_anatomical_location\n\t\t\telif pre_anatomical_location[0][1]: # only pre mention matched\n\t\t\t\treturn [[mention[hpv_idx].text, 'human papilloma virus infection']] + pre_anatomical_location\n\t\t\telif post_anatomical_location[0][1]: # only post mention matched\n\t\t\t\treturn [[mention[hpv_idx].text, 'human papilloma virus infection']] + post_anatomical_location\n\t\t\telse: # no mention matched - return only 'human papilloma virus infection' concept\n\t\t\t\treturn 
[[mention[hpv_idx].text, 'human papilloma virus infection']]", "def mk_msg(self, flow):\r\n msg = of.ofp_flow_mod()\r\n msg.command = int(flow.command)\r\n msg.priority = int(flow.priority)\r\n msg.match = of.ofp_match()\r\n if flow.match.dl_type is not None:\r\n msg.match.dl_type = int(flow.match.dl_type)\r\n if flow.match.nw_src is not None:\r\n msg.match.nw_src = IPAddr(flow.match.nw_src)\r\n if flow.match.nw_dst is not None:\r\n msg.match.nw_dst = IPAddr(flow.match.nw_dst)\r\n if flow.match.dl_src is not None:\r\n msg.match.dl_src = EthAddr(flow.match.dl_src)\r\n if flow.match.dl_dst is not None:\r\n msg.match.dl_dst = EthAddr(flow.match.dl_dst)\r\n for outport in flow.actions:\r\n msg.actions.append(of.ofp_action_output(port=int(outport)))\r\n return msg", "def mpm_create_common_photon_uptake(new_model, nr_phases):\n \n new_model.add_metabolites([cobra.Metabolite('Photon_uptake_e_common', name='Photon_uptake_e_common')])\n \n for phase in range(1, nr_phases + 1): # for all phases generate water vapor reaction and then put together\n \n r_select = new_model.reactions.get_by_id('Photon_tx_{p}'.format(p=str(phase).zfill(2)))\n r_select.add_metabolites({'Photon_uptake_e_common': 1})\n\n common_reaction = cobra.Reaction('Photon_uptake_common', name='Photon_uptake_common',subsystem='Balance')\n new_model.add_reaction(common_reaction)\n common_reaction.add_metabolites({'Photon_uptake_e_common': -1})", "def generate_huawei_2g_node_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. 
GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BSCBASIC p_mo ON p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_node_parameters \n (node, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value \n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_node_parameters TT2 on TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.node is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_node_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.node IS NULL\n )\n DELETE FROM network_audit.baseline_node_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.baseline_node_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n 
date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_node_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def twist_tup_to_msg(self, tup):\n msg=Twist()\n msg.linear.x = tup[0][0]\n msg.linear.y = tup[0][1]\n msg.linear.z = tup[0][2]\n msg.angular.x = tup[1][0]\n msg.angular.y = tup[1][1]\n msg.angular.z = tup[1][2] \n return msg", "def create_output(self, messages):", "def update(G,CP,h):\n hexpv, hp = f[h]\n #print 'DB10',hp\n # filter new pairs (h,g), g in G\n C = G.copy()\n D = set()\n \n while C:\n # select a pair (h,g) by popping an element from C\n g = C.pop()\n gexpv = f[g][0]\n LCMhg = lcm_expv(hexpv, gexpv)\n \n def lcm_divides(p):\n expv = lcm_expv(hexpv, f[p][0])\n # LCM(LM(h), LM(p)) divides LCM(LM(h),LM(g))\n return monomial_div(LCMhg,expv)\n \n # HT(h) and HT(g) disjoint: hexpv + gexpv == LCMhg\n if monomial_mul(hexpv,gexpv) == LCMhg or (\\\n not any( lcm_divides(f) for f in C ) and \\\n not any( lcm_divides(pr[1]) for pr in D )):\n D.add((h,g))\n\n E = set()\n while D:\n # select h,g from D\n h,g = D.pop()\n gexpv = f[g][0]\n LCMhg = lcm_expv(hexpv, gexpv)\n if not monomial_mul(hexpv,gexpv) == LCMhg:\n E.add((h,g))\n \n # filter old pairs\n B_new = set()\n \n while CP:\n # select g1,g2 from CP\n g1,g2 = CP.pop()\n g1expv = f[g1][0]\n g2expv = f[g2][0]\n LCM12 = lcm_expv(g1expv,g2expv)\n # if HT(h) does not divide lcm(HT(g1),HT(g2))\n if not monomial_div(LCM12, hexpv) or \\\n lcm_expv(g1expv,hexpv) == LCM12 or \\\n lcm_expv(g2expv,hexpv) == LCM12:\n B_new.add((g1,g2))\n \n B_new |= E\n \n # filter polynomials\n G_new = set()\n while G:\n g = G.pop()\n if not monomial_div(f[g][0], hexpv):\n G_new.add(g)\n G_new.add(h)\n \n return G_new,B_new", "def test_homography(h, src_id, trg_id, kps_1, kps_2):\r\n \r\n src = np.array([kps_1[src_id][2], kps_1[src_id][3] , 1]).reshape(3,1)\r\n trg = np.array([kps_2[trg_id][2], kps_2[trg_id][3] , 1]).reshape(3,1)\r\n \r\n projection = np.dot(h, src)\r\n print(\"asource\")\r\n print(src)\r\n print(\"proj\")\r\n print(projection/projection[-1])\r\n print(\"target\")\r\n print(trg)", "def lhco_line(self):\n if not self.check_def(['eta','phi','pt','mass','pid']): \n sys.exit('Particle error: some attribute not defined')\n\n jet=[1,2,3,4,5,6,21]\n inv_list=[12,14,16,18,1000022,1000023,1000024,1000025,1000035]\n\n #define pid-> type\n pid_to_type={11:1,-11:1,13:2,-13:2,15:3,-15:3,22:0}\n for data in jet:\n pid_to_type[data]=4\n pid_to_type[-data]=4\n for data in 
inv_list:\n pid_to_type[data]=6\n pid_to_type[-data]=6\n\n\n \n type=''\n for key in pid_to_type.keys():\n if self.pid==key:\n type=pid_to_type[key]\n break\n \n if type=='':\n print 'Warning unknown type'\n return ''\n\n text =' '+str(type) #type LHCO\n text+=' '+str(self.eta) #ETA\n text+=' '+str(self.phi) #PHI\n text+=' '+str(self.pt) #PT\n text+=' '+str(self.mass) #JMASS\n if self.pid in [11,13]: #NTRK\n text+=' -1' \n else:\n text+=' 1'\n if self.pid in [-5,5]: #BTAG\n text+=' 2'\n else:\n text+=' 0'\n text+=' 0' #HAD/EM\n text+=' 0' #DUMMY 1\n text+=' 0' #DUMMY 2\n \n return text", "def generate_huawei_2g_cell_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 1\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. 
GCELL\n cell_level_join = \"\"\" INNER JOIN {0}.GCELL gcell ON gcell.\"CELLID\" = t_mo.\"CELLID\" AND gcell.neid = t_mo.neid \n AND gcell.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.network_baseline \n (node, site, cellname, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1\n LEFT JOIN network_audit.network_baseline TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.cellname is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.network_baseline TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.cellname IS NULL\n )\n DELETE FROM 
network_audit.network_baseline t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.network_baseline TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.network_baseline AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def test_direct_nw_hamming_metric():\n \n seqs = ['CASSLDRGEVFF', # Seq1\n 'CASSLDRGEVFF', # Seq2 = Seq1 i.e., D(s1,s2) = 0\n 'CASSLDAGEVFF', # Seq3 = Seq1 (subsitition A for R) i.e., D(s1,s3) = 1\n 'CASSLRGEVFF'] # Seq4 = Seq1 (Delete D for R) i.e., D(s1,s4) = 1 and D(s3,s4) = 2\n \n result = pw.apply_pairwise_rect(metric = pw.metrics.nw_hamming_metric, seqs1 = seqs, uniqify= False, ncpus=1)\n expectation = np.array([[0, 0, 1, 1],[0, 0, 1, 1],[1, 1, 0, 2],[1, 1, 2, 0]])\n assert np.all(result == expectation )", "def generate_cap_reg_switch_inverter_machine_meas_message_9500():\n\n # Load up the capacitor data.\n caps = pd.read_csv(CAP_MEAS_9500)\n cap_mrids = caps['state_meas_mrid'].tolist()\n # Load up regulator data.\n regs = pd.read_csv(REG_MEAS_9500)\n reg_mrids = regs['pos_meas_mrid'].tolist()\n # Load up switch data.\n switches = pd.read_csv(SWITCH_MEAS_9500)\n switch_mrids = switches['state_meas_mrid'].tolist()\n # Load up inverter data.\n inverters = pd.read_csv(INVERTER_MEAS_9500)\n inverter_mrids = inverters['meas_mrid'].tolist()\n # Load up synchronous machine data.\n synch_mach = pd.read_csv(SYNCH_MACH_MEAS_9500)\n machine_mrids = synch_mach['meas_mrid'].tolist()\n\n # Initialize fn_mrid_list for a SimOutRouter.\n fn_mrid_list = [{'function': _dict_to_json, 'mrids': cap_mrids,\n 'kwargs': {'fname': CAP_MEAS_MSG_9500}},\n {'function': _dict_to_json, 'mrids': reg_mrids,\n 'kwargs': {'fname': REG_MEAS_MSG_9500}},\n {'function': _dict_to_json, 'mrids': switch_mrids,\n 'kwargs': {'fname': SWITCH_MEAS_MSG_9500}},\n {'function': 
_dict_to_json, 'mrids': inverter_mrids,\n 'kwargs': {'fname': INVERTER_MEAS_MSG_9500}},\n {'function': _dict_to_json, 'mrids': machine_mrids,\n 'kwargs': {'fname': SYNCH_MACH_MEAS_MSG_9500}}\n ]\n\n platform = gridappsd_platform.PlatformManager()\n starttime = datetime(2013, 1, 14, 16, 0)\n sim_id = platform.run_simulation(feeder_id=FEEDER_MRID_9500,\n start_time=starttime,\n duration=5, realtime=False)\n\n # Create a SimOutRouter to save the measurements.\n # noinspection PyUnusedLocal\n router = gridappsd_platform.SimOutRouter(platform_manager=platform,\n sim_id=sim_id,\n fn_mrid_list=fn_mrid_list)\n\n # Wait for simulation completion.\n platform.wait_for_simulation()", "def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc", "def _create_msg(self, tr_id, i_triples, i_type, r_triples, r_type, confirm):\n params = SSAP_UPDATE_PARAM_TEMPLATE % (str(i_type).upper(),\n str(i_triples),\n str(r_type).upper(),\n str(r_triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp", "def megamixSS(self):\n buf=''\n for i in range(self.L):\n if self.secondary['dssp'][i]=='H': buf+='H'\n elif self.secondary['dssp'][i]=='E': buf+='E'\n elif self.secondary['mix'][i]=='E': buf+='E'\n else: buf+='C'\n self.secondary['megamix']=buf\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import all model data using the loader.
def import_data(self):
    self.models = []
    for o in self.loader.load():
        klass = self.type_for(o)
        if hasattr(klass, "from_api"):
            self.models.append(klass.from_api(o))
        else:
            self.models.append(klass(o))
    return self.models
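For orientation, a minimal, self-contained sketch of the collaborators this method assumes (a loader exposing load() and a type_for() lookup). StubLoader, User, and Importer are illustrative names invented for this sketch; they are not part of the original code.

class StubLoader:
    def load(self):
        # Stands in for whatever raw records the real loader returns from the API.
        return [{"kind": "user", "id": 1}, {"kind": "user", "id": 2}]

class User:
    @classmethod
    def from_api(cls, record):
        obj = cls()
        obj.id = record["id"]
        return obj

class Importer:
    def __init__(self, loader):
        self.loader = loader

    def type_for(self, record):
        # The real class lookup is not shown in this entry; map everything to User here.
        return User

    def import_data(self):
        self.models = []
        for o in self.loader.load():
            klass = self.type_for(o)
            if hasattr(klass, "from_api"):
                self.models.append(klass.from_api(o))
            else:
                self.models.append(klass(o))
        return self.models

models = Importer(StubLoader()).import_data()
assert [m.id for m in models] == [1, 2]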
[ "def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)", "def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")", "def _load(self):\n save_name = os.path.join(\"../pickle\",self.name)+\".pkl\"\n with open(save_name,\"rb\") as f:\n models = pk.load(f)\n self.model1 = models[0]\n self.model2 = models[1]\n self.model3 = models[2]\n self.model4 = models[3]\n self.model5 = models[4]\n self.model6 = models[5]\n print(\"[_load] models loaded succesfully\")", "def load(self, *, models: Generator[_Model, None, None]):", "def loadParts(self):\n for i in range(15):\n self.model_parts[i] = loadModel(\"ato_{}.pkl\".format(str(i)))", "def load_models(self):\n print('=====================================================================')\n print('Loading the trained model:')\n self.gen_model = load_model(os.path.join(self.load_model_dir_path, self.gen_model_filename),\n custom_objects={'InstanceNormalization': InstanceNormalization,\n 'generator_loss': self.generator_loss})\n if os.path.isfile(os.path.join(self.load_model_dir_path, self.dis_model_filename)):\n self.dis_model = load_model(os.path.join(self.load_model_dir_path, self.dis_model_filename),\n custom_objects={'InstanceNormalization': InstanceNormalization})\n print(' done!')", "def _load_model(self):\n pass", "def load_data(self):\n self.dataset, self.info = DataLoader().load_data(self.config.data)\n self._preprocess_data()", "def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! 
they have side effects.\r\n brokerage.model", "def load_data_models(self, data_model_path):\n # Parse each fields and load data models\n json_list = [\n each for each in os.listdir(data_model_path) if each.endswith(\".json\")\n ]\n for each_json in json_list:\n yield DataModel(\n JSONSchema.parse_data_model(os.path.join(data_model_path, each_json))\n )", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def import_model_from_path(self, model, data_path):\n\n if os.path.exists(data_path):\n file_directory = os.listdir(data_path)\n else:\n logging.error(\"Path {} does not exist.\".format(data_path))\n raise Exception\n\n for file_name in file_directory:\n if file_name[0] != \".\":\n file_path = os.path.join(data_path, file_name)\n document_data = model.data_from_file(file_path)\n document = self.data_dict_to_doc(model, document_data)\n self.insert_document(model, document)\n return", "def _load_models(self, root, paths):\n\n if root:\n self.logger.debug(\"Models root: %s\" % root)\n else:\n self.logger.debug(\"Models root set to current directory\")\n\n for path in paths:\n self.logger.debug(\"Loading model %s\" % path)\n if root and not os.path.isabs(path):\n path = os.path.join(root, path)\n self.add_model(path)", "def init_data_loaders(self, model):\n params = self[\"data_loaders\"]\n data_loader_type = params[\"type\"]\n data_loader_args = params[\"args\"]\n\n data_loader_args[\"label_vocab\"] = model.output_vocab\n\n data_loaders = dict()\n for p in params[\"paths\"]:\n data_loader_args[\"corpus_path\"] = params[\"paths\"][p]\n data_loaders[p] = getattr(data_loaders_module, data_loader_type)(**data_loader_args)\n\n return data_loaders", "def _import_models(self):\n self.logger.debug(\n f'Importing SQLAlchemy models from {self.models_filepath}'\n )\n\n def _import_model_classes_from_file(filepath):\n \"\"\"\n Import the SQLAlchemy models from the Python module at `filepath`\n \"\"\"\n imported_model_classes = []\n mod = import_module_from_file(filepath)\n # NOTE - We cannot use\n # pfb_exporter.utils.import_subclass_from_module here because\n # we are unable to use issubclass to test if the SQLAlchemy model\n # class is a subclass of its parent\n # (sqlalchemy.ext.declarative.api.Base)\n # The best we can do is make sure the class is a SQLAlchemy object\n # and check that the object is a DeclarativeMeta type\n for cls_name, cls_path in inspect.getmembers(mod, inspect.isclass):\n cls = getattr(mod, cls_name)\n try:\n sqla_inspect(cls)\n except NoInspectionAvailable:\n # Not a SQLAlchemy object\n pass\n else:\n if type(cls) == DeclarativeMeta:\n imported_model_classes.append(cls)\n\n return imported_model_classes\n\n if (os.path.isfile(self.models_filepath) and\n os.path.splitext(self.models_filepath)[-1] == '.py'):\n filepaths = [self.models_filepath]\n else:\n filepaths = [\n os.path.join(root, fn)\n for root, dirs, files in os.walk(self.models_filepath)\n for fn in files\n if os.path.splitext(fn)[-1] == '.py'\n ]\n\n self.logger.debug(\n f'Found {len(filepaths)} Python modules:\\n{pformat(filepaths)}'\n )\n # Add the imported modules to a dict\n for fp in filepaths:\n classes = _import_model_classes_from_file(fp)\n for cls in classes:\n self.model_dict[cls.__name__] = cls\n\n self.logger.info(\n f'Imported {len(self.model_dict)} SQLAlchemy models:'\n f'\\n{pformat(list(self.model_dict.keys()))}'\n )", "def __load_data(self):\n 
print(\"loading training data...\")\n training_data = []\n files = glob('data/*.json')\n for file in files:\n print(\"loading\", file)\n with open(file) as data_file:\n training_data.append(json.load(data_file))\n return training_data", "def _load(self):\n if self.mode == 'local':\n lazy_readers = [open(filename, 'r') for filename in glob.glob(self.path)]\n self.data = itertools.chain(*lazy_readers) # Concatenate multiple generators\n else: # spark\n from pyspark.sql import SparkSession\n spark = SparkSession\\\n .builder\\\n .appName('Data_Parsing')\\\n .getOrCreate()\n sc = spark.sparkContext\n self.data = sc.textFile(self.path)", "def load_data(self):\n loader = DatasetAnnotationLoader(\n is_full=self.is_full,\n data_path=self.data_path,\n cache_path=self.cache_path,\n verbose=self.verbose\n )\n yield {\"train\": loader.load_trainval_data()}\n yield {\"train01\": loader.load_train_data()}\n yield {\"val01\": loader.load_val_data()}\n yield {\"test\": loader.load_test_data()}", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that GenBank parsing of an invalid product line raises ValueError.
def test_invalid_product_line_raises_value_error(self):
    def parse_invalid_product_line():
        rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'), 'genbank')
    self.assertRaises(ValueError, parse_invalid_product_line)
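Side note: the inner-function wrapper above is only needed for the callable form of assertRaises; with the context-manager form the same check reads more directly. A sketch assuming the same Biopython SeqIO call and fixture path (it only passes if the invalid_product.gb fixture is present):

import unittest
from os import path
from Bio import SeqIO

class InvalidProductTest(unittest.TestCase):
    def test_invalid_product_line_raises_value_error(self):
        # Equivalent check using assertRaises as a context manager.
        with self.assertRaises(ValueError):
            SeqIO.read(path.join('GenBank', 'invalid_product.gb'), 'genbank')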
[ "def test_malformed_line():\n \n header_parser = get_header()\n # Missing position\n variant_line = \"1\\t.\\tA\\tT\\t100\\tPASS\\tMQ=1\\tGT:GQ\\t0/1:60\\t\"\\\n \"0/1:60\\t1/1:60\"\n \n with pytest.raises(SyntaxError):\n variant = format_variant(\n line = variant_line, \n header_parser=header_parser, \n check_info=True\n )", "def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )", "def test_invalid_regref(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float q0 = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float array q4 =\\n\\t-0.1, 0.2\")", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_step_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot STEP \"hello there\" X temperature_mid\n \"\"\"\n\n # TODO make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_add_parser_quantity_fail(vasp_parser_without_parsing):\n parser, file_path = vasp_parser_without_parsing\n parser.add_parsable_quantity('quantity_with_alternatives', {\n 'inputs': [],\n 'prerequisites': [],\n })\n with pytest.raises(RuntimeError):\n parser.parse(retrieved_temporary_folder=file_path)", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def test_addr_zip_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))", "def test_team_reg_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_reg(val))", "def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n 
self.dtm1.validate_input('02')", "def test_invalid_aggregation():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date BY YEAR Y temperature_mid ARGMIN\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_build_output_bank_raise_value_error_invalid_bank(self):\n with pytest.raises(ValueError):\n self.app.rgb_driver.build_output_bank(\"C\")", "def test_split_report_bad_input(self):\n purchase_quantity = 1.0\n buy = transaction_utils.make_transaction(BUY, purchase_quantity, 0, 100.0)\n sell = transaction_utils.make_transaction(SELL, 2.0, 0, 100.0)\n with self.assertRaises(AssertionError):\n basis._split_report(\n buy, purchase_quantity, sell\n ) # Should not split the basis coin, quantity matches", "def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)", "def test_separate_good_bad_data(self):\n self.c = CmdFunction()\n self.c.processor.validator.set_raw_data([\"T109,M,74,861,-,22\"])\n self.c.processor.validator.parse_data()\n self.c.processor.database.add_people(self.c.processor.validator.export_good_data())\n self.assertTrue(self.c.processor.validator.export_good_data() == {})\n self.assertTrue(self.c.processor.validator.export_bad_data() == [\"T109,M,74,861,-,22\"])", "def test_add_parser_quantity_fail(vasp_parser_without_parsing):\n parser, path = vasp_parser_without_parsing\n parser.add_parsable_quantity('quantity_with_alternatives', {\n 'inputs': [],\n 'prerequisites': [],\n })\n with pytest.raises(RuntimeError):\n parser.parse(retrieved_temporary_folder=path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
\b
Lists all your published apps.
$ 21 publish list

Results from the list command are paginated.
Use 'n' to move to the next page and 'p' to move to the previous page.
You can view detailed admin information about an app by specifying its id at the prompt.
def list(ctx):  # pylint: disable=redefined-builtin
    _list_apps(ctx.obj['config'], ctx.obj['client'])
[ "def cli_list(ctx):\n try:\n r = api.apps_get()\n pprint(r)\n except ApiException as e:\n print(\"Exception when calling AppsApi->apps_get: %s\\n\", e)", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def list_app():\n response = dict(\n status_code=200, message=\"List running applications\", data=[]\n )\n if not submitter.app_list.keys():\n response[\"message\"] = \"There are no running applications\"\n response[\"status_code\"] = 200\n return jsonify(response)\n\n for key, value in submitter.app_list.items():\n # if dryrun:\n # response[\"message\"]=\"Application {} deployed in DRY-RUN mode\".format(key)\n response[\"data\"].append(\n dict(\n type=\"application\",\n id=key,\n outputs=value.get(\"output\"),\n components=value.get(\"components\"),\n dryrun=value.get(\"dry_run\"),\n )\n )\n return jsonify(response)", "def view_all_apps(self):\n return self.db.apps_by_num", "def list(self):\n\n return self._list(self._path(), 'apps')", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def get_app() -> List:\n\n apps = [] # empty list to hold the command in variables\n if len(sys.argv) > 1:\n apps = sys.argv[1:]\n else:\n print('No application stated')\n return apps # return the list of all app", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in 
app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def app_list(request):\n\n apps = App.objects.all()\n context = {\"app_list\": apps}\n \n return render(request, 'home.html', context)", "async def app_list(self) -> Mapping[str, Any]:\n return await self._send_command(\"FetchLaunchableApplicationsEvent\", {})", "def listapps(parser):\n\n print('Function List')\n subparsers_actions = [\n # pylint: disable=protected-access\n action for action in parser._actions\n # pylint: disable=W0212\n if isinstance(action, argparse._SubParsersAction)]\n # there will probably only be one subparser_action,\n # but better safe than sorry\n for subparsers_action in subparsers_actions:\n # get all subparsers and print help\n for choice, subparser in subparsers_action.choices.items():\n print(\"Function: '{}'\".format(choice))\n print(subparser.format_help())\n # print(parser.format_help())", "def _get_apps(self):\n return self.api.get('/v2/apps')", "def show(ctx, appeui):\n if '.' 
in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def list_apps( state, active_threads ):\n\n command_str = \"\"\"\n Active Apps:\n \"\"\"\n\n for i in range(0, len(active_threads)):\n app_name = active_threads[i].name\n command_str += '%s: %s\\n' % (i+1, app_name)\n\n\n command_str = format_cmd_prompt(command_str)\n\n print( command_str)\n\n # set the state to return to the main menu\n state = 0\n\n return state", "def list_applications(self):\n print self.applications.keys()\n return list(self.applications.keys())", "def listApplications(self, pattern=\"*\"):\n raise dbsException.MethodNotImplemented(args=\"This method should be overridden in the derived DBS API class.\")", "def get_public_apps(self):\n # https://api.relayr.io/apps\n url = '{0}/apps'.format(self.host)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
\b
Removes a published app from the Marketplace.
$ 21 publish remove [yes] {app_id}

\b
Removes all published apps from the Marketplace.
$ 21 publish remove [yes] all

\b
def remove(ctx, app_id, all, assume_yes):
    if all and not app_id:
        for _app_id in _get_all_app_ids(ctx.obj['config'], ctx.obj['client']):
            _delete_app(ctx.obj['config'], ctx.obj['client'], _app_id, assume_yes)
    elif app_id and not all:
        _delete_app(ctx.obj['config'], ctx.obj['client'], app_id, assume_yes)
    else:
        logger.info(ctx.command.get_help(ctx))
        sys.exit(1)
[ "def remove():\n run('pew rm {0}'.format(package_name()))", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def remove_app(self):\n \n pass", "def delete_app(AppId=None):\n pass", "def del_editable_app(self, request):\n app = request.GET.get(\"app\", \"\")\n if not app:\n return Response(\"ok\")\n\n obj = AccessManagerConfig.objects.get(type=\"editable\")\n apps = obj.names\n try:\n apps = apps.split(\",\")\n apps = set(apps)\n apps.remove(app)\n obj.names = \",\".join(apps)\n obj.save()\n return Response(apps)\n except Exception as e:\n return Response(str(e))", "def delete_app(self, name):\n raise NotImplementedError", "def remove_hero(apps, schema_editor):\n pass", "def delete_feed(self,reg,feed): \n p = get_podcasts_region(reg)\n \n if p.exists(feed,4):\n click(p.getLastMatch())\n type(Key.DELETE)\n remove_confirm(self,reg,\"remove\")", "async def remove_apps(token: str):\n async with aiohttp.ClientSession() as session:\n api = SmartThings(session, token)\n\n apps = await api.apps()\n installed_apps = await api.installed_apps()\n\n for app in apps:\n if not app.app_name.startswith('homeassistant.'):\n continue\n # Remove installed apps first\n for installed_app in installed_apps:\n if installed_app.app_id == app.app_id:\n await api.delete_installed_app(\n installed_app.installed_app_id)\n print(\"Removed installed app '{}' ({})\".format(\n installed_app.display_name,\n installed_app.installed_app_id))\n # Remove the app itself\n await api.delete_app(app.app_id)\n print(\"Removed app '{}' ({})\".format(app.app_name, app.app_id))", "def delete(self, application_id):", "def test_remove(self):\n result = self.env.run('phonebook ' + \\\n ('remove \"Mary Anderson\" ') + \\\n ('-b %s/phonebook_fixture.pb' % self.prefix))\n expected_output = \"Removed Mary Anderson from %s/phonebook_fixture.pb.\" % self.prefix\n nose.tools.assert_in(expected_output, result.stdout)\n self.assert_not_added([\"Mary Anderson\"])", "def example_untag1():\n\n # Define app_id and secret\n my_app_id = 'my_app_id'\n my_secret = 'my_secret'\n # Create a Pushbots instance\n pushbots = Pushbots(app_id=my_app_id, secret=my_secret)\n # Define alias, tag and platform\n alias = 'alias_to_remove_tag'\n tag = 'tag_to_be_removed'\n platform = Pushbots.PLATFORM_ANDROID\n code, message = pushbots.untag(platform=platform, alias=alias, tag=tag)\n print('Returned code: {0}'.format(code))\n print('Returned message: {0}'.format(message))", "def remove_app(self, app_name):\n self.remove_list_setting('applications', 'installed_apps',\n app_name)", "def remove_actions(self, arg):\n req = req_from_anything(arg)\n assert req.name\n index = dict(self.ec.collections[0].query(**req.as_dict()))\n if len(index) == 0:\n raise EnpkgError(\"package %s not installed in: %r\" %\n (req, self.prefixes[0]))\n if len(index) > 1:\n assert self.hook\n versions = ['%(version)s-%(build)d' % d\n for d in index.itervalues()]\n raise EnpkgError(\"package %s installed more than once: %s\" %\n (req.name, ', '.join(versions)))\n return [('remove', index.keys()[0])]", "def delete_robot_application(application=None, applicationVersion=None):\n pass", "def _monit_remove():\n print(\"removing Monit\")", "def test_unpublish_app_from_a_project(default_domino_client):\n response = default_domino_client.app_unpublish()\n assert response.status_code == 200, f\"{response.status_code}: {response.reason}\"", "def uninstallWorkflows(self, package, out):\n\n ##code-section workflow-uninstall #fill in 
your manual code here\n ##/code-section workflow-uninstall\n\n pass", "def remove(self, apps, groups=None):\n if not apps:\n self.log.error(\"nothing to remove\")\n return\n\n if isinstance(apps, (str, unicode)):\n # not pythonic, but better than adding each chr of the string\n apps = (apps,)\n elif isinstance(apps, AppList):\n # not pythonic, but better than adding each chr of the string\n apps = apps.names\n\n if not groups:\n groups = self.groups()\n\n self.log.debug(u\"removing: %r from %r\", apps, groups)\n for group in groups:\n current = set(self.config.get(group, []))\n self.log.debug(\"current: %r\", current)\n modified = current - set(apps)\n self.log.debug(\"modified: %r\", modified)\n self.config.update({group: list(modified)})\n self._record = self.config.read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
\b
Publishes an app to 21 Marketplace.
$ 21 publish submit path_to_manifest/manifest.yaml

The contents of the manifest file should follow the guidelines specified at
def submit(ctx, manifest_path, marketplace, skip, parameters):
    if parameters is not None:
        try:
            parameters = _parse_parameters(parameters)
        except:
            logger.error(
                "Manifest parameter overrides should be in the form 'key1=\"value1\" "
                "key2=\"value2\".",
                fg="red")
            return

    _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)
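The _parse_parameters helper is not included in this entry; below is only a plausible sketch of how an override string like key1="value1" key2="value2" could be turned into a dict. The name, signature, and behavior are assumptions for illustration, not the original implementation.

import shlex

def _parse_parameters(parameters):
    # Hypothetical sketch: split on shell-style whitespace (respecting quotes),
    # then split each token into key and value at the first '='.
    parsed = {}
    for token in shlex.split(parameters):
        key, value = token.split('=', 1)
        parsed[key] = value
    return parsed

assert _parse_parameters('key1="value1" key2="value2"') == {"key1": "value1", "key2": "value2"}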
[ "def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"support@21.co.\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. 
\"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()", "def deploy():\n docs()\n cmd(\"google_appengine/appcfg.py update app\")", "def deploy():\n cmd(\"google_appengine/appcfg.py update app\")", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def deploy(app, message, hard):\n cli.user()\n\n payload = test.compile_app(app, False) # Also adds a spinner.\n\n if payload is None:\n sys.exit(1) # Error already printed by compile_app.\n\n num_stories = len(payload['stories'])\n if num_stories == 0:\n click.echo(\n click.style('No stories were found for your app.', fg='red'),\n err=True\n )\n click.echo()\n click.echo('You can write an example story using:', err=True)\n cli.print_command('story template http > http.story')\n\n sys.exit(1)\n\n echo_with_tick(f\"Compiled {num_stories} {pluralise('story', num_stories)}\")\n click.echo()\n\n click.echo(click.style(f'Deploying app {app}... 
', bold=True), nl=False)\n\n with spinner():\n config = Config.get(app)\n release = Releases.create(config, payload, app, message, hard)\n\n url = f'https://{app}.storyscriptapp.com/'\n click.echo()\n echo_with_tick(f'Version {release[\"id\"]} of your app'\n ' has been queued for deployment.')\n click.echo()\n\n click.echo(click.style('Waiting for deployment to complete… ',\n bold=True), nl=False)\n with spinner():\n if Apps.maintenance(app, maintenance=None):\n click.echo()\n click.echo()\n click.echo(\n 'Your app is in maintenance mode.\\n'\n 'Run the following to turn off it off:'\n )\n cli.print_command('story maintenance off')\n click.echo()\n click.echo(\n 'Once maintenance mode is turned off, '\n 'your app will be deployed immediately.'\n )\n return\n\n state = 'QUEUED'\n while state in ['DEPLOYING', 'QUEUED']:\n state = Releases.get(app)[0]['state']\n time.sleep(0.5)\n\n click.echo()\n if state == 'DEPLOYED':\n echo_with_tick(f'Configured '\n f\"{num_stories} {pluralise('story', num_stories)}\")\n for s in payload['stories']:\n click.echo(' - ' + s)\n\n num_services = len(payload['services'])\n echo_with_tick(f'Deployed '\n f\"{num_services} {pluralise('service', num_services)}\")\n for s in payload['services']:\n click.echo(' - ' + s)\n\n echo_with_tick(f'Created ingress route')\n echo_with_tick('Configured logging')\n echo_with_tick('Configured health checks')\n echo_with_tick('Deployment successful!')\n click.echo()\n click.echo('To see your app\\'s logs, please run:\\n story logs -f')\n click.echo()\n click.echo(\n f'If your Story responds to HTTP requests, please visit:\\n {url}'\n )\n elif state == 'FAILED':\n click.echo(\n click.style('X', fg='red') + ' Deployment failed!', err=True\n )\n click.echo(\n 'Please use the following command to view your app\\'s logs:',\n err=True,\n )\n cli.print_command('story logs')\n elif state == 'TEMP_DEPLOYMENT_FAILURE':\n click.echo(\n click.style('X', fg='red') + ' Deployment failed!', err=True\n )\n click.echo(\n 'An internal error occurred.\\n'\n 'The Storyscript team has been notified.\\n'\n 'Please visit https://status.storyscript.io/ '\n 'for incident reports and updates.',\n err=True,\n )\n else:\n click.echo(\n f'An unhandled state of your app has been encountered - {state}',\n err=True,\n )\n click.echo(f'Please shoot an email to support@storyscript.io')", "def apk(config, name, version, groups):\n for group in groups:\n if config.verbose:\n click.echo('Deploying {}:{}...'.format(name, version))\n if not config.mason.deploy(\"apk\", name, version, group, config.push):\n exit('Unable to deploy item')", "def _publish():\n modlog.info(\"publishing release...\")\n\n try:\n result = subprocess.check_output([\"python\", \"setup.py\", \"bdist_wheel\", \"upload\"],\n stderr=subprocess.STDOUT, universal_newlines=True)\n except subprocess.CalledProcessError as err:\n modlog.error(\"Failed to publish new PyJen release ({0})\".format(err.returncode))\n modlog.error(err.output)\n exit(1)\n modlog.debug(result)\n\n # todo: after the publish completes, auto-update the version number\n # todo: lay tag on release\n modlog.info(\"release published successfully\")", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def publish():\n 
fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def _publish_app(self, app: AppConfig, channel: str, **options) -> dict | None:\n # TODO: Verify the app has been packaged\n state = None\n self.verify_app(app)\n\n state = self.publish_app(app, channel=channel, **full_options(state, options))\n\n return state", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def publish(session):\n print(\"REMINDER: Has the changelog been updated?\")\n session.run(\"rm\", \"-rf\", \"dist\", \"build\", external=True)\n publish_deps = [\"setuptools\", \"wheel\", \"twine\"]\n session.install(*publish_deps)\n session.run(\"make\", \"build_frontend\", external=True)\n session.run(\"python\", \"setup.py\", \"--quiet\", \"sdist\", \"bdist_wheel\")\n session.run(\"python\", \"-m\", \"twine\", \"upload\", \"dist/*\")\n publish_docs(session)\n publish_static_webapp(session)", "def deploy():\n print('Deploying to pypi...')\n subprocess.check_call(['bash', 'tools/deploy.sh'])\n print('Done!')\n\n print('Deploying to npm...')\n otp = input('Please provide 2FA OTP for NPM: ')\n subprocess.check_call(['npm', 'publish', '--otp', otp])\n print('Done!')", "def deploy():\n local('appcfg.py --no_cookies --email=mccutchen@gmail.com update .',\n capture=False)", "def test_publish_app_from_a_project(default_domino_client):\n response = default_domino_client.app_publish()\n assert response.status_code == 200, f\"{response.status_code}: {response.reason}\"", "def deploy(fingerengine, fingerprint):\r\n\r\n war_file = abspath(fingerengine.options.deploy)\r\n war_name = parse_war_path(war_file)\r\n war_raw = war_file.rsplit('/', 1)[1]\r\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\r\n\r\n base = \"http://{0}:{1}/management\".format(fingerengine.options.ip,\r\n fingerprint.port)\r\n add_content = \"/add-content\"\r\n association = '{{\"address\":[{{\"deployment\":\"{0}\"}}],\"operation\":\"add\",'\\\r\n '\"runtime-name\":\"{2}\",\"content\":[{{\"hash\":{{\"BYTES_VALUE\"'\\\r\n ':\"{1}\"}}}}],\"name\":\"{0}\"}}'\r\n deploy = '{{\"operation\":\"deploy\", \"address\":{{\"deployment\":\"{0}\"}}}}'\r\n headers = {\"Content-Type\":\"application/json\"}\r\n\r\n try:\r\n fwar = {war_file : open(war_file, \"r\").read()}\r\n except:\r\n utility.Msg(\"Failed to open WAR (%s)\" % war_file, LOG.ERROR)\r\n return\r\n\r\n # first we POST the WAR to add-content\r\n response = utility.requests_post(base + add_content, files=fwar)\r\n if response.status_code == 401:\r\n response = redo_auth(fingerengine, fingerprint, base + add_content, \r\n files=fwar)\r\n\r\n if response.status_code != 200:\r\n utility.Msg(\"Failed to POST data (HTTP %d)\" % 
response.status_code, LOG.ERROR)\r\n return\r\n\r\n # fetch our BYTES_VALUE\r\n if response.json()['outcome'] != 'success':\r\n utility.Msg(\"Failed to POST data\", LOG.ERROR)\r\n utility.Msg(response.json(), LOG.DEBUG)\r\n return\r\n\r\n BYTES_VALUE = response.json()['result']['BYTES_VALUE']\r\n\r\n # now we need to associate the bytes with a name\r\n response = utility.requests_post(base, \r\n data=association.format(war_name, BYTES_VALUE, war_raw),\r\n headers=headers)\r\n\r\n if response.status_code == 401:\r\n response = redo_auth(fingerengine, fingerprint, base,\r\n data=association.format(war_name, BYTES_VALUE, war_raw),\r\n headers=headers)\r\n\r\n if response.status_code != 200:\r\n utility.Msg(\"Failed to associate content (HTTP %d)\" % response.status_code, LOG.ERROR)\r\n utility.Msg(response.content, LOG.DEBUG)\r\n return\r\n\r\n # now enable the WAR\r\n deploy = deploy.format(war_name)\r\n\r\n response = utility.requests_post(base, data=deploy, headers=headers)\r\n if response.status_code == 401:\r\n response = redo_auth(fingerengine, fingerprint, base, data=deploy,\r\n headers=headers)\r\n \r\n if response.status_code != 200:\r\n utility.Msg(\"Failed to enable WAR (HTTP %d)\" % response.status_code, LOG.ERROR)\r\n utility.Msg(response.content, LOG.DEBUG)\r\n return\r\n\r\n utility.Msg(\"%s deployed to %s.\" % (war_file, fingerengine.options.ip), \r\n LOG.SUCCESS)", "def test_publish_deployment_run(self):\n pass", "def publish(self, consumer_name, version, pact_dir=None,\n tag_with_git_branch=None, consumer_tags=None, branch=None, build_url=None, auto_detect_version_properties=None):\n if self.broker_base_url is None \\\n and \"PACT_BROKER_BASE_URL\" not in os.environ:\n raise RuntimeError(\"No pact broker URL specified. \"\n + \"Did you expect the PACT_BROKER_BASE_URL \"\n + \"environment variable to be set?\")\n\n pact_files = fnmatch.filter(\n os.listdir(pact_dir),\n self._normalize_consumer_name(consumer_name) + '*.json'\n )\n pact_files = list(map(lambda pact_file: f'{pact_dir}/{pact_file}', pact_files))\n command = [\n BROKER_CLIENT_PATH,\n 'publish',\n '--consumer-app-version={}'.format(version)]\n\n command.append('--broker-base-url={}'.format(self._get_broker_base_url()))\n\n if self.broker_username is not None:\n command.append('--broker-username={}'.format(self.broker_username))\n if self.broker_password is not None:\n command.append('--broker-password={}'.format(self.broker_password))\n if self.broker_token is not None:\n command.append('--broker-token={}'.format(self.broker_token))\n\n command.extend(pact_files)\n\n if tag_with_git_branch:\n command.append('--tag-with-git-branch')\n\n if consumer_tags is not None:\n for tag in consumer_tags:\n command.extend(['-t', tag])\n\n if branch:\n command.extend(['--branch={}'.format(branch)])\n\n if build_url:\n command.extend(['--build-url={}'.format(build_url)])\n\n if auto_detect_version_properties is True:\n command.append('--auto-detect-version-properties')\n\n log.debug(f\"PactBroker publish command: {command}\")\n\n publish_process = Popen(command)\n publish_process.wait()\n if publish_process.returncode != 0:\n url = self._get_broker_base_url()\n raise RuntimeError(\n f\"There was an error while publishing to the pact broker at {url}.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a parameters string and returns a dict of overrides. This function assumes that the parameters string is in the form 'key1="value1" key2="value2"'. Use of single quotes is optional but is helpful for strings that contain spaces.
def _parse_parameters(parameters):
    if not re.match(r'^(\w+)="([^=]+)"(\s{1}(\w+)="([^=]+)")*$', parameters):
        raise ValueError

    # first we add tokens that separate key/value pairs.
    # in case of key="ss sss ss", we skip tokenizing when we see the first quote
    # and resume when we see the second
    replace_space = True
    tokenized = ""
    for c in parameters:
        if c == '\"':
            replace_space = not replace_space
        elif c == ' ' and replace_space:
            tokenized += "$$"
        else:
            tokenized += c

    # now get the tokens
    tokens = tokenized.split('$$')
    result = {}
    for token in tokens:
        # separate key/values
        key_value = token.split("=")
        result[key_value[0]] = key_value[1]
    return result
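A minimal usage sketch of the parser above (the key/value strings are hypothetical; `re` is assumed to be imported in the module that defines `_parse_parameters`):

overrides = _parse_parameters('key1="value1" key2="value 2"')
# The space inside the second value is preserved because tokenization is
# suspended between quote characters, so the result is:
#   {"key1": "value1", "key2": "value 2"}
assert overrides == {"key1": "value1", "key2": "value 2"}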
[ "def parse_input_args(input_str: str):\n output_dict = {}\n if not input_str:\n raise ValueError(\"Empty input string: {}\".format(input_str))\n\n key_pairs: list = input_str.split(\",\")\n\n key_pairs = [x.strip() for x in key_pairs]\n\n if not key_pairs:\n raise ValueError(\"Incorrect format: {}\".format(input_str))\n\n for each_key in key_pairs:\n try:\n key, value = each_key.split(\"=\")\n except ValueError as value_error:\n raise ValueError(\"Expected input format \"\n \"'key1=value1, key2=value2' \"\n \"but received {}\".format(input_str)) \\\n from value_error\n if value.isdigit():\n value = int(value)\n output_dict[key] = value\n\n return output_dict", "def parse_parameters(self, input_string):\n parameters = {}\n p1 = input_string.find('?')\n if p1 >= 0:\n split_parameters = input_string[p1 + 1:].split('&')\n for name_value_pair in split_parameters:\n # xbmc.log(\"parseParameter detected Value: \" + str(name_value_pair))\n if (len(name_value_pair) > 0) & ('=' in name_value_pair):\n pair = name_value_pair.split('=')\n key = pair[0]\n value = self.decode(unquote_plus(pair[1]))\n parameters[key] = value\n return parameters", "def _parse_params( self ):\n paramDic={}\n # Parameters are on the 3rd arg passed to the script\n paramStr=sys.argv[2]\n print paramStr\n if len(paramStr)>1:\n paramStr = paramStr.replace('?','')\n \n # Ignore last char if it is a '/'\n if (paramStr[len(paramStr)-1]=='/'):\n paramStr=paramStr[0:len(paramStr)-2]\n \n # Processing each parameter splited on '&' \n for param in paramStr.split(\"&\"):\n try:\n # Spliting couple key/value\n key,value=param.split(\"=\")\n except:\n key=param\n value=\"\"\n \n key = urllib.unquote_plus(key)\n value = urllib.unquote_plus(value)\n \n # Filling dictionnary\n paramDic[key]=value\n print paramDic\n return paramDic", "def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")", "def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict", "def parse_params(params):\n pairs = params.split(' ')\n content = dict()\n for key, value in [pair.split('=') for pair in pairs]:\n content[key] = int(value)\n return content", "def __parse_options_dict(options_str):\n # type: (str) -> Dict[str, str]\n opts = options_str.split('&') # type: List[str]\n res = {} # Type: Dict\n\n for opt in opts:\n key, value = opt.split('=') # type: List[str, str]\n res[key] = value # type: str\n\n return res", "def string_to_dict(string):\n if string:\n pairs = [s.strip() for s in string.split(\",\")]\n return dict(pair.split(\"=\") for pair in pairs)", "def parse_function_params(params: Text) -> Dict:\n function_meta = {\"args\": [], \"kwargs\": {}}\n\n params_str = params.strip()\n if params_str == \"\":\n return function_meta\n\n args_list = params_str.split(\",\")\n for arg in args_list:\n arg = arg.strip()\n if \"=\" in arg:\n key, value = arg.split(\"=\")\n function_meta[\"kwargs\"][key.strip()] = parse_string_value(value.strip())\n else:\n function_meta[\"args\"].append(parse_string_value(arg))\n\n return function_meta", "def split_params(param_string):\n\t#TODO: check for negatives 
i.e. alpha--1\n\tparts = param_string.split('_')\n\tparams = {}\n\n\tfor i in range(len(parts)):\n\t\tparam = split_items(parts[i])\n\t\tif len(param) < 2:\n\t\t\ttry:\n\t\t\t\tparts[i+1] = parts[i] + \"_\" + parts[i+1]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\telif len(param) == 2:\n\t\t\tparams[param[0]] = param[1]\n\t\telif len(param) == 3 and len(param[1]) == 0:\n\t\t\tparams[param[0]] = -param[2]\n\t\telse:\n\t\t\tparams[param[0]] = param[1:]\n\treturn params", "def parse_param(seed):\n dict_out = {}\n # Compile seperator\n spliter = re.compile(r\"[ :=]+\")\n with open(seed + \".param\") as seedfile:\n for line in seedfile:\n if \"#\" in line:\n continue\n # Forcing lower case\n pair = line.strip()\n # Split and filter out empty strings\n pair = list(filter(None, spliter.split(pair, 1)))\n if pair:\n dict_out.update({pair[0].lower(): pair[1]})\n return dict_out", "def _prepare_param_dict_from_filter(filterstr: str) -> Dict[str, str]:\n get_params = {}\n for filter_param in re.split(r\"[\\?\\&]+\", filterstr):\n if filter_param:\n attr = filter_param.split(\"=\")[0]\n val = filter_param.split(\"=\")[1]\n get_params[attr] = val\n return get_params", "def parse_key_value_string(kv_str: str) -> t.Dict:\n lexer = shlex(kv_str, posix=True)\n lexer.whitespace = \",\"\n lexer.wordchars += \"=\"\n return dict(word.split(sep=\"=\", maxsplit=1) for word in lexer)", "def _define_params_from_string(self, params_string):\n params_list = params_string.split()\n self.token_name = params_list[0]\n self.df = params_list[1]\n self.tf = params_list[2]\n self.doc_dict = self._create_doc_dict_from_string(params_string[3:])", "def parse_attributes( string ):\n result = {}\n parts = string.split( \";\" )\n for part in parts:\n keyValue = part.split( \"=\" )\n result[keyValue[0]] = keyValue[1]\n return result", "def parse_config_string(config_string, issue_warnings=True):\n config_dict = {}\n my_splitter = shlex.shlex(config_string, posix=True)\n my_splitter.whitespace = ','\n my_splitter.whitespace_split = True\n for kv_pair in my_splitter:\n kv_pair = kv_pair.strip()\n if not kv_pair:\n continue\n kv_tuple = kv_pair.split('=', 1)\n if len(kv_tuple) == 1:\n if issue_warnings:\n MsafConfigWarning.warn(\n (\"Config key '%s' has no value, ignoring it\" %\n kv_tuple[0]), stacklevel=1)\n else:\n k, v = kv_tuple\n # subsequent values for k will override earlier ones\n config_dict[k] = v\n return config_dict", "def parse_query_string(query_string: str) -> dict:\n _query_dict = {\n _each.split('=')[0]: _each.split('=')[1] for _each in query_string.split('&') if\n _each and len(_each.split('=')) > 1\n }\n return _query_dict", "def _parseOptions(self, optionsString):\n\n options = dict()\n pairs = optionsString.split(\";\")\n for pair in pairs:\n if not pair or \"=\" not in pair:\n continue\n\n key, value = pair.split(\"=\")\n options[key] = int(value)\n\n return options", "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publishes application by uploading the manifest to the given marketplace
def _publish(client, manifest_path, marketplace, skip, overrides):
    try:
        manifest_json = check_app_manifest(manifest_path, overrides, marketplace)
        app_url = "{}://{}".format(manifest_json["schemes"][0], manifest_json["host"])
        app_ip = urlparse(app_url).hostname

        if not skip:
            address = get_zerotier_address(marketplace)

            if address != app_ip:
                wrong_ip = click.style("It seems that the IP address that you put in your manifest file (") +\
                           click.style("{}", bold=True) +\
                           click.style(") is different than your current 21market IP (") +\
                           click.style("{}", bold=True) +\
                           click.style(")\nAre you sure you want to continue publishing with ") +\
                           click.style("{}", bold=True) +\
                           click.style("?")
                if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):
                    switch_host = click.style("Please edit ") +\
                                  click.style("{}", bold=True) +\
                                  click.style(" and replace ") +\
                                  click.style("{}", bold=True) +\
                                  click.style(" with ") +\
                                  click.style("[{}].", bold=True)
                    logger.info(switch_host.format(manifest_path, app_ip, address))
                    return

    except exceptions.ValidationError as ex:
        # catches and re-raises the same exception to enhance the error message
        publish_docs_url = click.style("https://21.co/learn/21-publish/", bold=True)
        publish_instructions = "For instructions on publishing your app, please refer to {}".format(publish_docs_url)
        raise exceptions.ValidationError(
            "The following error occurred while reading your manifest file at {}:\n{}\n\n{}"
            .format(manifest_path, ex.args[0], publish_instructions),
            json=ex.json)

    app_name = manifest_json["info"]["title"]
    app_endpoint = "{}://{}{}".format(manifest_json["schemes"][0],
                                      manifest_json["host"],
                                      manifest_json["basePath"])

    logger.info(
        (click.style("Publishing {} at ") + click.style("{}", bold=True) + click.style(" to {}."))
        .format(app_name, app_endpoint, marketplace))
    payload = {"manifest": manifest_json, "marketplace": marketplace}
    try:
        response = client.publish(payload)
    except ServerRequestError as e:
        if e.status_code == 403 and e.data.get("error") == "TO600":
            logger.info(
                "The endpoint {} specified in your manifest has already been registered in "
                "the marketplace by another user.\nPlease check your manifest file and make "
                "sure your 'host' field is correct.\nIf the problem persists please contact "
                "support@21.co.".format(app_endpoint), fg="red")
            return
        else:
            raise e

    if response.status_code == 201:
        response_data = response.json()
        mkt_url = response_data['mkt_url']
        permalink = response_data['permalink']
        logger.info(
            click.style(
                "\n"
                "You have successfully published {} to {}. "
                "You should be able to view the listing within a few minutes at {}\n\n"
                "Users will be able to purchase it, using 21 buy, at {} ",
                fg="magenta")
            .format(app_name, marketplace, permalink, mkt_url)
        )
[ "def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n logger.error(\n \"Manifest parameter overrides should be in the form 'key1=\\\"value1\\\" \"\n \"key2=\\\"value2\\\".\",\n fg=\"red\")\n return\n\n _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()", "def apk(config, name, version, groups):\n for group in groups:\n if config.verbose:\n click.echo('Deploying {}:{}...'.format(name, version))\n if not config.mason.deploy(\"apk\", name, version, group, config.push):\n exit('Unable to deploy item')", "def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package", "def deploy():\n cmd(\"google_appengine/appcfg.py update app\")", "def deploy():\n docs()\n cmd(\"google_appengine/appcfg.py update app\")", "def upload_manifests(self, host_id, ks_cf=None, manifest_files=None):\n pass", "def upload_package(package):\n print \"Uploading:\", package, \"to binstar\"\n\n bld_dir = find_conda_build_dir()\n # find package filename:\n package_files = [name for name in os.listdir(bld_dir) if name.startswith(package)]\n package_files.sort()\n package_file = package_files[-1] # should get the latest version\n \n package_path = os.path.join(bld_dir, package_file)\n print package_path\n try:\n print \"Uploading %s to binstar\"%package\n DESCRIPTION = \"Package: %s, auto-generated from PyPI\"\n cmds = [\"binstar\", \"upload\",\n \"-d\", \"DESCRIPTION\",\n \"--user\", BINSTAR_USER,\n package_path]\n result = check_output(cmds,\n stderr=STDOUT)\n except CalledProcessError as err:\n if \"Distribution already exists\" in err.output:\n print \"Package: %s already is binstar. 
Delete it if you want the new copy uploaded\"%package", "def _publish_app(self, app: AppConfig, channel: str, **options) -> dict | None:\n # TODO: Verify the app has been packaged\n state = None\n self.verify_app(app)\n\n state = self.publish_app(app, channel=channel, **full_options(state, options))\n\n return state", "def install_app(self, pbz_path, launch_on_install=True):\n\n\t\tbundle = PebbleBundle(pbz_path)\n\t\tif not bundle.is_app_bundle():\n\t\t\traise PebbleError(self.id, \"This is not an app bundle\")\n\t\tapp_metadata = bundle.get_app_metadata()\n\n\t\tbinary = bundle.zip.read(bundle.get_application_info()['name'])\n\t\tif bundle.has_resources():\n\t\t\tresources = bundle.zip.read(bundle.get_resources_info()['name'])\n\t\telse:\n\t\t\tresources = None\n\n\t\tapps = self.get_appbank_status()\n\n\t\tif not apps:\n\t\t\traise PebbleError(self.id, \"could not obtain app list; try again\")\n\n\t\tfirst_free = 1\n\t\tfor app in apps[\"apps\"]:\n\t\t\tif app[\"index\"] == first_free:\n\t\t\t\tfirst_free += 1\n\t\tif first_free == apps[\"banks\"]:\n\t\t\traise PebbleError(self.id, \"All %d app banks are full\" % apps[\"banks\"])\n\t\tlog.debug(\"Attempting to add app to bank %d of %d\" % (first_free, apps[\"banks\"]))\n\n\t\tclient = PutBytesClient(self, first_free, \"BINARY\", binary)\n\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\tclient.init()\n\t\twhile not client._done and not client._error:\n\t\t\tpass\n\t\tif client._error:\n\t\t\traise PebbleError(self.id, \"Failed to send application binary %s/pebble-app.bin\" % pbz_path)\n\n\t\tif resources:\n\t\t\tclient = PutBytesClient(self, first_free, \"RESOURCES\", resources)\n\t\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\t\tclient.init()\n\t\t\twhile not client._done and not client._error:\n\t\t\t\tpass\n\t\t\tif client._error:\n\t\t\t\traise PebbleError(self.id, \"Failed to send application resources %s/app_resources.pbpack\" % pbz_path)\n\n\t\ttime.sleep(2)\n\t\tself._add_app(first_free)\n\t\ttime.sleep(2)\n\n\t\tif launch_on_install:\n\t\t\tself.launcher_message(app_metadata['uuid'].bytes, \"RUNNING\", uuid_is_string=False)", "def add_manifest(self, manifest, architecture, os):\n assert manifest.media_type in ALLOWED_MEDIA_TYPES\n self.add_manifest_digest(\n manifest.digest,\n len(manifest.bytes.as_encoded_str()),\n manifest.media_type,\n architecture,\n os,\n )", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if 
len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def perform_app_upload(self, context, rpc_app, tarfile):\n self._app.perform_app_upload(rpc_app, tarfile)", "def upload_pypi():\n import os\n from . import remove, dir_char\n remove('dist')\n if os.system('python3 setup.py sdist'):\n os.system('python setup.py sdist')\n os.system('twine upload dist%s*' % dir_char)", "def _push_app(self, app, registry):\n # First find all the images that need to\n # be pushed to the Docker registry. \n content = self._read_app_content(app)\n if content:\n images = self._get_user_images(content)\n for i in images:\n self._push_image(i, registry)\n \n # Register the application in the Ferry database. \n account, key, server = self.installer.get_ferry_account()\n if account:\n # Read in the contents of the application and\n # generate the API key. \n with open(app, \"r\") as f:\n name = account + '/' + os.path.basename(app)\n name, ext = os.path.splitext(name)\n content = f.read()\n req = { 'action' : 'register',\n 'app' : name,\n 'account' : account }\n sig = self.installer.create_signature(json.dumps(req), key)\n\n try:\n payload = { 'id' : account,\n 'app' : name, \n 'ext' : ext, \n 'content' : content,\n 'sig' : sig }\n res = requests.post(server + '/app', data=payload)\n status = json.loads(res.text)\n if status['status'] == 'fail':\n logging.error(\"failed to register app \" + app)\n return \"Failed to register app \" + app\n else:\n return status['name']\n except ConnectionError:\n logging.error(\"could not connect to application server\")\n return \"Could not register the application.\"\n except ValueError as e:\n logging.error(str(e))\n return \"Registration server sent back unknown reply\"\n else:\n logging.error(\"could not read account information\")\n return \"Could not read account information.\"", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def package_minifest(request):\n if not settings.MARKETPLACE_GUID:\n return HttpResponseNotFound()\n return mini_manifest(request, settings.MARKETPLACE_GUID)", "def perform_valid_manifest_post(context, manifest, url):\n filename = \"data/{manifest}\".format(manifest=manifest)\n files = {'manifest[]': open(filename, 'rb')}\n endpoint = \"{coreapi_url}{url}\".format(coreapi_url=context.coreapi_url, url=url)\n response = requests.post(endpoint, files=files)\n response.raise_for_status()\n context.response = response.json()\n print(response.json())", "def addApplication(self, name, url):\n\t\tlogging.info(\"Adding application %s : %s\"%(name,url))\n\t\tuser = users.get_current_user() \n\t\tif user:\n\t\t\tapp = Application(name=name, url=url, addedBy=user)\n\t\t\tapp.put()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries the marketplace for published apps
def get_search_results(config, client, page):
    resp = client.get_published_apps(config.username, page)
    resp_json = resp.json()
    search_results = resp_json["results"]
    if search_results is None or len(search_results) == 0:
        logger.info(
            click.style("You haven't published any apps to the marketplace yet. Use ", fg="blue") +
            click.style("21 publish submit {PATH_TO_MANIFEST_FILE}", bold=True, fg="blue") +
            click.style(" to publish your apps to the marketplace.", fg="blue"),
            fg="blue")
        return 0

    total_pages = resp_json["total_pages"]
    logger.info("\nPage {}/{}".format(page + 1, total_pages), fg="green")
    headers = ["id", "Title", "Url", "Rating", "Is up", "Is healthy", "Average Uptime", "Last Update"]
    rows = []
    for r in search_results:
        rating = "Not yet Rated"
        if r["rating_count"] > 0:
            rating = "{:.1f} ({} rating".format(r["average_rating"], int(r["rating_count"]))
            if r["rating_count"] > 1:
                rating += "s"
            rating += ")"
        rows.append([r["id"], r["title"], r["app_url"], rating,
                     str(r["is_up"]), str(r["is_healthy"]),
                     "{:.2f}%".format(r["average_uptime"] * 100),
                     util.format_date(r["last_update"])])
    logger.info(tabulate(rows, headers, tablefmt="simple"))
    return total_pages
[ "def _get_apps(self):\n return self.api.get('/v2/apps')", "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "def get_public_apps(self):\n # https://api.relayr.io/apps\n url = '{0}/apps'.format(self.host)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def cli_list(ctx):\n try:\n r = api.apps_get()\n pprint(r)\n except ApiException as e:\n print(\"Exception when calling AppsApi->apps_get: %s\\n\", e)", "def view_all_apps(self):\n return self.db.apps_by_num", "def get_all_apps(self):\n return list(self.apps.values())", "def first_free_hosted_app(self):\n for i in range(1, self.paginator.total_page_number + 1):\n for app in self.submitted_apps:\n if app.has_price and app.price == 'Free' and not app.is_packaged_app:\n return app\n if self.paginator.is_paginator_present:\n if not self.paginator.is_next_page_disabled:\n self.paginator.click_next_page()\n else:\n raise Exception('App not found')", "def retrieve_apps(self, api_key, device_name):\n raise NotImplementedError", "def get_apps(self, request, app_ids):\n sq = WebappIndexer.search()\n if request.query_params.get('filtering', '1') == '1':\n # With filtering (default).\n for backend in self.filter_backends:\n sq = backend().filter_queryset(request, sq, self)\n sq = WebappIndexer.filter_by_apps(app_ids, sq)\n\n # Store the apps to attach to feed elements later.\n with statsd.timer('mkt.feed.views.apps_query'):\n apps = sq.execute().hits\n return dict((app.id, app) for app in apps)", "def get(self):\n return read_heroku_apps(request.args)", "async def app_list(self) -> Mapping[str, Any]:\n return await self._send_command(\"FetchLaunchableApplicationsEvent\", {})", "def get_publisher_apps(self, publisherID):\n # https://api.relayr.io/publishers/<id>/apps\n url = '{0}/publishers/{1}/apps'.format(self.host, publisherID)\n _, data = self.perform_request('GET', url, headers=self.headers)\n return data", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def get_app(app_name):\n client = get_api_client()\n\n LOG.info(\"Searching for existing applications with name {}\".format(app_name))\n\n resp, err = client.application.list(params={\"filter\": \"name=={}\".format(app_name)})\n if err:\n raise Exception(\"[{}] - {}\".format(err[\"code\"], err[\"error\"]))\n resp = resp.json()\n if resp[\"metadata\"][\"total_matches\"] > 0:\n LOG.info(\"Application found with name {}\".format(app_name))\n return resp\n\n LOG.info(\"No existing application found with name {}\".format(app_name))\n return None", "def list(self):\n\n return self._list(self._path(), 'apps')", "def test_app_filter(self):\n\n eq_(query(\"\", limit=1, app=amo.MOBILE.id)[0].id, 4664)\n # Poor sunbird, nobody likes them.\n eq_(len(query(\"\", limit=1, app=amo.SUNBIRD.id)), 0)", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 
-n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def getApps():\n logging.info(f\"获取App的序列号描述表\")\n interface = dbus_interface()\n result = interface.GetApps()\n if isinstance(result, dbus.Dictionary):\n logging.info(f\"App序列号描述表: {result}\")\n keys = sorted(result.keys())\n for key in keys:\n logging.info(f'{key}: {result[key]}')\n return True\n else:\n logging.info(f\"GetApps 返回的数据类型不是预期的bus.Dictionary,实际类型为{type(result)}\")\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace "AUTO" in the host and quickbuy with the ZeroTier IP. The server subsequently replaces, in the displayed quickbuy, instances of the manifest host value with a mkt.21.co address.
def replace_auto(manifest_dict, marketplace):
    manifest_dict = copy.deepcopy(manifest_dict)

    def get_formatted_zerotier_address(marketplace):
        host = get_zerotier_address(marketplace)
        if "." not in host:
            return "[{}]".format(host)
        else:
            return host

    if 'AUTO' in manifest_dict['host']:
        manifest_dict['host'] = manifest_dict['host'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    if 'AUTO' in manifest_dict['info']['x-21-quick-buy']:
        manifest_dict['info']['x-21-quick-buy'] = manifest_dict['info']['x-21-quick-buy'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    return manifest_dict
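A hypothetical illustration of the substitution above (the ZeroTier address is made up; in practice it is resolved via get_zerotier_address):

manifest = {
    "host": "AUTO",
    "info": {"x-21-quick-buy": "21 buy http://AUTO/endpoint"},
}
# Assuming the ZeroTier lookup returned "10.244.1.5", replace_auto(manifest, marketplace)
# would return a copy in which:
#   manifest["host"]                   -> "10.244.1.5"
#   manifest["info"]["x-21-quick-buy"] -> "21 buy http://10.244.1.5/endpoint"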
[ "def set_host_addr(self):\n \n # if using on same computer use random loopback address\n # because IP server stores as key value pairs and overwrites if same address (only for testing)\n if self.scope == \"localhost\":\n num1 = random.randint(0,199)\n num2 = random.randint(0,199)\n num3 = random.randint(0,199)\n host_addr = \"127.\" + str(num1) + \".\" + str(num2) + \".\" + str(num3)\n \n # if using in same LAN use local IP\n elif self.scope == \"internal\":\n host_name = socket.gethostname()\n host_addr = socket.gethostbyname(host_name)\n\n else:\n # get loating IP address of this host (only for testing)\n host_addr = urllib.request.urlopen(\"https://api.zipixx.com/forwardedfor\").read().decode(\"utf-8\")\n\n self.host_addr = (host_addr,self.port)", "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_replace_host_subnet(self):\n pass", "def _update_baremetal_host_ip_data(self, rule_data):\n\n # Ger defult ip offset\n default_ip_offset = rule_data['default']\n\n host_idx = 0\n LOG.info(\"Update baremetal host ip's\")\n for racks in self.data['baremetal'].keys():\n rack_hosts = self.data['baremetal'][racks]\n for host in rack_hosts:\n host_networks = rack_hosts[host]['ip']\n for net in host_networks:\n ips = list(self.network_subnets[net])\n host_networks[net] = str(ips[host_idx + default_ip_offset])\n host_idx = host_idx + 1\n\n LOG.debug(\"Updated baremetal host:\\n{}\".format(\n pprint.pformat(self.data['baremetal'])))", "def configure_host_ips(h3, h4, ip_address_hs):\n\n h3.libs.ip.flush_ip('eth1')\n h3.libs.ip.interface('eth1', up=False)\n\n h4.libs.ip.flush_ip('eth1')\n h4.libs.ip.interface('eth1', up=False)\n\n h3.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[0], MASK), up=True)\n h4.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[1], MASK), up=True)", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def test_patch_host_subnet(self):\n pass", "def softupdate_ip(request, ipaddress):\n\n softupdate_key = settings.SOFTUPDATE_KEY\n if request.POST.get(\"key\", \"invalid_key\") != softupdate_key:\n raise PermissionDenied()\n\n # LC: UGGLY and not \"portable\"\n STATUS_EN_SERVICE = 'En service'\n\n def noanswer(reason=\"\"):\n message = \"\"\"Modification impossible.\\n\"\"\"\n if reason and settings.DEBUG:\n message += \"\"\"%s\\n\"\"\" % (reason,)\n return HttpResponse(message, content_type=\"plain/text\")\n\n serial = request.POST.get(\"serial\", None)\n hostname = request.POST.get(\"hostname\", None)\n\n host = None\n errmsgs = []\n\n if serial:\n hosts = Host.objects.filter(serial=serial)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n if not host:\n errmsgs.append(\"Le host serial=%s est introuvable.\" % (serial,))\n\n if hostname and not host:\n hosts = Host.objects.filter(hostname=hostname,\n status__description=STATUS_EN_SERVICE)\n if len(hosts) == 1:\n host = 
hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n # Get the last log entry\n hostlogs = HostIPLog.objects.filter(host=host, log_ip=ipaddress) \\\n .order_by(\"-date\")\n if hostlogs:\n hostlog = hostlogs[0]\n else:\n hostlog = HostIPLog(host=host, log_ip=ipaddress)\n \n hostlog.log_queryfrom = get_request_remote_addr(request)\n hostlog.log_hostname = request.POST.get('hostname', 'unknown')\n hostlog.save()\n\n return HttpResponse('ok.', content_type='plain/text')", "def set_host():\n import socket\n soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n soc.connect(('8.8.8.8', 80)) # try creating connection to external destination\n settings.HOST_IP = soc.getsockname()[0]\n logger.info('Host IP: {}'.format(settings.HOST_IP))\n soc.close()", "def do_change_hostname(self, line):\n print('This functionality is not yet implemented')\n print('Basically, its needed:\\n'\n ' - edit conf/web.properties and change hostname there\\n'\n ' - ant deployear in EJBCA to redeploy EJBCA to JBoss with new settings (preserves DB)\\n'\n ' - edit /etc/enigma/config.json ejbca_hostname field\\n'\n ' - edit /etc/enigma/config.json ejbca_hostname_custom to true\\n'\n ' - call renew command')\n return self.return_code(1)", "def set_new_ip():\n with Controller.from_port(port=9051) as controller:\n controller.authenticate(password='tor_password')\n controller.signal(Signal.NEWNYM)", "def set_ip_adresses(self):\n # unfold a config tree for the current suffix, if any\n for interface, details in self.interfaces.items():\n for k, v in details.items():\n if k == 'address':\n ip, prefix = address_to_ip_prefix(v)\n self.interfaces[interface]['ip_address'] = ip\n self.interfaces[interface]['ip_prefix'] = prefix\n break\n if interface == 'wan':\n self.ip_address = ip\n if interface == 'ha_sync':\n self.ha_sync_ip_address = ip", "def reserve_fixed_ip(cls, cloudname, fixed_ip_addr):\n try:\n cloud_provider = CloudProvider(cloudname).provider\n cloud_provider.reserve_fixed_ip(fixed_ip_addr=fixed_ip_addr)\n return \"Success.\"\n except Exception as ex:\n Console.error(ex.message)\n\n\n return", "def change_ip():\r\n with Controller.from_port(port=9051) as controller:\r\n controller.authenticate()\r\n controller.signal(Signal.NEWNYM)", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def get_externalip(self):\n\n myip = \"\"\n for i in range(5):\n myip = self.fetch(random.choice(self.server_list))\n if myip != \"\":\n return myip\n else:\n continue\n return \"\"", "def calculate_trima_address(testMachine):\r\n _machineBase = int(testMachine/256)\r\n _machineRemainder = int(testMachine-(_machineBase*256))\r\n _machineBase = str(_machineBase)\r\n _machineRemainder = str(_machineRemainder)\r\n _address = \"172.21.\"+_machineBase+\".\"+_machineRemainder\r\n \r\n return _address", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def get_local_host_ip(self) -> str:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates the manifest file. Ensures that the required fields in the manifest are present and valid.
def validate_manifest(manifest_json):
    manifest_json = copy.deepcopy(manifest_json)

    for field in ["schemes", "host", "basePath", "info"]:
        if field not in manifest_json:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file.", fg="red").format(field),
                json=manifest_json)

    for field in ["contact", "title", "description", "x-21-total-price", "x-21-quick-buy", "x-21-category"]:
        if field not in manifest_json["info"]:
            raise exceptions.ValidationError(
                click.style(
                    "Field '{}' is missing from the manifest file under the 'info' section.",
                    fg="red").format(field),
                json=manifest_json)

    for field in {"name", "email"}:
        if field not in manifest_json["info"]["contact"]:
            raise exceptions.ValidationError(
                click.style(
                    "Field '{}' is missing from the manifest file under the 'contact' section.",
                    fg="red")
                .format(field),
                json=manifest_json)

    for field in ["min", "max"]:
        if field not in manifest_json["info"]["x-21-total-price"]:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file under the "
                            "'x-21-total-price' section.", fg="red"),
                json=manifest_json)

    if len(manifest_json["schemes"]) == 0:
        raise exceptions.ValidationError(
            click.style(
                "You have to specify either HTTP or HTTPS for your endpoint under the "
                "`schemes` section.", fg="red"),
            json=manifest_json)

    valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}
    if manifest_json["info"]["x-21-category"].lower() not in valid_app_categories:
        valid_categories = ", ".join(valid_app_categories)
        raise exceptions.ValidationError(
            click.style("'{}' is not a valid category for the 21 marketplace. Valid categories are {}.",
                        fg="red").format(
                manifest_json["info"]["x-21-category"], valid_categories),
            json=manifest_json)
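For reference, a minimal manifest dict that satisfies every check above (all values are placeholders, not real endpoints or contacts):

minimal_manifest = {
    "schemes": ["http"],
    "host": "10.244.1.5:8080",
    "basePath": "/",
    "info": {
        "title": "Example App",
        "description": "Placeholder description.",
        "contact": {"name": "App Author", "email": "author@example.com"},
        "x-21-total-price": {"min": 10, "max": 100},
        "x-21-quick-buy": "21 buy http://10.244.1.5:8080/endpoint",
        "x-21-category": "utilities",
    },
}
validate_manifest(minimal_manifest)  # raises no ValidationError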
[ "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. 
Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def validate(self):\n self.failedTests = []\n ret = True\n @testCase\n def testZip(self): self.zipfile.testzip()\n ret = testZip(self) and ret\n\n @testCase\n def validateManifest(self):\n jsonschema.validate(self.manifest, Aptofile.SCHEMA,\n Aptofile.VALIDATOR,\n format_checker = jsonschema.FormatChecker())\n ret = validateManifest(self) and ret\n\n @testCase\n def fileDate(self): self._checkTimestamp(self.manifest['date'])\n ret = fileDate(self) and ret\n\n return ret", "def _parse_and_validate_manifest(manifest_filename):\n\n # Strip comments while keeping line numbers.\n s = \"\"\n with open(manifest_filename, \"r\") as f_in:\n for line in f_in:\n comment_pos = line.find(\"//\")\n s += line[:comment_pos] + \"\\n\"\n\n\n manifest = json.loads(s)\n manifest_val = ArgumentsValidator(manifest, \"Dataset manifest\")\n with manifest_val:\n\n compression_type = manifest_val.get(\"compression\", [ATYPE_NONE, ATYPE_STRING], True)\n if compression_type is not None:\n compression_type = compression_type.upper()\n if compression_type not in [\"ZLIB\", \"GZIP\"]:\n raise ValueError(\"Unsupported compression type: %s\" % compression_type)\n\n allow_var_len = manifest_val.get(\"allow_var_len\", ATYPE_BOOL, True)\n features_list = manifest_val.get(\"features\", ATYPE_DICTS_LIST, True)\n\n\n\n # Validate each feature and create parser objects.\n feat_parsers = {}\n feat_shapes = {}\n feat_dtypes = {}\n\n for feat in features_list:\n\n feat_val = ArgumentsValidator(feat, \"Dataset feature\")\n with feat_val:\n name = feat_val.get(\"name\", ATYPE_STRING, True)\n dtype = tf.as_dtype(feat_val.get(\"dtype\", ATYPE_STRING, True))\n shape = feat_val.get(\"shape\", ATYPE_INTS_LIST, True)\n deserialize_type = feat_val.get(\"deserialize_type\", ATYPE_STRING, True)\n deserialize_args = feat_val.get(\"deserialize_args\", ATYPE_DICT, False, default={})\n var_len = feat_val.get(\"var_len\", ATYPE_BOOL, allow_var_len, default=False)\n\n if var_len and not allow_var_len:\n raise ValueError(\"Variable length features not allowed for this dataset.\")\n\n try:\n shape = [int(x) for x in list(shape)]\n except:\n raise ValueError(\"Invalid shape for feature `%s`: %s\" % (name, shape))\n\n \n try:\n feat_parsers[name] = _PARSERS[deserialize_type](shape, dtype, deserialize_args, var_len)\n except KeyError:\n raise ValueError(\"Unsupported deserialization type: %s\" % deserialize_type)\n\n\n if var_len:\n feat_shapes[name] = [-1] + shape\n else:\n feat_shapes[name] = shape\n\n feat_dtypes[name] = dtype\n\n\n return compression_type, allow_var_len, feat_parsers, feat_shapes, feat_dtypes", "def perform_valid_manifest_post(context, manifest, url):\n filename = \"data/{manifest}\".format(manifest=manifest)\n files = {'manifest[]': open(filename, 'rb')}\n endpoint = \"{coreapi_url}{url}\".format(coreapi_url=context.coreapi_url, url=url)\n response = requests.post(endpoint, files=files)\n response.raise_for_status()\n 
context.response = response.json()\n print(response.json())", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def parse_manifest(self):\n\n if not self.manifest.exists():\n print(f'Manifest \"{self.manifest}\" does not exist!')\n sys.exit(3)\n\n self.input_manifest = EleTree.parse(self.manifest)", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for 
more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def validate_metadata(self, metadata):\n self.logger.info(\"Validating metadata: {}\".format(metadata))\n for required in [\"name\", \"version\"]:\n if required not in metadata:\n return False\n return True", "def test_sanity(tmpdir, manifest_file, manifest):\n _file = tmpdir.join('manifest.yaml')\n _file.write(manifest_file)\n assert get_manifest_from_path(str(_file)).contents == manifest.contents", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def __verify_manifest(self, fmri, mfstpath, alt_pub=None):\n\n try:\n return self.transport._verify_manifest(fmri,\n mfstpath=mfstpath, pub=alt_pub)\n except InvalidContentException:\n return False", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def validate(self):\n\n class NullBuffer(object):\n def write(self, bytes):\n pass\n\n prefixes = set(self.envoy.prefixes())\n\n null = NullBuffer()\n success = True\n\n def fail(message):\n nonlocal success\n success = False\n log.error(message)\n\n for digest in self.meta.get('files', ()):\n\n if digest not in prefixes:\n fail(f\"{digest} is unknown\")\n\n else:\n m = hash_implementation()\n self.envoy.recv(digest, null, after_decrypt=m.update)\n\n if digest != m.hexdigest():\n fail(f\"Expected {digest} but got {m.hexdigest()}\")\n\n for path in self.meta['files'][digest]:\n if path not in self.meta['structure']:\n fail(f\"the metadata for {path} is missing\")\n\n return success" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the ZeroTier IP address for the given marketplace name
def get_zerotier_address(marketplace):
    logger.info("You might need to enter your superuser password.")
    address = zerotier.get_address(marketplace)
    if not address:
        join_cmd = click.style("21 join", bold=True, reset=False)
        no_zt_network = click.style(
            "You are not part of the {}. Use {} to join the market.", fg="red")
        raise UnloggedException(no_zt_network.format(marketplace, join_cmd))
    return address
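
For illustration, a hedged sketch of how the helper above might be called; the marketplace name "21market" and the messages are placeholders, and the sketch assumes the helper's own dependencies (zerotier, click, logger, UnloggedException) are importable as in the snippet above.

# Hypothetical usage of get_zerotier_address(); "21market" is a placeholder.
try:
    market_ip = get_zerotier_address("21market")
    print("Marketplace reachable at {}".format(market_ip))
except UnloggedException:
    print("Not on the ZeroTier network yet; run `21 join` first.")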
[ "def get_ip():\n return '219.45.143.143'", "def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug(\"VIP '{0}': {1}\".format(vip_name, vip))\n return vip", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def _lookup_static_ip_address(self, name, networktype):\n try:\n # address names are refined by network type to ensure they are\n # unique across different address pools\n name = cutils.format_address_name(name, networktype)\n address = self.dbapi.address_get_by_name(name)\n return address.address\n except exception.AddressNotFoundByName:\n return None", "def lookup_static_ip_address(name, networktype):\n try:\n # address names are refined by network type to ensure they are\n # unique across different address pools\n name = '%s-%s' % (name, networktype)\n address = pecan.request.dbapi.address_get_by_name(name)\n return address.address\n except exception.AddressNotFoundByName:\n return None", "def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def getPublicIP():\r\n\t\r\n\treturn request.urlopen('http://ip.42.pl/raw').read().decode()", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "def get_public_ip() -> str:\n try:\n return json.loads(urlopen(\"https://api.myip.com\").read())[\"ip\"]\n except Exception as e:\n return \"\"", "def get_own_ip():\n # LINUX AKA RASPBERRY PI\n import os\n ipv4 = os.popen('ip addr show wlan0').read().split(\"inet \")[1].split(\"/\")[0]\n return ipv4\n\n # MAC OS\n # return ((([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")] or [[(s.connect((\"8.8.8.8\", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + [\"no IP found\"])[0])", "def ztranser_addr(self, address_name):\n _data = self.address(address_name)\n return \"{}:{}\".format(_data.ztranser_host(),_data.ztranser_port())", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_ip():\n g = geocoder.ip('me')\n return g.latlng", "def get_ip_address(self):\n return self.crb['tester IP']", "def ip(name):\n instances = instances_by_name(name)\n for instance in instances:\n click_info(name)\n click_info(\" Public: {:}\".format(instance.public_ip_address))\n click_info(\" Private: {:}\".format(instance.private_ip_address))", "def get_ip_address(self) -> Optional[str]:\n\n # if ip address has been hard coded in config file, use that\n ip_address = self.config.get('ip_address')\n if ip_address is not None:\n return ip_address\n\n # if auto determine is enabled, use ipify to lookup the ip\n if self.config.auto_determine_ip_address:\n ip_address = get('https://api.ipify.org').text\n return ip_address", "def get_local_host_ip(self) -> str:", "def get_ip_address(device):\n try:\n capwap_client_rcb = device.parse('show capwap 
client rcb')\n except SchemaEmptyParserError as e:\n log.error(e)\n return ''\n\n return capwap_client_rcb.get('mwar_ap_mgr_ip', '')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the display mode to wireframe only
def setDisplayMode(self, mode):
    return "Wireframe"
[ "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def toggle_wireframe():\n model_editor = viewport.get_model_panel()\n if model_editor:\n viewport.toggle_wireframe(model_editor)", "def setSurfaceShadingMode(mode='flat'):\n sdict = {'flat':'FLAT','smooth':'SMOOTH'}\n dislin.shdmod(sdict[mode], 'SURFACE')", "def wireframe(self):\n return self.uniform_buffer.data[\"wireframe\"] > 0", "def testWireframeDisplay():\n \n viewer = wd.WireframeViewer(600, 400)\n viewer.addWireframe('cube', shape.Cuboid((80,150,0), (200,200,200)))\n viewer.displayFaces = False\n viewer.run()", "def add_wireframe_modifier(self):\n scene = self.set_as_active()\n\n # if the user selected a material, use it\n if w_var.cb_mat_wire:\n wireframe_mat = bpy.data.materials[w_var.mat_wire_name]\n\n # else, create a new one with the color selected\n else:\n color_wire = w_var.color_wire\n\n # separating rgb and alpha\n wireframe_color_rgb = color_wire[0:3]\n wireframe_color_alpha = color_wire[-1]\n wireframe_mat = bpy.data.materials.new('wireframe')\n\n renderengine = scene.wirebomb.data_renderengine\n \n if renderengine == 'CYCLES':\n wireframe_mat.use_nodes = True\n tree = wireframe_mat.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_transparent = tree.nodes.new('ShaderNodeBsdfTransparent')\n node_transparent.location = -300, 100\n\n node_diffuse = tree.nodes.new('ShaderNodeBsdfDiffuse')\n node_diffuse.location = -300, -100\n node_diffuse.inputs[0].default_value = wireframe_color_rgb + (1.0,)\n node_diffuse.color = wireframe_color_rgb\n node_diffuse.name = 'addon_wireframe_color' # referencing to this ID in the real-time change\n\n node_mixshader = tree.nodes.new('ShaderNodeMixShader')\n node_mixshader.location = 0, 50\n node_mixshader.inputs[0].default_value = wireframe_color_alpha\n node_mixshader.name = 'addon_wireframe_alpha' # referencing to this ID in the real-time change\n\n node_output = tree.nodes.new('ShaderNodeOutputMaterial')\n node_output.location = 300, 50\n\n # connecting the nodes\n tree.links.new(node_transparent.outputs[0], node_mixshader.inputs[1])\n tree.links.new(node_diffuse.outputs[0], node_mixshader.inputs[2])\n tree.links.new(node_mixshader.outputs[0], node_output.inputs[0])\n\n for node in tree.nodes:\n node.select = False\n\n # sets the viewport color\n wireframe_mat.diffuse_color = wireframe_color_rgb\n\n elif renderengine == 'BLENDER_RENDER':\n wireframe_mat.diffuse_color = wireframe_color_rgb\n wireframe_mat.use_transparency = True\n wireframe_mat.alpha = wireframe_color_alpha\n\n self.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n\n for obj in scene.objects:\n if obj.select:\n obj.data.materials.append(wireframe_mat)\n modifier_wireframe = obj.modifiers.new(name='Wireframe', type='WIREFRAME')\n modifier_wireframe.use_even_offset = False # Causes spikes on some models\n modifier_wireframe.use_replace = False\n modifier_wireframe.thickness = w_var.slider_wt_modifier\n\n # arbitrary high number because wire material is always added to end\n modifier_wireframe.material_offset = 12345\n\n # referencing to this ID in the real-time change\n modifier_wireframe.name = 'addon_wireframe'\n\n return wireframe_mat", "def wireframe(self):\n return self._wireframe", "def toggle_surface(self):", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)", "def addWireframe(self, wireframe):\n self.wireframe = wireframe\n self.tf_wireframe = wireframe.copy()", "def 
SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetPlaneMode(self, *args)", "def setMode(self, mode):\n if mode in [\n 'singlepoint',\n 'multipoints'\n ]:\n self.mode = mode", "def testWireframe():\n \n # Create a triangle by explictly passing the nodes and edges\n print (\"\\nTriangle\")\n triangle = wf.Wireframe([[100,200,10], [200,200,10], [125,100,500]])\n triangle.addEdges([(0,1), (1,2), (2,0)])\n triangle.output()\n \n # Create a cuboid using the basicShape module\n print (\"\\nCuboid\")\n cuboid = shape.Cuboid((100,100,10), (20,30,40))\n cuboid.output()", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def set_up_wireframe_modifier(self):\n scene = self.set_as_active()\n \n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clay:\n\n # adding clay material before wireframe material for material offset in wireframe modifier to be correct\n self.set_up_clay()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n # sets up renderlayer and adds wireframe modifier/material to affected meshes and saves wireframe material\n self.set_up_rlayer('wireframe')\n scene.wirebomb.data_material_wire = self.add_wireframe_modifier().name\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao:\n self.set_up_all_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def set_up_wireframe_freestyle(self):\n scene = self.set_as_active()\n \n # sets up renderlayer(s) (depending on 'Composited wireframing' checkbox) and freestyle wireframing\n # also saves freestyle linestyle name\n self.set_up_rlayer('wireframe', rlname_other='other')\n scene.wirebomb.data_freestyle_linestyle = self.add_wireframe_freestyle().name\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if w_var.cb_clay:\n self.set_up_clay()\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao and not w_var.cb_composited:\n self.set_up_all_ao()\n\n elif w_var.cb_composited:\n\n # sets up composition for wireframe and sets up ambient occlusion lighting if used\n self.comp_add_wireframe_freestyle()\n \n if scene.render.engine == 'CYCLES':\n scene.cycles.film_transparent = True\n\n else:\n scene.render.alpha_mode = 'TRANSPARENT'\n\n if w_var.cb_ao:\n self.set_up_world_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def 
testSurfaceDisplayWithCube():\n \n viewer = wd.WireframeViewer(600, 400)\n viewer.addWireframe('cube', shape.Cuboid((225,100,0), (200,200,200)))\n # viewer.addWireframe('sphere', shape.Spheroid((80,150,0), (200,200,200), resolution=3))\n viewer.displayEdges = False\n viewer.run()", "def toggle_xray_mode(self):\n self.xray_mode = not self.xray_mode\n if self.xray_mode:\n self.level_model.setColorScale((1, 1, 1, 0.5))\n self.level_model.setTransparency(TransparencyAttrib.MDual)\n else:\n self.level_model.setColorScaleOff()\n self.level_model.setTransparency(TransparencyAttrib.MNone)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register an asset required by a dashboard module. Some modules require special scripts or stylesheets, like the
def register_module_asset(self, asset):
    self._module_assets.append(asset)
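
To make the register_module_asset() call above concrete, here is a hedged sketch of a module-like object registering an extra JavaScript file from its register() hook; the class name and asset path are illustrative assumptions, and the attributes used (name, enabled, register) are inferred from the _prepare() and _register_routes() entries later in this section.

# Sketch only: a minimal module-like object; names and paths are hypothetical.
class ScriptAssetModule:
    name = 'ScriptAsset'
    enabled = True

    def register(self, dashboard):
        # Ask the dashboard to serve an extra JavaScript file for this module.
        dashboard.register_module_asset(
            'templates/script_asset/js/script_asset.js')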
[ "def process_xmodule_assets():\r\n sh('xmodule_assets common/static/xmodule')", "def assets():\n pass", "def register(app):\n app.register_blueprint(bp)\n\n assets = app.jinja_env.assets_environment\n\n assets.register('site_css', Bundle(\n 'scss/site.scss', filters='libsass', output='site.dist.css'))\n assets.register('site_js', Bundle(\n 'js/vendor/jquery-2.2.4.min.js', 'js/vendor/imagesloaded-4.1.4.min.js',\n 'js/site.js', filters='rjsmin', output='site.dist.js'))", "def add_assets_mapping(config, mapping):\n assets = config.registry.queryUtility(IAssets) or Assets()\n assets.update(mapping)\n config.registry.registerUtility(assets, IAssets)", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def asset_cli():", "def register_scss(assets):\n assets.append_path(app.static_folder, app.static_url_path)\n assets.config['SASS_PATH'] = 'app/scss'\n\n bundle = Bundle('scss/client.scss',\n output='css/gen/client.%(version)s.css',\n depends=('**/*.scss'),\n filters=('scss', 'cssmin'))\n assets.register('scss_client', bundle)", "def test_register_plugin_asset():\n app = create_ctfd(setup=False)\n register_plugin_asset(app, asset_path='/plugins/__init__.py')\n app = setup_ctfd(app)\n with app.app_context():\n with app.test_client() as client:\n r = client.get('/plugins/__init__.py')\n assert len(r.get_data(as_text=True)) > 0\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_import_system_asset(self):\n pass", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def add_asset(urn: str, asset: str, validate_assets: bool) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.add_asset(asset)\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"add assets\")\n if validate_assets:\n _abort_if_non_existent_urn(\n graph,\n asset,\n \"add assets. 
Use --no-validate-assets if you want to turn off validation\",\n )\n for mcp in dataproduct_patcher.build():\n graph.emit(mcp)", "def configure_ext_assets(app, xstatic):\n assets = Environment(app)\n coffee_lib = Bundle(\n 'coffee/lib/*.coffee',\n filters='coffeescript',\n output='gen/lib.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee_pages = Bundle(\n 'coffee/pages/*.coffee',\n filters='coffeescript',\n output='gen/pages.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee = Bundle(\n coffee_lib,\n coffee_pages,\n output='gen/app.js'\n )\n assets.register('coffee_app', coffee)\n\n coffee_spec = Bundle(\n 'coffee/spec/*.coffee',\n filters='coffeescript',\n output='gen/coffee_spec.js'\n )\n assets.register('coffee_spec', coffee_spec)\n\n vendor_js = Bundle(\n os.path.join(xstatic.path_for('jquery'), 'jquery.min.js'),\n 'vendor/pdfjs-' + app.config['X_PDFJS_VERSION'] + '-dist/build/pdf.js',\n 'vendor/jquery.jeditable.mini.js',\n 'vendor/jquery-ui-1.11.2/jquery-ui.min.js',\n output='gen/vendor_js.js',\n )\n assets.register('vendor_js', vendor_js)\n\n scss_bundle = Bundle(\n 'scss/site.scss',\n depends='**/*.scss',\n filters='pyscss',\n output='gen/app.css'\n )\n assets.register('scss_all', scss_bundle)\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n scss.config.LOAD_PATHS = [\n os.path.join(xstatic.path_for('bootstrap_scss'), 'scss'),\n os.path.join(this_dir, '../static/vendor/bootswatch-darkly'),\n ]", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def autoload():\r\n global _ASSETS_LOADED\r\n if _ASSETS_LOADED:\r\n return False\r\n\r\n # Import this locally, so that we don't have a global Django\r\n # dependency.\r\n from django.conf import settings\r\n\r\n for app in settings.INSTALLED_APPS:\r\n # For each app, we need to look for an assets.py inside that\r\n # app's package. We can't use os.path here -- recall that\r\n # modules may be imported different ways (think zip files) --\r\n # so we need to get the app's __path__ and look for\r\n # admin.py on that path.\r\n #if options.get('verbosity') > 1:\r\n # print \"\\t%s...\" % app,\r\n\r\n # Step 1: find out the app's __path__ Import errors here will\r\n # (and should) bubble up, but a missing __path__ (which is\r\n # legal, but weird) fails silently -- apps that do weird things\r\n # with __path__ might need to roll their own registration.\r\n try:\r\n app_path = import_module(app).__path__\r\n except AttributeError:\r\n #if options.get('verbosity') > 1:\r\n # print \"cannot inspect app\"\r\n continue\r\n\r\n # Step 2: use imp.find_module to find the app's assets.py.\r\n # For some reason imp.find_module raises ImportError if the\r\n # app can't be found but doesn't actually try to import the\r\n # module. So skip this app if its assetse.py doesn't exist\r\n try:\r\n imp.find_module('assets', app_path)\r\n except ImportError:\r\n #if options.get('verbosity') > 1:\r\n # print \"no assets module\"\r\n continue\r\n\r\n # Step 3: import the app's assets file. 
If this has errors we\r\n # want them to bubble up.\r\n import_module(\"%s.assets\" % app)\r\n #if options.get('verbosity') > 1:\r\n # print \"assets module loaded\"\r\n\r\n # Load additional modules.\r\n for module in getattr(settings, 'ASSETS_MODULES', []):\r\n import_module(\"%s\" % module)\r\n\r\n _ASSETS_LOADED = True", "def require_module(module):\n if not require_settings.REQUIRE_DEBUG and module in require_settings.REQUIRE_STANDALONE_MODULES:\n return mark_safe(\n \"\"\"<script src=\"{module}\"></script>\"\"\".format(\n module=get_static_url(\n resolve_require_module(require_settings.REQUIRE_STANDALONE_MODULES[module][\"out\"])),\n )\n )\n\n return mark_safe(\n \"\"\"<script src=\"{src}\" data-main=\"{module}\"></script>\"\"\".format(\n src=get_static_url(resolve_require_url(require_settings.REQUIRE_JS)),\n module=get_static_url(resolve_require_module(module)),\n )\n )", "def require_module(module):\n if not require_settings.REQUIRE_DEBUG and module in require_settings.REQUIRE_STANDALONE_MODULES:\n return u\"\"\"<script src=\"{module}\"></script>\"\"\".format(\n module = staticfiles_storage.url(resolve_require_module(require_settings.REQUIRE_STANDALONE_MODULES[module][\"out\"])),\n )\n return u\"\"\"<script src=\"{src}\" data-main=\"{module}\"></script>\"\"\".format(\n src = staticfiles_storage.url(resolve_require_url(require_settings.REQUIRE_JS)),\n module = staticfiles_storage.url(resolve_require_module(module)),\n )", "def _add_assets(self):\n directory = \"resources/js/pages\"\n if not os.path.exists(os.path.realpath(directory)):\n os.makedirs(os.path.realpath(directory))\n shutil.copyfile(\n os.path.join(demo_path, \"static/app.js\"), \"resources/js/inertia_demo.js\"\n )\n shutil.copyfile(\n os.path.join(demo_path, \"static/pages/Index.vue\"),\n \"resources/js/pages/Index.vue\",\n )\n shutil.copyfile(\n os.path.join(demo_path, \"static/pages/Hello.vue\"),\n \"resources/js/pages/Hello.vue\",\n )", "def test_import_test_asset(self):\n pass", "def test_create_system_asset(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare this dashboard instance to run.
def _prepare(self):
    # Set configuration defaults and save to the project document
    self.config.setdefault('PAGINATION', True)
    self.config.setdefault('PER_PAGE', 25)

    # Create and configure the Flask application
    self.app = self._create_app(self.config)

    # Add assets and routes
    self.assets = self._create_assets()
    self._register_routes()

    # Add module assets and routes
    self._module_assets = []
    for module in self.modules:
        try:
            module.register(self)
        except Exception as e:
            logger.error('Error while registering {} module: {}'.format(
                module.name, e))
            logger.error('Removing module {} from dashboard.'.format(
                module.name))
            self.modules.remove(module)

    # Clear dashboard and project caches.
    self.update_cache()
[ "def prepare(self):\n scenario = self.get_scenario()\n self.kpi_file = self.engine.create_artifact(\"selenium_tests_report\", \".txt\")\n script_type, script_is_folder = self.detect_script_type(scenario.get(\"script\"))\n runner_config = BetterDict()\n\n if script_type == \".py\":\n self.runner = NoseTester\n runner_config = self.settings.get(\"selenium-tools\").get(\"nose\")\n\n elif script_type == \".jar\" or script_type == \".java\":\n self.runner = JunitTester\n runner_config = self.settings.get(\"selenium-tools\").get(\"junit\")\n\n runner_config[\"script-type\"] = script_type\n runner_working_dir = self.engine.create_artifact(runner_config.get(\"working-dir\", \"classes\"), \"\")\n runner_config[\"working-dir\"] = runner_working_dir\n runner_config.get(\"artifacts-dir\", self.engine.artifacts_dir)\n runner_config.get(\"working-dir\", runner_working_dir)\n runner_config.get(\"report-file\", self.kpi_file)\n\n if Scenario.SCRIPT in scenario:\n if script_is_folder:\n shutil.copytree(scenario.get(\"script\"), runner_working_dir)\n else:\n os.makedirs(runner_working_dir)\n shutil.copy2(scenario.get(\"script\"), runner_working_dir)\n\n self.runner = self.runner(runner_config, scenario, self.log)\n self.runner.prepare()\n self.reader = SeleniumDataReader(self.kpi_file, self.log)\n if isinstance(self.engine.aggregator, ConsolidatingAggregator):\n self.engine.aggregator.add_underling(self.reader)", "def prepare_screening(self):\r\n self.prepared_folder(folder_name=\"dock\")\r\n self.prepare_receptors()\r\n self.prepare_ligands()", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def prepare(self):\n self.status.preparing()\n while not enough_space(self.BACKUPDIR, \"LATEST\") and not self.DRY_RUN and not self._last_remaining_backup():\n self._remove_oldest_backup()\n \n if not createRootBackupDir(self.BACKUPDIR):\n raise CrashPlanError(\"Cannot create Root folder.\")\n\n os.chdir(self.BACKUPDIR)", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def bootstrap(self):\n None", "def prepare(self):\n if self._state != 0:\n raise GameRunnerStateError\n self.backend.start_new_game(self.board_size, self.komi)\n self._state = 1", "def _startup(self):\n\n with self.app_context():\n model.setup(self.publ_config.database_config)\n\n self.search_index = search.SearchIndex(self.publ_config)\n self.jinja_env.globals.update( # pylint: disable=no-member\n search=self.search_index.query,\n )\n\n import click\n\n ctx = click.get_current_context(silent=True)\n if not ctx or ctx.info_name == 'run':\n index.scan_index(self.publ_config.content_folder)\n if self.publ_config.index_enable_watchdog:\n index.background_scan(self.publ_config.content_folder)", "def run(self):\n\n # Avoid running the initializer twice. This might happen e.g. due to a bad testing set up where the initializer creation is not scoped properly. E.g. Jinja template engine will get very confused.\n assert not self._already_run, \"Attempted to run initializer twice. 
Please avoid double initialization as it will lead to problems.\"\n\n self.configure_logging()\n\n # Configure addons before anything else, so we can override bits from addon, like template lookup paths, later easily\n self.include_addons()\n\n # Serving\n self.configure_templates()\n self.configure_static()\n\n # Authentication and authorization\n # (Must be before any views are included)\n self.configure_authentication()\n\n # Forms\n self.configure_csrf()\n self.configure_forms()\n self.configure_crud()\n\n # Email\n self.configure_mailer()\n\n # Timed tasks\n self.configure_tasks()\n\n # Core view and layout related\n self.configure_root()\n self.configure_error_views()\n self.configure_views()\n self.configure_panels()\n self.configure_sitemap()\n self.configure_tweens()\n\n # Website administration\n self.configure_admin()\n\n # Addon models\n self.configure_models()\n\n # Redis (preferably before sessions)\n self.configure_redis()\n\n # Sessions and users\n self.configure_sessions()\n self.configure_user()\n self.configure_user_forms()\n self.configure_user_models()\n self.configure_password()\n self.configure_federated_login()\n\n # Configure web shell\n self.configure_notebook()\n\n # Database and models\n self.configure_instrumented_models()\n self.configure_model_admins()\n self.configure_database()\n\n # Tests can pass us some extra initialization work on ad hoc\n extra_init = self.global_config.get(\"extra_init\")\n if extra_init:\n resolver = DottedNameResolver()\n extra_init = resolver.resolve(extra_init)\n extra_init(self)\n\n self._already_run = True", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def initialise(self):\n self.env.process(self.ram_traffic_generation())", "def __init__(self):\n self.cron = CronTab(user=True)\n self.update_cron_data()", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def __setup(self):\n\n backupFolder = self.config['destination']\n self.__createBackupFolder(backupFolder)\n\n # create the project based backup folder\n today = date.today()\n\n if 'projects' in self.config:\n for project in self.config['projects'].iterkeys():\n timestamp = datetime.now().strftime('%d-%H-%M-%S')\n backupDestination = os.path.join(backupFolder, project, str(today.year), today.strftime('%m'), timestamp)\n self.__createBackupFolder(backupDestination)\n self.config['projects'][project]['destination'] = backupDestination", "def initialize(self):\n \n casalog.origin(\"ParallelDataHelper\")\n\n # self._arg is populated inside ParallelTaskHelper._init_()\n self._arg['vis'] = os.path.abspath(self._arg['vis'])\n # MPI setting\n if self._mpi_cluster:\n self._cluster.start_services()\n \n if (self._arg['outputvis'] != \"\"):\n self._arg['outputvis'] = os.path.abspath(self._arg['outputvis']) \n\n outputPath, self.outputBase = os.path.split(self._arg['outputvis'])\n try:\n if self.outputBase[-1] == '.':\n self.outputBase = self.outputBase[:self.outputBase.rindex('.')]\n except ValueError:\n # outputBase must not have a trailing .\n pass\n\n if self.outputBase == '.' 
or self.outputBase == './':\n raise ValueError, 'Error dealing with outputvis'\n \n # The subMS are first saved inside a temporary directory\n self.dataDir = outputPath + '/' + self.outputBase+'.data'\n if os.path.exists(self.dataDir): \n shutil.rmtree(self.dataDir)\n\n os.mkdir(self.dataDir)", "def setUp(self):\n self.comm = MPI.COMM_WORLD\n sigma1, sigma2 = 1.0, 1.0\n self.banana = banana_rv.Banana_2D(sigma1=sigma1, sigma2=sigma2)\n self.sampler_dict = {\n 'demc': self._setup_demc(self.banana.ln_like),\n 'dream': self._setup_dream(self.banana.ln_like),\n 'dram': self._setup_dram(self.banana.ln_like),\n }\n\n if self.comm.rank == 0:\n # plot true pdf and true samples\n self._plot_banana()", "def prepare_process(self):\n max_wallclock_seconds = self.ctx.inputs.metadata.options.get('max_wallclock_seconds', None)\n\n if max_wallclock_seconds is not None and 'time_limit' not in self.ctx.inputs.parameters['INPUT_XSPECTRA']:\n self.set_max_seconds(max_wallclock_seconds)\n\n if self.ctx.restart_calc:\n self.ctx.inputs.parameters['INPUT_XSPECTRA']['restart_mode'] = 'restart'\n self.ctx.inputs.parent_folder = self.ctx.restart_calc.outputs.remote_folder", "def prepare_acquisition(self):\n self.lib.PrepareAcquisition()", "def setup(self):\n self.set_players()\n self.board = Board()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override this method for custom job titles. This method generates job titles. By default, the title is a pretty (but verbose) form of the job state point, based on the project schema.
def job_title(self, job):
    def _format_num(num):
        if isinstance(num, bool):
            return str(num)
        elif isinstance(num, Real):
            return str(round(num, 2))
        return str(num)

    try:
        s = []
        for keys in sorted(self._schema_variables()):
            v = job.statepoint()[keys[0]]
            try:
                for key in keys[1:]:
                    v = v[key]
            except KeyError:  # Particular key is present in overall
                continue      # schema, but not this state point.
            else:
                s.append('{}={}'.format('.'.join(keys), _format_num(v)))
        return ' '.join(s)
    except Exception as error:
        logger.debug(
            "Error while generating job title: '{}'. "
            "Returning job-id as fallback.".format(error))
        return str(job)
[ "def get_job_title(self, job_name):\n return ''", "def _postage_title(self, cube, label_mems, label_ref_dates):\n title = ''\n if label_mems:\n try:\n title += '%s: %s' % (self.realization.title(),\n cube.coord(self.realization).points[0])\n except:\n pass\n if label_ref_dates:\n try:\n time_unit = cube.coord(self.time_coord).units\n fcast_ref = cube_time_converter(\n cube.coord(self.forecast_ref_time).points[0],\n time_unit)\n title += '\\nInit. date: %s' % fcast_ref.isoformat()[:10]\n except:\n pass\n return title", "def get_title(self):\n\n if \"title_prefix\" in self:\n return \"{} : {}\".format(self.title_prefix, self.title)\n else:\n return self.title", "def make_title(self):\n\t\treturn _(\"%(y)s per %(x)s\") % {\n\t\t\t'y': self.y_axis.name.title(),\n\t\t\t'x': self.x_axis.name.title()\n\t\t}", "def job_title(self, job_title):\n\n self._job_title = job_title", "def job_title(self):\n if \"jobTitle\" in self._prop_dict:\n return self._prop_dict[\"jobTitle\"]\n else:\n return None", "def get_page_title(self):\n title = super().get_page_title()\n return title + \" %s %s for %s\" % (\n self.model.node_child_verbose_name,\n self.opts.verbose_name,\n self.parent_instance,\n )", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def getTitle(self, depletionRoot):\n self.jobTitle = 'defaultInstant'\n for child in depletionRoot.findall(\".//title\"):\n self.jobTitle = child.text.strip()\n if \" \" in self.jobTitle:\n raise IOError(\"Job title can not have spaces in the title but must be a single string. E.g. 
from \"+self.jobTitle+ \" to \"+ self.jobTitle.replace(\" \",\"\"))\n break\n return", "def getTaskTitle(self) -> unicode:\n ...", "def numbered_title(self):\n return f\"{self.title}\"", "def title(self):\n name = self.name.upper()\n if not _IsPositional(name):\n name = name[len(_PREFIX):].replace('-', ' ')\n return '{}'.format(name)", "def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]", "def get_full_title(self):\n title = '%s #%s' % (self.volume_name, self.issue_number)\n if self.name:\n title += ': %s' % self.name\n return title", "def _init_titles(self):\r\n super(ModelRestApi, self)._init_titles()\r\n class_name = self.datamodel.model_name\r\n if self.label_title:\r\n self.list_title = \"遍历 \" + self.label_title\r\n self.add_title = \"添加 \" + self.label_title\r\n self.edit_title = \"编辑 \" + self.label_title\r\n self.show_title = \"查看 \" + self.label_title\r\n\r\n if not self.list_title:\r\n self.list_title = \"List \" + self._prettify_name(class_name)\r\n if not self.add_title:\r\n self.add_title = \"Add \" + self._prettify_name(class_name)\r\n if not self.edit_title:\r\n self.edit_title = \"Edit \" + self._prettify_name(class_name)\r\n if not self.show_title:\r\n self.show_title = \"Show \" + self._prettify_name(class_name)\r\n self.title = self.list_title", "def __repr_title(self):\n return (\n self.title if not self.done\n else '̶'.join(c for c in self.title)\n )", "def _title(self):\n title = self.nxpath\n if 'title' in self.entries:\n return str(self.title)\n elif self.nxgroup:\n if 'title' in self.nxgroup.entries:\n return str(self.nxgroup.title)\n return self.nxpath", "def sub_case_title(self, arg_tc):\n return self.title" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override this method for custom job subtitles. This method generates job subtitles. By default, the subtitle is a minimal unique substring of the job id.
def job_subtitle(self, job):
    return str(job)[:max(8, self._project_min_len_unique_id())]
[ "def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)", "def get_subtitle_print(subs: List[Track]) -> List[str]:\n data = []\n if not subs:\n data.append(\"--\")\n for sub in subs:\n line_items = []\n\n # following sub.title tree checks and supports three different language and title scenarios\n # The second scenario is the recommended option to choose if you are open to choosing any\n # The third scenario should be used if you have nothing unique to state about the track\n # | Language | Track Title | Output |\n # | ------------ | ----------------------------- | --------------------------------------------- |\n # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |\n # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |\n # | es / Spanish | None | - Spanish, SubRip (SRT) |\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f\"{language}, {sub.title}\")\n else:\n line_items.append(language)\n\n line_items.append(sub.format.replace(\"UTF-8\", \"SubRip (SRT)\"))\n\n line = \"- \" + \", \".join(line_items)\n data += [\n (\" \" + x if i > 0 else x)\n for i, x in enumerate(textwrap.wrap(line, 64))\n ]\n return data", "def create_subtitle(self):\n label_subtitle = Label(self.frame, text=\"Projet Python 2020\", font=(\"Arial\", 25), bg='light blue',\n fg='white')\n label_subtitle.pack()", "def subtitle(self,) -> str:\n return self.__data['Description']", "def get_subject_name(id):\n if id < 10:\n return 'sub-00{}'.format(id)\n elif id < 100:\n return 'sub-0{}'.format(id)\n else:\n return 'sub-{}'.format(id)", "def get_job_title(self, job_name):\n return ''", "def SubTitle(Text):\n pass", "def subtitle(request, playlist_id):\n\ttry: \n\t\targs = json.loads(request.body)\n\t\tplaylist = Playlist.objects.get(pk=playlist_id)\n\t\tplaylist.subtitle = args['subtitle']\n\texcept Playlist.DoesNotExist:\n\t\treturn error('Invalid playlist id')\n\texcept KeyError:\n\t\treturn error('Query does not contain required arguments')\n\n\tplaylist.save()\n\treturn success()", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. 
\"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def get_page_subtitle(self, page_id):\n with open(page_id) as f:\n return json.load(f)['subtitle']", "def map_subtitle1(self):\n # noinspection PyProtectedMember, PyUnresolvedReferences\n return minqlx._map_subtitle1", "def write_subtitle(self, subtitle: str, break_page: bool, class_txt: str) -> str:\n if break_page:\n str_title = \"\"\"<h2 class=\"break-before\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n else:\n str_title = \"\"\"<h2 class=\\\"\"\"\" + class_txt + \"\"\"\\\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n self.html_doc = self.html_doc + str_title\n return self.html_doc", "def extract_subtitles(\n video_source_filename,\n subtitle_id=0,\n verbose_flag=0,\n dry_run_flag=0):\n cmd = \"mencoder -quiet '%(video_source_filename)s' -o /dev/null -nosound -ovc copy -vobsubout subtitles -vobsuboutindex 0 -sid %(subtitle_id)s\" % locals()\n if verbose_flag:\n print cmd\n if not dry_run_flag:\n run(cmd)\n print", "def getJobId() -> str:\n s = time.strftime(r\"%Y%m%d-%H%M%S\", time.localtime())\n return \"%s-%d\" % (s, random.randint(100, 999))", "def create_job_id() -> str:\n return str(uuid.uuid1())", "def _gen_subname(videofile, language, ext):\n root, basename = os.path.split(videofile)\n name, _ = os.path.splitext(basename)\n subname = '{basename}.{language}.{ext}'.format(\n basename=name,\n language=language,\n ext=ext)\n return subname", "def _gen_subname(self, videofile, language, ext):\n root, basename = os.path.split(videofile)\n name, _ = os.path.splitext(basename)\n unique = os.urandom(4).hex()\n unique = os.urandom(4).hex()\n subname = '{basename}.{language}.{ext}'.format(\n basename=name, \n language=language,\n ext=ext)\n p = os.path.join(root, subname)\n return p", "def map_subtitle2(self):\n # noinspection PyProtectedMember, PyUnresolvedReferences\n return minqlx._map_subtitle2", "def assign_job_id(self):\n num_string = str(randint(0, 10000)).zfill(5)\n job_id = self.jobname + str(num_string) + datetime.today().strftime(\"%Y%m%d\")\n return job_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override this method for custom job sorting. This method returns a key that can be compared to sort jobs. By
def job_sorter(self, job):
    key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)
    return key(job)
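
The job_title, job_subtitle, and job_sorter entries above are hooks intended to be overridden in a Dashboard subclass. A hedged sketch follows; the signac_dashboard import, the "temperature" state-point key, and the formatting choices are assumptions for illustration, not part of the documented code.

# Illustrative subclass; the state-point key "temperature" is hypothetical.
from signac_dashboard import Dashboard


class MyDashboard(Dashboard):
    def job_title(self, job):
        # Show a single state-point value instead of the full schema.
        return "T = {}".format(job.statepoint().get("temperature", "n/a"))

    def job_subtitle(self, job):
        # Keep a short unique id, as in the default implementation.
        return str(job)[:8]

    def job_sorter(self, job):
        # Sort numerically by temperature; jobs without the key go last.
        return job.statepoint().get("temperature", float("inf"))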
[ "def sort_key(self):\n ...", "def sort_key(self) -> SortKeyCallable:\n return self.__sort_key", "def sort_key(self):\n return self._sort_key", "def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)", "def sort_key(self):\n return general.iterable_sort_key(self)", "def get_sort_key(self, item):\n return item.number", "def sortkey(self):\n if not hasattr(self, '_reference'):\n self.parse()\n return self._sortkey", "def __cmp__(self, job):\n return cmp(job.name, job.name)", "def sortkey(self):\n return NotImplemented", "def _sort_key(obj):\n return obj.__definition_serial__", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def __hash__(self):\n return hash(self.jobid)", "def benchmark_sort_key(benchmark):\n if not \"label\" in benchmark:\n return \"\"\n return benchmark[\"label\"]", "def sortKey(self):\n # Should start with T to be committed after elastic data manager\n return 'testdatamanger' + str(id(self))", "def product_key_by_category(self, product):\n if self._category_sort is None:\n self._init_category_sort()\n category = product[\"Category\"]\n if category in self._category_sort:\n category_sort_key = \"{:05d}\".format(self._category_sort[category])\n else:\n category_sort_key = category\n return category_sort_key", "def sort_key(self, key):\n\n sortkey = None\n\n def mk_sort_name(hname):\n \"\"\"Create a string from a HumanName that we can use as\n a search key.\n\n \"\"\"\n sortname = hname.last + ', ' + hname.first + ' ' + hname.middle\n return sortname.lower()\n\n lower_key = key.lower()\n\n if 'year' in lower_key:\n sortkey = self['Year']\n\n if 'title' in lower_key:\n sortkey = self['Title']\n \n if 'num' in lower_key:\n # return the ajbnum string\n sortkey = self.sort_num_str()\n\n if 'place' in lower_key:\n # return the first publisher place\n sortkey = self['Publishers'][0]['Place']\n\n if 'publisher' in lower_key:\n sortkey = self['Publishers'][0]['PublisherName']\n\n if 'language' in lower_key:\n sortkey = self['Language']\n \n if 'author' in lower_key:\n if self['Authors']:\n sortkey = mk_sort_name(self['Authors'][0])\n elif self['Editors']:\n sortkey = mk_sort_name(self['Editors'][0])\n else:\n sortkey = ''\n \n if 'editor' in lower_key or\\\n 'translator' in lower_key or \\\n 'compiler' in lower_key or \\\n 'contributor' in lower_key:\n\n lower_key += 's'\n cap_key =lower_key.capitalize()\n if self[cap_key]:\n # if the list exists return the first name\n sortkey = mk_sort_name(self[cap_key][0])\n else:\n sortkey = ''\n\n return sortkey", "def get_sort_key(self):\n return self.sort_key or self.importance", "def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey", "def sort_key(p):\n # Filename is the 0th entry in tuple\n return 
p[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers routes with the Flask application. This method configures context processors, templates, and sets up routes for a basic Dashboard instance. Additionally, routes declared by modules are registered by this method.
def _register_routes(self):
    dashboard = self

    @dashboard.app.after_request
    def prevent_caching(response):
        if 'Cache-Control' not in response.headers:
            response.headers['Cache-Control'] = 'no-store'
        return response

    @dashboard.app.context_processor
    def injections():
        session.setdefault('enabled_modules',
                           [i for i in range(len(self.modules))
                            if self.modules[i].enabled])
        return {
            'APP_NAME': 'signac-dashboard',
            'APP_VERSION': __version__,
            'PROJECT_NAME': self.project.config['project'],
            'PROJECT_DIR': self.project.config['project_dir'],
            'modules': self.modules,
            'enabled_modules': session['enabled_modules'],
            'module_assets': self._module_assets
        }

    # Add pagination support from http://flask.pocoo.org/snippets/44/
    @dashboard.app.template_global()
    def url_for_other_page(page):
        args = request.args.copy()
        args['page'] = page
        return url_for(request.endpoint, **args)

    @dashboard.app.template_global()
    def modify_query(**new_values):
        args = request.args.copy()
        for key, value in new_values.items():
            args[key] = value
        return '{}?{}'.format(request.path, url_encode(args))

    @dashboard.app.errorhandler(404)
    def page_not_found(error):
        return self._render_error(str(error))

    self.add_url('views.home', ['/'])
    self.add_url('views.settings', ['/settings'])
    self.add_url('views.search', ['/search'])
    self.add_url('views.jobs_list', ['/jobs/'])
    self.add_url('views.show_job', ['/jobs/<jobid>'])
    self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>'])
    self.add_url('views.change_modules', ['/modules'], methods=['POST'])
[ "def _install_routes(self):\n @self.app.route('/', defaults = { 'page': 1 })\n @self.app.route('/<int:page>/')\n def index(page = 1):\n return self.render_template('index.html', page = page)\n\n @self.app.route('/archive/')\n @self.app.route('/archive/<tag>/')\n def archive(tag = None):\n if tag is not None and not tag in self.content.posts_by_tag:\n abort(404)\n return self.render_template('archive.html', tag = tag)\n\n @self.app.route('/page/<path:path>/')\n def page(path):\n current_page = self.content.pages_by_path.get(path) or abort(404)\n return self.render_template('page.html', page = current_page)\n\n @self.app.route('/post/<path:path>/')\n def post(path):\n current_post = self.content.posts_by_path.get(path) or abort(404)\n return self.render_template('post.html', post = current_post)", "def init_flask_restful_routes(app):\n app.logger.info('Initialising API Routes')\n api = Api(app)\n\n from src.api.views.health_check import HealthCheck\n from src.api.views.users import Users\n\n api.add_resource(HealthCheck, '/api/v1/health-check', endpoint=\"health check\")\n api.add_resource(Users, '/api/v1/instagram/user', endpoint=\"users endpoints\")", "def register_routes(self):\n pass", "def register_routes(self, api):\n api.add_resource(controllers.UserRegistration, '/registration')\n api.add_resource(controllers.UserLogin, '/login')\n api.add_resource(controllers.TokenRefresh, '/token/refresh')", "def setup_routes(self):\n\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n\n # Setup a default route for the root of object dispatch\n map.connect('*url', controller='root', action='routes_placeholder')\n\n config['routes.map'] = map", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n 
'/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def __setupRoutes():\n # import all in the routes/ dir\n __import__('routes')", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def register_routes(self, api):\n # Device Registration\n api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def add_routes(self, app: Application) -> None:\n app.http_router.add(\n {'GET'},\n '/',\n self._index_redirect\n )\n app.http_router.add(\n {'GET'},\n '/index.html',\n self._index\n )\n app.http_router.add(\n {'GET'},\n '/create.html',\n self._create\n )\n app.http_router.add(\n {'POST'},\n '/create.html',\n self._save_create\n )\n app.http_router.add(\n {'GET'},\n '/read.html',\n self._read\n )\n app.http_router.add(\n {'GET'},\n '/update.html',\n self._update\n )\n app.http_router.add(\n {'POST'},\n '/update.html',\n self._save_update\n )\n app.http_router.add(\n {'GET'},\n '/delete.html',\n self._delete\n )", "def register_blueprints(app):\n app.register_blueprint(MAIN)", "def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def _register_blueprint(self):\n self.dash_app.server.register_blueprint(\n self.blueprint, url_prefix=\"/login\")", "def setup_routes(app: web.Application) -> None:\n # API\n base = URL(\"/api/v1\")\n\n app.router.add_view(to_path(base / \"names\" / \"{name}\"), check_name.CheckNameView)\n 
app.router.add_view(to_path(base / \"whatTimeIsIt\"), current_time.GetCurrentTime)\n app.router.add_view(to_path(base / \"how-to-fibo\"), fibonacci.GetFibonacci)\n app.router.add_view(to_path(base / \"lets_dict\"), generate_dict.GenerateDict)\n app.router.add_view(to_path(base / \"books\"), books.BooksView)", "def register_blueprints(app):\n blueprints = {INDEX, DASHBOARD, COMMENT_SECTION}\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def register_blueprints(app):\n app.register_blueprint(api.views.blueprint)", "def init_routes(app):\n\n async def index(r):\n f = r.app['config']['path']['frontend_dir'] / 'index.html'\n return web.FileResponse(f)\n\n app.router.add_static('/css', app['config']['path']['frontend_dir'] / 'css', name='css')\n app.router.add_static('/js', app['config']['path']['frontend_dir'] / 'js', name='js')\n\n app.router.add_get('/', index)\n app.router.add_get('/stack/{a:.*}', index)\n app.router.add_get('/more/{a:.*}', index)", "def init_app():\n app = Flask(__name__)\n\n with app.app_context():\n # Import parts of our core Flask app\n from . import routes\n\n from .plotlydash.index import init_dashboard\n app = init_dashboard(app)\n\n return app", "def create_routes(self):\n if self.component:\n route = self.uri_base\n self._app.route(route, methods=['GET', 'POST'], endpoint='api_%s' % (self.component,))(self.process_request)\n if self.iterable:\n route = '%s/<string:identifier>' % (self.uri_base,)\n self._app.route(route, methods=['DELETE', 'GET', 'PATCH', 'PUT'], endpoint='api_%s' % (self.component,))(self.process_request)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clear project and dashboard server caches. The dashboard relies on caching for performance. If the data space is altered, this method may need to be called before the dashboard reflects those changes.
def update_cache(self):
    # Try to update signac project cache. Requires signac 0.9.2 or later.
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=FutureWarning)
        try:
            self.project.update_cache()
        except Exception:
            pass

    # Clear caches of all dashboard methods
    members = inspect.getmembers(self, predicate=inspect.ismethod)
    for func in filter(lambda f: hasattr(f, 'cache_clear'),
                       map(lambda x: x[1], members)):
        func.cache_clear()
[ "def clear_cache(self):\n template = 'https://ci.appveyor.com/api/projects/{0.username}/{0.project}/buildcache'\n url = template.format(self)\n headers = {'Authorization': 'Bearer {0.api_token}'.format(self)}\n response = requests.delete(url, headers=headers)\n print('Status code: %s'%response.status_code)\n if response.status_code == requests.codes.ok:\n print('Cache deleted.')\n else:\n response.raise_for_status()", "def clear_cache(self):", "def clear_cache(self):\n pass", "def clear_cache(self):\n self._cache.clear()", "def clearCache( self ):\n self._cache = self._dataset", "def clear_cache():\r\n if env.multi_server:\r\n run('restart-memcache.sh %(memcached_server_address)' % env)\r\n else:\r\n sudo('service memcached restart')", "def clear(self, cacheDir):", "def ClearCache(self):\n pass", "def clear_data_cache():\n load_glove.cache_clear()", "def clear_required_caches():\n\n return get_component(CachingPackage.COMPONENT_NAME).clear_required_caches()", "def clear_cache(self):\n requests.get(url=self.proxy_url+'/clear_cache')", "def clear_cache(self):\n self.part_cache.clear()", "def clear_cache():\n if os.path.exists(OPTIONS['cachedir']):\n shutil.rmtree(OPTIONS['cachedir'])", "def clear_cache(self):\n self.nfields.clear()\n self.kfields.clear()\n self.gfields.clear()\n self.nsimplefields.clear()\n self.ksimplefields.clear()\n self.gsimplefields.clear()", "def _reset_cache(self):\n self._nblock_cache = None\n self._archive_cache = None\n self._grid_cache = None", "def test_clear_cache(self):\n api_helpers.clear_cache()", "def clear_caches():\r\n from jinja2.environment import _spontaneous_environments\r\n from jinja2.lexer import _lexer_cache\r\n _spontaneous_environments.clear()\r\n _lexer_cache.clear()", "def clear_cache(self):\n if os.path.exists(self.cache_folder) and os.path.isdir(self.cache_folder):\n shutil.rmtree(self.cache_folder)", "def clear_cache() -> None:\n if os.path.isfile('/usr/local/sbin/clearcache.sh'):\n os.system('sudo /usr/local/sbin/clearcache.sh')\n else:\n os.system('sudo sh -c \"sync; echo 1 > /proc/sys/vm/drop_caches\"')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the command line interface. Call this function to use signac-dashboard from its command line
def main(self):
    def _run(args):
        kwargs = vars(args)
        if kwargs.get('host', None) is not None:
            self.config['HOST'] = kwargs.pop('host')
        if kwargs.get('port', None) is not None:
            self.config['PORT'] = kwargs.pop('port')
        self.config['PROFILE'] = kwargs.pop('profile')
        self.config['DEBUG'] = kwargs.pop('debug')
        self.run()

    parser = argparse.ArgumentParser(
        description="signac-dashboard is a web-based data visualization "
                    "and analysis tool, part of the signac framework.")
    parser.add_argument(
        '--debug', action='store_true',
        help="Show traceback on error for debugging.")
    parser.add_argument(
        '--version', action='store_true',
        help="Display the version number and exit.")
    subparsers = parser.add_subparsers()

    parser_run = subparsers.add_parser('run')
    parser_run.add_argument(
        '-p', '--profile', action='store_true',
        help='Enable flask performance profiling.')
    parser_run.add_argument(
        '-d', '--debug', action='store_true',
        help='Enable flask debug mode.')
    parser_run.add_argument(
        '--host', type=str,
        help='Host (binding address). Default: localhost')
    parser_run.add_argument(
        '--port', type=int,
        help='Port to listen on. Default: 8888')
    parser_run.set_defaults(func=_run)

    # This is a hack, as argparse itself does not
    # allow to parse only --version without any
    # of the other required arguments.
    if '--version' in sys.argv:
        print('signac-dashboard', __version__)
        sys.exit(0)

    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    if not hasattr(args, 'func'):
        parser.print_usage()
        sys.exit(2)
    try:
        self.observer.start()
        args.func(args)
    except RuntimeWarning as warning:
        logger.warning("Warning: {}".format(warning))
        if args.debug:
            raise
        sys.exit(1)
    except Exception as error:
        logger.error('Error: {}'.format(error))
        if args.debug:
            raise
        sys.exit(1)
    finally:
        self.observer.stop()
        self.observer.join()
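
A hedged sketch of the launcher script implied by main() above follows; the top-level Dashboard() construction and the module name dashboard.py are assumptions, while the run subcommand and the --port flag come from the argument parser in the documented code.

# dashboard.py -- minimal launcher; run e.g. `python dashboard.py run --port 8888`.
from signac_dashboard import Dashboard

if __name__ == '__main__':
    Dashboard().main()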
[ "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def main():\n # Get the path to the settings file.\n if len(sys.argv) == 1:\n settings_path = '/etc/taba/agent_settings.json'\n print 'Using default settings path %s' % settings_path\n\n elif len(sys.argv) == 2:\n settings_path = sys.argv[1]\n\n else:\n _Usage('Invalid arguments')\n\n if not os.path.exists(settings_path):\n _Usage('Settings file does not exist')\n\n # Decode the settings.\n settings = misc_util.DecodeSettingsFile(settings_path)\n\n # Launch the Taba Agent.\n StartTabaAgentServer(\n settings['SERVER_ENDPOINTS'],\n settings['AGENT_FLUSH_SECONDS'],\n settings['AGENT_PORT'],\n settings.get('AGENT_QUEUES_PER_SERVER', 1),\n settings.get('DOUBLE_AGENT', None))", "def main():\n\tcli = Cli()\n\tcli.run()", "def call_das_cli(*args):\n oldarg = deepcopy(sys.argv)\n sys.argv += args\n print sys.argv\n ret = das_cli.main()\n sys.argv = oldarg\n return ret", "def main():\n superrocket = SuperRocket.from_cmd_args()\n superrocket.run()", "def console_script(cls):\n cls().start(sys.argv[1:])", "def main(args=None):\n click.echo(\"Replace this message by putting your code into \"\n \"nxapi.cli.main\")\n click.echo(\"See click documentation at http://click.pocoo.org/\")\n return 0", "def main():\n log(\"NGG CLI\", color=\"green\", figlet=True)\n log(\"Welcome to NGG CLI!\", \"yellow\")", "def main():\n from dwim import DEFAULT_PROFILE, dwim\n # Initialize logging to the terminal.\n coloredlogs.install()\n # Define the command line option defaults.\n profile_script = DEFAULT_PROFILE\n # Parse the command line arguments.\n try:\n options, _ = getopt.getopt(sys.argv[1:], 'c:vqh', [\n 'config=', 'verbose', 'quiet', 'help',\n ])\n for option, value in options:\n if option in ('-c', '--config'):\n profile_script = value\n elif option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-h', '--help'):\n usage(__doc__)\n sys.exit(0)\n except Exception as e:\n warning(\"Error: Failed to parse command line arguments! (%s)\", e)\n sys.exit(1)\n # Execute the requested action(s).\n try:\n dwim(profile_script)\n except Exception:\n logger.exception(\"Caught a fatal exception! 
Terminating ..\")\n sys.exit(1)", "def main():\n parser = argparse.ArgumentParser()\n # Set the default entrypoint for nothing.\n parser.set_defaults(func=lambda x: None)\n # Configure the CLI for this script.\n appsec_wtf.cli.exec_poc.set_cli_opts(parser)\n\n # Parse the CLI arguments.\n args = parser.parse_args()\n # Execute the entry point of the command being executed.\n args.func(args)", "def main(args=None):\n click.echo(\"Replace this message by putting your code into \"\n \"virtualfilesystem.cli.main\")\n click.echo(\"See click documentation at http://click.pocoo.org/\")", "def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for arc.pi.reg@gmail.com.')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)", "def main():\n logging.setup(CONFIG, \"tmu\")\n tmu = TmuCli(sys.argv[1:])\n tmu.run()\n return tmu.result", "def main():\n parser = create_parser()\n args = parser.parse_args()\n\n # in-toto-mock should not be used to secure the supply chain but only to try\n # out in-toto-run with max. user feedback, hence we set a verbose log level\n LOG.setLevel(logging.INFO)\n\n try:\n in_toto.runlib.in_toto_mock(args.name, args.link_cmd, args.use_dsse)\n\n except Exception as e: # pylint: disable=broad-exception-caught\n LOG.error(\"(in-toto-mock) %s: %s\", type(e).__name__, e)\n sys.exit(1)\n\n sys.exit(0)", "def main():\n '''opts = parse_args()\n import os\n if not os.path.exists(opts.apk_path):\n print \"apk not exist\"\n return'''\n\n droidbot = DroidBot(app_path=\"/Users/maomao/Desktop/p/AppOffer_2.4.9_Demo.apk\",\n device_serial=\"08fd8e5ba2439881\",\n output_dir=\"/Users/maomao/Desktop/AppOffer_2.4.9_Demo\",\n env_policy=\"static\",\n event_policy=\"utg_dynamic\",\n with_droidbox=None,\n event_interval=None,\n event_duration=None,\n event_count=None,\n quiet=False,\n use_hierarchy_viewer=True)\n droidbot.start()\n return", "def main():\n\n parser = argparse.ArgumentParser(description=\"Redfish integration with RAID controller.\")\n _api_args(parser)\n _command_args(parser)\n\n args = parser.parse_args()\n #print(str(args))\n\n results = get_run_application_usecase(args.api_type,\n args.login_host,\n args.login_account,\n args.login_password,\n args,\n system=args.system,\n api_prefix=args.api_prefix)()\n with open('data.out', 'w') as writer:\n writer.write(results)\n print(results)", "def run_cli(self):\n self.cli_server = server.CliServer()\n self.cli_server.start()", "def start() -> None:\n ready = (\n locked_folder_path.exists()\n and testfiles_folder_path.exists()\n and output_files_folder.exists()\n )\n\n if not ready:\n log(\n \"\"\"ERROR:: Make sure you run the check command and setup the test files.\"\"\",\n \"red\",\n )\n log(\n \"\"\"run python cli.py setup-test-files\"\"\",\n \"green\",\n )\n exit(1)\n\n log(\n \"\\n **************************************** WELCOME **************************************** \\n\",\n \"white\",\n )\n log(\" Press 1 to Hide. \\n Press 2 to Recover. 
\\n Press 3 to Exit.\", \"blue\")\n option = click.prompt(\" Enter Choice \", type=int)\n if option == 1:\n randomize()\n log(\"\\n Thank You!\", \"green\")\n elif option == 2:\n derandomize()\n log(\"\\n Thank You!\", \"green\")\n elif option == 3:\n log(\"\\n Exiting...\", \"green\")\n exit()\n else:\n log(\"\\n Invalid Choice!\", \"red\")\n exit(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl simple addition example
def test_documentation_popxl_addition(self):
    filename = "simple_addition.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_calculator_add():\n assert calculator.calculator_add(4, 4) == 8\n assert calculator.calculator_add(0, 0) == 0\n assert calculator.calculator_add(0, 1) == 1\n assert calculator.calculator_add(0, -5) == -5", "def test_add_case1():\n q1 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q_sum = round(q1.number * 100, 10)\n q_temp = q1\n for _ in range(99):\n q_temp = q_temp + q1\n\n assert q_sum == q_temp.number, 'Sum implementation is buggy'", "def test_add_two_numbers(self):\n self.assertEqual(add(5, 9), 14)", "def test_add(self):\n p1 = Point(1, 2)\n p2 = Point(3, 4)\n p3 = p1 + p2\n assert p3.x == 4\n assert p3.y == 6", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add_operators(self, op0, op1):\n op = op0 + op1\n assert isinstance(op, Sum)\n assert qml.equal(op[0], op0)\n assert qml.equal(op[1], op1)", "def test_add_numbers(self):\n a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def test_add_case2():\n q1 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q2 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q3 = session4.Qualean(random.sample(qualean_inputs, 1)[0])\n q_temp = q1 + q2 + q3\n\n q_sum = round(q1.number + q2.number + q3.number, 10)\n assert q_sum == q_temp.number, 'Sum implementation is buggy'", "def test_two_numbers(self):\n self.assertEqual(add(3, 4), 7)", "def test_add_more_zero(self):\n result = calculation.add(4,5)\n self.assertEqual(result,9)", "def test_unaryPlus(self):\r\n self.flakes('+1')", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def test_addition(l1, l2):\n result = addTwoNumbers(l1, l2)\n assert result.val == '5'\n assert result.next.val == '8'\n assert result.next.next.val == '0'\n assert result.next.next.next.val == '1'", "def test_arithmetic(self):\n for test in [\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Int(5), right = sir.Int(6)), SymbolType.Integer),\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Bytes('05'), right = sir.Bytes('06')), SymbolType.Integer),\n ]:\n self._test(test)", "def test_add(self):\n self.assertEqual(3, add(1, 2))\n self.assertNotEqual(3, add(2, 2))", "def test_q43_add_3rd_element():\n result = q43_add_3rd_element([1, 2, 3, 4, 5, 6, 7, 8, 9])\n assert result == 12", "def testAddTo(self):\n self.true_result = [2, 2, 2, 2, 2]\n self.test_result = VectorOps.addTo(self.target, self.operand)\n self.assertEqual(self.true_result, self.test_result)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl basic subgraph example
def test_documentation_popxl_basic_subgraph(self):
    filename = "basic_graph.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_subgraph(self):\n query_string = [('seedNodes', 'seedNodes_example')]\n response = self.client.open(\n '/NCATS_GNBR/GNBR_API/1.0.0/getInducedSubgraph',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def main():\n\n # Build the test graph\n n1 = Node(1)\n n2 = Node(2)\n n3 = Node(3)\n n4 = Node(4)\n n5 = Node(5)\n n6 = Node(6)\n n1.child_nodes = [n2, n3, n4]\n n2.child_nodes = [n5, n6]\n n5.child_nodes = [n1]\n n6.child_nodes = [n4]\n\n # Traverse the graph\n walk(n1)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def subgraph(self, nodeids):\n _eps, _vars = self._eps, self._vars\n _hcons, _icons = self._hcons, self._icons\n top = index = xarg = None\n eps = [_eps[nid] for nid in nodeids]\n lbls = set(ep[2] for ep in eps)\n hcons = []\n icons = []\n subvars = {}\n if self.top:\n top = self.top\n tophc = _hcons.get(top, None)\n if tophc is not None and tophc[2] in lbls:\n subvars[top] = {}\n elif top not in lbls:\n top = None # nevermind, set it back to None\n # do index after we know if it is an EPs intrinsic variable.\n # what about xarg? I'm not really sure.. just put it in\n if self.xarg:\n xarg = self.xarg\n subvars[self.xarg] = _vars[self.xarg]['props']\n subvars.update((lbl, {}) for lbl in lbls)\n subvars.update(\n (var, _vars[var]['props'])\n for ep in eps for var in ep[3].values()\n if var in _vars\n )\n if self.index in subvars:\n index = self.index\n # hcons and icons; only if the targets exist in the new subgraph\n for var in subvars:\n hc = _hcons.get(var, None)\n if hc is not None and hc[2] in lbls:\n hcons.append(hc)\n for ic in _icons.get(var, []):\n if ic[0] in subvars and ic[2] in subvars:\n icons.append(ic)\n return Xmrs(\n top=top, index=index, xarg=xarg,\n eps=eps, hcons=hcons, icons=icons, vars=subvars,\n lnk=self.lnk, surface=self.surface, identifier=self.identifier\n )", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', 
columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def render_subgraph(self, ontol, nodes, **args):\n subont = ontol.subontology(nodes, **args)\n return self.render(subont, **args)", "def test_get_subgraph_by_annotation_values(self):\n graph = BELGraph()\n graph.annotation_list[\"Subgraph\"] = set(\"ABCDE\")\n\n a, b, c, d = [protein(namespace=\"test\", name=n()) for _ in range(4)]\n\n k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\"}})\n\n k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"B\"}})\n\n k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\", \"C\", \"D\"}})\n\n k4 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"C\", \"D\"}})\n\n subgraph = get_subgraph_by_annotation_value(graph, \"Subgraph\", {\"A\", \"C\"})\n self.assertIsInstance(subgraph, BELGraph)\n\n self.assertIn(a, subgraph)\n self.assertIn(b, subgraph)\n self.assertIn(b, subgraph[a])\n self.assertIn(k1, subgraph[a][b])\n self.assertNotIn(k2, subgraph[a][b])\n self.assertIn(k3, subgraph[a][b])\n self.assertIn(k4, subgraph[a][b])", "def test_depth_traversal_multi_node(graph_multi_node):\n assert 'gn5' in graph_multi_node.depth_first_traversal('gn1')", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_get_subgraph_by_annotation_value(self):\n graph = BELGraph()\n graph.annotation_url[\"Subgraph\"] = n()\n a, b, c, d = [protein(namespace=\"test\", name=n()) for _ in range(4)]\n\n k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\"}})\n\n k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"B\"}})\n\n k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\", \"C\", \"D\"}})\n\n subgraph = get_subgraph_by_annotation_value(graph, \"Subgraph\", \"A\")\n self.assertIsInstance(subgraph, BELGraph)\n\n self.assertIn(a, subgraph)\n self.assertIn(b, subgraph)\n self.assertIn(b, subgraph[a])\n self.assertIn(k1, subgraph[a][b])\n self.assertNotIn(k2, subgraph[a][b])\n self.assertIn(k3, subgraph[a][b])", "def graph_test(self):\n raise NotImplementedError", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def visualize_subgraph(graph, node, prog = \"twopi\", **kwargs):\n subgraph = get_subgraph(graph, node)\n nx.draw_graphviz(subgraph, prog, **kwargs)", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' 
-> '.join([f\"{v}\" for v in p]))", "def test_get_vertex_from_subvertex(self):\n subvertices = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n\n subvert1 = PartitionedVertex(None, \"\")\n subvert2 = PartitionedVertex(None, \"\")\n\n graph_mapper = GraphMapper()\n vert = TestVertex(10, \"Some testing vertex\")\n\n vertex_slice = Slice(0, 1)\n graph_mapper.add_subvertex(subvert1, vertex_slice, vert)\n vertex_slice = Slice(2, 3)\n graph_mapper.add_subvertex(subvert2, vertex_slice, vert)\n\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert1))\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert2))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[0]))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[1]))", "def graph(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl replication example
def test_documentation_popxl_replication(self):
    filename = "replication.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_replicate_pg_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def generate_replica(self):", "def setup_replication(master_vals, slave_vals, rpl_user,\n options, test_db=None):\n verbosity = options.get(\"verbosity\", 0)\n\n conn_options = {\n 'src_name': \"master\",\n 'dest_name': 'slave',\n 'version': \"5.0.0\",\n 'unique': True,\n }\n servers = connect_servers(master_vals, slave_vals, conn_options)\n master = servers[0]\n slave = servers[1]\n\n rpl_options = options.copy()\n rpl_options['verbosity'] = verbosity > 0\n\n # Create an instance of the replication object\n rpl = Replication(master, slave, rpl_options)\n errors = rpl.check_server_ids()\n for error in errors:\n print error\n\n # Check for server_id uniqueness\n if verbosity > 0:\n print \"# master id = %s\" % master.get_server_id()\n print \"# slave id = %s\" % slave.get_server_id()\n\n errors = rpl.check_server_uuids()\n for error in errors:\n print error\n\n # Check for server_uuid uniqueness\n if verbosity > 0:\n print \"# master uuid = %s\" % master.get_server_uuid()\n print \"# slave uuid = %s\" % slave.get_server_uuid()\n\n # Check InnoDB compatibility\n if verbosity > 0:\n print \"# Checking InnoDB statistics for type and version conflicts.\"\n\n errors = rpl.check_innodb_compatibility(options)\n for error in errors:\n print error\n\n # Checking storage engines\n if verbosity > 0:\n print \"# Checking storage engines...\"\n\n errors = rpl.check_storage_engines(options)\n for error in errors:\n print error\n\n # Check master for binary logging\n print \"# Checking for binary logging on master...\"\n errors = rpl.check_master_binlog()\n if not errors == []:\n raise UtilError(errors[0])\n\n # Setup replication\n print \"# Setting up replication...\"\n if not rpl.setup(rpl_user, 10):\n raise UtilError(\"Cannot setup replication.\")\n\n # Test the replication setup.\n if test_db:\n rpl.test(test_db, 10)\n\n print \"# ...done.\"", "def test_replicate_mariadb_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", 
self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_deployment(self):\n pass", "def test_replication(self):\n self.webHDFS.create(TEST_DIR_PATH + '/foo.txt', \"foobar\", True)\n self.webHDFS.set_replication(TEST_DIR_PATH + '/foo.txt', 2)\n file_status = self.webHDFS.status(TEST_DIR_PATH + '/foo.txt')\n self.assertEqual(file_status['replication'], 2)", "def test_backup_restore_with_xdcr(self):\n rest_src = RestConnection(self.backupset.cluster_host)\n rest_dest = RestConnection(self.servers[1])\n\n try:\n rest_src.remove_all_replications()\n rest_src.remove_all_remote_clusters()\n kwargs = {}\n if self.input.param(\"enforce_tls\", False):\n kwargs[\"demandEncryption\"] = 1\n trusted_ca = rest_dest.get_trusted_CAs()[-1][\"pem\"]\n kwargs[\"certificate\"] = trusted_ca\n rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,\n self.backupset.cluster_host_password, \"C2\", **kwargs)\n rest_dest.create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(10)\n repl_id = rest_src.start_replication('continuous', 'default', \"C2\")\n if repl_id is not None:\n self.log.info(\"Replication created successfully\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n tasks = self._async_load_all_buckets(self.master, gen, \"create\", 0)\n\n reps = rest_src.get_replications()\n start_time = datetime.datetime.now()\n while reps[0][\"status\"] != \"running\" or reps[0][\"changesLeft\"] > 0:\n if (datetime.datetime.now() - start_time).total_seconds() > 600:\n self.fail(\"Timed out waiting for replications\")\n 
self.sleep(10, \"Waiting for replication...\")\n reps = rest_src.get_replications()\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n for task in tasks:\n task.result()\n finally:\n rest_dest.delete_bucket()", "def test_clone_scenario(self):\n pass", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_system(self):\n pass", "def test_replicate_pg_to_rs(self):\n assertions.assert_run_tap_success('postgres_to_rs', 'redshift', ['singer'])\n # Add an object reference to avoid to use classmethod. TODO: Add more real tests\n assert self.e2e == self.e2e", "def test_remix(self):\n reference = Nations()\n for random_state in range(20):\n derived = reference.remix(random_state=random_state)\n self.assertEqual(reference.training.num_triples, derived.training.num_triples)\n self.assertFalse((reference.training.mapped_triples == derived.training.mapped_triples).all())\n\n self.assertEqual(reference.testing.num_triples, derived.testing.num_triples)\n self.assertFalse((reference.testing.mapped_triples == derived.testing.mapped_triples).all())\n\n self.assertEqual(reference.validation.num_triples, derived.validation.num_triples)\n self.assertFalse((reference.validation.mapped_triples == derived.validation.mapped_triples).all())", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl create multiple subgraph example
def test_documentation_popxl_create_multi_subgraph(self):
    filename = "create_multi_graphs_from_same_func.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_subgraph(self):\n query_string = [('seedNodes', 'seedNodes_example')]\n response = self.client.open(\n '/NCATS_GNBR/GNBR_API/1.0.0/getInducedSubgraph',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def subgraph(self, nodeids):\n _eps, _vars = self._eps, self._vars\n _hcons, _icons = self._hcons, self._icons\n top = index = xarg = None\n eps = [_eps[nid] for nid in nodeids]\n lbls = set(ep[2] for ep in eps)\n hcons = []\n icons = []\n subvars = {}\n if self.top:\n top = self.top\n tophc = _hcons.get(top, None)\n if tophc is not None and tophc[2] in lbls:\n subvars[top] = {}\n elif top not in lbls:\n top = None # nevermind, set it back to None\n # do index after we know if it is an EPs intrinsic variable.\n # what about xarg? I'm not really sure.. just put it in\n if self.xarg:\n xarg = self.xarg\n subvars[self.xarg] = _vars[self.xarg]['props']\n subvars.update((lbl, {}) for lbl in lbls)\n subvars.update(\n (var, _vars[var]['props'])\n for ep in eps for var in ep[3].values()\n if var in _vars\n )\n if self.index in subvars:\n index = self.index\n # hcons and icons; only if the targets exist in the new subgraph\n for var in subvars:\n hc = _hcons.get(var, None)\n if hc is not None and hc[2] in lbls:\n hcons.append(hc)\n for ic in _icons.get(var, []):\n if ic[0] in subvars and ic[2] in subvars:\n icons.append(ic)\n return Xmrs(\n top=top, index=index, xarg=xarg,\n eps=eps, hcons=hcons, icons=icons, vars=subvars,\n lnk=self.lnk, surface=self.surface, identifier=self.identifier\n )", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def render_subgraph(self, ontol, nodes, **args):\n subont = ontol.subontology(nodes, **args)\n return 
self.render(subont, **args)", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def test_get_subgraph_by_annotation_values(self):\n graph = BELGraph()\n graph.annotation_list[\"Subgraph\"] = set(\"ABCDE\")\n\n a, b, c, d = [protein(namespace=\"test\", name=n()) for _ in range(4)]\n\n k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\"}})\n\n k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"B\"}})\n\n k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\", \"C\", \"D\"}})\n\n k4 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"C\", \"D\"}})\n\n subgraph = get_subgraph_by_annotation_value(graph, \"Subgraph\", {\"A\", \"C\"})\n self.assertIsInstance(subgraph, BELGraph)\n\n self.assertIn(a, subgraph)\n self.assertIn(b, subgraph)\n self.assertIn(b, subgraph[a])\n self.assertIn(k1, subgraph[a][b])\n self.assertNotIn(k2, subgraph[a][b])\n self.assertIn(k3, subgraph[a][b])\n self.assertIn(k4, subgraph[a][b])", "def graph(self):", "def main():\n\n # Build the test graph\n n1 = Node(1)\n n2 = Node(2)\n n3 = Node(3)\n n4 = Node(4)\n n5 = Node(5)\n n6 = Node(6)\n n1.child_nodes = [n2, n3, n4]\n n2.child_nodes = [n5, n6]\n n5.child_nodes = [n1]\n n6.child_nodes = [n4]\n\n # Traverse the graph\n walk(n1)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def setUp(self):\n self.graph = BELGraph()\n\n self.graph.namespace_url[\"test\"] = test_namespace_url\n self.graph.annotation_url[\"subgraph\"] = test_annotation_url\n\n self.graph.add_increases(\n a,\n b,\n citation=citation,\n evidence=evidence,\n annotations={\"subgraph\": {\"1\", \"2\"}},\n )\n self.graph.add_increases(a, c, citation=citation, evidence=evidence, annotations={\"subgraph\": {\"1\"}})\n self.graph.add_increases(\n b,\n d,\n citation=citation,\n evidence=evidence,\n annotations={\"subgraph\": {\"1\", \"2\"}},\n )\n self.graph.add_increases(a, d, citation=citation, evidence=evidence, annotations={\"subgraph\": {\"2\"}})\n self.graph.add_increases(c, d, citation=citation, evidence=evidence)", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def test_get_subgraph_by_annotation_value(self):\n graph = BELGraph()\n graph.annotation_url[\"Subgraph\"] = n()\n a, b, c, d = [protein(namespace=\"test\", name=n()) for _ in range(4)]\n\n k1 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\"}})\n\n k2 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"B\"}})\n\n k3 = graph.add_increases(a, b, citation=n(), evidence=n(), annotations={\"Subgraph\": {\"A\", \"C\", \"D\"}})\n\n subgraph = get_subgraph_by_annotation_value(graph, \"Subgraph\", \"A\")\n self.assertIsInstance(subgraph, 
BELGraph)\n\n self.assertIn(a, subgraph)\n self.assertIn(b, subgraph)\n self.assertIn(b, subgraph[a])\n self.assertIn(k1, subgraph[a][b])\n self.assertNotIn(k2, subgraph[a][b])\n self.assertIn(k3, subgraph[a][b])", "def create_four_subplots():\n pass", "def test_depth_traversal_multi_node(graph_multi_node):\n assert 'gn5' in graph_multi_node.depth_first_traversal('gn1')", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n 
of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the code loading example
def test_documentation_popxl_code_loading(self):
    filename = "code_loading.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scripts = os.listdir(example_dir)\n for script in example_scripts:\n if script.endswith('.py'):\n fname = os.path.join(example_dir, script)\n try:\n subprocess.check_call(['python', fname])\n except subprocess.CalledProcessError:\n raise AssertionError('Example {} failed'.format(fname))", "def test_main(self):\n pass", "def test_jupyter_example():\n FIXTURE.check(EXAMPLE_DIR / \"simple_example.ipynb\")", "def test_readmeExamplesAreExecutable(self) -> None:\n from tests.source_files import readme_examples", "def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def test_documentation_popxl_nested_code_loading(self):\n filename = \"code_loading_nested.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test_demo_runs(self):\n self.star.run_demo()", "def test_load_data(self):\n pass", "def test():\n\timport unittest\n\ttests = unittest.TestLoader().discover('tests')#loads the tests\n\tunittest.TextTestRunner(verbosity=2).run(tests)#runs the tests", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def Run(self, test_definition):", "def test_helloworld(self):\n self._launch('helloworld/helloworld.py')\n launch_response = self.test_client.perform_launch()\n self.assertTrue('hello' in launch_response.text)", "def test_execution(self):\n\t\tself.execute(\"casapy_3c129_tutorial\")", "def test_example():\n exceptions = {\n 'ocds_budget_and_spend_extension',\n }\n\n if repo_name in exceptions:\n return\n\n path = os.path.join(cwd, 'README.md')\n if os.path.isfile(path):\n with open(path) as f:\n readme = f.read()\n\n # ocds_enquiry_extension doesn't have an \"Example\" heading.\n if not re.search(r'\\bexamples?\\b', readme, re.IGNORECASE) or '```json' not in readme:\n warnings.warn('{} expected an example'.format(path))\n else:\n assert False, 'expected a README.md file'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the nested code loading example
def test_documentation_popxl_nested_code_loading(self):
    filename = "code_loading_nested.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_load_data(self):\n pass", "def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_yaml_loader(self):\n self.loader_test('obo_sample.yaml', Package, yaml_loader)", "def test_json_loader(self):\n self.loader_test('obo_sample.json', Package, json_loader)", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_main(self):\n pass", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def Run(self, test_definition):", "def test_nested_lazy_fixture(executed_docstring_source):\n\n assert_that(executed_docstring_source.allure_report,\n has_test_case(\"test_nested_lazy_fixture_example\",\n has_container(executed_docstring_source.allure_report,\n has_before(\"my_lazy_fixture\")\n ),\n )\n )", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def test_documentation_popxl_nested_session_contexts(self):\n filename = \"nested_session_contexts.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_jupyter_example():\n FIXTURE.check(EXAMPLE_DIR / \"simple_example.ipynb\")", "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def fixture_example_data():\n import_example_data()", "def load_teststep(raw_testinfo):\n # reference api\n if \"api\" in raw_testinfo:\n __extend_with_api_ref(raw_testinfo)\n\n # TODO: reference proc functions\n # elif \"func\" in raw_testinfo:\n # pass\n\n # reference testcase\n elif \"testcase\" in raw_testinfo:\n __extend_with_testcase_ref(raw_testinfo)\n\n # define directly\n else:\n pass\n\n return raw_testinfo", "def test_compute_glycemic_load(self):\n pass", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the nested Session contexts example
def test_documentation_popxl_nested_session_contexts(self):
    filename = "nested_session_contexts.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_to_session(self):\n pass", "def test_session(self):\n try:\n self.client.connect()\n session = self.client.session()\n if self.client.admin_party:\n self.assertIsNone(session)\n else:\n self.assertEqual(session['userCtx']['name'], self.user)\n finally:\n self.client.disconnect()", "def test_list_my_sessions(self):\n pass", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)", "def session_fixture():\n return 10", "def test_childSession(self):\n session = self.mdk.session()\n session.setProperty(\"other\", 123)\n session._context.tick()\n session._context.tick()\n session._context.tick()\n session.setTimeout(13.0)\n session2 = self.mdk.derive(session.externalize())\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)\n self.assertEqual(session2.getRemainingTime(), None)\n self.assertSessionHas(session2, session2._context.traceId, [1],\n other=123)", "async def _build_context(self, session: UseCaseSession) -> UseCaseContext:", "def db_subsession(session):\n try:\n with session.begin_nested():\n yield\n except:\n logger.exception(\"Problem with DB sub-session, rolling back.\")", "def test_get_open_session_detail_1(self):\n\n data = self.session.get_open_session_detail()", "def test_do_login(self):\r\n\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n self.assertNotIn(CURR_USER_KEY, session)\r\n do_login(u1)\r\n self.assertEqual(session[CURR_USER_KEY], u1.id)", "def test_membersuite_session(self):\n session = MemberSuiteSession()\n self.assertTrue(session.session_id)", "def test_distinct_sessions_cookie(self):\n\n sess1 = None\n sess2 = None\n with self.app_sess1 as c:\n sess1 = c.get('/').data\n\n with self.app_sess2 as c:\n sess2 = c.get('/').data\n\n self.assertNotEqual(sess1, sess2)", "def test_client_custom_session():\n c_session = requests.Session()\n client = ConfigureClients(custom_session=c_session)\n assert client.session == c_session", "def test_joinSession(self):\n session = self.mdk.session()\n session.setProperty(\"key\", 456)\n session.setProperty(\"key2\", [456, {\"zoo\": \"foo\"}])\n session2 = self.mdk.join(session.externalize())\n self.assertSessionHas(session2, session._context.traceId, [1, 0],\n key=456, key2=[456, {\"zoo\": \"foo\"}])", "def test_session_auth_token(self):\n\n sess1 = None\n sess2 = None\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret = c.get('/', headers=test_header)\n sess1 = ret.data\n\n with self.app_sess2 as c:\n ret = c.get('/', headers=test_header)\n sess2 = ret.data\n\n self.assertEqual(sess1, sess2)", "def test_disable_my_other_sessions(self):\n pass", "def test_sessions():\n CHECKS = (check_correct_usage, check_expiration, check_bad_cookie, check_various_session_sizes)\n for 
no_datastore in (False, True):\n if no_datastore:\n test_db = 'without'\n else:\n test_db = 'with'\n for cot in (0, 10*1024, 2**30):\n if cot == 0:\n test_cookie = 'no data stored in cookies'\n elif cot == 2**30:\n test_cookie = 'data only stored in cookies'\n else:\n test_cookie = 'store data in cookies when its encoded size<=%dB' % cot\n for check in CHECKS:\n logger.debug('\\n\\n' + '*'*50)\n logger.debug('Running %s %s datastore and %s' % (check.__name__, test_db, test_cookie))\n yield check, no_datastore, cot", "def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl call_with_info example
def test_documentation_popxl_call_with_info(self):
    filename = "call_with_info.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_main_info(info):\n info.return_value.infotype = \"list\"\n argv = [\"info\", \"levels\"]\n args = cli._parse_args(argv)\n cli._execute(args)\n\n info.return_value.infotype = \"total_precipitation\"\n argv = [\"info\", \"total_precipitation\"]\n args = cli._parse_args(argv)\n cli._execute(args)", "def test_get_info(self):\n pass", "def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)", "def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)", "def test_get_info_function() -> None:\n current_directory = Path.cwd()\n with zipfile.ZipFile(\n current_directory / 'app' / 'tests' / 'files' / 'oneFile.zip') as zip_object:\n res = get_info_about_file(zip_object, 'dotnetfx.exe')\n assert res == {'path': 'dotnetfx.exe', 'size': 21823560}", "def testInfo(self):\n self.assertTrue(self.dispatcher.info('libc6', 'sid', 'i386'))\n self.assertRaises(PackageNotFoundError, self.dispatcher.info, 'nosuchpackage', 'sid', 'i386')\n self.assertRaises(PackageNotFoundError, self.dispatcher.info, 'libc6', 'sid', 'ia64')", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def hxlinfo_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n parser = make_args(\n 'Display JSON-formatted metadata for a data source (does not have to be HXLated).',\n hxl_output=False\n )\n\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n json.dump(hxl.input.info(args.infile or stdin, make_input_options(args)), stdout, indent=2, ensure_ascii=False)\n\n return EXIT_OK", "def test_info(self):\n\n # Run info\n revt = core.svn_opt_revision_t()\n revt.kind = core.svn_opt_revision_head\n client.info(REPOS_URL, revt, revt, self.info_receiver,\n False, self.client_ctx)\n\n # Check output from running info. 
This also serves to verify that\n # the internal 'info' object is still valid\n self.assertEqual(self.path, os.path.basename(REPOS_PATH))\n self.info.assert_valid()\n self.assertEqual(self.info.URL, REPOS_URL)\n self.assertEqual(self.info.repos_root_URL, REPOS_URL)", "def test_print_info(clarisse):\n info = \"test print info\"\n assert bool(clarisse.print_info(info)) is False", "def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)", "def _handle_info_response(self, resp, info, prev_info):\r\n if info.line_num != prev_info.line_num:\r\n return\r\n\r\n if resp['calltip']:\r\n info.editor.show_calltip('Arguments', resp['calltip'],\r\n signature=True,\r\n at_position=prev_info.position)\r\n\r\n if resp['name']:\r\n self.send_to_inspector.emit(\r\n resp['name'], resp['argspec'],\r\n resp['note'], resp['docstring'],\r\n not prev_info.auto)", "def test_get_patch_info_returns(self):\n # This test assumes IIQ isn't installed, thus the pile of errors that'll\n # occur shouldn't prevent us from getting a PatchInfo object\n fake_log = MagicMock()\n patch_info = versions.get_patch_info('bogus-patch.tgz', fake_log)\n\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))\n self.assertEqual(patch_info.iiq_dir, '')", "def test_get_call_args(self):\n from puresnmp.x690.types import Integer, OctetString, Sequence, ObjectIdentifier\n from puresnmp.pdu import GetRequest\n from puresnmp.const import Version\n data = readbytes('get_sysdescr_01.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n GetRequest(0, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n get('::1', 'public', '1.2.3')\n mck.assert_called_with('::1', 161, bytes(packet))", "def export_info_to_excel_format(ptu_obj: extract_data.PtuWorkBook, ptu_workbook: Workbook) -> None:\r\n\r\n class Coordinate:\r\n \"\"\"\r\n Class for representing coordinate of an excel workbook cell.\r\n \"\"\"\r\n\r\n def __init__(self, y, x):\r\n self.row = y\r\n self.column = x\r\n\r\n def write_cell_info(cell, value, horizontal_alignment=\"left\", font_name=\"Calibri\", font_size=11, is_bold=False,\r\n is_wrapped_text=False, style=\"Normal\", is_italic=False):\r\n \"\"\"\r\n This function is used almost in every other functions for writing info.\r\n Input is cell features and value will be written into cell according to features.\r\n Some default features are also considered.\r\n \"\"\"\r\n try:\r\n cell.value = value\r\n except excel_exceptions.IllegalCharacterError: # When value string contains a character which is not writeable\r\n print(value)\r\n cell.value = \"IllegalCharacterError while parsing\"\r\n is_italic = True\r\n cell.style = style\r\n cell.font = Font(name=font_name, size=font_size, bold=is_bold, italic=is_italic)\r\n cell.alignment = Alignment(horizontal=horizontal_alignment, vertical=\"center\", wrap_text=is_wrapped_text)\r\n\r\n def get_coordinate(defined_name):\r\n \"\"\"\r\n There are some cells in excel workbooks 
that can be defined by a name.\r\n This function returns coordinate of a cell with given defined name and also name of related workbook.\r\n \"\"\"\r\n defined_names = ptu_workbook.defined_names\r\n for title, coord in defined_names[defined_name].destinations:\r\n cell = ptu_workbook[title][coord]\r\n coordinate = Coordinate(cell.row, cell.column)\r\n return title, coordinate\r\n\r\n def get_cell(cell_name):\r\n \"\"\"\r\n :returns cell of excel workbook with defined name \"cell_name\"\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(cell_name)\r\n return ptu_workbook[worksheet_title].cell(coord.row, coord.column)\r\n\r\n def write_preface_info():\r\n \"\"\"\r\n Create and write information of preface worksheet\r\n \"\"\"\r\n write_cell_info(get_cell(\"Purpose\"), value=ptu_obj.preface.purpose)\r\n write_cell_info(get_cell(\"Processor\"), value=ptu_obj.preface.processor)\r\n write_cell_info(get_cell(\"Tool_chain\"), value=ptu_obj.preface.tool_chain)\r\n write_cell_info(get_cell(\"HEADER.module_name\"), value=ptu_obj.preface.header.module_name,\r\n horizontal_alignment=\"center\")\r\n write_cell_info(get_cell(\"HEADER.module_version\"), value=ptu_obj.preface.header.module_version,\r\n horizontal_alignment=\"center\")\r\n write_cell_info(get_cell(\"HEADER.test_plan_version\"), value=ptu_obj.preface.header.test_plan_version,\r\n horizontal_alignment=\"center\")\r\n\r\n def write_include_info():\r\n \"\"\"\r\n Write information of included header files in include worksheet\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"include\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n for counter in range(len(ptu_obj.include)):\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=ptu_obj.include[counter])\r\n\r\n def write_comment_info():\r\n \"\"\"\r\n Write comments in COMMENT worksheet\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"COMMENT\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n for counter in range(len(ptu_obj.comment)):\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=ptu_obj.comment[counter])\r\n\r\n def write_user_code_info():\r\n \"\"\"\r\n\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"USER_CODE\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n\r\n counter = 0\r\n if ptu_obj.user_code:\r\n cell = worksheet.cell(coord.row, coord.column)\r\n write_cell_info(cell, value=\"Before Services\", horizontal_alignment=\"center\", style=\"Input\", is_bold=True)\r\n\r\n counter += 1\r\n user_code_start_row = coord.row + counter\r\n for user_code in ptu_obj.user_code:\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=user_code.code)\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column - 1)\r\n write_conditions(cell, user_code.conditions)\r\n\r\n counter += 1\r\n user_code_end_row = coord.row + counter - 1\r\n worksheet.row_dimensions.group(user_code_start_row, user_code_end_row, hidden=True)\r\n\r\n services_sheet_title, services_sheet_row_num = get_coordinate(\"SERVICE\")\r\n services_sheet_row_num = services_sheet_row_num.row - 1\r\n\r\n for service in ptu_obj.services:\r\n services_sheet_row_num += 1\r\n if not service.has_user_code():\r\n continue\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=\"In Service \\\"\" + service.name + \"\\\"\", is_bold=True, style=\"Input\",\r\n horizontal_alignment=\"center\")\r\n cell.hyperlink = \"#\" + services_sheet_title + \"!A\" 
+ str(services_sheet_row_num)\r\n ptu_workbook[services_sheet_title][\"A\" + str(services_sheet_row_num)].hyperlink = \\\r\n \"#\" + worksheet.title + \"!B\" + str(cell.row)\r\n\r\n counter += 1\r\n service_user_code_start_row = coord.row + counter\r\n if service.user_code:\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=\"Common\", style=\"Output\", horizontal_alignment=\"center\")\r\n\r\n counter += 1\r\n for user_code in service.user_code:\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=user_code.code)\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column - 1)\r\n write_conditions(cell, user_code.conditions)\r\n\r\n counter += 1\r\n\r\n for test in service.test_list:\r\n services_sheet_row_num += 1\r\n element = test.element\r\n if not element.user_code:\r\n continue\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=\"In Test \\\"\" + test.name + \"\\\"\", style=\"Output\",\r\n horizontal_alignment=\"center\")\r\n cell.hyperlink = \"#TEST_ROW\" + str(services_sheet_row_num) + \"!A1\"\r\n\r\n test_worksheet = ptu_workbook[\"TEST_ROW\" + str(services_sheet_row_num)]\r\n test_worksheet[\"A\" + str(test_worksheet.max_row - 1)].hyperlink = \\\r\n \"#\" + worksheet.title + \"!B\" + str(cell.row)\r\n\r\n counter += 1\r\n for user_code in element.user_code:\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=user_code.code)\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column - 1)\r\n write_conditions(cell, user_code.conditions)\r\n\r\n counter += 1\r\n service_user_code_end_row = coord.row + counter - 1\r\n worksheet.row_dimensions.group(service_user_code_start_row, service_user_code_end_row, hidden=True)\r\n\r\n def write_conditions(cell, conditions, style=\"Normal\"):\r\n \"\"\"\r\n This function is used for writing conditions of any type of data in the given cell\r\n \"\"\"\r\n all_conditions = \",\".join(conditions) # Concatenate all conditions\r\n write_cell_info(cell, value=all_conditions, style=style)\r\n\r\n def write_stub_definitions_info():\r\n \"\"\"\r\n Writes all stub definitions and their content in STUBS sheet\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"DEFINE_STUB\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n row_counter = 0\r\n for define_stub in ptu_obj.stub_definitions:\r\n # Writing define stub name and conditions\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=define_stub.name, horizontal_alignment=\"center\", is_bold=True, style=\"Output\")\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_conditions(cell, define_stub.conditions, style=\"Output\")\r\n\r\n # Writing Stubs\r\n row_counter += 1\r\n define_stub_row_start = coord.row + row_counter\r\n for stub in define_stub.stub_list:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=stub.stub_definition, is_wrapped_text=True)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_conditions(cell, stub.conditions)\r\n\r\n row_counter += 1\r\n\r\n # Grouping Stub Definitions\r\n define_stub_row_end = coord.row + row_counter - 1\r\n worksheet.row_dimensions.group(define_stub_row_start, define_stub_row_end, hidden=True)\r\n\r\n def write_initialisation_info():\r\n \"\"\"\r\n Initialisation in ptu files is optional.\r\n If there is an initialisation scope in 
ptu file, this function writes it in INITIALISATION sheet.\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"INITIALISATION\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n\r\n counter = 0\r\n if ptu_obj.initialisation:\r\n for initialisation in ptu_obj.initialisation:\r\n cell = worksheet.cell(coord.row + counter, coord.column)\r\n write_cell_info(cell, value=initialisation.description)\r\n\r\n cell = worksheet.cell(coord.row + counter, coord.column + 1)\r\n write_conditions(cell, initialisation.conditions)\r\n\r\n counter += 1\r\n\r\n def write_test_case_info(worksheet, coord, test_case):\r\n \"\"\"\r\n Any test has multiple parameters as test cases.\r\n For each of test cases in a single test, this function is called.\r\n \"\"\"\r\n cell = worksheet.cell(coord.row, coord.column)\r\n write_cell_info(cell, value=test_case.param_type)\r\n\r\n cell = worksheet.cell(coord.row, coord.column + 1)\r\n write_cell_info(cell, value=test_case.param_name)\r\n\r\n cell = worksheet.cell(coord.row, coord.column + 2)\r\n write_cell_info(cell, value=test_case.init)\r\n\r\n cell = worksheet.cell(coord.row, coord.column + 3)\r\n write_cell_info(cell, value=test_case.ev)\r\n\r\n def write_environments_info():\r\n \"\"\"\r\n Environments in ptu files are optional.\r\n This function writes all environments and their test cases in ENVIRONMENT worksheet\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"ENVIRONMENT\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n row_counter = 0\r\n for environment in ptu_obj.environments:\r\n # Writing Environment name and conditions\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=environment.name, horizontal_alignment=\"center\", is_bold=True, style=\"Output\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ENVIRONMENT_WIDTH - 1)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + const.ENVIRONMENT_WIDTH)\r\n write_conditions(cell, environment.conditions, style=\"Output\")\r\n\r\n # Writing test cases\r\n row_counter += 1\r\n environment_row_start = coord.row + row_counter\r\n for test_case in environment.test_case_list:\r\n write_test_case_info(worksheet, Coordinate(coord.row + row_counter, coord.column), test_case)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + const.ENVIRONMENT_WIDTH)\r\n write_conditions(cell, test_case.conditions)\r\n\r\n row_counter += 1\r\n\r\n # Grouping Environments\r\n environment_row_end = coord.row + row_counter - 1\r\n worksheet.row_dimensions.group(environment_row_start, environment_row_end, hidden=True)\r\n\r\n def write_test_info(test, row, service_name):\r\n \"\"\"\r\n Every service has multiple tests. Data in each of them should be written in a new sheet.\r\n This function writes info of a single test in a new sheet. It is called in a loop in write_service_info func.\r\n :param test: Info of this test is written in a new sheet.\r\n :param row: Row number of test in service sheet. 
Used for naming test sheet and also for hyperlink.\r\n :param service_name: Service name is shown at the top row of the sheet.\r\n \"\"\"\r\n # Create new worksheet\r\n worksheet = ptu_workbook.create_sheet(title=\"TEST_ROW\" + str(row))\r\n coord = Coordinate(1, 1)\r\n row_counter = 1\r\n\r\n \"\"\" Writing Title Part\"\"\"\r\n\r\n cell = worksheet.cell(coord.row, coord.column)\r\n title = \"SERVICE NAME : \" + service_name\r\n title += \" \"\r\n title += \"===================================\"\r\n title += \" \"\r\n title += \"TEST NAME : \" + test.name\r\n write_cell_info(cell, value=title, style=\"Check Cell\", font_size=14, is_bold=True,\r\n horizontal_alignment=\"center\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n # Freezing top row of sheet\r\n worksheet.freeze_panes = 'A2'\r\n\r\n \"\"\" Writing Comment part \"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"COMMENTS\", horizontal_alignment=\"center\", style=\"Accent6\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n\r\n row_counter += 1\r\n comment_row_start = coord.row + row_counter\r\n\r\n # If there was no COMMENT, write an empty row\r\n if not test.comment:\r\n test.comment.append(\"\")\r\n # Writing all comments\r\n for comment in test.comment:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=comment)\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n\r\n comment_row_end = coord.row + row_counter - 1\r\n # Grouping Comments\r\n worksheet.row_dimensions.group(comment_row_start, comment_row_end, hidden=True)\r\n\r\n # Getting element of test\r\n element = test.element\r\n\r\n \"\"\" Writing USE part \"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"USE\", horizontal_alignment=\"center\", style=\"Accent2\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n\r\n row_counter += 1\r\n use_row_start = coord.row + row_counter # !\r\n\r\n # If there was no USE, write an empty row\r\n if not element.use:\r\n element.use.append(\"\")\r\n # Writing all uses\r\n for use in element.use:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=use)\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n\r\n use_row_end = coord.row + row_counter - 1\r\n # Grouping uses\r\n worksheet.row_dimensions.group(use_row_start, use_row_end, hidden=True)\r\n\r\n \"\"\" Writing TEST DATA PART\"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"TEST DATA\", horizontal_alignment=\"center\", style=\"Accent1\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n\r\n \"\"\" Writing TEST DATA PART - IDENTIFIER COLUMNS \"\"\"\r\n\r\n test_data_columns = [\"Conditions\", \"<param>\", \"<name>\", \"init\", \"ev\"]\r\n for column_counter in 
range(const.ELEMENT_WIDTH):\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + column_counter)\r\n write_cell_info(cell, value=test_data_columns[column_counter], font_size=14, is_bold=True,\r\n horizontal_alignment=\"center\")\r\n # Setting default column dimensions for worksheet\r\n worksheet.column_dimensions[get_column_letter(column_counter + 1)].width = \\\r\n const.TEST_SHEET_COLUMNS_WIDTH[column_counter]\r\n row_counter += 1\r\n\r\n \"\"\" Writing TEST DATA - input data\"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"input data\", horizontal_alignment=\"center\", style=\"Output\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n input_data_row_start = coord.row + row_counter\r\n\r\n # If there was no input data, write an empty row\r\n if not element.input_data:\r\n element.input_data.append(extract_data.PtuWorkBook.TestCase())\r\n # Writing all input data\r\n for test_case in element.input_data:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_conditions(cell, test_case.conditions)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_test_case_info(worksheet, Coordinate(cell.row, cell.column), test_case)\r\n\r\n row_counter += 1\r\n\r\n input_data_row_end = coord.row + row_counter - 1\r\n # Grouping all input data\r\n worksheet.row_dimensions.group(input_data_row_start, input_data_row_end, hidden=True)\r\n\r\n \"\"\" Writing TEST DATA - calibrations\"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"calibrations\", horizontal_alignment=\"center\", style=\"Output\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n calibrations_row_start = coord.row + row_counter\r\n\r\n # If there was no calibrations, write an empty row\r\n if not element.calibrations:\r\n element.calibrations.append(extract_data.PtuWorkBook.TestCase())\r\n # Writing all calibrations\r\n for test_case in element.calibrations:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_conditions(cell, test_case.conditions)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_test_case_info(worksheet, Coordinate(cell.row, cell.column), test_case)\r\n\r\n row_counter += 1\r\n\r\n calibrations_row_end = coord.row + row_counter - 1\r\n # Grouping all calibrations\r\n worksheet.row_dimensions.group(calibrations_row_start, calibrations_row_end, hidden=True)\r\n\r\n \"\"\" Writing TEST DATA - output data \"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"output data\", horizontal_alignment=\"center\", style=\"Output\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n output_data_row_start = coord.row + row_counter\r\n\r\n # If there was no output data, write an empty row\r\n if not element.output_data:\r\n element.output_data.append(extract_data.PtuWorkBook.TestCase())\r\n # Writing all output data\r\n for test_case in element.output_data:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_conditions(cell, test_case.conditions)\r\n\r\n cell = 
worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_test_case_info(worksheet, Coordinate(cell.row, cell.column), test_case)\r\n\r\n row_counter += 1\r\n\r\n output_data_row_end = coord.row + row_counter - 1\r\n # Grouping all output data\r\n worksheet.row_dimensions.group(output_data_row_start, output_data_row_end, hidden=True)\r\n\r\n \"\"\" Writing STUB part \"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"STUB\", horizontal_alignment=\"center\", style=\"Accent5\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n\r\n \"\"\" Writing STUB part - IDENTIFIER COLUMNS \"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"Conditions\", font_size=14, is_bold=True, horizontal_alignment=\"center\")\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_cell_info(cell, value=\"<stub>\", font_size=14, is_bold=True, horizontal_alignment=\"center\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 2)\r\n\r\n row_counter += 1\r\n stub_row_start = coord.row + row_counter\r\n\r\n \"\"\" Writing STUB part - STUBs \"\"\"\r\n\r\n # If there was no stub call data, write an empty row\r\n if not element.stub_calls:\r\n element.stub_calls.append(extract_data.PtuWorkBook.Stub())\r\n # Writing all STUB\r\n for stub in element.stub_calls:\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_conditions(cell, stub.conditions)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_cell_info(cell, value=stub.stub_definition)\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 2)\r\n\r\n row_counter += 1\r\n\r\n stub_row_end = coord.row + row_counter - 1\r\n # Grouping all Stubs\r\n worksheet.row_dimensions.group(stub_row_start, stub_row_end, hidden=True)\r\n\r\n \"\"\" Writing USER CODE part - A row linked to user code sheet\"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"See User Code for this test\", style=\"40 % - Accent4\", is_italic=True,\r\n horizontal_alignment=\"center\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n row_counter += 1\r\n\r\n \"\"\" A row for returning to services sheet\"\"\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=\"Return to Service list\", horizontal_alignment=\"center\", style=\"Bad\",\r\n is_italic=True)\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.ELEMENT_WIDTH - 1)\r\n cell.hyperlink = \"#SERVICES!A\" + str(row)\r\n\r\n return worksheet\r\n\r\n def write_services_info():\r\n \"\"\"\r\n A single ptu file has multiple services(functions) to test.\r\n For each of them, this function is called to write info of that service in SERVICES sheet.\r\n \"\"\"\r\n worksheet_title, coord = get_coordinate(\"SERVICE\")\r\n worksheet = ptu_workbook[worksheet_title]\r\n row_counter = 0\r\n for service in ptu_obj.services:\r\n # Writing service name and conditions\r\n cell = worksheet.cell(coord.row + 
row_counter, coord.column)\r\n write_cell_info(cell, value=service.name, horizontal_alignment=\"center\", is_bold=True, style=\"Output\")\r\n worksheet.merge_cells(start_row=cell.row, start_column=cell.column, end_row=cell.row,\r\n end_column=cell.column + const.SERVICE_WIDTH - 1)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + const.SERVICE_WIDTH)\r\n write_conditions(cell, service.conditions, style=\"Output\")\r\n\r\n # Writing test cases\r\n row_counter += 1\r\n service_row_start = coord.row + row_counter\r\n for test in service.test_list:\r\n # Creating test sheet\r\n new_worksheet = write_test_info(test, coord.row + row_counter, service.name)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column)\r\n write_cell_info(cell, value=test.name)\r\n cell.hyperlink = \"#\" + new_worksheet.title + \"!A1\"\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + 1)\r\n write_cell_info(cell, value=test.family)\r\n\r\n cell = worksheet.cell(coord.row + row_counter, coord.column + const.SERVICE_WIDTH)\r\n write_conditions(cell, test.conditions)\r\n\r\n row_counter += 1\r\n\r\n # Grouping services\r\n service_row_end = coord.row + row_counter - 1\r\n worksheet.row_dimensions.group(service_row_start, service_row_end, hidden=True)\r\n\r\n \"\"\" Calling all of functions to write their own part in the workbook \"\"\"\r\n\r\n write_preface_info()\r\n write_include_info()\r\n write_comment_info()\r\n write_stub_definitions_info()\r\n write_initialisation_info()\r\n write_environments_info()\r\n write_services_info()\r\n write_user_code_info()", "def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), call('dummy_2')],\n mocked_get.call_args_list)", "def svn_info_invoke_receiver(svn_info_receiver_t__obj, void_baton, char_path, svn_info_t_info, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl basic repeat example
def test_documentation_popxl_repeat_0(self): filename = "repeat_graph_0.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_simple_repeat(self):\n r = mi.repeatfunc(lambda: 5)\n self.assertEqual([5, 5, 5, 5, 5], [next(r) for _ in range(5)])", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_list(self):\n self.r.lpush('l1', 'a')\n print(self.r.lrange('l1', 0, -1))\n self.r.lpushx('l1', 'a')\n self.r.lpushx('l1', 'b')\n print(self.r.lrange('l1', 0, -1))\n for i in range(4):\n self.r.rpop('l1')\n print(self.r.lrange('l1', 0, -1))", "def criando_populacao():\n pop_temp = []\n total = 0\n valor = 0\n while True:\n total = 0\n for t in range(0,3):\n if t == 0:\n n = random()\n pop_temp.append(item_um(n))\n elif t == 1:\n n = random()\n pop_temp.append(item_dois(n))\n elif t == 2:\n n = random()\n pop_temp.append(item_tres(n))\n \n for i in range(len(pop_temp)):\n if i == 0:\n total = (pop_temp[i] * 3)\n valor = (pop_temp[i] * 40)\n elif i == 1: \n total += (pop_temp[i] * 5)\n valor += (pop_temp[i] * 100)\n elif i == 2: \n total += (pop_temp[i] * 2)\n valor += (pop_temp[i] * 50)\n \n if total > 20:\n pop_temp.clear()\n else:\n break\n t = []\n t.append(total)\n v = []\n v.append(valor)\n return v, t, pop_temp", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def start_population(self):\n for i in range(self.pop_size):\n ind = self.random_ind()\n self.pop.append(ind)", "async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.1:\r\n CURRENTFOXPOP -= 1", "def test_x_repeating(name, ipset_x_repeating):\n with pytest.raises(ValueError):\n interpolation.interpolate(*ipset_x_repeating, kind=name, **IPARGS.get(name, {}))", "def repeat(self) -> int:\n return self._repeat", "async def repeat(times : int, content='repeating...'):\r\n for i in range(times):\r\n await bot.say(content)", "def test_popping_1():\n res1 = ssdts.popping_greedy_timestamp_match(\n SHORT_SERIES_1, SHORT_SERIES_2, 1)\n assert len(res1) == 2\n assert res1[3] == 3\n assert res1[10] == 10\n\n res2 = ssdts.popping_greedy_timestamp_match(\n SHORT_SERIES_1, SHORT_SERIES_2, 2)\n assert len(res2) == 5\n assert res2[1] == 2\n assert res2[3] == 3\n assert res2[4] == 5\n assert res2[8] == 7\n assert res2[10] == 10", "def 
repeat_items(self: 'LinkedList') -> None:\n pass", "def test_op_repeat(self) -> None:\n op_base = OpIncrForTest()\n kwargs_per_step_to_add = [\n dict(key_in=\"data.val.a\", key_out=\"data.val.b\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.c\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.d\"),\n dict(key_in=\"data.val.d\", key_out=\"data.val.d\"),\n ]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", incr_value=3)\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 14)\n\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.d\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n\n sample_dict[\"data.val.e\"] = 48\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.e\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n self.assertEqual(sample_dict[\"data.val.e\"], 42)", "def repeat(self, value):\n return self._query(\"tl.repeat\", str=str(value))", "def init_pop(n_pop, n_var, xl, xu):\n X = np.random.uniform(xl, xu, (n_pop, n_var))\n return X" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl getting / setting tensor data example
def test_documentation_popxl_get_set_tensors(self): filename = "tensor_get_write.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_predictor():", "def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)", "def test_pauli_rep(self):\n X = qml.PauliX(0)\n Y = qml.PauliY(2)\n t = Tensor(X, Y)\n assert t._pauli_rep is None", "def test_random_subpackage(self):\r\n x = np.random.normal(size=[2, 3])\r\n assert isinstance(x, np.tensor)", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_create_training_dataset(self):\n pass", "def test_real_data(self):\n self.assert_dim_type_supported({\"x\": \"uniform(0, 5)\"})", "def test_predict(self):", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_coop():\n test_path = tempfile.mkdtemp()\n x_train, metadata = coop(test_path)\n try:\n assert x_train.shape == (252, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_predictor(predict_proba):\n assert len(predict_proba) == 6", "def test_genotype():\n test_path = tempfile.mkdtemp()\n x_train, metadata = genotype(test_path)\n try:\n assert x_train.shape == (61, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_construct(self):\n X = qml.PauliX(0)\n Y = qml.PauliY(2)\n T = Tensor(X, Y)\n assert T.obs == [X, Y]\n\n T = Tensor(T, Y)\n assert T.obs == [X, Y, Y]\n\n with pytest.raises(\n ValueError, match=\"Can only perform tensor products between observables\"\n ):\n Tensor(T, qml.CNOT(wires=[0, 1]))", "def test_int_data(self):\n self.assert_dim_type_supported({\"x\": \"uniform(0, 5000, discrete=True)\"})", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl autodiff op
def test_documentation_popxl_autodiff(self): filename = "autodiff.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_pressure_increasing_check_some_decreasing(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test31_clear_air():\n assert not is_precip_mode(31), 'VCP 31 is not precip'", "def test_modexp(self):\n self.assertEqual(MathFunctions.modexp(2, 5, 7), 4)\n self.assertEqual(MathFunctions.modexp(2, 10, 8), 0)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def test_analytical_vs_numerical():\n pass", "def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_pauli_rep(self, op, rep):\n assert op._pauli_rep == rep # pylint: disable=protected-access", "def test_operas_get(self):\n pass", "def test_get_meta_range(self):\n pass", "def test_with_orographic_enhancement(self):\n plugin = CreateExtrapolationForecast(\n self.precip_cube,\n self.vel_x,\n self.vel_y,\n orographic_enhancement_cube=self.oe_cube,\n )\n result = plugin.extrapolate(10)\n expected_result = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, 1.03125, 1.0],\n [np.nan, 1.0, 0.03125],\n [np.nan, 0, 2.0],\n ],\n dtype=np.float32,\n )\n expected_result = np.ma.masked_invalid(expected_result)\n expected_forecast_period = np.array([600], dtype=np.int64)\n # Check we get the expected result, and the correct time coordinates.\n self.assertArrayEqual(\n np.ma.getmask(expected_result), np.ma.getmask(result.data)\n )\n self.assertArrayAlmostEqual(expected_result.data, result.data.data)\n self.assertArrayAlmostEqual(\n result.coord(\"forecast_period\").points, expected_forecast_period\n )\n self.assertEqual(result.coord(\"forecast_period\").units, \"seconds\")\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n self.precip_cube.coord(\"time\").points,\n )\n self.assertEqual(\n result.coord(\"time\").points, self.precip_cube.coord(\"time\").points + 600\n )", "def c_test_population_function(self, function):\r\n return 1", "def test_get_range(self):\n pass", "def test_autodiff_collisions(self):\n # Get the autodiff copy\n copy_ad = self._model.toAutoDiffXd()\n # Check that the number of collision frames and poses is equal to the original\n self.assertEqual(len(copy_ad.collision_frames), len(self._model.collision_frames), msg = \"wrong number of collision frames\")\n self.assertEqual(len(copy_ad.collision_poses), len(self._model.collision_poses), msg = \"wrong number of collision poses\")", "def testMedicationsImmunosupp(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"immunosupp\")\n\n self.util.boolPropertyTest(self, attr, \"immunosupp\")", "def test_zpfo(self):\n self.run_setting('zpfo', 100)", "def test_increment_generations(self, pop, conf):\n P2 = pop.clone()\n P2.increment_generations()\n assert 
np.array_equal(pop.generations+1, P2.generations)", "def test_T4_DP_Q():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl in sequence context manager
def test_documentation_popxl_in_sequence(self): filename = "in_sequence.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_pop(self):\n self.stack.pop()", "def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called()\n\n # pop object default value\n batch.pop_object()\n mock_obj.pop.assert_called_with(-1)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop object at index\n batch.pop_object(10)\n mock_obj.pop.assert_called_with(10)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference default value\n batch.pop_reference()\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(-1)\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference at index\n batch.pop_reference(9)\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(9)", "def test_close():\n while True:\n yield", "def test_pop_sequence(test_deque):\n l = []\n while True:\n try:\n poped_data = test_deque[2].pop()\n l.append(poped_data)\n except IndexError:\n break\n assert l == [1, 2, 3, 4, 5]", "def test_subpop(self):\n self.fs.f = iter(['test\\n', 'foo\\n', 'bar\\n', 'baz\\n'])\n for i, line in enumerate(['test', 'foo', 'bar', 'baz']):\n self.assertEqual(self.fs.line, i)\n self.assertEqual(self.fs.subpop(), line + '\\n')\n self.assertEqual(self.fs.line, i + 1)\n self.assertRaises(StopIteration, self.fs.subpop)", "def test_pop(self):\n with self.assertRaises(IndexError):\n self.stack.pop()\n self.stack.push(1)\n self.assertEqual(self.stack.pop(), 1)", "def test_stack_pop(val1, val2, val3):\n from stack import Stack\n test_stack = Stack()\n test_stack.push(val1)\n test_stack.push(val2)\n test_stack.push(val3)\n assert test_stack.pop() == val3\n assert test_stack.pop() == val2\n assert test_stack.pop() == val1", "def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check", "def pop_command():\n try:\n ret = sequence.pop(0)\n except IndexError:\n ret = None\n\n return ret", "def test_pop_empty_stack():\n from stack import Stack\n s = Stack()\n with pytest.raises(IndexError):\n s.pop()", "def test_pop(self):\n stack = Stack()\n self.assertEqual(stack.pop(), None)\n stack.push(1)\n stack.push(2)\n stack.push(3)\n self.assertEqual(stack.pop(), 3)\n self.assertEqual(stack.size(), 2)", "def test_pop_results(self):\n tree = BTree.create(\"tree\", 5)\n items = [(str(x), x) for x in range(50)]\n items.sort()\n tree.update(items)\n while items:\n # Delete medians. 
This produced an error once.\n idx = len(items) / 2\n seq_item = items.pop(idx)\n tree_item = tree.pop(idx)\n self.assertEqual(seq_item, tree_item)\n self.validate_empty_tree(tree)", "def test_push_pop(values):\n test_stack = stack.Stack()\n\n for value in values:\n test_stack.push(value)\n\n for expected_value in reversed(values):\n value = test_stack.pop()\n assert value == expected_value\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.pop()", "def test_pop_operation(self):\n empty_stack , filled_stack = [],[1,2,3]\n\n s = Stack()\n for each_element in empty_stack:\n print('PUSHING--->',each_element)\n s.push(each_element)\n\n result = s.pop()\n self.assertEqual(result, 'UNDERFLOW')\n\n for each_element in filled_stack:\n print('PUSHING--->',each_element)\n s.push(each_element)\n\n result = s.pop()\n self.assertEqual(result, 3)", "def test_pop_no_args(self):\r\n msg_list = messages.MessageList()\r\n # Adds 5 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n msg_list.push(messages.StringMessage(\"d\"))\r\n msg_list.push(messages.StringMessage(\"e\"))\r\n\r\n self.assertEqual(msg_list.length(), 5)\r\n popped = msg_list.pop()\r\n self.assertEqual(msg_list.length(), 4)\r\n self.assertEqual(popped.msg, \"e\")\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n self.assertRaises(IndexError, msg_list.pop)", "def test_too_many_pops(self):\n self.assertRaises(NodeBoxError, self.ctx.pop)", "def test_peek_empty():\n test_stack = stack.Stack()\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.peek()", "def test_ignore_close():\n try:\n yield\n except GeneratorExit:\n yield", "def test_pop_on_small_stack(small_stack):\n assert small_stack.pop().val == 3\n assert small_stack.pop().val == 2\n assert small_stack._size == 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl remote variable
def test_documentation_popxl_remote_var(self): filename = "remote_variable.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_push_localscope(self):\n\n self.ip.run_cell('''\ndef rmagic_addone(u):\n %Rpush u\n %R result = u+1\n %Rpull result\n return result[0]\nu = 0\nresult = rmagic_addone(12344)\n''')\n result = self.ip.user_ns['result']\n np.testing.assert_equal(result, 12345)", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def var_pop(self):\n expected = convert_output(\"\"\"\n var_pop\n -----------------------\n 21704\n 13868.75\n 11266.666666666666\n 4225\n 0\n \"\"\")\n\n execute_query(\"\"\"\n SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_pop\n FROM values('i Int8, n Int32', (1,600),(2,470),(3,170),(4,430),(5,300))\n \"\"\",\n expected=expected\n )", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def _platformix_get(self, context, fake_reply, prop):\r\n if hasattr(self.host, prop):\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)", "async def rd_rpoplpush_cmd(self):\n key1, key2, key3 = 'key1', 'key2', 'key3'\n values_rpush = ('TEST1', 'TEST2')\n with await self.rd1 as conn:\n await conn.rpush(key1, *values_rpush)\n await conn.rpoplpush(key1, key2)\n await conn.rpoplpush(key3, key2)\n res1 = await conn.lrange(key1, 0, -1)\n res2 = await conn.lrange(key2, 0, -1)\n await conn.delete(key1, key2, key3)\n frm = \"LIST_CMD - 'RPOPLPUSH': KEYS - {0}, RPOP - {1}, RPOP_NOT_EXIST - {2}\\n\"\n logger.debug(frm.format([key1, key2, key3], res1, res2))", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)", "def test_cmd_injection(self, ip):\n try:\n conn = remote(ip, 2222)\n prompt = conn.recv()\n log.debug(prompt)\n conn.send(\"ls;ls /home/\\n\")\n results = conn.recv()\n except:\n log.info(\"Error connecting to \" + ip)\n return None\n log.debug(results)\n conn.close()\n if \"jackbauer\" in results:\n log.info(ip + \" VULNERABLE to cmd injection\")\n return True\n else:\n log.info(ip + \" NOT VULNERABLE cmd injection\")\n return False", "def getnpop(var):\n if var in session:\n return session.pop(var)", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def hasGridProxy():\n import os\n from subprocess import Popen, PIPE\n \n arguments = 'dirac-proxy-info --checkvalid'\n arguments = ['dirac-command'] + arguments.split()\n logger.verbose ( 'hasGridProxy:use Popen(%s)' % arguments)\n\n p = Popen(arguments, stdout=PIPE, stderr=PIPE)\n (cout, cerr) = p.communicate()\n #\n if 0 != p.returncode: return False\n #\n if py3 :\n cout = cout.decode ( 
'utf-8' ) if cout else cout \n cerr = cerr.decode ( 'utf-8' ) if cerr else cerr \n # \n\n if 'expired' in cout : return False\n if 'Insane' in cout : return False\n if 'Error' in cout : return False\n #\n return 0 == p.returncode and cout and not cerr", "def pull():", "def _plzchkpv(inpvnam):\n try:\n currval=efc.rPV(inpvnam,display=False)\n if currval is False:\n msgout='rPV fail!'\n else:\n currvalstr=str(currval)\n if len(currvalstr) > 10:\n currvalstr=currvalstr[:10]+'...'\n msgout = currvalstr+' vs oldval'\n except TimeoutError as err:\n msgout = 'Timeout!';err;\n return msgout", "def test_stack_pop(val1, val2, val3):\n from stack import Stack\n test_stack = Stack()\n test_stack.push(val1)\n test_stack.push(val2)\n test_stack.push(val3)\n assert test_stack.pop() == val3\n assert test_stack.pop() == val2\n assert test_stack.pop() == val1", "def main():\n client = POP3Client('pop.yandex.ru', 995)\n client.connect_to_serv()\n commands = {\n 'stat': client.stat,\n 'list': client.par_list,\n 'top': client.par_top,\n 'retr': client.par_retr,\n 'uidl': client.par_uidl,\n 'dele': client.par_dele,\n 'rset': client.rset,\n 'quit': client.quit,\n\n }\n cmd=''\n while cmd != 'quit':\n print(\"Input command\")\n cmd = raw_input()\n parse=cmd.split(' ')\n if parse[0] in commands:\n command = commands[parse[0]]\n command(*parse[1:])\n else:\n print(\"Wrong command\")\n # if cmd == 'STAT':\n # client.stat()\n # elif parse[0] == 'TOP':\n # if len(parse)==1:\n # client.print_top()\n # elif len(parse)==2:\n # client.print_top(parse[1])\n # else:\n # client.print_top(parse[1],parse[2])\n # elif parse[0] == 'LIST':\n # if len(parse)==1:\n # client.print_list()\n # else:\n # client.print_list(parse[1])\n # elif parse[0] == 'UIDL':\n # if len(parse)==1:\n # client.print_uidl()\n # else:\n # client.print_uidl(parse[1])\n\n #denispopclient1994@yandex.ru\n #denis11021994", "def check_remote_rpm_install(self, rpm_package_name, host):\n results = run_remote_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)\n self.assertEqual(results, rpm_package_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl remote rts variable
def test_documentation_popxl_remote_rts_var(self): filename = "remote_rts_var.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_push_localscope(self):\n\n self.ip.run_cell('''\ndef rmagic_addone(u):\n %Rpush u\n %R result = u+1\n %Rpull result\n return result[0]\nu = 0\nresult = rmagic_addone(12344)\n''')\n result = self.ip.user_ns['result']\n np.testing.assert_equal(result, 12345)", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def test_rpcCall(self):\n pass", "def _plzchkpv(inpvnam):\n try:\n currval=efc.rPV(inpvnam,display=False)\n if currval is False:\n msgout='rPV fail!'\n else:\n currvalstr=str(currval)\n if len(currvalstr) > 10:\n currvalstr=currvalstr[:10]+'...'\n msgout = currvalstr+' vs oldval'\n except TimeoutError as err:\n msgout = 'Timeout!';err;\n return msgout", "def test_run_gnmi(self, gnmi_get_mock):\n operation = 'get'\n steps = \"STEP 1: Starting action yang on device 'ncs1004'\"\n datastore = {\n 'type': '',\n 'lock': False,\n 'retry': 10\n }\n rpc_data = {\n 'namespace': {\n 'oc-sys': 'http://openconfig.net/yang/system'\n },\n 'nodes': [{\n 'xpath': '/oc-sys:system'\n }]\n }\n returns = [{\n 'id': 1,\n 'name': 'rate-limit',\n 'op': 'range',\n 'selected': True,\n 'datatype': 'integer',\n 'value': '50 - 70',\n 'xpath': '/system/ssh-server/state/rate-limit'\n }]\n response = [{\n 'update': [[\n (True, '/system/ssh-server/state/enable'),\n ('V2', '/system/ssh-server/state/protocol-version'),\n (30, '/system/ssh-server/state/timeout'),\n (64, '/system/ssh-server/state/session-limit'),\n (60, '/system/ssh-server/state/rate-limit'),\n ('NONE', '/system/logging/console/selectors/selector/facility'),\n ('DISABLE', '/system/logging/console/selectors/selector/severity'),\n ('DISABLE', '/system/logging/console/selectors/selector/state/severity'),\n (True, '/system/grpc-server/state/enable'),\n (57400, '/system/grpc-server/state/port'),\n (False, '/system/grpc-server/state/transport-security'),\n ('SM/HW_ENVMON_FAN_ALARM/201#CHASSIS/LCC/1', '/system/alarms/alarm/id'),\n ('SM/HW_ENVMON_FAN_ALARM/201#CHASSIS/LCC/1', '/system/alarms/alarm/state/id'),\n ('0', '/system/alarms/alarm/state/resource'),\n ('Fan: One or more LCs missing, running fans at max speed.', '/system/alarms/alarm/state/text'),\n ('1612588606', '/system/alarms/alarm/state/time-created'),\n ('openconfig-alarm-types:CRITICAL', '/system/alarms/alarm/state/severity'),\n ('openconfig-alarm-types:HW_ENVMON_RM_LC_REMOVAL', '/system/alarms/alarm/state/type-id'),\n ('SYSTEM/HW_ERROR/82#CHASSIS/LCC/1:CONTAINER/LC/1', '/system/alarms/alarm/id'),\n ('SYSTEM/HW_ERROR/82#CHASSIS/LCC/1:CONTAINER/LC/1', '/system/alarms/alarm/state/id'),\n ('0/0', '/system/alarms/alarm/state/resource'), ('Verification of SUDI Certificate Failed On LC.', '/system/alarms/alarm/state/text'),\n ('1612590967', '/system/alarms/alarm/state/time-created'), ('openconfig-alarm-types:MAJOR', '/system/alarms/alarm/state/severity'),\n ('openconfig-alarm-types:LC_SUDI_FAILURE', '/system/alarms/alarm/state/type-id'),\n ('SYSTEM/HW_ERROR/12#CHASSIS/LCC/1:CONTAINER/LC/4', '/system/alarms/alarm/id'),\n ('SYSTEM/HW_ERROR/12#CHASSIS/LCC/1:CONTAINER/LC/4', '/system/alarms/alarm/state/id'),\n ('0/3', 
'/system/alarms/alarm/state/resource'),\n ('LC_CPU_MOD_FW is corrupt, system booted with golden copy.', '/system/alarms/alarm/state/text'),\n ('1612590967', '/system/alarms/alarm/state/time-created'),\n ('openconfig-alarm-types:MAJOR', '/system/alarms/alarm/state/severity'),\n ('openconfig-alarm-types:LC_CPU_CORRUPTION', '/system/alarms/alarm/state/type-id'),\n ('root', '/system/aaa/authentication/users/user/username'),\n ('root', '/system/aaa/authentication/users/user/state/username'),\n ('root-lr', '/system/aaa/authentication/users/user/state/role'),\n ('$6$O/qa30UhNVPK630.$fwZsgRvyIkhIAcwwhaaAEbQEggRCNaEMHbUayTvJzPb9MNBsxXjVVJ76R8.t2K/fkz6RnONCa8/EOff2XaxO7.', '/system/aaa/authentication/users/user/state/password-hashed')\n ]]\n }]\n\n format = {\n 'auto-validate': False\n }\n\n gnmi_get_mock.return_value = response\n result = run_gnmi(\n operation=operation,\n device=self.gnmi_device,\n steps=steps,\n datastore=datastore,\n rpc_data=rpc_data,\n returns=returns,\n format=format\n )\n\n self.assertEqual(result, True)", "def test_rap_get(self):\n\n # the function to be tested:\n rap1 = self.urihandler.get(self.hmc,\n '/api/cpcs/1/reset-activation-profiles/r1',\n True)\n\n exp_rap1 = {\n 'name': 'r1',\n 'class': 'reset-activation-profile',\n 'parent': '/api/cpcs/1',\n 'element-uri': '/api/cpcs/1/reset-activation-profiles/r1',\n 'description': 'Reset profile #1 in CPC #1',\n }\n assert rap1 == exp_rap1", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "async def test_one_pt2262(hass: HomeAssistant, rfxtrx) -> None:\n entry_data = create_rfx_test_cfg(\n devices={\n \"0913000022670e013970\": {\n \"data_bits\": 4,\n \"command_on\": 0xE,\n \"command_off\": 0x7,\n }\n }\n )\n mock_entry = MockConfigEntry(domain=\"rfxtrx\", unique_id=DOMAIN, data=entry_data)\n\n mock_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n await hass.async_start()\n\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"friendly_name\") == \"PT2262 22670e\"\n\n await rfxtrx.signal(\"0913000022670e013970\")\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state.state == \"on\"\n\n await rfxtrx.signal(\"09130000226707013d70\")\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state.state == \"off\"", "async def test_x10_flag(self):\n pub.sendMessage(\"send.{}\".format(self.topic), **self.kwargs)\n assert self.msg.x10_flag == self.x10_flag", "def test_get_pdu(self):\n self._get_pdu_once()", "def test_library_niftyreg_c():\n number = 101 # just a number\n descriptor = [ {'name':'input', 'type':'int', 'value':number},\n {'name':'output', 'type':'int', 'value':None }, ]\n r = call_c_function( niftyreg_c.echo, descriptor ) \n return r.output == number", "def test_send_and_parse_cmd(phil_base, regtest):\n _regtest(regtest, phil_base.send_and_parse_cmd(\"rr 0 10\"))\n _regtest(regtest, phil_base.send_and_parse_cmd(\"rr 0 2\"))\n _regtest(regtest, phil_base.send_and_parse_cmd(\"rr 0 2\", True))\n _regtest(regtest, phil_base.send_and_parse_cmd(\"rr 0 2\", True, 0.1))\n _regtest(regtest, phil_base.send_and_parse_cmd(\"help\"))\n _regtest(regtest, 
phil_base.get_version())", "def test_get_multiple_return_binds(self):\n data = readbytes('get_sysoid_01_error.hex')\n with patch('puresnmp.send') as mck:\n mck.return_value = data\n with self.assertRaisesRegexp(SnmpError, 'varbind'):\n get('::1', 'private', '1.2.3')", "def var_pop(self):\n expected = convert_output(\"\"\"\n var_pop\n -----------------------\n 21704\n 13868.75\n 11266.666666666666\n 4225\n 0\n \"\"\")\n\n execute_query(\"\"\"\n SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_pop\n FROM values('i Int8, n Int32', (1,600),(2,470),(3,170),(4,430),(5,300))\n \"\"\",\n expected=expected\n )", "def test_api_connection_pc():\n api_test = PrismApi(ip_address='192.168.1.44', username='admin', password='fUUif4l0CF!iPVv2mpE6wbT9&Rf5tw').test()\n assert api_test", "def test_set_get_value_1(self):\n value = 23.0\n port = cn.Port(value)\n self.assertEqual(port.value, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl rts variable
def test_documentation_popxl_rts_var(self): filename = "rts_var.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_state_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Could not get variable from controller. Test will not be run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n self.assertEqual(rapid_bool.get_state_tostring(const_number), 'DataType is num and not bool.')\n # Checks if rapid data is not inserted.\n self.assertIsInstance(rapid_bool.get_state_tostring(10), Exception)", "def var_pop(self):\n expected = convert_output(\"\"\"\n var_pop\n -----------------------\n 21704\n 13868.75\n 11266.666666666666\n 4225\n 0\n \"\"\")\n\n execute_query(\"\"\"\n SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_pop\n FROM values('i Int8, n Int32', (1,600),(2,470),(3,170),(4,430),(5,300))\n \"\"\",\n expected=expected\n )", "def test_get_robax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(const_number)\n self.assertEqual(robax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(10)\n self.assertIsInstance(robax, Exception)", "def _plzchkpv(inpvnam):\n try:\n currval=efc.rPV(inpvnam,display=False)\n if currval is False:\n msgout='rPV fail!'\n else:\n currvalstr=str(currval)\n if len(currvalstr) > 10:\n currvalstr=currvalstr[:10]+'...'\n msgout = currvalstr+' vs oldval'\n except TimeoutError as err:\n msgout = 'Timeout!';err;\n return msgout", "def test_get_extax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(const_number)\n self.assertEqual(extax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(10)\n self.assertIsInstance(extax, Exception)", "async def test_x10_flag(self):\n pub.sendMessage(\"send.{}\".format(self.topic), **self.kwargs)\n assert self.msg.x10_flag == self.x10_flag", "def has_regvar(*args):\n return _ida_frame.has_regvar(*args)", "def test_get_robax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n robax = rapid_jointtarget.get_robax_tostring(const_jtar)\n self.assertEqual(robax, 'RobAx: [Rax_1,Rax_2,Rax_3,Rax_4,Rax_5,Rax_6] = [0,0,0,10,0,0]')", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def test_get_state_tostring_correct(self):\n got_var, const_bool = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_boolean')\n if not got_var:\n print 'Could not get variable from controller. 
Test will not be run.'\n sys.exit()\n self.assertEqual(rapid_bool.get_state_tostring(const_bool), 'State = True')", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def test_get_extax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n extax = rapid_jointtarget.get_extax_tostring(const_jtar)\n self.assertEqual(extax, 'Extax: [Eax_a,Eax_b,Eax_c,Eax_d,Eax_e,Eax_f] = [9E9,9E9,9E9,9E9,9E9,9E9]')", "def test_rnd_instruction(self):\n self.vm_opcode.instruction_lookup(0xC1FF)\n self.assertIn(self.vm_opcode.vm.v[1], xrange(0, 0xff + 1))", "def test_get(self):\n self.assertListEqual(\n Fix.get_fixes('CMIP5', 'ACCESS1-0', 'Amon', 'tas'),\n [AllVars(None)])", "def test_library_niftyreg_c():\n number = 101 # just a number\n descriptor = [ {'name':'input', 'type':'int', 'value':number},\n {'name':'output', 'type':'int', 'value':None }, ]\n r = call_c_function( niftyreg_c.echo, descriptor ) \n return r.output == number", "async def test_one_pt2262(hass: HomeAssistant, rfxtrx) -> None:\n entry_data = create_rfx_test_cfg(\n devices={\n \"0913000022670e013970\": {\n \"data_bits\": 4,\n \"command_on\": 0xE,\n \"command_off\": 0x7,\n }\n }\n )\n mock_entry = MockConfigEntry(domain=\"rfxtrx\", unique_id=DOMAIN, data=entry_data)\n\n mock_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n await hass.async_start()\n\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state\n assert state.state == STATE_UNKNOWN\n assert state.attributes.get(\"friendly_name\") == \"PT2262 22670e\"\n\n await rfxtrx.signal(\"0913000022670e013970\")\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state.state == \"on\"\n\n await rfxtrx.signal(\"09130000226707013d70\")\n state = hass.states.get(\"binary_sensor.pt2262_22670e\")\n assert state.state == \"off\"", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl mnist with replication example
def test_documentation_popxl_mnist_replication_train(self): filename = "mnist_rts.py --replication-factor 2" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_remix(self):\n reference = Nations()\n for random_state in range(20):\n derived = reference.remix(random_state=random_state)\n self.assertEqual(reference.training.num_triples, derived.training.num_triples)\n self.assertFalse((reference.training.mapped_triples == derived.training.mapped_triples).all())\n\n self.assertEqual(reference.testing.num_triples, derived.testing.num_triples)\n self.assertFalse((reference.testing.mapped_triples == derived.testing.mapped_triples).all())\n\n self.assertEqual(reference.validation.num_triples, derived.validation.num_triples)\n self.assertFalse((reference.validation.mapped_triples == derived.validation.mapped_triples).all())", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_LoadMnistDataset():\n loader = mnist_data_loader.MnistDataLoader()\n data = loader.Load()\n\n assert len(data.train.images) == 60000\n assert len(data.train.labels) == 60000\n\n assert len(data.test.images) == 10000\n assert len(data.test.labels) == 10000", "def setup_replication(self, experiment, num_rep, seed_list):\n self.cmd_logger.print_cmd('\\nSetting up replications...')\n\n if experiment != None and experiment.isA(\"GKExperiment\") \\\n and experiment.getSimulatorEngine() == GKExperiment.eMicro:\n\n # add replications here\n replication_list = 
experiment.getReplications()\n\n # ===============================\n # create new replications\n if len(replication_list) == 0:\n # create replications\n for i in range(0, num_rep):\n replication = GKSystem.getSystem().newObject(\"GKReplication\", self.model, -1, True)\n replication.setExperiment(experiment)\n replication_list.append(replication)\n\n if seed_list is not None:\n replication.setRandomSeed(seed_list[i])\n self.cmd_logger.print_cmd('---- Created replication {0} with seed {1}'.format(replication.getId(),\n replication.getRandomSeed()))\n else:\n # show replcations:\n self.cmd_logger.print_cmd('---- Reloading {0} replications: {1} \\n'.format(len(replication_list),\n [replication.getId() for\n replication in\n replication_list]))\n\n # create the average experiment result\n avg_result = GKSystem.getSystem().newObject(\"GKExperimentResult\", self.model)\n avg_result.setName('average_result')\n self.cmd_logger.print_cmd('Created new average replication: {0}'.format(avg_result.getName()))\n # print_cmd('Total number of replications is: {0}',format(len(experiment.getReplications()))\n\n # set the experiment of this result object\n avg_result.setExperiment(experiment)\n # add replcations to the average\n for replication in replication_list:\n avg_result.addReplication(replication)\n self.cmd_logger.print_cmd(\n '---- Added replication {0} to {1}'.format(replication.getId(), avg_result.getName()))\n\n # compute the average; add to the experiment.\n experiment.addReplication(avg_result)\n\n return avg_result", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def mnist_raw():\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n base_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n\n def parse_labels(filename):\n with gzip.open(filename, \"rb\") as fh:\n _ = struct.unpack(\">II\", fh.read(8))\n return numpy.array(array.array(\"B\", fh.read()), dtype=numpy.uint8)\n\n def parse_images(filename):\n with gzip.open(filename, \"rb\") as fh:\n _, num_data, rows, cols = struct.unpack(\">IIII\", fh.read(16))\n return numpy.array(array.array(\"B\", fh.read()),\n dtype=numpy.uint8).reshape(num_data, rows, cols)\n\n for filename in [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]:\n _download(base_url + filename, filename)\n\n train_images = parse_images(path.join(_DATA, \"train-images-idx3-ubyte.gz\"))\n train_labels = parse_labels(path.join(_DATA, \"train-labels-idx1-ubyte.gz\"))\n test_images = parse_images(path.join(_DATA, \"t10k-images-idx3-ubyte.gz\"))\n test_labels = parse_labels(path.join(_DATA, \"t10k-labels-idx1-ubyte.gz\"))\n\n return train_images, train_labels, test_images, test_labels", "def test_nswpsid1():\n test_path = tempfile.mkdtemp()\n x_train, metadata = nswpsid1(test_path)\n try:\n assert x_train.shape == (2787, 10)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_read_mnist(self):\n \n lmdb_path = 'tests/mnist_test_lmdb'\n lmdb = tools.lmdb_io.LMDB(lmdb_path)\n \n keys = lmdb.keys(5)\n for key in keys:\n image, label, key = lmdb.read(key)\n \n image_path = 'tests/mnist_test/' + key + 
'.png'\n assert os.path.exists(image_path) \n \n image = cv2.imread(image_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n \n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n self.assertEqual(image[i, j], image[i, j])", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def test_LoadMnistDataset_numpy():\n loader = mnist_data_loader.MnistDataLoader()\n data = loader.Load()\n\n train_X = np.array(data.train.images, dtype=np.int8).reshape((60000, 28 * 28))\n train_y = np.array(data.train.labels, dtype=np.int8).reshape((60000, 1))\n\n assert train_X.shape == ((60000, 28 * 28))\n assert train_y.shape == ((60000, 1))\n\n test_X = np.array(data.test.images, dtype=np.int8).reshape((10000, 28 * 28))\n test_y = np.array(data.test.labels, dtype=np.int8).reshape((10000, 1))\n\n assert test_X.shape == ((10000, 28 * 28))\n assert test_y.shape == ((10000, 1))", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def cmd_subsample(lat):\n data = np.load(lat.npz_path)\n eval_images = data['data'][lat.train_size:]\n eval_labels = data['labels'][lat.train_size:]\n images = data['data'][:lat.train_size]\n labels = data['labels'][:lat.train_size]\n logger.debug(f'images: {images.shape}')\n\n n = 2000\n logger.info(f\"subsampling...\")\n for i in range(0, lat.num_classes, 2):\n logger.info(f\"filling {i+1} with examples from {i}\")\n even_indices = np.random.permutation(np.where(labels == i)[0])[:n] # examples to 
fill with\n odd_indices = np.random.permutation(np.where(labels == i+1)[0])[:n] # examples to fill in\n for even_idx, odd_idx in zip(even_indices, odd_indices):\n translation = list(np.random.uniform(0,2,size=2))\n rotation = np.random.uniform(-np.pi/36, np.pi/36)\n image = images[even_idx]\n image = ndimage.rotate(image, rotation, reshape=False)\n image = ndimage.shift(image, translation)\n images[odd_idx] = image\n labels[odd_idx] = i\n \n new_images = np.concatenate((images, eval_images), axis=0)\n new_labels = np.concatenate((labels, eval_labels), axis=0)\n npz_path = 'data/unbalanced_mnist/unbalanced_mnist.npz'\n np.savez(npz_path, data=new_images, labels=new_labels)\n logger.info(f'saved subsample to {npz_path}')\n dat.convert_from_npz(npz_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train(self):
    filename = "mnist_rts.py --replication-factor 2 --rts"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def main():\n # load the MNIST dataset and apply min/max scaling to scale the pixel intensity values\n # to the range [0, 1] (each image is represented by an 8 x 8 = 64-dim feature vector)\n print(\"[INFO] loading MNIST (sample) dataset...\")\n digits = datasets.load_digits()\n data = digits.data.astype(\"float\") # pylint: disable=no-member\n data = (data - data.min()) / (data.max() - data.min())\n print(\"[INFO] samples: {}, dim: {}\".format(data.shape[0], data.shape[1]))\n\n # construct the training and testing splits\n (train_x, test_x, train_y, test_y) = train_test_split(\n data, digits.target, test_size=0.25\n ) # pylint: disable=no-member\n\n # convert the labels from integers to vectors using one-hot-encoding\n train_y = LabelBinarizer().fit_transform(train_y)\n test_y = LabelBinarizer().fit_transform(test_y)\n\n # train the network\n print(\"[INFO] training network...\")\n network = NeuralNetwork([train_x.shape[1], 32, 16, 10])\n print(\"[INFO] {}\".format(network))\n network.fit(train_x, train_y, epochs=1000)\n\n # evaluate the network\n print(\"[INFO] evaluating network...\")\n predictions = network.predict(test_x)\n predictions = predictions.argmax(axis=1)\n print(classification_report(test_y.argmax(axis=1), predictions))", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) 
is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test8():\n test = HMM(range(10), range(50))\n print 'Generating sample data...'\n l = []\n test.set_random_proba()\n for i in range(10):\n obs = test.simulate(100)\n l.append(obs)\n print 'Original'\n print 'A =', test.A\n print 'B =', test.B\n print 'pi =', test.pi\n print\n print 'Randomizing model...'\n test.set_random_proba()\n print 'Training model...'\n test.multiple_learn(l)\n print 'trained values'\n print 'A =', test.A\n print 'B =', test.B\n print 'pi =', test.pi", "def test_model_MNIST(model : neural_network.NeuralNetwork, dataset) -> (float, list):\n print(\"Testing Neural Network on MNIST Test Dataset\")\n total = 10000\n correct = 0\n incorrect_indices = []\n for i in range(10000):\n num = test_single_image_MINST(i, model, dataset)\n if num == dataset.get_test_image_label(i):\n correct += 1\n else:\n incorrect_indices.append(i)\n success_rate = correct / total\n print(\"Accuracy: {:.3f} %\".format(success_rate * 100))\n return success_rate, incorrect_indices", "def mnist_raw():\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n base_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n\n def parse_labels(filename):\n with gzip.open(filename, \"rb\") as fh:\n _ = struct.unpack(\">II\", fh.read(8))\n return numpy.array(array.array(\"B\", fh.read()), dtype=numpy.uint8)\n\n def parse_images(filename):\n with gzip.open(filename, \"rb\") as fh:\n _, num_data, rows, cols = struct.unpack(\">IIII\", fh.read(16))\n return numpy.array(array.array(\"B\", fh.read()),\n dtype=numpy.uint8).reshape(num_data, rows, cols)\n\n for filename in [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]:\n _download(base_url + filename, filename)\n\n train_images = parse_images(path.join(_DATA, \"train-images-idx3-ubyte.gz\"))\n train_labels = parse_labels(path.join(_DATA, \"train-labels-idx1-ubyte.gz\"))\n test_images = parse_images(path.join(_DATA, \"t10k-images-idx3-ubyte.gz\"))\n test_labels = parse_labels(path.join(_DATA, \"t10k-labels-idx1-ubyte.gz\"))\n\n return train_images, train_labels, test_images, test_labels", "def test_LoadMnistDataset():\n loader = mnist_data_loader.MnistDataLoader()\n data = loader.Load()\n\n assert len(data.train.images) == 60000\n assert len(data.train.labels) == 60000\n\n assert len(data.test.images) == 10000\n assert len(data.test.labels) == 10000", "def test(self, test, test_labels):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train_test(self):
    filename = "mnist_rts.py --replication-factor 2 --rts --test"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
[ "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def main():\n # load the MNIST dataset and apply min/max scaling to scale the pixel intensity values\n # to the range [0, 1] (each image is represented by an 8 x 8 = 64-dim feature vector)\n print(\"[INFO] loading MNIST (sample) dataset...\")\n digits = datasets.load_digits()\n data = digits.data.astype(\"float\") # pylint: disable=no-member\n data = (data - data.min()) / (data.max() - data.min())\n print(\"[INFO] samples: {}, dim: {}\".format(data.shape[0], data.shape[1]))\n\n # construct the training and testing splits\n (train_x, test_x, train_y, test_y) = train_test_split(\n data, digits.target, test_size=0.25\n ) # pylint: disable=no-member\n\n # convert the labels from integers to vectors using one-hot-encoding\n train_y = LabelBinarizer().fit_transform(train_y)\n test_y = LabelBinarizer().fit_transform(test_y)\n\n # train the network\n print(\"[INFO] training network...\")\n network = NeuralNetwork([train_x.shape[1], 32, 16, 10])\n print(\"[INFO] {}\".format(network))\n network.fit(train_x, train_y, epochs=1000)\n\n # evaluate the network\n print(\"[INFO] evaluating network...\")\n predictions = network.predict(test_x)\n predictions = predictions.argmax(axis=1)\n print(classification_report(test_y.argmax(axis=1), predictions))", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) 
is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def test_predictor():", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test8():\n test = HMM(range(10), range(50))\n print 'Generating sample data...'\n l = []\n test.set_random_proba()\n for i in range(10):\n obs = test.simulate(100)\n l.append(obs)\n print 'Original'\n print 'A =', test.A\n print 'B =', test.B\n print 'pi =', test.pi\n print\n print 'Randomizing model...'\n test.set_random_proba()\n print 'Training model...'\n test.multiple_learn(l)\n print 'trained values'\n print 'A =', test.A\n print 'B =', test.B\n print 'pi =', test.pi", "def test_model_MNIST(model : neural_network.NeuralNetwork, dataset) -> (float, list):\n print(\"Testing Neural Network on MNIST Test Dataset\")\n total = 10000\n correct = 0\n incorrect_indices = []\n for i in range(10000):\n num = test_single_image_MINST(i, model, dataset)\n if num == dataset.get_test_image_label(i):\n correct += 1\n else:\n incorrect_indices.append(i)\n success_rate = correct / total\n print(\"Accuracy: {:.3f} %\".format(success_rate * 100))\n return success_rate, incorrect_indices", "def mnist_raw():\n # CVDF mirror of http://yann.lecun.com/exdb/mnist/\n base_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n\n def parse_labels(filename):\n with gzip.open(filename, \"rb\") as fh:\n _ = struct.unpack(\">II\", fh.read(8))\n return numpy.array(array.array(\"B\", fh.read()), dtype=numpy.uint8)\n\n def parse_images(filename):\n with gzip.open(filename, \"rb\") as fh:\n _, num_data, rows, cols = struct.unpack(\">IIII\", fh.read(16))\n return numpy.array(array.array(\"B\", fh.read()),\n dtype=numpy.uint8).reshape(num_data, rows, cols)\n\n for filename in [\"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\",\n \"t10k-images-idx3-ubyte.gz\", \"t10k-labels-idx1-ubyte.gz\"]:\n _download(base_url + filename, filename)\n\n train_images = parse_images(path.join(_DATA, \"train-images-idx3-ubyte.gz\"))\n train_labels = parse_labels(path.join(_DATA, \"train-labels-idx1-ubyte.gz\"))\n test_images = parse_images(path.join(_DATA, \"t10k-images-idx3-ubyte.gz\"))\n test_labels = parse_labels(path.join(_DATA, \"t10k-labels-idx1-ubyte.gz\"))\n\n return train_images, train_labels, test_images, test_labels", "def test_LoadMnistDataset():\n loader = mnist_data_loader.MnistDataLoader()\n data = loader.Load()\n\n assert len(data.train.images) == 60000\n assert len(data.train.labels) == 60000\n\n assert len(data.test.images) == 10000\n assert len(data.test.labels) == 10000", "def test(self, test, test_labels):", "def main(): \n symbolic_sample()\n print 'Done.'", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }