query (string, 9 – 9.05k chars) | document (string, 10 – 222k chars) | negatives (list, 19 – 20 items) | metadata (dict)
---|---|---|---|
Create a flexible volume that is a clone of a "backing" or "parent" flexible volume by spawning a background job. The jobid will be returned. The progress of the job can be tracked using the job APIs. This command fails if the chosen parent volume is currently involved in a split operation. This command also fails if the chosen parent volume is a traditional volume. Cloning is a new capability that applies exclusively to flexible volumes.
|
def volume_clone_create_async(self, parent_volume, volume, use_snaprestore_license=None, junction_active=None, space_reserve=None, junction_path=None, parent_snapshot=None):
return self.request( "volume-clone-create-async", {
'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],
'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],
'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],
'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],
'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],
}, {
'result-error-message': [ basestring, False ],
'result-jobid': [ int, False ],
'result-error-code': [ int, False ],
'result-status': [ basestring, False ],
} )
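
A minimal usage sketch for the binding above, assuming a connected API client object `api` that exposes these generated methods and whose `request` wrapper returns a dict keyed by the declared output names; the client object, the dict-style access, and the helper name are illustrative assumptions, not part of the generated code.

def start_clone(api, parent_volume, clone_name):
    # Kick off the background clone; the call returns immediately with a job id.
    result = api.volume_clone_create_async(
        parent_volume=parent_volume,
        volume=clone_name,
    )
    # Declared outputs: result-jobid, result-status, result-error-code, result-error-message.
    if result.get('result-error-message'):
        raise RuntimeError('clone failed: %s (code %s)' % (
            result['result-error-message'], result.get('result-error-code')))
    # Track this job id with the job APIs to follow clone progress.
    return result.get('result-jobid')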
|
[
"def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):\n return self.request( \"volume-clone-create\", {\n 'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],\n 'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],\n 'force_worm_clone': [ force_worm_clone, 'force-worm-clone', [ bool, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],\n 'qos_policy_group_name': [ qos_policy_group_name, 'qos-policy-group-name', [ basestring, 'None' ], False ],\n 'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n 'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],\n 'volume_type': [ volume_type, 'volume-type', [ basestring, 'None' ], False ],\n }, {\n } )",
"def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc",
"def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )",
"def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)",
"def do_backupjob_create(cs, args):\n cs.backupjobs.create(args.instanceid,\n args.vault_service,\n args.display_name,\n args.display_description)",
"def create_subvolume(self, client, vol_name, subvol_name, validate=True, **kwargs):\n subvolume_cmd = f\"ceph fs subvolume create {vol_name} {subvol_name}\"\n if kwargs.get(\"size\"):\n subvolume_cmd += f\" --size {kwargs.get('size')}\"\n if kwargs.get(\"group_name\"):\n subvolume_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"pool_layout\"):\n subvolume_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n if kwargs.get(\"uid\"):\n subvolume_cmd += f\" --uid {kwargs.get('uid')}\"\n if kwargs.get(\"gid\"):\n subvolume_cmd += f\" --gid {kwargs.get('gid')}\"\n if kwargs.get(\"mode\"):\n subvolume_cmd += f\" --mode {kwargs.get('mode')}\"\n if kwargs.get(\"namespace-isolated\"):\n subvolume_cmd += \" --namespace-isolated\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=subvolume_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of subvolume : {subvol_name} failed\")\n return cmd_out, cmd_rc",
"def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)",
"def createItem(parent, **args):\n addCreator(args, parent)\n itemModel = loadModel('item')\n args['folder'] = parent\n return itemModel.createItem(**args)",
"def create_volume():\n with settings(warn_only=True):\n run(f'docker volume create {db_volume}')",
"def copy_parent_file_request_data(sess, child_job, parent_job, file_type, is_local):\n log_data = {\n 'message': 'Copying data from parent job with job_id:{}'.format(parent_job.job_id),\n 'message_type': 'BrokerInfo',\n 'job_id': child_job.job_id,\n 'file_type': FILE_TYPE_DICT_LETTER_NAME[file_type]\n }\n\n # keep path but update file name\n filename = '{}/{}'.format(child_job.filename.rsplit('/', 1)[0], parent_job.original_filename)\n\n # copy parent job's data\n child_job.from_cached = True\n child_job.filename = filename\n child_job.original_filename = parent_job.original_filename\n child_job.number_of_errors = parent_job.number_of_errors\n child_job.number_of_warnings = parent_job.number_of_warnings\n child_job.error_message = parent_job.error_message\n\n # change the validation job's file data when within a submission\n if child_job.submission_id is not None:\n val_job = sess.query(Job).filter(Job.submission_id == child_job.submission_id,\n Job.file_type_id == FILE_TYPE_DICT_LETTER_ID[file_type],\n Job.job_type_id == JOB_TYPE_DICT['csv_record_validation']).one()\n val_job.filename = filename\n val_job.original_filename = parent_job.original_filename\n sess.commit()\n\n if not is_local:\n # Need access to AWS to check if file already exists within a bucket\n if parent_job.filename != child_job.filename and not file_already_exists(child_job.filename, file_type,\n log_data):\n # copy the parent file into the child's S3 location\n log_data['message'] = 'Copying the cached {} file from job {}'.format(file_type, parent_job.job_id)\n logger.info(log_data)\n with smart_open.smart_open(S3Handler.create_file_path(parent_job.filename), 'r') as reader:\n stream_file_to_s3(child_job.filename, reader)\n\n # mark job status last so the validation job doesn't start until everything is done\n mark_job_status(child_job.job_id, JOB_STATUS_DICT_ID[parent_job.job_status_id])",
"def _create_container(context, path, l_mtime, size):\n new_context = context.copy()\n new_context.input_ = None\n new_context.headers = None\n new_context.query = None\n container = path.split('/', 1)[0] + '_segments'\n cli_put_container(new_context, container)\n prefix = container + '/' + path.split('/', 1)[1]\n prefix = '%s/%s/%s/' % (prefix, l_mtime, size)\n\n return prefix",
"def _verify_job_parentage(self, parent_job_id, child_job_id):\n if parent_job_id not in self._running_jobs:\n raise ValueError('Parent job id {} not found, cannot validate child job {}.'.format(parent_job_id, child_job_id))\n if child_job_id not in self._running_jobs:\n parent_job = self.get_job(parent_job_id)\n parent_state = parent_job.state()\n if child_job_id not in parent_state.get('sub_jobs', []):\n raise ValueError('Child job id {} is not a child of parent job {}'.format(child_job_id, parent_job_id))\n else:\n self._create_jobs([child_job_id])\n # injects its app id and version\n child_job = self.get_job(child_job_id)\n child_job.app_id = parent_job.meta.get('batch_app')\n child_job.tag = parent_job.meta.get('batch_tag', 'release')",
"def new_job():\n content = request.json\n job = content.get('job', None)\n casename = content.get('casename', None)\n source = content.get('source', None)\n if not job or not casename or not source:\n abort(403)\n params = content.get('params', {})\n path = content.get('path', None)\n if path == '':\n path = None\n pid = run_rvt2(job, casename=casename, source=source, path=path, params=params, background=True)\n return dict(pid=pid)",
"def do_create_volume(sess, size, display_name, attach_it, chap_credentials, mode):\n\n try:\n _logger.info(\"Creating a new %d GB volume %s\", size, display_name)\n inst = sess.this_instance()\n if inst is None:\n raise Exception(\"OCI SDK error: couldn't get instance info\")\n _logger.debug('\\n availability_domain %s\\n compartment_id %s',\n inst.get_availability_domain_name(), inst.get_compartment_id())\n #\n # GT\n # vol = sess.create_volume(inst.get_compartment_id(),\n vol = sess.create_volume(sess.this_compartment().get_ocid(),\n inst.get_availability_domain_name(),\n size=size,\n display_name=display_name,\n wait=True)\n except Exception as e:\n _logger.debug(\"Failed to create volume\", exc_info=True)\n raise Exception(\"Failed to create volume\") from e\n\n _logger.info(\"Volume [%s] created\", vol.get_display_name())\n\n if not attach_it:\n return\n\n compat_info_message(gen_msg=\"Attaching the volume to this instance\", mode=mode)\n try:\n if chap_credentials:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=True)\n else:\n vol = vol.attach_to(instance_id=inst.get_ocid(), use_chap=False)\n except Exception as e:\n _logger.debug('Cannot attach BV', exc_info=True)\n vol.destroy()\n raise Exception('Cannot attach BV') from e\n #\n # attach using iscsiadm commands\n compat_info_message(gen_msg=\"Attaching iSCSI device.\", mode=mode)\n\n vol_portal_ip = vol.get_portal_ip()\n vol_portal_port = vol.get_portal_port()\n vol_iqn = vol.get_iqn()\n vol_username = vol.get_user()\n vol_password = vol.get_password()\n retval = iscsiadm.attach(ipaddr=vol_portal_ip,\n port=vol_portal_port,\n iqn=vol_iqn,\n username=vol_username,\n password=vol_password,\n auto_startup=True)\n compat_info_message(compat_msg=\"iscsiadm attach Result: %s\" % iscsiadm.error_message_from_code(retval),\n gen_msg=\"Volume [%s] is attached.\" % vol.get_display_name(), mode=mode)\n if retval == 0:\n _logger.debug('Creation successful')\n if chap_credentials:\n _logger.debug('Attachment OK: saving chap credentials.')\n add_chap_secret(vol_iqn, vol_username, vol_password)\n return\n\n # here because of error case\n try:\n _logger.debug('Destroying the volume')\n vol.destroy()\n except Exception as e:\n _logger.debug(\"Failed to destroy volume\", exc_info=True)\n _logger.error(\"Failed to destroy volume: %s\", str(e))\n\n raise Exception('Failed to attach created volume: %s' % iscsiadm.error_message_from_code(retval))",
"def create_volume(DryRun=None, Size=None, SnapshotId=None, AvailabilityZone=None, VolumeType=None, Iops=None, Encrypted=None, KmsKeyId=None, TagSpecifications=None):\n pass",
"def test_attach_elsewhere_attached_volume(self):\n api = gceblockdeviceapi_for_test(self)\n gce_fixture = self.useFixture(GCEComputeTestObjects(\n compute=api._compute,\n project=get_machine_project(),\n zone=get_machine_zone()\n ))\n\n instance_name = u\"functional-test-\" + unicode(uuid4())\n other_instance = gce_fixture.create_instance(instance_name)\n\n new_volume = api.create_volume(\n dataset_id=uuid4(),\n size=get_minimum_allocatable_size()\n )\n\n attached_volume = api.attach_volume(\n new_volume.blockdevice_id,\n attach_to=other_instance.name,\n )\n\n self.assertRaises(\n AlreadyAttachedVolume,\n api.attach_volume,\n blockdevice_id=attached_volume.blockdevice_id,\n attach_to=api.compute_instance_id(),\n )",
"def heketi_volume_create(heketi_client_node, heketi_server_url, size,\n raise_on_error=True, **kwargs):\n\n if not kwargs.get('user'):\n openshift_config = g.config.get(\"cns\", g.config.get(\"openshift\"))\n heketi_cli_user = openshift_config['heketi_config']['heketi_cli_user']\n if heketi_cli_user:\n kwargs['user'] = heketi_cli_user\n heketi_cli_key = openshift_config[\n 'heketi_config']['heketi_cli_key']\n if heketi_cli_key is not None:\n kwargs['secret'] = heketi_cli_key\n\n heketi_server_url = (\n heketi_server_url if heketi_server_url else (\n \"http://heketi-storage-project.cloudapps.mystorage.com\"))\n\n block_arg = \"--block\" if kwargs.get(\"block\") else \"\"\n clusters_arg = (\"--clusters %s\" % kwargs.get(\"clusters\")\n if kwargs.get(\"clusters\") else \"\")\n disperse_data_arg = (\"--disperse-data %d\" % kwargs.get(\"disperse_data\")\n if kwargs.get(\"disperse_data\") else \"\")\n durability_arg = (\"--durability %s\" % kwargs.get(\"durability\")\n if kwargs.get(\"durability\") else \"\")\n gid_arg = \"--gid %d\" % int(kwargs.get(\"gid\")) if kwargs.get(\"gid\") else \"\"\n gluster_volume_options_arg = (\"--gluster-volume-options '%s'\"\n % kwargs.get(\"gluster_volume_options\")\n if kwargs.get(\"gluster_volume_options\")\n else \"\")\n name_arg = \"--name %s\" % kwargs.get(\"name\") if kwargs.get(\"name\") else \"\"\n persistent_volume_arg = (\"--persistent-volume %s\"\n % kwargs.get(\"persistent_volume\")\n if kwargs.get(\"persistent_volume\") else \"\")\n persistent_volume_endpoint_arg = (\"--persistent-volume-endpoint %s\"\n % (kwargs.get(\n \"persistent_volume_endpoint\"))\n if (kwargs.get(\n \"persistent_volume_endpoint\"))\n else \"\")\n persistent_volume_file_arg = (\"--persistent-volume-file %s\"\n % kwargs.get(\"persistent_volume_file\")\n if kwargs.get(\"persistent_volume_file\")\n else \"\")\n redundancy_arg = (\"--redundancy %d\" % int(kwargs.get(\"redundancy\"))\n if kwargs.get(\"redundancy\") else \"\")\n replica_arg = (\"--replica %d\" % int(kwargs.get(\"replica\"))\n if kwargs.get(\"replica\") else \"\")\n snapshot_factor_arg = (\"--snapshot-factor %f\"\n % float(kwargs.get(\"snapshot_factor\"))\n if kwargs.get(\"snapshot_factor\") else \"\")\n json_arg = \"--json\" if kwargs.get(\"json\") else \"\"\n secret_arg = (\n \"--secret %s\" % kwargs.get(\"secret\") if kwargs.get(\"secret\") else \"\")\n user_arg = \"--user %s\" % kwargs.get(\"user\") if kwargs.get(\"user\") else \"\"\n\n cmd = (\"heketi-cli -s %s volume create --size=%s %s %s %s %s %s %s \"\n \"%s %s %s %s %s %s %s %s %s %s\" % (\n heketi_server_url, str(size), block_arg, clusters_arg,\n disperse_data_arg, durability_arg, gid_arg,\n gluster_volume_options_arg, name_arg,\n persistent_volume_arg, persistent_volume_endpoint_arg,\n persistent_volume_file_arg, redundancy_arg, replica_arg,\n snapshot_factor_arg, json_arg, secret_arg, user_arg))\n cmd = TIMEOUT_PREFIX + cmd\n out = heketi_cmd_run(\n heketi_client_node, cmd, raise_on_error=raise_on_error)\n if json_arg and out:\n return json.loads(out)\n return out",
"def clip(self,\r\n parent_parcels,\r\n clip_record=None,\r\n clipping_parcels=None,\r\n geometry=None,\r\n moment=None,\r\n ):\r\n gdb_version = self._version.properties.versionName\r\n session_id = self._version._guid\r\n url = \"{base}/clip\".format(base=self._url)\r\n params = {\r\n \"gdbVersion\": gdb_version,\r\n \"sessionId\": session_id,\r\n \"parentParcels\": parent_parcels,\r\n \"moment\" : moment,\r\n \"clipRecord\" : clip_record,\r\n \"clippingParcels\" : clipping_parcels,\r\n \"clippingGeometry\" : geometry,\r\n \"f\": \"json\"\r\n }\r\n return self._con.post(url, params)",
"def copyTo(self, newParent: ghidra.framework.model.DomainFolder, monitor: ghidra.util.task.TaskMonitor) -> ghidra.framework.model.DomainFolder:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Scans aggregates and returns a list of compatible target aggregates for the given volume move operation.
|
def volume_move_target_aggr_get_iter(self, vserver, volume_name, max_records=None, desired_attributes=None, tag=None, query=None):
return self.request( "volume-move-target-aggr-get-iter", {
'max_records': max_records,
'desired_attributes': [ desired_attributes, 'desired-attributes', [ VolumeMoveTargetAggrInfo, 'None' ], False ],
'vserver': [ vserver, 'vserver', [ basestring, 'None' ], False ],
'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],
'tag': tag,
'query': [ query, 'query', [ VolumeMoveTargetAggrInfo, 'None' ], False ],
}, {
'attributes-list': [ VolumeMoveTargetAggrInfo, True ],
} )
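
A hedged sketch of how this iterator might be driven, again assuming an `api` client exposing the generated method and dict-style results (names are illustrative, not part of the generated bindings).

def list_move_targets(api, vserver, volume_name, max_records=100):
    # Ask ONTAP which aggregates could host the moved volume.
    result = api.volume_move_target_aggr_get_iter(
        vserver=vserver,
        volume_name=volume_name,
        max_records=max_records,
    )
    # 'attributes-list' is the only declared output; each entry is a
    # VolumeMoveTargetAggrInfo record whose concrete representation depends
    # on the request wrapper, so it is returned unchanged here.
    return result.get('attributes-list', [])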
|
[
"def get_mutations_for_target(target: LocIndex) -> Set[Any]:\n search_space: List[Set[Any]] = [m.operations for m in get_compatible_operation_sets()]\n mutation_ops: Set[Any] = set()\n\n for potential_ops in search_space:\n if target.op_type in potential_ops:\n LOGGER.debug(\"Potential mutatest operations found for target: %s\", target.op_type)\n mutation_ops = potential_ops.copy()\n mutation_ops.remove(target.op_type)\n\n # Special case for If_Statement since that is a default to transform to True or False\n # but not a validation mutation target by itself\n if \"If_Statement\" in mutation_ops:\n mutation_ops.remove(\"If_Statement\")\n\n break\n\n return mutation_ops",
"def _target_allocations(self):\n Allocation = self.models.Allocation\n query = Session.query(Allocation)\n query = query.filter(Allocation.group == self.target)\n\n # master allocations only\n query = query.filter(Allocation.resource == Allocation.mirror_of)\n\n return query",
"def find_all_baseops(op: RelationalOp) -> List[BaseOp]:\n if (op.op_type == RelationalOpType.base):\n base_op = cast(BaseOp, op)\n return [base_op]\n if (op.op_type == RelationalOpType.join):\n join_op = cast(Join, op)\n b1 = find_all_baseops(op.child)\n b2 = find_all_baseops(join_op.other._ops)\n return b1 + b2\n if (op.has_child()):\n return find_all_baseops(op.child)\n else:\n return []",
"def collect_balls(self, target='home, trough') -> None:\n tag_list = Util.string_to_event_list(target)\n\n self.debug_log(\"Collecting all balls to devices with tags '%s'\",\n tag_list)\n\n target_devices = set()\n source_devices = set()\n balls_to_collect = False\n\n for tag in tag_list:\n for device in self.machine.ball_devices.items_tagged(tag): # type: ignore\n target_devices.add(device)\n\n for device in self.machine.ball_devices.values():\n if device not in target_devices and device.available_balls:\n source_devices.add(device)\n balls_to_collect = True\n\n if balls_to_collect:\n self.debug_log(\"Ejecting all balls from: %s\", source_devices)\n\n self.machine.events.post('collecting_balls')\n '''event: collecting_balls\n\n desc: Posted by the ball controller when it starts the collecting\n balls process.\n\n '''\n\n for device in target_devices:\n self.machine.events.remove_handler(self._collecting_balls_entered_callback)\n self.machine.events.add_handler(\n 'balldevice_{}_ball_enter'.format(device.name),\n self._collecting_balls_entered_callback,\n target=target, priority=10)\n\n for device in source_devices:\n if not device.is_playfield():\n if \"drain\" in device.tags:\n device.eject_all(device.find_next_trough())\n else:\n device.eject_all()\n else:\n self.debug_log(\"All balls are collected\")\n self._collecting_balls_complete()",
"def getMovables(self):\n listOfBindings = self.kb.kb_ask(self.moveableQuery)\n if listOfBindings:\n statements = [instantiate(self.moveableQuery.statement,bindings) for bindings in listOfBindings]\n statements.sort()\n return statements\n else:\n return listOfBindings",
"def reduction_drivers(self,target_index,method='internal',max_drivers=None,GRASP_iterations=None):\n methods = ['internal','minimal','GRASP']\n assert method in methods, ' '.join([\"method argument of reduction_drivers must be among\",str(methods)])\n drivers = []\n for path in nx.all_simple_paths(self.digraph,0,target_index):\n path_motif_history=[]\n path_drivers = []\n for ind in path:\n if ind == 0:\n ind_prev = ind\n continue\n path_motif_history += [x for x in self.motif_reduction_dict[ind].motif_history if not x in path_motif_history]\n\n if method == 'internal':\n history_drivers = sm_doi.internal_drivers(path_motif_history[-1],\n self.motif_reduction_dict[ind_prev].reduced_primes,\n max_drivers=max_drivers)\n elif method == 'GRASP':\n history_drivers = sm_doi.GRASP(path_motif_history[-1],\n self.motif_reduction_dict[ind_prev].reduced_primes,\n GRASP_iterations = GRASP_iterations)\n if len(history_drivers) == 0:\n history_drivers = [path_motif_history[-1]]\n elif method == 'minimal':\n history_drivers = sm_doi.minimal_drivers(path_motif_history[-1],\n self.motif_reduction_dict[ind_prev].reduced_primes,\n max_drivers=max_drivers)\n\n path_drivers.append(history_drivers)\n\n ind_prev = ind\n if method == 'internal':\n # merge control sets along the path\n for control_sequence in it.product(*path_drivers):\n control_set = {k:v for x in control_sequence for k,v in x.items()}\n if not control_set in drivers:\n drivers.append(control_set)\n elif method == 'minimal' or method == 'GRASP':\n drivers.append(path_drivers)\n return drivers",
"def _get_all_ops_in_neighborhood(op: Op, direction: str, neighborhood=None):\n if neighborhood is None:\n neighborhood = {}\n neighborhood[op] = direction\n if direction == 'input' and op.inputs:\n input_products = [inp for inp in op.inputs if inp.is_inter_module()]\n input_ops = [inp.producer for inp in input_products]\n for input_op in input_ops:\n if input_op not in neighborhood:\n neighborhood[input_op] = 'output'\n if input_op.type == 'Split':\n _get_all_ops_in_neighborhood(input_op, 'input', neighborhood)\n _get_all_ops_in_neighborhood(input_op, 'output', neighborhood)\n else:\n _get_all_ops_in_neighborhood(input_op, 'output', neighborhood)\n elif op.output:\n output_ops = [consumer for consumer in op.output.consumers]\n for output_op in output_ops:\n if output_op not in neighborhood:\n neighborhood[output_op] = 'input'\n if output_op.type == 'Split':\n _get_all_ops_in_neighborhood(output_op, 'output', neighborhood)\n else:\n _get_all_ops_in_neighborhood(output_op, 'input', neighborhood)\n return neighborhood",
"def aggregate(self, result, targets):\n if isinstance(result, float):\n if isinstance(targets, int):\n self._global_results[targets].append(result)\n else:\n target_np = format_tensor_to_ndarray(targets)\n if len(target_np) > 1:\n raise ValueError(\"One result can not be aggreated to multiple targets.\")\n else:\n result_np = format_tensor_to_ndarray(result)\n if isinstance(targets, int):\n for res in result_np:\n self._global_results[targets].append(float(res))\n else:\n target_np = format_tensor_to_ndarray(targets)\n if len(target_np) != len(result_np):\n raise ValueError(\"Length of result does not match with length of targets.\")\n for tar, res in zip(target_np, result_np):\n self._global_results[int(tar)].append(float(res))",
"def get_all_ops_in_neighborhood(op: Op, direction: str, neighborhood=None):\n if neighborhood is None:\n neighborhood = {}\n neighborhood[op] = direction\n if direction == 'input' and op.inputs:\n input_products = [inp for inp in op.inputs if inp.is_inter_module()]\n input_ops = [inp.producer for inp in input_products]\n for input_op in input_ops:\n if input_op not in neighborhood:\n neighborhood[input_op] = 'output'\n if input_op.type == 'Split':\n # Neighborhood ops include input of split, as well as all other consumers of split\n get_all_ops_in_neighborhood(input_op, 'input', neighborhood)\n get_all_ops_in_neighborhood(input_op, 'output', neighborhood)\n elif op.output:\n output_ops = [consumer for consumer in op.output.consumers]\n for output_op in output_ops:\n if output_op not in neighborhood:\n neighborhood[output_op] = 'input'\n if output_op.type == 'Split':\n # Neighborhood ops include all consumers of split\n get_all_ops_in_neighborhood(output_op, 'output', neighborhood)\n return neighborhood",
"def get_internal_ops_for_pattern(match_result: graph_matcher.MatchResult) -> List[tf.Operation]:\n internal_ops_list = [] # Place holder for the list of internal Ops associated with the detected Op.\n\n # The patter_to_op_tensor is a dictionary of Ops and Tensors encountered for a pattern while matching.\n # pylint: disable=protected-access\n op_tensor_dict = match_result._pattern_to_op_tensor.values()\n ops_list = [internal_op for internal_op, _ in op_tensor_dict]\n\n # The Ops_list also contains input Ops. Since only the internal ops associated with detected Op is needed,\n # skip the input Ops. This is done by making sure that the input Op's Parent Op is not in the ops_list.\n for int_op in ops_list:\n if int_op.inputs:\n parent_op = int_op.inputs[0].op\n if parent_op in ops_list:\n internal_ops_list.append(int_op)\n return internal_ops_list",
"def find_eops(self):\n if self.is_eop:\n eops = [self]\n if self.special:\n for child in self.child_packages:\n eops.extend(child.find_eops())\n return eops\n else:\n l = []\n for child in self.child_packages:\n l.extend(child.find_eops())\n return l",
"def detect_ops_in_graph(self, op_to_module_dict: Dict[tf.Operation, ModuleIdentifierOpInfo]):\n\n all_op_patterns_list = [op_dict['pattern'] for op_dict in list(reference_op_pattern_info_dict.values())]\n for pattern in all_op_patterns_list:\n layer_matcher = graph_matcher.GraphMatcher(pattern)\n\n # Graph Match\n for match_result in layer_matcher.match_graph(self._graph):\n matched_patterns = list(match_result._pattern_to_op_tensor.keys())\n op = match_result.get_op(matched_patterns[0])\n # For ops like FusedBatchNorm, there are multiple output ops of the model which may be matched (Merge,\n # Merge_1, Merge_2. In these cases, Merge is the one that should be matched because if either of the\n # other two are matched, Merge will not make it into the op_to_module_dict.\n if op not in self._valid_ops:\n continue\n template_pattern_type = pattern_to_op_type[matched_patterns[0]]\n if op in op_to_module_dict:\n # op was already matched with a different pattern previously. Compare lengths of the previous\n # pattern with current pattern, and replace the previous op type with the current op type if more\n # ops were matched.\n # This can happen if one pattern is a subset of another (Conv2D without bias vs Conv2D with bias for\n # example. If the same op is matched with both patterns, we will pick Conv2D with bias to be the one\n # to use.\n op_info = op_to_module_dict[op]\n if reference_op_pattern_info_dict[op_info.pattern_type]['length'] >= \\\n reference_op_pattern_info_dict[template_pattern_type]['length']:\n # op was already matched with a larger pattern set\n continue\n\n ops_list = [op for op in get_internal_ops_for_pattern(match_result) if op in self._valid_ops]\n # ops_list should not be empty since there was an earlier check that the current match_result has ops\n # in self._valid_ops.\n if not ops_list:\n logger.error('Valid matched ops list should not be empty')\n raise AssertionError('Valid matched ops list should not be empty')\n # Check if any ops in ops_list were already matched with a larger pattern. If so, no need to change\n # existing entries in op_to_module_dict.\n if not SubGraphMatcher._is_subset_of_already_matched_op(template_pattern_type, ops_list,\n op_to_module_dict):\n op_type = reference_op_pattern_info_dict[template_pattern_type]['op_type']\n module_name = get_module_name(reference_op_pattern_info_dict[template_pattern_type]['module_regex'],\n ops_list)\n associated_op = get_associated_op(reference_op_pattern_info_dict[template_pattern_type]\n ['associated_op_regex'], ops_list)\n op_info = ModuleIdentifierOpInfo(module_name, op_type, associated_op,\n pattern_type=template_pattern_type,\n internal_ops=ops_list)\n for op in ops_list:\n op_to_module_dict[op] = op_info",
"def extract_targets(infile_name, pam, target_len):\n # TODO(jsh): Do something with \"bases\" other than N, ATCG.\n logging.info('Extracting target set from {infile_name}.'.format(**vars()))\n fasta_sequences = SeqIO.parse(infile_name, 'fasta')\n raw_targets = dict()\n for seq_record in fasta_sequences:\n genome = seq_record.seq.upper()\n chrom = seq_record.name\n pam = pam.upper()\n reversed_pam = revcomp(pam)\n block = r'(.{' + str(target_len) + r'})'\n pam_pattern = r'(?=(' + block + pam + r'))'\n rev_pattern = r'(?=(' + reversed_pam + block + r'))'\n for hit in re.finditer(pam_pattern, str(genome)):\n if 'N' in hit.group(1):\n continue # ...Don't target unknown genetic material.\n t = sgrna_target(\n hit.group(2),\n hit.group(1)[-len(pam):],\n chrom,\n hit.start() + 1,\n hit.start() + 1 + target_len,\n False)\n name = t.id_str()\n raw_targets[name] = t\n for hit in re.finditer(rev_pattern, str(genome)):\n if 'N' in hit.group(1):\n continue\n t = sgrna_target(\n revcomp(hit.group(2)),\n revcomp(hit.group(1))[-len(pam):],\n chrom,\n hit.start() + 1 + len(pam),\n hit.start() + 1 + len(pam) + target_len,\n True)\n name = t.id_str()\n raw_targets[name] = t\n logging.info('{0} raw targets.'.format(len(raw_targets)))\n return raw_targets",
"def target_nodes(self, target):\n if target not in (\"nodes\", \"drpnodes\"):\n raise ex.excError(\"invalid target: %s\" % target)\n return set([node for node in getattr(self.svc, target)])",
"def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device",
"def _find_all(self):\n\n sources = set()\n\n # find all sources, with transformers included\n for d in self._consumers:\n in_edges = self._dep_graph.in_edges(d, data=True)\n for s, _, data in in_edges:\n if 'type' in data and data['type'] == 'kill':\n # skip killing edges\n continue\n if isinstance(s.variable, SimRegisterVariable) and s.variable.reg == self._ptr_reg:\n sources.add(s)\n\n transformers = set()\n # some of them are transformers\n # TODO: figure out transformer nodes that involve more than one register\n for s in sources:\n # if a register depends on itself, and writes to the very same register, then it's a transformer\n # e.g. inc esi (esi depends on esi)\n # add esi, eax (esi depends on esi and eax, but it writes to itself anyways)\n in_edges = self._dep_graph.in_edges(s, data=True)\n preds = [ p for p, _, data in in_edges if 'type' not in data or data['type'] != 'kill' ] # skip killing edges\n if any([ isinstance(v.variable, SimRegisterVariable) and v.variable.reg == self._ptr_reg for v in preds ]):\n transformers.add(s)\n continue\n\n if transformers:\n sources = sources - transformers\n\n # for each source and transformer, find all sinks and consumers\n sinks = set()\n consumers = set()\n killers = set()\n for s in sources | transformers:\n out_edges = self._dep_graph.out_edges(s, data=True)\n for _, suc, data in out_edges:\n if 'type' in data:\n if data['type'] == 'mem_addr':\n # this is a pointer consumer\n consumers.add(suc)\n continue\n elif data['type'] == 'mem_data':\n # this is a pointer sink\n sinks.add(suc)\n continue\n elif data['type'] == 'kill':\n killers.add(suc)\n continue\n if isinstance(suc.variable, SimRegisterVariable):\n if suc.variable.reg < 40: # FIXME: this is ugly\n if suc not in transformers:\n # it's written to a register. sink\n sinks.add(suc)\n continue\n # unsupported. 
WTF...\n import ipdb; ipdb.set_trace()\n\n self._sources = sources\n self._sinks = sinks\n self._consumers = consumers\n self._transformers = transformers\n self._killers = killers\n\n # convert them into dicts with instruction addresses as their keys, so we can layout them on a function\n # transition graph\n sources = dict((s.location.ins_addr, Source(s, self._function.instruction_size(s.location.ins_addr)))\n for s in sources)\n sinks = dict((s.location.ins_addr, Sink(s, self._function.instruction_size(s.location.ins_addr)))\n for s in sinks)\n consumers = dict((s.location.ins_addr, Consumer(s, self._function.instruction_size(s.location.ins_addr)))\n for s in consumers)\n transformers = dict((s.location.ins_addr, Transformer(s, self._function.instruction_size(s.location.ins_addr)))\n for s in transformers)\n killers = dict((s.location.ins_addr, Killer(s, self._function.instruction_size(s.location.ins_addr)))\n for s in killers)\n\n g = self._function.subgraph(set(sources.keys() + sinks.keys() + consumers.keys() + transformers.keys() +\n killers.keys()\n )\n )\n\n g_ = networkx.DiGraph()\n\n for src, dst in g.edges_iter():\n # TODO: create a single function that does the following crap\n src_ = sources.get(src, None)\n if src_ is None: src_ = sinks.get(src, None)\n if src_ is None: src_ = consumers.get(src, None)\n if src_ is None: src_ = transformers.get(src, None)\n if src_ is None: src_ = killers.get(src, None)\n if src_ is None: src_ = MergePoint(src, self._function.instruction_size(src))\n\n dst_ = sources.get(dst, None)\n if dst_ is None: dst_ = sinks.get(dst, None)\n if dst_ is None: dst_ = consumers.get(dst, None)\n if dst_ is None: dst_ = transformers.get(dst, None)\n if dst_ is None: dst_ = killers.get(dst, None)\n if dst_ is None: dst_ = MergePoint(dst, self._function.instruction_size(src))\n\n if not isinstance(src_, Killer) and not isinstance(dst_, Killer):\n g_.add_edge(src_, dst_)\n\n return g_",
"def add_target_command_groups(self, target: \"SoCTarget\", command_set: \"CommandSet\"):\n pass",
"def get_matching_shapes_from_group(source, target):\n\n # gets prefix-less shapes\n sources_dict, targets_dict = get_clean_matching_shapes(source, target)\n\n return get_matching_shapes(sources_dict, targets_dict)",
"def query(self, kind, selected_pkgs, changed_pkgs):\n\n # Changes are calculated and no packages found, return empty list.\n if changed_pkgs == []:\n return []\n\n selection = '//...'\n if selected_pkgs:\n # targets without a '-' operator prefix are implicitly additive\n # when specifying build targets\n selection = selected_pkgs[0]\n for pkg in selected_pkgs[1:]:\n if pkg.startswith('-'):\n selection += ' '+pkg\n else:\n selection += ' +'+pkg\n\n\n changes = '//...'\n if changed_pkgs:\n changes = 'set(%s)' % ' '.join(changed_pkgs)\n\n query_pat = 'kind(%s, rdeps(%s, %s)) except attr(\\'tags\\', \\'manual\\', //...)'\n return [target for target in self.check_output(\n 'query',\n '--keep_going',\n '--noshow_progress',\n query_pat % (kind, selection, changes)\n ).split('\\n') if target.startswith(\"//\")]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the name of a flexible volume, either return its current size or set the volume's size to the stated amount. This API is not supported for Infinite Volumes. Also, this API does not allow setting the volume's size from a vFiler context.
|
def volume_size(self, volume, new_size=None):
return self.request( "volume-size", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],
}, {
'is-fixed-size-flex-volume': [ bool, False ],
'is-readonly-flex-volume': [ bool, False ],
'is-replica-flex-volume': [ bool, False ],
'volume-size': [ basestring, False ],
} )
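
A small sketch of the get-or-set behaviour described above, under the same assumptions about the `api` client; treating the size argument as an ONTAP-style string with an optional +/- prefix and k/m/g/t suffix is an assumption, not something the binding itself states.

def volume_size_example(api, volume, new_size=None):
    # With new_size omitted the call only reports the current size;
    # with a size string (e.g. '200g' or '+20g', assumed format) it resizes first.
    result = api.volume_size(volume=volume, new_size=new_size)
    return result.get('volume-size')

# current = volume_size_example(api, 'vol_projects')          # query only
# resized = volume_size_example(api, 'vol_projects', '+20g')  # grow by 20 GB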
|
[
"def volume_size_async(self, volume_name, new_size=None):\n return self.request( \"volume-size-async\", {\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-status': [ basestring, False ],\n 'result-error-code': [ int, False ],\n 'volume-size': [ basestring, False ],\n } )",
"def _extend_volume(self, name, new_size):\n LOG.debug('_extend__volume name: %s', name)\n params = {}\n params['volsize'] = ix_utils.get_bytes_from_gb(new_size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s/id/%s') % (\n FreeNASServer.REST_API_VOLUME,\n urllib.parse.quote_plus(\n self.configuration.ixsystems_dataset_path + '/' + name))\n ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,\n request_urn, jparams)\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while extending volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)",
"def resize_volume(self, size):\r\n curr_size = self.volume.size\r\n if size <= curr_size:\r\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\r\n \"than the current volume size of '%s'.\" % curr_size)\r\n body = {\"volume\": {\"size\": size}}\r\n self.manager.action(self, \"resize\", body=body)",
"def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)",
"def resize(self, size):\r\n self.instance.resize_volume(size)\r\n self.size = size",
"def VolumeExtend(new_size,\n gib,\n#pylint: disable=unused-argument\n volume_names,\n volume_ids,\n volume_prefix,\n volume_regex,\n volume_count,\n source_account,\n source_account_id,\n test,\n mvip,\n username,\n password):\n#pylint: enable=unused-argument\n options = copy.deepcopy(locals())\n options.pop(\"new_size\", None)\n options.pop(\"gib\", None)\n\n if gib:\n multiplier = 1024 * 1024 * 1024\n else:\n multiplier = 1000 * 1000 * 1000\n\n new_size = new_size * multiplier\n post_value = new_size\n if new_size % 4096 != 0:\n post_value = int((new_size // 4096 + 1) * 4096)\n\n return VolumeModify(property_name=\"totalSize\",\n property_value=new_size,\n post_value=post_value,\n **options)",
"def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )",
"def extend_volume(self, connection_properties):\n # The StorPool client (storpool_block service) running on this host\n # should have picked up the change already, so it is enough to query\n # the actual disk device to see if its size is correct.\n #\n volume_id = connection_properties.get('volume', None)\n if volume_id is None:\n raise exception.BrickException(\n 'Invalid StorPool connection data, no volume ID specified.')\n\n # Get the expected (new) size from the StorPool API\n volume = self._attach.volumeName(volume_id)\n LOG.debug('Querying the StorPool API for the size of %(vol)s',\n {'vol': volume})\n vdata = self._attach.api().volumeList(volume)[0]\n LOG.debug('Got size %(size)d', {'size': vdata.size})\n\n # Wait for the StorPool client to update the size of the local device\n path = '/dev/storpool/' + volume\n for _ in range(10):\n size = utils.get_device_size(self, path)\n LOG.debug('Got local size %(size)d', {'size': size})\n if size == vdata.size:\n return size\n time.sleep(0.1)\n else:\n size = utils.get_device_size(self, path)\n LOG.debug('Last attempt: local size %(size)d', {'size': size})\n return size",
"def resize(self, capacity, flags=0):\n ret = libvirtmod.virStorageVolResize(self._o, capacity, flags)\n if ret == -1: raise libvirtError ('virStorageVolResize() failed', vol=self)\n return ret",
"def manage_existing_get_size(self, volume, existing_ref):\n volume_info = self._validate_manage_existing_ref(existing_ref)\n size = self._round_bytes_to_gib(volume_info['size'])\n\n return size",
"def resize_vdi(self, name, size):\n wrap_popen('collie', 'vdi', 'resize', name, size)",
"def _to_volume(self, element, name=None):\r\n volId = findtext(element=element, xpath='volumeId',\r\n namespace=NAMESPACE)\r\n size = findtext(element=element, xpath='size', namespace=NAMESPACE)\r\n\r\n # Get our tags\r\n tags = self._get_resource_tags(element)\r\n\r\n # If name was not passed into the method then\r\n # fall back then use the volume id\r\n name = name if name else tags.get('Name', volId)\r\n\r\n # Get our extra dictionary\r\n extra = self._get_extra_dict(\r\n element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])\r\n\r\n return StorageVolume(id=volId,\r\n name=name,\r\n size=int(size),\r\n driver=self,\r\n extra=extra)",
"def resize(self, newsize=None):\n LinuxVolumeManager.has_lvm()\n if not self.ondisk:\n raise LinuxVolumeManager.LVMNotExistsException(self.__class__.__name__+\"(\"+str(self.getAttribute(\"name\"))+\")\")\n if not newsize:\n newsize=\"+\"+self.parentvg.getAttribute(\"free\")\n LinuxVolumeManager.lvm('lvresize', '-L %sM', '%s/%s' % (newsize, str(self.parentvg.getAttribute(\"name\")), str(self.getAttribute(\"name\"))))\n self.init_from_disk()",
"def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"maximum_volume_size\")",
"def _volume(self, value: object = None):\n if value is None:\n return int(self._player_info().get(\"vol\"))\n try:\n if isinstance(value, str) and (value.startswith('+') or value.startswith('-')):\n self._logger.debug(\"Adjusting volume by \" + str(value) + \". Getting old volume...\")\n new_volume = max(0, min(100, self._volume()+int(math.floor(float(value)))))\n self._logger.debug(\"Adjusting volume \"+str(value)+\" to \"+str(new_volume)+\"...\")\n else:\n new_volume = max(0, min(100, int(math.floor(float(value)))))\n self._logger.debug(\"Setting volume to \" + str(int(new_volume)))\n except ValueError:\n raise AttributeError(\"Volume must be between 0 and 100 or -100 to +100, inclusive, not '\"+str(value)+\"'\")\n response = self._send(\"setPlayerCmd:vol:\" + str(new_volume))\n if response.status_code != 200:\n raise linkplayctl.APIException(\"Failed to set volume to '\"+str(new_volume)+\"'\")\n return response.content.decode(\"utf-8\")",
"async def async_api_adjust_volume(\n hass: ha.HomeAssistant,\n config: AbstractConfig,\n directive: AlexaDirective,\n context: ha.Context,\n) -> AlexaResponse:\n volume_delta = int(directive.payload[\"volume\"])\n\n entity = directive.entity\n current_level = entity.attributes[media_player.const.ATTR_MEDIA_VOLUME_LEVEL]\n\n # read current state\n try:\n current = math.floor(int(current_level * 100))\n except ZeroDivisionError:\n current = 0\n\n volume = float(max(0, volume_delta + current) / 100)\n\n data: dict[str, Any] = {\n ATTR_ENTITY_ID: entity.entity_id,\n media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,\n }\n\n await hass.services.async_call(\n entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context\n )\n\n return directive.response()",
"def validate_volume_size(size):\n if size is None:\n raise exception.VolumeSizeNotSpecified()\n max_size = CONF.max_accepted_volume_size\n if int(size) > max_size:\n msg = (\"Volume 'size' cannot exceed maximum \"\n \"of %d Gb, %s cannot be accepted.\"\n % (max_size, size))\n raise exception.VolumeQuotaExceeded(msg)",
"def volume_autosize_set(self, volume, reset=None, increment_size=None, minimum_size=None, grow_threshold_percent=None, maximum_size=None, shrink_threshold_percent=None, is_enabled=None, mode=None):\n return self.request( \"volume-autosize-set\", {\n 'reset': [ reset, 'reset', [ bool, 'None' ], False ],\n 'increment_size': [ increment_size, 'increment-size', [ basestring, 'None' ], False ],\n 'minimum_size': [ minimum_size, 'minimum-size', [ basestring, 'None' ], False ],\n 'grow_threshold_percent': [ grow_threshold_percent, 'grow-threshold-percent', [ int, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'maximum_size': [ maximum_size, 'maximum-size', [ basestring, 'None' ], False ],\n 'shrink_threshold_percent': [ shrink_threshold_percent, 'shrink-threshold-percent', [ int, 'None' ], False ],\n 'is_enabled': [ is_enabled, 'is-enabled', [ bool, 'None' ], False ],\n 'mode': [ mode, 'mode', [ basestring, 'None' ], False ],\n }, {\n } )",
"def flex_volume(self) -> Optional[pulumi.Input['ThanosRulerSpecVolumesFlexVolumeArgs']]:\n return pulumi.get(self, \"flex_volume\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Restrict the specified volume, making it unavailable for user-level data access but leaving it (or its containing aggregate, if it's a flexible volume) available to internal ONTAP RAID-level access. This API is not supported for Infinite Volumes. This API is not supported on Infinite Volume constituents.
|
def volume_restrict(self, name, cifs_delay=None):
return self.request( "volume-restrict", {
'name': [ name, 'name', [ basestring, 'None' ], False ],
'cifs_delay': [ cifs_delay, 'cifs-delay', [ int, 'None' ], False ],
}, {
} )
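
A hedged sketch of restricting a volume with an optional CIFS warning delay; the `api` client is assumed as before, and treating cifs_delay as a delay in minutes is an assumption about the underlying ONTAP option, not something the binding itself states.

def restrict_volume(api, volume_name, cifs_delay=None):
    # cifs_delay (assumed to be minutes) gives CIFS clients advance warning
    # before the volume becomes unavailable; omit it to restrict immediately.
    return api.volume_restrict(name=volume_name, cifs_delay=cifs_delay)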
|
[
"def volume_restrict_async(self, volume_name):\n return self.request( \"volume-restrict-async\", {\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )",
"def modify_volume_attribute(DryRun=None, VolumeId=None, AutoEnableIO=None):\n pass",
"def volume(ctx, vol):\n avr = ctx.obj['avr']\n if vol:\n try:\n avr.volume = vol\n click.echo(avr.volume)\n except ReponseException as e:\n if \"Volume\" in str(e):\n msg = \"Volume must be specified in -0.5 increments.\"\n err = click.style(msg, fg='red')\n click.echo(err, err=True)\n else:\n click.echo(avr.volume)",
"def volume(vol):\n ReceiverManager().set_volume(vol)\n return jsonify(volume = vol, status = \"Ok\")",
"def set_volume_level(self, volume):\n self.soco.volume = str(int(volume * 100))",
"def set_volume_level(self, volume: float) -> None:\n self.send_command([\"mixer\", \"volume\", volume * 100])",
"def _chopped_volume_default(self):\n grid = self.grid\n grid.trait_set(x_max=self.slicePosition[1])\n\n volume = mlab.pipeline.volume(\n grid,\n figure=self.vscene3d.mayavi_scene,\n vmin=self.dataRange[0],\n vmax=self.dataRange[1]\n )\n\n volume._otf = self.otf\n volume._volume_property.set_scalar_opacity(self.otf)\n\n return volume",
"def pygame_volume(volume):\n volume *= 0.01 # Adjusting from percentage\n if 0.0005 < volume < 0.0078125:\n volume = 0.0078125 # pygame's min volume. This stops it going silent when volume is low but not muted.\n return volume",
"def volume_get_supported_guarantees(self, volume):\n return self.request( \"volume-get-supported-guarantees\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'guarantee-types': [ Guarantee, True ],\n } )",
"def volume(ctx, *args, **kwargs):",
"def volume_verify_suspend(self, volume=None):\n return self.request( \"volume-verify-suspend\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def _disable_async_replication(self, volume):\n\n current_array = self._get_current_array()\n LOG.debug(\"Disabling replication for volume %(id)s residing on \"\n \"array %(backend_id)s.\",\n {\"id\": volume[\"id\"],\n \"backend_id\": current_array.backend_id})\n try:\n current_array.set_pgroup(self._replication_pg_name,\n remvollist=([self._get_vol_name(volume)]))\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_COULD_NOT_BE_FOUND in err.text):\n ctxt.reraise = False\n LOG.warning(\"Disable replication on volume failed: \"\n \"already disabled: %s\", err.text)\n else:\n LOG.error(\"Disable replication on volume failed with \"\n \"message: %s\", err.text)",
"def allow_volume_expansion(self, value: bool):\n self._properties[\"allowVolumeExpansion\"] = value",
"def narrow(self, *args):\n return _coin.SbViewVolume_narrow(self, *args)",
"def allow_volume_expansion(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"allowVolumeExpansion\"),\n )",
"def set_volume(self, zone: int, volume: int):\n raise NotImplemented()",
"def set_volume(self, speaker: Speaker, volume: int):\n raise NotImplementedError()",
"def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )",
"def narrow(self, *args):\n return _coin.SbDPViewVolume_narrow(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Destroy the specified volume or plex. If a flexible volume is specified, all of its blocks are freed and returned to its containing aggregate; no other flexible volumes in the same containing aggregate (if any) are affected. If a traditional volume is specified, all of its plexes are destroyed, and its disks are returned to the appropriate spare pool(s). If a plex is specified, it must belong to a mirrored aggregate (which could potentially be embedded in a traditional volume); destroying it leaves the aggregate unmirrored. Only offline volumes and plexes can be destroyed. Plexes are not supported for Cluster-Mode volumes. This API is not supported for Infinite Volumes.
|
def volume_destroy(self, name, force=None, unmount_and_offline=None):
return self.request( "volume-destroy", {
'force': [ force, 'force', [ bool, 'None' ], False ],
'name': [ name, 'name', [ basestring, 'None' ], False ],
'unmount_and_offline': [ unmount_and_offline, 'unmount-and-offline', [ bool, 'None' ], False ],
}, {
} )
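
A cautious sketch of destroying a volume with the binding above, under the same assumed `api` client; whether unmount_and_offline is honoured for a given volume type is not guaranteed by this sketch.

def destroy_offline_volume(api, volume_name, force=False):
    # Only offline volumes and plexes can be destroyed; unmount_and_offline
    # asks ONTAP to unmount and offline the volume first as a convenience.
    return api.volume_destroy(
        name=volume_name,
        force=force,
        unmount_and_offline=True,
    )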
|
[
"def destroy_volume(self, volume):\r\n url = REST_BASE + '/storage/%s' % (volume.id)\r\n status = int(self.connection.request(action=url,\r\n method='DELETE').status)\r\n return status == httplib.OK",
"def delPhysicalVolume(self, pv):\n self.getElement().removeChild(pv.getElement())\n del self.pvs[pv.getAttribute(\"name\")]",
"def do_destroy_volume(sess, ocid):\n _logger.debug(\"Destroying volume [%s]\", ocid)\n try:\n vol = sess.get_volume(ocid)\n except Exception as e:\n _logger.debug(\"Failed to retrieve Volume details\", exc_info=True)\n raise Exception(\"Failed to retrieve Volume details: %s\" % ocid) from e\n\n if vol is None:\n raise Exception(\"Volume not found: %s\" % ocid)\n\n if vol.is_attached():\n raise Exception(\"Cannot destroy an attached volume\")\n\n try:\n _logger.debug('destroying volume %s:%s', vol.get_display_name(), vol.get_ocid())\n vol.destroy()\n except Exception as e:\n _logger.debug(\"Failed to destroy volume %s\", ocid, exc_info=True)\n raise Exception(\"Failed to destroy volume\") from e",
"def detach(self, volume):\r\n return volume.detach()",
"def clean():\n global madeVolume\n if madeVolume:\n ret = subprocess.run(\n [\"docker\", \"volume\", \"rm\", c.TMP_VOL],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if ret.returncode == 0:\n steprint(f\"Removed volume: {c.TMP_VOL}\")\n else:\n steprint(\n f\"Could not delete temporary docker volume: {ret.returncode}\\n\"\n f\"You can try: docker volume rm {c.TMP_VOL}\"\n )",
"def volume_delete(mnode, volname, xfail=False):\n hosts = []\n paths = []\n volinfo = get_volume_info(mnode, volname, xfail)\n if not volinfo:\n if xfail:\n g.log.info(\n \"Volume {} does not exist in {}\"\n .format(volname, mnode)\n )\n return True\n else:\n g.log.error(\n \"Unexpected: volume {} does not exist in {}\"\n .format(volname, mnode))\n return False\n\n _, _, err = RestClient(mnode).handle_request(\n \"DELETE\", \"/v1/volumes/%s\" % volname,\n httplib.NO_CONTENT, None)\n if err:\n if xfail:\n g.log.info(\"Volume delete is expected to fail\")\n return True\n\n g.log.error(\"Volume delete failed\")\n return False\n\n # remove all brick directories\n for j in volinfo['subvols']:\n for i in j['bricks']:\n g.run(i['host'], \"rm -rf %s\" % i['path'])\n\n return True",
"def delete_volume(client, volume_id):\n client.delete_volume(volume_id)\n client.wait_for_resource_deletion(volume_id)",
"def detach_and_delete_vols(self, volumes):\n for v in volumes:\n if v.status == \"in-use\":\n v.detach()\n v.get()\n sample = TimeoutSampler(\n 100,\n 5,\n self.check_expected_vol_status,\n vol=v,\n expected_state=\"available\",\n )\n if not sample.wait_for_func_status(True):\n logger.error(f\"Volume {v.name} failed to detach\")\n raise exceptions.PSIVolumeNotInExpectedState()\n\n v.delete()\n sample = TimeoutSampler(100, 5, self.check_vol_deleted, vol=v)\n if not sample.wait_for_func_status(True):\n logger.error(f\"Failed to delete Volume {v.name}\")\n raise exceptions.PSIVolumeDeletionFailed()",
"def delLogicalVolume(self, lv):\n self.getElement().removeChild(lv.getElement())\n del self.lvs[lv.getAttribute(\"name\")]",
"def test_delete_volume(self):\n self._driver.create_volume(self.TEST_VOLUME)\n self._driver.delete_volume(self.TEST_VOLUME)\n self.assertFalse(os.path.isfile(self.TEST_VOLPATH))",
"def _delete_volume_for_cleanup(volumes_client, volume_id):\n try:\n vol = volumes_client.show_volume(volume_id)['volume']\n if vol['status'] == 'in-use':\n waiters.wait_for_volume_resource_status(volumes_client,\n volume_id,\n 'available')\n except lib_exc.NotFound:\n pass\n BaseVolumeTest.delete_volume(volumes_client, volume_id)",
"def unmanage(self, volume):\n\n vol_name = self._get_vol_name(volume)\n if len(vol_name + UNMANAGED_SUFFIX) > MAX_VOL_LENGTH:\n unmanaged_vol_name = vol_name[:-len(UNMANAGED_SUFFIX)] + \\\n UNMANAGED_SUFFIX\n else:\n unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX\n LOG.info(\"Renaming existing volume %(ref_name)s to %(new_name)s\",\n {\"ref_name\": vol_name, \"new_name\": unmanaged_vol_name})\n self._rename_volume_object(vol_name, unmanaged_vol_name)",
"def delete_volumes(self):\n return SystemCommand(self.cmd.delete_volumes)",
"def detach_volume(self, instance_name, mountpoint):\n\n # Find the instance ref so we can pass it to the\n # _container_script_modify method.\n meta = self._find_by_name(instance_name)\n instance = db.instance_get(context.get_admin_context(), meta['id'])\n self._container_script_modify(instance, None, None, mountpoint, 'del')",
"def delete_volume(client, volume_id):\n if supports_volumes_api(client):\n client.remove_volume(volume_id)\n return\n\n def clear_path(path):\n if exists(path):\n logging.info(\"Removing path: %s\", path)\n rmtree(path)\n clear_path(os.path.join(DOCKER_VFS_DIR, volume_id))\n clear_path(os.path.join(DOCKER_VOLUMES_DIR, volume_id))",
"def detach_volume(DryRun=None, VolumeId=None, InstanceId=None, Device=None, Force=None):\n pass",
"def destroy(self):\n ret = libvirtmod.virStoragePoolDestroy(self._o)\n if ret == -1: raise libvirtError ('virStoragePoolDestroy() failed', pool=self)\n return ret",
"def remove_fs(self, client, vol_name, validate=True, **kwargs):\n rmvolume_cmd = f\"ceph fs volume rm {vol_name} --yes-i-really-mean-it\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=rmvolume_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n out, rc = client.exec_command(sudo=True, cmd=\"ceph fs ls --format json\")\n volname_ls = json.loads(out.read().decode())\n if vol_name in [i[\"name\"] for i in volname_ls]:\n raise CommandFailed(f\"Creation of filesystem: {vol_name} failed\")\n return cmd_out, cmd_rc",
"def detach_volume(self, node, volume):\r\n url = REST_BASE + '/instances/%s' % (node.id)\r\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\r\n data = {'storageID': volume.id, 'type': 'detach'}\r\n resp = self.connection.request(action=url,\r\n method='PUT',\r\n headers=headers,\r\n data=data)\r\n return int(resp.status) == 200"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pauses the volume move operation of the specified source volume. This is a synchronous API.
|
def volume_move_pause(self, source_volume):
return self.request( "volume-move-pause", {
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
}, {
} )
|
[
"def volume_move_abort(self, source_volume):\n return self.request( \"volume-move-abort\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def volume_move_resume(self, source_volume, cutover_window=None, is_manual_cutover=None, is_override_warnings=None, cutover_attempts=None, is_keep_source=None):\n return self.request( \"volume-move-resume\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_manual_cutover': [ is_manual_cutover, 'is-manual-cutover', [ bool, 'None' ], False ],\n 'is_override_warnings': [ is_override_warnings, 'is-override-warnings', [ bool, 'None' ], False ],\n 'cutover_attempts': [ cutover_attempts, 'cutover-attempts', [ int, 'None' ], False ],\n 'is_keep_source': [ is_keep_source, 'is-keep-source', [ bool, 'None' ], False ],\n }, {\n 'errors-warnings': [ ErrorsWarningsInfo, True ],\n } )",
"def volume_move_status(self, source_volume=None, is_verbose=None):\n return self.request( \"volume-move-status\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_verbose': [ is_verbose, 'is-verbose', [ bool, 'None' ], False ],\n }, {\n 'status': [ VolMoveStatusInfo, True ],\n } )",
"def media_pause(self) -> None:\n self.send_command([\"pause\"])",
"def pause(self):\n self._exec_cmd(_vix.VixVM_Pause,\n self._vm_handle,\n 0, # Must be 0\n VIX_INVALID_HANDLE,\n None,\n None\n )",
"def pause():\n global source\n if source:\n os.kill(source.pid, signal.SIGSTOP)",
"def pause(self):\n if not 'paused' in self.states:\n raise ValueError(\"Cannot pause without adding a state with the name 'paused'\")\n self.state = self.states['paused']\n log('Pausing')\n self.state.enter(self)",
"def pause(self):\n self._call(\"pause\")",
"def pause(self):\n self._player.pause()\n self.state = AudioPlayer.PAUSE\n self.dispatch_event('on_pause', self)",
"def move_disk(pegs, source, dest):\n # check if the move is valid\n # If the move is invalid, it will raise an error telling you what is the problem\n if source not in [0,1,2]: raise AssertionError(\"source index out of bounds\")\n if dest not in [0,1,2]: raise AssertionError(\"destination index out of bounds\")\n if pegs[source] == []: raise AssertionError(\"source peg is empty\")\n disk = pegs[source][-1] # disk is the top disk in the source peg\n if pegs[dest] and (pegs[dest][-1] <= disk): raise AssertionError(\"destination has smaller disk\")\n\n # The move is valid so (i) we print the move on the screen\n print(f\"STEP: move disk {disk} from peg {source} to peg {dest}\")\n\n # then (ii) we execute the move\n pegs[source].pop() # Take the disk on top of the source peg\n pegs[dest].append(disk) # and move it to the top of the destination peg\n\n # and (iii) we display the new configuration\n print_pegs(pegs)",
"def volume_up(self):\n self._player.volume += self._volume_increment",
"def distribute(self, volume, source, dest, *args, **kwargs):\n # Note: currently it varies whether the pipette should have a tip on\n # or not depending on the parameters for this call, so we cannot\n # create a very reliable assertion on tip status\n\n args = [volume, source, dest, *args]\n kwargs['mode'] = 'distribute'\n kwargs['mix_after'] = (0, 0)\n if 'disposal_vol' not in kwargs:\n kwargs['disposal_vol'] = self.min_volume\n return self.transfer(*args, **kwargs)",
"def volume_move_cutover(self, source_volume, cutover_window=None):\n return self.request( \"volume-move-cutover\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def pause(self):\n self._video_paused = True",
"def volume_down(self):\n self._player.volume -= self._volume_increment",
"def change_volume(self, volume):\n self.signalPlayer.volume = volume",
"def pause_sim(self):\n self._tell_sim('put', 'pause')",
"def pause_track(self):\n body = {\n \"action\": \"pause\",\n \"publishResponse\": True,\n \"resource\": MEDIA_PLAYER_RESOURCE_ID,\n }\n self._arlo.be.notify(base=self, body=body)",
"def decrease_volume(self):\n if self.is_playing:\n self.volume *= 0.8"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Trigger cutover of a move job
|
def volume_move_trigger_cutover(self, source_volume, vserver=None, force=None):
return self.request( "volume-move-trigger-cutover", {
'vserver': [ vserver, 'vserver', [ basestring, 'None' ], False ],
'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
'force': [ force, 'force', [ bool, 'None' ], False ],
}, {
} )
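
A minimal usage sketch (not from the source): the `client` below is assumed to be an instance of the wrapper class that defines this method, and the volume and Vserver names are placeholders.

def force_cutover(client, volume_name, vserver_name=None):
    # Ask the mover to cut over immediately; passing force=True is assumed to
    # skip the confirmation an interactive cutover would otherwise require.
    return client.volume_move_trigger_cutover(source_volume=volume_name,
                                              vserver=vserver_name,
                                              force=True)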
|
[
"def cancelMove(self) -> None:\n frames_already_done = self._totalFrameNeeded - self._frameNeeded\n for _ in range(frames_already_done):\n self.unit.moveTo(self.sourceTile.graphics.center)\n self.isPerformed = True",
"def at_after_move(self, source_location):\r\n pass",
"def move_jobs(self, src_tube, dst_tube, n=0):\n self.watch_single_tube(src_tube)\n self.beanstalk.watch(src_tube)\n self.beanstalk.use(dst_tube)\n # BATCH DRAIN INTO THIS (note that this bit is not persistent!)\n lifo = []\n while(n > 0):\n job = self.beanstalk.reserve(timeout=60)\n if job is None:\n print(\"timed out. nothing to do?!\")\n return\n lifo.append(job)\n n -= 1\n\n stack_len = len(lifo)\n\n # dump stack into destination work queue.\n while(len(lifo) > 0):\n job = lifo.pop()\n self.beanstalk.put(job.body)\n job.delete()\n\n self.logger.info(\"drained {} jobs\".format(stack_len))",
"def apply(self, move):\n # the pile the user wants to take from\n pile = move.get_pile()\n # the amount of stones the user wants to take from the pile\n stones = move.get_stones()\n self._piles[pile] = max(0, self._piles[pile] - stones)",
"def move(self, action):\n raise NotImplementedError",
"def on_move_success(self, user, move, target):",
"def _move_lift(self, cmd):\n # action = self._vector.behavior.set_lift_height(height=cmd.data,\n # duration=0.2, in_parallel=True)\n action = self._vector.behavior.set_lift_height(height=cmd.data,\n duration=0.2)\n # action.wait_for_completed()",
"def test_moveover():\r\n import random #used only for testing, not production\r\n zero_pos = np.array([0.0, 0.0, 1.0]) # note 1 in 3rd dim, this is a standard vector\r\n x1_pos = np.array([1.0, 0.0, 1.0])\r\n xy_pos = np.array([math.sqrt(0.5), math.sqrt(0.5), 1.0])\r\n y1_pos = np.array([0.0, 1.0, 1.0], dtype=float)\r\n for _ in range(10000):\r\n target=vec3d(random.random()-0.5,random.random()-0.5)\r\n if random.random()>0.8: target=zero_pos\r\n if random.random()>0.8: target=x1_pos\r\n if random.random()>0.8: target=y1_pos\r\n if random.random()>0.8: target=xy_pos\r\n device = vec3d(random.random()-0.5, random.random()-0.5)\r\n if random.random()>0.8: device=zero_pos\r\n if random.random()>0.8: device=x1_pos\r\n if random.random()>0.8: device=xy_pos\r\n if random.random()>0.8: device=y1_pos\r\n motion=moveover(device,target,stopdist=0,maxdeviation=0.7)\r\n deviation=pos_antimotion(target,motion)-device\r\n if np.linalg.norm(deviation)>1e-13:\r\n print(f\"moveover err:\",(target,device,motion,deviation))",
"def climb(self, itemToClimb): \n tile = itemToClimb.getTile() \n if tile.getDepth() <= MAX_DEPTH and tile.stepOn():\n self.setDestination(itemToClimb.getPosition())\n self.setUnselectedItem()\n else:\n self.newChatMessage(\"No puedo subirme ahí\", 1)",
"def overflow(self, player, move):\r\n player_profile = self.which_player(player)\r\n piece_move = self._board[move[0]][move[1]]\r\n while len(self._board[move[0]][move[1]]) > 5:\r\n bottom_piece = piece_move[0]\r\n self._board[move[0]][move[1]].pop(0)\r\n if bottom_piece == player_profile.get_color():\r\n player_profile.add_reserve()\r\n if bottom_piece != player_profile.get_color():\r\n player_profile.add_capture()",
"def hillclimb(pos, max_fevals, all_results, unique_results, kernel_options, tuning_options, runner):\n tune_params = tuning_options.tune_params\n max_threads = runner.dev.max_threads\n\n #measure start point time\n time = _cost_func(pos, kernel_options, tuning_options, runner, all_results)\n\n #starting new hill climbing search, no need to remember past best\n best_global = best = time\n\n #store the start pos before hill climbing\n start_pos = pos[:]\n\n found_improved = True\n while found_improved:\n found_improved = False\n\n current_results = []\n pos = start_pos[:]\n\n index = 0\n #in each dimension see the possible values\n for values in tune_params.values():\n\n #for each value in this dimension\n for value in values:\n pos[index] = value\n\n #check restrictions\n #if restrictions and not util.check_restrictions(restrictions, pos, tune_params.keys(), False):\n # continue\n if not util.config_valid(pos, tuning_options, max_threads):\n continue\n\n #get time for this position\n time = _cost_func(pos, kernel_options, tuning_options, runner, current_results)\n if time < best:\n best = time\n best_pos = pos[:]\n #greedely replace start_pos with pos to continue from this point\n start_pos = pos[:]\n\n unique_results.update({\",\".join([str(v) for k, v in record.items() if k in tune_params]): record[\"time\"]\n for record in current_results})\n fevals = len(unique_results)\n if fevals >= max_fevals:\n all_results += current_results\n return\n\n #restore and move to next dimension\n pos[index] = start_pos[index]\n index = index + 1\n\n #see if there was improvement, update start_pos set found_improved to True\n if best < best_global:\n found_improved = True\n start_pos = best_pos\n best_global = best\n\n #append current_results to all_results\n all_results += current_results",
"def bounce_move_update(self):\n self.bounce()",
"def volume_move_cutover(self, source_volume, cutover_window=None):\n return self.request( \"volume-move-cutover\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def move(self, action):\n tile_type, from_pile, to_stack, nbr_to_move = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n #elif pile < 0 or pile >= len(self.piles):\n # raise Exception(\"Invalid pile\")\n #elif count < 1 or count > self.piles[pile]:\n # raise Exception(\"Invalid number of objects\")\n\n # get the tiles from the factory\n nbr_tiles, penalty = self.factory.remove_tiles_from_pile(from_pile, tile_type)\n\n if to_stack == penalty_stack_row_idx:\n # these tiles are going straight to penalty\n self.players[self.current_player_idx].add_tiles_to_penalty(nbr_tiles, tile_type)\n else:\n # put the tiles on the floor\n self.players[self.current_player_idx].move_tiles_to_row(nbr_tiles, tile_type, to_stack)\n\n if penalty == 1:\n self.players[self.current_player_idx].add_penalty_tile_to_penalty_stack()\n\n # check if the round is over\n if self.factory.get_tile_count_in_piles() == 0:\n # score this round and setup the next round \n # if the game is over, determine the winner\n if self.process_end_round():\n self.set_winner()\n # the end of round method also sets the next player\n else:\n # check if the player just did something which will end the game soon\n if not self.is_last_round:\n self.is_last_round = self.players[self.current_player_idx].has_a_completed_row()\n # pass the baton to the next player\n self.switch_player()\n\n \n\n # Update pile\n #self.piles[pile] -= count\n #self.switch_player()\n\n # Check for a winner\n #if all(pile == 0 for pile in self.piles):\n # self.winner = self.player",
"def execute_mouse_over(self, element: WebElement) -> None:\r\n ActionChains(self.driver).move_to_element(element).perform()",
"def test_trigger_job(self):\n pass",
"def do_last_mile(self) -> bool:\n self.set_state(DockState.LAST_MILE)\n\n remaining_dis = self.cfg.to_last_mile_dis\n _dir = 1 if self.cfg.front_dock else -1\n\n while not rospy.is_shutdown():\n if self.check_cancel():\n return False\n\n if self.is_pause:\n if not self.do_pause():\n return False\n\n centre_tf_mat = self.get_tf(self.cfg.centre_marker)\n\n # Final Dock based on odom if centre marker is getting to close\n if centre_tf_mat is None:\n rospy.logwarn(\"Not detecting centre marker\")\n if remaining_dis < self.cfg.max_last_mile_odom:\n rospy.logwarn(f\"move {remaining_dis}m with odom\")\n return self.move_with_odom(_dir*remaining_dis)\n else:\n rospy.logerr(\"exceeded max_last_mile_odom with \"\n \"remaining dis of {remaining_dis}, exit!\")\n return False\n\n centre_tf = utils.get_2d_pose(centre_tf_mat)\n if self.cfg.front_dock:\n centre_tf = utils.flip_base_frame(centre_tf)\n dis, _, yaw = centre_tf\n\n yaw -= math.pi/2\n remaining_dis = - dis - self.cfg.stop_distance - self.cfg.cam_offset\n print(f\" Approaching Charger -> d: {dis:.3f}, yaw: {yaw:.2f}\"\n f\", remaining dis: {remaining_dis:.3f}\")\n\n if (remaining_dis <= 0):\n rospy.loginfo(\" ~ STOP!! Reach destination! ~\")\n self.publish_cmd()\n return True\n\n ang_vel = utils.sat_proportional_filter(\n yaw, abs_min=0.0, abs_max=self.cfg.min_angular_vel, factor=1.2)\n self.publish_cmd(linear_vel=_dir*self.cfg.min_linear_vel,\n angular_vel=ang_vel)\n rospy.sleep(self.sleep_period)\n exit(0)",
"def on_after_foe_hit(self, foe, move, target, battle):",
"def jumpOver(self): \n if not self.isTopItem():\n self.newChatMessage(\"No puedo saltar con tanto peso\", 1)\n return\n heading = GG.utils.getNextDirection(self.getPosition(), self.__selected.getPosition())\n if heading:\n dest = GG.utils.getJumpDestination(self.getPosition(), heading, self.getRoom().size)\n if dest == None or self.getRoom().getTile(dest).getDepth():\n self.newChatMessage(\"No puedo saltar allí\", 1)\n return\n self.setUnselectedItem()\n self.setHeading(heading)\n self.setDestination(dest)\n self.setPosition(dest, 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Renames the specified volume to a new name specified by "new-volume-name". If the volume is referenced in the /etc/exports file, remember to make the name change in /etc/exports also so that the affected file system can be exported by the filer after the filer reboots. The "volume rename" command does not automatically update the /etc/exports file. This API is not supported for Infinite Volumes.
|
def volume_rename(self, volume, new_volume_name):
return self.request( "volume-rename", {
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],
}, {
} )
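
A minimal usage sketch (assumptions: `client` is an instance of the wrapper class defining volume_rename above, and the volume names are placeholders, not values from the source).

def rename_volume(client, old_name, new_name):
    # Issue the rename through the API wrapper.
    client.volume_rename(volume=old_name, new_volume_name=new_name)
    # The API does not touch /etc/exports, so any entry referencing the old
    # name must be updated by hand, as the description above notes.
    print("Renamed %s to %s; update /etc/exports if it references %s"
          % (old_name, new_name, old_name))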
|
[
"def rename(self, new_name):\n\n if not new_name:\n raise LvmVolumeError(_(\"No new name for logical volume given.\"))\n\n new_name = str(new_name).strip()\n if new_name == '':\n raise LvmVolumeError(_(\"Empty name for logical volume given.\"))\n\n if new_name == self.name:\n LOG.debug(_(\n \"New logical volume name is equal the current name %r.\"), new_name)\n return\n\n cur_cname = self.vgname + '/' + self.name\n new_cname = self.vgname + '/' + new_name\n\n cmd_params = [\n 'lvrename',\n self.vgname,\n self.name,\n new_name\n ]\n\n LOG.info(_(\"Renaming logical volume %(old)r to %(new)r.\") % {\n 'old': cur_cname, 'new': new_cname})\n\n (ret_code, std_out, std_err) = self.exec_lvm(\n cmd_params, quiet=True, force=False)\n\n self._name = new_name\n\n return",
"def RenameVolume(self, volume_resource, new_name):\n rename_volume_request = self.messages.RenameVolumeRequest(\n newVolumeId=new_name)\n request = (\n self.messages.BaremetalsolutionProjectsLocationsVolumesRenameRequest(\n name=volume_resource.RelativeName(),\n renameVolumeRequest=rename_volume_request)\n )\n return self.volumes_service.Rename(request)",
"def _rename_volume_object(self, old_name, new_name, raise_not_exist=False):\n current_array = self._get_current_array()\n try:\n current_array.rename_volume(old_name, new_name)\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_NOT_EXIST in err.text):\n ctxt.reraise = raise_not_exist\n LOG.warning(\"Unable to rename %(old_name)s, error \"\n \"message: %(error)s\",\n {\"old_name\": old_name, \"error\": err.text})\n return new_name",
"def rename(self, new_name, flags=0):\n ret = libvirtmod.virDomainRename(self._o, new_name, flags)\n if ret == -1: raise libvirtError ('virDomainRename() failed', dom=self)\n return ret",
"def volume_storage_service_rename(self, volume, storage_service, new_storage_service):\n return self.request( \"volume-storage-service-rename\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'storage_service': [ storage_service, 'storage-service', [ basestring, 'None' ], False ],\n 'new_storage_service': [ new_storage_service, 'new-storage-service', [ basestring, 'None' ], False ],\n }, {\n } )",
"def Rename(self, new_ifname):\n try:\n cros_build_lib.SudoRunCommand(\n ['nameif', new_ifname, self.GetMacAddress()])\n self._ifname = new_ifname\n except cros_build_lib.RunCommandError as e:\n if 'File exists' in str(e):\n raise InterfaceNameExistsError(\n 'Rename failed, interface name %s exists' % new_ifname)\n raise",
"def rename(self, oldkey, newkey):\r\n return self.execute_command(\"RENAME\", oldkey, newkey)",
"def rename(self, key, new_key):\n return self._execute([b'RENAME', key, new_key], b'OK')",
"def rename(self, new_block_name):\r\n name = _base._rsf.rename_block(self._block._name, new_block_name)\r\n if name:\r\n self._name = name\r\n return name\r\n else:\r\n return None",
"def change_name(self, col_name, new_name):\n if new_name != col_name and new_name in self.names:\n raise ValueError, 'New name %s already exists.' % new_name\n else:\n self.change_attrib(col_name, 'name', new_name)",
"def rename(self, oldname, newname):\n oldname = _prepareMailboxName(oldname)\n newname = _prepareMailboxName(newname)\n return self.sendCommand(Command(b'RENAME', b' '.join((oldname, newname))))",
"def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name",
"def set_name(self, new_name):\n self.name = new_name",
"def renameDevice(self, uid, newId, retainGraphData=False):\n facade = self._getFacade()\n newUid = facade.renameDevice(uid, newId, retainGraphData)\n return DirectResponse.succeed(uid=newUid)",
"def rename(self, index, new_name):\n if index == 0:\n raise IndexError('It is not allowed to rename the Blank workspace.')\n\n su = sppasUnicode(new_name)\n u_name = su.to_strip()\n\n if u_name in self:\n raise ValueError('A workspace with name {:s} is already existing.'\n ''.format(u_name))\n\n cur_name = self[index]\n if cur_name == new_name:\n return\n\n src = self.check_filename(index)\n dest = os.path.join(paths.wkps, u_name) + sppasWorkspaces.ext\n shutil.move(src, dest)\n self.__wkps[index] = u_name\n\n return u_name",
"def edit_node_name(self, node, new_node):\n if self.check_node_existance(node):\n self.nodes[new_node] = self.nodes[node]\n del self.nodes[node]\n\n else:\n raise NodeNotFound('No node under name \"{0}\" found.'.format(node))",
"def rename_child(self, name, newname):\n if name not in self.children:\n raise FSException(\"Source %s doesn't exist\" % name)\n\n if newname in self.children:\n raise FSException(\"Target %s already exists\" % newname)\n\n node = self.children[name]\n node.name = newname\n del self.children[name]\n self.children[newname] = node",
"def change_image_name(self, img, newname):\r\n return self.update(img, {\"name\": newname})",
"def log_renamed(oldname, newname):\r\n logging.info('FILE-RENAMED: \\n \\t%s -- to: %s\\n', oldname, newname)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a flexible volume that is a clone of a "backing" or "parent" flexible volume. A clone is a volume that is a writable snapshot of another volume. Initially, the clone and its parent share the same storage; more storage space is consumed only as one volume or the other changes. If a specific snapshot name within the parent volume is provided, it is chosen as the parent snapshot. Otherwise, the filer will create a new, distinctively named snapshot in the parent volume for that purpose. The parent snapshot is locked in the parent volume, preventing its deletion until the clone is either destroyed or split from the parent using the 'volume-clone-split-start' command (see below). This command fails if the chosen parent volume is currently involved in a split operation. This command also fails if the chosen parent volume is a traditional volume. Cloning is a new capability that applies exclusively to flexible volumes.
|
def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):
return self.request( "volume-clone-create", {
'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],
'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],
'force_worm_clone': [ force_worm_clone, 'force-worm-clone', [ bool, 'None' ], False ],
'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],
'qos_policy_group_name': [ qos_policy_group_name, 'qos-policy-group-name', [ basestring, 'None' ], False ],
'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],
'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],
'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],
'volume_type': [ volume_type, 'volume-type', [ basestring, 'None' ], False ],
}, {
} )
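
A hedged usage sketch (the `client` object, volume names, and snapshot name are all placeholders; only the keyword names come from the signature above).

def clone_volume(client, parent_name, clone_name, snapshot_name=None):
    # Create a writable clone backed by `parent_name`. If snapshot_name is
    # None, the filer picks or creates the parent snapshot itself, per the
    # description above.
    return client.volume_clone_create(parent_volume=parent_name,
                                      volume=clone_name,
                                      parent_snapshot=snapshot_name)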
|
[
"def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc",
"def volume_clone_create_async(self, parent_volume, volume, use_snaprestore_license=None, junction_active=None, space_reserve=None, junction_path=None, parent_snapshot=None):\n return self.request( \"volume-clone-create-async\", {\n 'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],\n 'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],\n 'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n 'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )",
"def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)",
"def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)",
"def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )",
"def clone(self, source_name, snapshot_id, dest_name):\n wrap_popen('collie', 'vdi', 'clone', '-s', snapshot_id, source_name,\n dest_name)",
"def clone(self, parent):\n # noinspection PyArgumentList\n return self.__class__(parent)",
"def isClone(self, vdisk):\n if vdisk.objecttype==\"clone\":\n return True\n else:\n return False",
"def _create_clone_pair(self, pvol, svol):\n snapshot_name = '%(prefix)s%(svol)s' % {\n 'prefix': CLONE_NAME,\n 'svol': svol % _SNAP_HASH_SIZE,\n }\n try:\n body = {\"snapshotGroupName\": snapshot_name,\n \"snapshotPoolId\": self.storage_info['snap_pool_id'],\n \"pvolLdevId\": pvol,\n \"svolLdevId\": svol,\n \"isClone\": True,\n \"clonesAutomation\": True,\n \"copySpeed\": 'medium',\n \"isDataReductionForceCopy\": True}\n self.client.add_snapshot(body)\n except utils.HBSDError as ex:\n if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==\n rest_api.INVALID_SNAPSHOT_POOL and\n not self.conf.hitachi_snap_pool):\n msg = utils.output_log(\n MSG.INVALID_PARAMETER, param='hitachi_snap_pool')\n raise utils.HBSDError(msg)\n else:\n raise\n try:\n self._wait_copy_pair_status(svol, set([PSUS, SMPP, SMPL]))\n except Exception:\n with excutils.save_and_reraise_exception():\n try:\n self._delete_pair_from_storage(pvol, svol)\n except utils.HBSDError:\n utils.output_log(\n MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)",
"def volume_clone_split_estimate(self, volume):\n return self.request( \"volume-clone-split-estimate\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-estimate': [ CloneSplitEstimateInfo, True ],\n } )",
"def _create_volume_from_snapshot(self, snapshot, volume):\n\n cinder_volume_id = volume['id']\n cinder_snapshot_id = snapshot['id']\n size_mb = volume['size'] * units.Ki\n result = None\n spec_dict = {}\n\n LOG.debug(\"Copying snapshot %(snap_id)s onto volume %(vol_id)s \"\n \"%(dpy_name)s\",\n {'snap_id': cinder_snapshot_id,\n 'vol_id': cinder_volume_id,\n 'dpy_name': snapshot['display_name']})\n\n source_lun_info = self.vmem_mg.lun.get_lun_info(snapshot['volume_id'])\n if source_lun_info['subType'] != 'THICK':\n msg = _('Lun copy currently only supported for thick luns')\n LOG.warn(msg)\n raise exception.ViolinBackendErr(message=msg)\n\n spec_dict = self._process_extra_specs(volume)\n selected_pool = self._get_storage_pool(\n volume, size_mb, spec_dict['pool_type'], \"create_lun\")\n\n try:\n result = self.vmem_mg.lun.copy_snapshot_to_new_lun(\n source_lun=snapshot['volume_id'],\n source_snapshot_comment=self._compress_snapshot_id(\n cinder_snapshot_id),\n destination=cinder_volume_id,\n storage_pool_id=selected_pool['storage_pool_id'])\n\n if not result['success']:\n self._check_error_code(result)\n\n except Exception:\n LOG.warn(\n _(\"Copy snapshot to volume for \"\n \"snapshot %(snap)s volume %(vol)s failed!\") %\n {'snap': cinder_snapshot_id,\n 'vol': cinder_volume_id})\n raise\n\n # get the destination lun info and extract virtualdeviceid\n info = self.vmem_mg.lun.get_lun_info(object_id=result['object_id'])\n\n self._wait_for_lun_or_snap_copy(\n snapshot['volume_id'], dest_vdev_id=info['virtualDeviceID'])\n\n if volume.get('consistencygroup_id'):\n LOG.debug('Adding volume %(v)s to consistency group %(g)s',\n {'v': cinder_volume_id,\n 'g': volume['consistencygroup_id']})\n self._ensure_snapshot_resource_area(cinder_volume_id)\n self._add_to_consistencygroup(\n volume['consistencygroup_id'], cinder_volume_id)",
"def multi_pvc_clone_factory(pvc_clone_factory, pod_factory):\n\n def factory(\n pvc_obj,\n status=constants.STATUS_BOUND,\n clone_name=None,\n storageclass=None,\n size=None,\n access_mode=None,\n volume_mode=None,\n wait_each=False,\n attach_pods=False,\n verify_data_integrity=False,\n file_name=None,\n ):\n \"\"\"\n Args:\n pvc_obj (list): List PVC object from which clone has to be created\n status (str): If provided then factory waits for cloned PVC to\n reach the desired state\n clone_name (str): Name to be provided for cloned PVC\n storageclass (str): storage class to be used for cloned PVC\n size (int): The requested size for the cloned PVC. This should\n be same as the size of parent PVC for a successful clone\n access_mode (str): This decides the access mode to be used for\n the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany\n volume_mode (str): Volume mode for PVC. This should match the\n volume mode of parent PVC\n wait_each(bool): True to wait for each PVC to be in status 'status'\n before creating next PVC, False otherwise\n attach_pods(bool): True if we want to attach PODs to the cloned PVCs, False otherwise.\n verify_data_integrity(bool): True if we want to verify data integrity by checking the existence and md5sum\n of file in the cloned PVC, False otherwise.\n file_name(str): The name of the file for which data integrity is to be checked.\n\n Returns:\n PVC: List PVC instance\n\n \"\"\"\n cloned_pvcs = []\n\n status_tmp = status if wait_each else \"\"\n\n log.info(\"Started creation of clones of the PVCs.\")\n for obj in pvc_obj:\n # Create clone\n clone_pvc_obj = pvc_clone_factory(\n pvc_obj=obj,\n clone_name=clone_name,\n storageclass=storageclass,\n size=size,\n access_mode=access_mode,\n volume_mode=volume_mode,\n status=status_tmp,\n )\n cloned_pvcs.append(clone_pvc_obj)\n\n if status and not wait_each:\n for cloned_pvc in cloned_pvcs:\n helpers.wait_for_resource_state(cloned_pvc, status)\n\n log.info(\"Successfully created clones of the PVCs.\")\n\n if attach_pods:\n # Attach PODs to cloned PVCs\n cloned_pod_objs = list()\n for cloned_pvc_obj in cloned_pvcs:\n if cloned_pvc_obj.get_pvc_vol_mode == constants.VOLUME_MODE_BLOCK:\n cloned_pod_objs.append(\n pod_factory(\n pvc=cloned_pvc_obj,\n raw_block_pv=True,\n status=constants.STATUS_RUNNING,\n pod_dict_path=constants.CSI_RBD_RAW_BLOCK_POD_YAML,\n )\n )\n else:\n cloned_pod_objs.append(\n pod_factory(pvc=cloned_pvc_obj, status=constants.STATUS_RUNNING)\n )\n\n # Verify that the fio exists and md5sum matches\n if verify_data_integrity:\n verify_data_integrity_for_multi_pvc_objs(\n cloned_pod_objs, pvc_obj, file_name\n )\n\n return cloned_pvcs, cloned_pod_objs\n\n return cloned_pvcs\n\n return factory",
"def recursive_copy(old_parent: Group, new_parent: Group) -> None:\n if len(subgroups(old_parent)) == 0:\n for dset_name in datasets(old_parent):\n new_parent.create_dataset(dset_name, data=old_parent[dset_name][...], dtype=np.float32)\n return\n for group_name in subgroups(old_parent):\n new_parent.create_group(group_name)\n recursive_copy(old_parent[group_name], new_parent[group_name])\n return",
"def _clone(context, obj, clone_id):\n return context.manage_clone(obj, clone_id)",
"def create_snapshot(\n self, client, vol_name, subvol_name, snap_name, validate=True, **kwargs\n ):\n snapshot_cmd = (\n f\"ceph fs subvolume snapshot create {vol_name} {subvol_name} {snap_name}\"\n )\n if kwargs.get(\"group_name\"):\n snapshot_cmd += f\" --group_name {kwargs.get('group_name')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=snapshot_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsnapshot_cmd = f\"ceph fs subvolume snapshot ls {vol_name} {subvol_name}\"\n if kwargs.get(\"group_name\"):\n listsnapshot_cmd += f\" --group_name {kwargs.get('group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsnapshot_cmd} --format json\"\n )\n snapshot_ls = json.loads(out.read().decode())\n if snap_name not in [i[\"name\"] for i in snapshot_ls]:\n raise CommandFailed(f\"Creation of subvolume : {snap_name} failed\")\n return cmd_out, cmd_rc",
"def is_clone(g):\n return g.parent is not None",
"def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )",
"def create_volume(DryRun=None, Size=None, SnapshotId=None, AvailabilityZone=None, VolumeType=None, Iops=None, Encrypted=None, KmsKeyId=None, TagSpecifications=None):\n pass",
"def create_share_from_snapshot(self, share, snapshot,\n share_server=None, parent_share=None):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the overridden methods in an object.
|
from typing import Type

def get_overridden_methods(cls: type, obj: Type['spines.base.BaseObject']):
common = cls.__dict__.keys() & obj.__class__.__dict__.keys()
return [
m for m in common if cls.__dict__[m] != obj.__class__.__dict__[m]
and callable(cls.__dict__[m])
]
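
A small self-contained check of the helper above; `Base` and `Child` are made-up classes, not part of the source, and the call assumes `Type` from `typing` is in scope for the annotation.

class Base:
    def run(self): return "base"
    def name(self): return "base"

class Child(Base):
    def run(self): return "child"   # overridden; name() is inherited unchanged

print(get_overridden_methods(Base, Child()))   # expected: ['run']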
|
[
"def get_object_methods(obj):\n import utool as ut\n attr_list = (getattr(obj, attrname) for attrname in dir(obj))\n methods = [attr for attr in attr_list if ut.is_method(attr)]\n return methods",
"def is_overridden(obj):\n return getattr(obj, \"__is_overriden__\", True)",
"def get_original_method(self, obj, met_name):\n basemethod = method = getattr(obj, met_name)\n try:\n basemethod = self.__woven_dict[obj][met_name]['original']\n except KeyError:\n # if the method wasn't found AND if 'obj' is an isntance,\n # try to look at the obj.__class__ entry (convenience behaviour)\n if type(obj) == types.InstanceType:\n klass = obj.__class__\n try:\n basemethod = self.__woven_dict[klass][met_name]['original']\n except KeyError:\n return basemethod, method\n return basemethod, method",
"def get_class_methods(_class):\n return [x for x, y in list(_class.__dict__.items()) if type(y) == FunctionType]",
"def getOverrides(self):\n\n return self.data.override.dictionary_()",
"def _inherit_helper_class_parent_virtuals(self):\n mro = self.get_mro()\n mro.next() # skip 'self'\n for cls in mro:\n for method in cls.get_all_methods():\n if not method.is_virtual:\n continue\n method = method.clone()\n self.helper_class.add_virtual_method(method)",
"def system_listMethods(self):\r\n\r\n methods = self.funcs.keys()\r\n if self.instance is not None:\r\n # Instance can implement _listMethod to return a list of\r\n # methods\r\n if hasattr(self.instance, '_listMethods'):\r\n methods = remove_duplicates(\r\n methods + self.instance._listMethods()\r\n )\r\n # if the instance has a _dispatch method then we\r\n # don't have enough information to provide a list\r\n # of methods\r\n elif not hasattr(self.instance, '_dispatch'):\r\n methods = remove_duplicates(\r\n methods + list_public_methods(self.instance)\r\n )\r\n methods.sort()\r\n return methods",
"def _get_methods(class_input):\r\n return [f for f in dir(class_input) \r\n if callable(getattr(class_input, f)) and not f.startswith('__')]",
"def overrides(klass, function_name):\n try:\n superclass = inspect.getmro(klass)[1]\n overridden = getattr(klass, function_name) is not getattr(superclass, function_name)\n return overridden\n except (IndexError, AttributeError):\n return False",
"def getAllRemoteHandlers():\n return MetaRemoteModel.subclass_handlers",
"def _listOfMethods(self, lookinside):\n\t\ttry:\n\t\t\tif lookinside:\n\t\t\t\treturn dir(__import__(lookinside, globals={}, locals={}, fromlist=[], level=-1))\n\t\texcept ImportError:\n\t\t\treturn []",
"def get_line_generation_methods(self):\n return self._line_generation_methods",
"def listMethods(self):\n methodNames = self.funcs.keys()\n methodNames.sort()\n return methodNames",
"def check_methods(class_obj, *methods):\n mro = class_obj.__mro__\n for method in methods:\n for base_class_obj in mro:\n if method in base_class_obj.__dict__:\n if base_class_obj.__dict__[method] is None:\n return NotImplemented\n break\n else:\n return NotImplemented\n return True",
"def get_public_members(obj):\n return {attr: getattr(obj, attr) for attr in dir(obj)\n if not attr.startswith(\"_\")\n and not hasattr(getattr(obj, attr), '__call__')}",
"def find_client_commands(obj):\n commands = []\n for name in dir(obj):\n if not name.startswith('_'):\n if is_regular_method(obj, name):\n attr = getattr(obj, name)\n commands.append(attr)\n return commands",
"def GetOverrides(hierarchy, function_signature):\n function_xrefs = cs.getXrefsFor(function_signature)\n for function_xref in function_xrefs['overrides']:\n class_signature = GetClassSignature(function_xref['signature'])\n if class_signature in hierarchy:\n if 'overrides' not in hierarchy[class_signature]:\n hierarchy[class_signature]['overrides'] = {}\n hierarchy[class_signature]['overrides'][function_signature] = function_xref['signature']",
"def getAllAttributeNames(object):\n attrdict = {} # (object, technique, count): [list of attributes]\n # !!!\n # Do Not use hasattr() as a test anywhere in this function,\n # because it is unreliable with the remote objects: xmlrpc, soap, etc.\n # They always return true for hasattr().\n # !!!\n try:\n # Yes, this can fail if object is an instance of a class with\n # __str__ (or __repr__) having a bug or raising an\n # exception. :-(\n key = str(object)\n except:\n key = 'anonymous'\n # Wake up sleepy object - a hack for ZODB objects in \"ghost\" state.\n try:\n wakeupcall = dir(object)\n del wakeupcall\n except:\n pass\n # Get attributes available through the normal convention\n try:\n attributes = dir(object)\n attrdict[(key, 'dir', len(attributes))] = attributes\n except:\n # sadly, this fails for PyReflectedFunctions\n pass\n # Get attributes rom the object's dictionary, if it has one.\n try:\n attributes = object.__dict__.keys()\n attributes.sort()\n except: # Must catch all because object might have __getattr__.\n pass\n else:\n attrdict[(key, '__dict__', len(attributes))] = attributes\n # For a class instance, get the attributes for the class.\n try:\n klass = object.__class__\n except: # Must catch all because object might have __getattr__.\n pass\n else:\n if klass is object:\n # Break a circular reference. This happens with extension\n # classes.\n #print \"breaking circular reference to self\"\n pass\n # this extra check added for Jython 2.2.1 to break circular recursion\n elif klass is not java.lang.Class:\n # print \"calling update from\", object, \"with\", klass\n attrdict.update(getAllAttributeNames(klass))\n # Also get attributes from any and all parent classes.\n try:\n bases = object.__bases__\n except: # Must catch all because object might have __getattr__.\n pass\n else:\n if isinstance(bases, types.TupleType):\n # needed for Jython 2.2?\n halt_type = type(types.TypeType)\n for base in bases:\n if type(base) is types.TypeType \\\n or type(base) is halt_type:\n # Break a circular reference. Happens in Python 2.2.\n #print \"breaking TypeType circular reference\"\n pass\n else:\n # print \"calling update (better not be 'type') with\", base\n attrdict.update(getAllAttributeNames(base))\n return attrdict",
"def describe_class(obj):\n\n import inspect\n methods = []\n cl = obj.__class__\n print ('Class: %s' % cl.__name__)\n count = 0\n for name in cl.__dict__:\n item = getattr(cl, name)\n if inspect.ismethod(item):\n count += 1\n #describe_func(item, True)\n methods.append(item)\n\n if count==0:\n print ('No members')\n return methods"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the full subcategory of the objects of ``self`` having no nonzero zero divisors. A zero divisor in a ring `R` is an element `x \in R` such that there exists a nonzero element `y \in R` such that `x \cdot y = 0` or `y \cdot x = 0`.
|
def NoZeroDivisors(self):
return self._with_axiom('NoZeroDivisors')
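
As a concrete illustration of the axiom (not part of the source): in `Z/6Z` one has `2 \cdot 3 = 0`, so the axiom fails, while `Z/7Z` has no nonzero zero divisors. A plain-Python check, independent of Sage:

def zero_divisors_mod(n):
    # Nonzero x in Z/nZ for which some nonzero y gives x*y == 0 (mod n).
    return [x for x in range(1, n)
            if any((x * y) % n == 0 for y in range(1, n))]

print(zero_divisors_mod(6))   # [2, 3, 4] -> Z/6Z has zero divisors
print(zero_divisors_mod(7))   # []        -> none, as for any field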
|
[
"def clustersWithout0(self):\n clusters = [] # liste de clusters (individu)\n temp_list = [] # liste temporaire contenant un seul cluster\n\n for i in self.individual: # pour chaque élément dans l'individu\n if i != 0: # si l'élément est différent de 0\n temp_list.append(i) # met cet élément dans la temp_list\n else:\n if temp_list: # sinon si temp_list n'est pas vide (différent d'une liste vide)\n clusters.append(temp_list) # ajoute les éléments de temp_list à la liste de clusters\n temp_list = [] # vide temp_list\n if temp_list: # si temp_list existe, ajoute temps_list si on n'a pas rencontré de 0 dans la boucle\n clusters.append(temp_list)\n return clusters",
"def barycentric_subdivision(self):\n return self.face_poset().order_complex()",
"def zero_overlap(self):\n for i in range(self.n_qubits):\n m = tn.Node(np.array([1,0]))\n self.out_edge(i) ^ m[0]\n self.nodes[i] = self.nodes[i] @ m\n \n single = self.nodes[0]\n for n in self.nodes[1:]:\n single = single @ n\n\n self.nodes = None\n \n return single.tensor",
"def vertices_no_cut (self, x):\n \n vnc = []\n for block in range(self.k):\n vnc.append([])\n for i in [i for i in range(self.n) if x[i]==block]:\n no_edge_in_cut = True\n for block2 in range(self.k):\n if block != block2:\n for j in [j for j in range(self.n) if x[j]==block2]:\n if j in self.G[i]:\n no_edge_in_cut = False\n if no_edge_in_cut:\n vnc[block].append(i)\n \n return vnc",
"def zero_indegrees(self) -> List[T]:\n return [v for v in self.vertices if self.indegrees[v] == 0]",
"def zero_crossings(data):\n pos = data > 0\n npos = ~pos\n return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]",
"def blank_solution():\n return BridgeFactory.solution_from_indices(lambda _1, _2: 0.0)",
"def _negative_fundamental_discriminants(self) :\n try :\n return self.__negative_fundamental_discriminants\n except AttributeError :\n self.__negative_fundamental_discriminants = \\\n siegelmodularformg2_misc_cython.negative_fundamental_discriminants(\n self.__precision.discriminant() )\n\n return self.__negative_fundamental_discriminants",
"def remove_self_loops(self):\n not_self_loops = self.connection_graph[0][0] != self.connection_graph[0][1]\n return Graph(self.take_connections(not_self_loops))",
"def __find_all_roots(self) -> list:\n return [n for n, d in self.G.out_degree() if d == 0]",
"def super_categories(self):\n return [BasesOfQSymOrNCSF(self.base()).Commutative()]",
"def subvoxel(self):\n return (not self.valid()) or self.volume() < 1",
"def empty(self):\n return _core.VectorXiVec_empty(self)",
"def getDivisors(self):\n return self.__divisors",
"def nullifzero(self) -> NumericValue:\n return ops.NullIfZero(self).to_expr()",
"def count_zero_vertices(self):\n return sum(1 for V in self.V if V.degree == 0)",
"def get_tree_without_self_without_material(self):\n return self.get_descendants(include_self=False)",
"def zero_crossings(x):\n\tzero_cross = np.where(np.diff(np.signbit(x)))[0]\n\n\treturn zero_cross.size",
"def suppr0(liste):\r\n return [n for n in liste if n!=0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the full subcategory of the division objects of ``self``. A ring satisfies the division axiom if all nonzero elements have multiplicative inverses.
|
def Division(self):
return self._with_axiom('Division')
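
A similarly hedged plain-Python illustration (not Sage code): every nonzero element of `Z/5Z` is invertible, so it satisfies the division axiom, whereas `Z/6Z` does not.

def units_mod(n):
    # Elements of Z/nZ that have a multiplicative inverse.
    return [x for x in range(1, n)
            if any((x * y) % n == 1 for y in range(1, n))]

print(units_mod(5))   # [1, 2, 3, 4] -> all nonzero elements are invertible
print(units_mod(6))   # [1, 5]       -> 2, 3 and 4 have no inverse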
|
[
"def barycentric_subdivision(self):\n return self.face_poset().order_complex()",
"def divide(self, frac):\n # by default, the element is indivisible\n return [self]",
"def getDivisors(self):\n return self.__divisors",
"def __div__(self, other):\n tccd = []\n if isinstance(other, MCCD):\n for ccd,occd in zip(self._data,other._data):\n tccd.append(ccd / occd)\n else:\n for ccd in self._data:\n tccd.append(ccd / other)\n return MCCD(tccd, self.head)",
"def _direct_subclasses(self):\n return self._directly_connected(rdflib.RDFS.subPropertyOf,\n inverse=True, blacklist=BLACKLIST)",
"def __div__(self, right):\n if isinstance(right, LSeriesAbstract):\n return LSeriesProduct([(self, 1), (right, -1)])\n elif isinstance(right, LSeriesProduct):\n return LSeriesProduct(Factorization([(self, 1)]) / right._factorization)\n raise TypeError",
"def faceDiv(self):\n if getattr(self, '_faceDiv', None) is None:\n # Get the stencil of +1, -1's\n D = self._faceDivStencil\n # Compute areas of cell faces & volumes\n S = self.area\n V = self.vol\n self._faceDiv = sdiag(1/V)*D*sdiag(S)\n return self._faceDiv",
"def divisions(self):\n\n from .placeholder_division import PlaceholderDivision\n \n placeholder = None\n for item in self.__parts_and_divisions:\n if item.tag == 'part':\n if not placeholder:\n placeholder = PlaceholderDivision()\n placeholder.parts.append(item)\n else:\n if placeholder:\n yield placeholder\n placeholder = None\n yield item\n if placeholder:\n yield placeholder",
"def __div__(self, other):\n twins = []\n OK = self.good\n if isinstance(other, CCD):\n OK = OK and other.good\n for win,owin in zip(self._data,other._data):\n twins.append(win / owin)\n else:\n other.good = True\n for win in self._data:\n twins.append(win / other)\n return CCD(twins, self.time, self.nxmax, self.nymax, OK, self.head)",
"def super_categories(self):\n return [BasesOfQSymOrNCSF(self.base()).Commutative()]",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorSC___div__(self, *args)",
"def reduce(self):\n\n if self.parent().has_reduce_hom() and self.is_homogeneous():\n return self.parent().homogeneous_space(self._weight, self._ep)(self._rat)\n else:\n return self",
"def divisions():\n pass",
"def NoZeroDivisors(self):\n return self._with_axiom('NoZeroDivisors')",
"def divisionsvocab(context):\n path = {'query':('/'), }\n brains = context.portal_catalog.searchResults(path=path, Type='Division', sort_on='sortable_title')\n terms = [SimpleTerm(x.getId, title=x.Title) for x in brains]\n return SimpleVocabulary(terms)",
"def test_subcategories_infinite_recurse(self):\n site = self.get_site('test2')\n cat = pywikibot.Category(site, 'Categories')\n big = pywikibot.Category(site, 'Really big category')\n result = list(cat.subcategories(recurse=3))\n self.assertEqual(result.count(cat), 2)\n self.assertEqual(result.count(big), 4)\n # check that the result is balanced\n self.assertEqual(result[:4].count(cat), 1)\n self.assertEqual(result[:4].count(big), 2)\n\n for member in set(result):\n self.assertIsInstance(member, pywikibot.Category)",
"def __floordiv__(self, other):\n return self.componentwise(other, operator.__floordiv__)",
"def doActionSetSubsumption(self):\r\n \r\n pop = self\r\n while pop.parentSet!=None:\r\n pop=pop.parentSet\r\n \r\n subsumer = None\r\n for cl in self.clSet:\r\n if cl.isSubsumer():\r\n if subsumer==None or cl.isMoreGeneral(subsumer):\r\n subsumer=cl\r\n\r\n #If a subsumer was found, subsume all more specific classifiers in the action set\r\n if subsumer!=None:\r\n i=0\r\n \t while i<self.getSize():\r\n if subsumer.isMoreGeneral(self.clSet[i]):\r\n \t\t num = self.clSet[i].getNumerosity()\r\n \t\t subsumer.addNumerosity(num)\r\n \t\t self.clSet[i].addNumerosity((-1)*num)\r\n \t\t pop.removeClassifier(self.clSet[i])\r\n \t\t self.removeClassifier(i)\r\n \t\t i = i - 1\r\n i = i + 1",
"def __sub__(self, other):\n if isinstance(other, Fraction):\n if self.denominator == other.denominator:\n new_numerator = self.numerator - other.numerator\n return Fraction(new_numerator, self.denominator)\n\n new_numerator = (other.denominator * self.numerator) - (self.denominator * other.numerator)\n new_denominator = self.denominator * other.denominator\n return Fraction(new_numerator, new_denominator)\n\n raise TypeError('You can only subtract objects from same class')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return ``True`` if this is the zero ring.
|
def is_zero(self):
return self.one() == self.zero()
|
[
"def is_zero(self):\n return self == self.number_field().ideal(0)",
"def is_zero(self):\n if not self.monomials and not self.coeffs:\n return True\n else:\n return False",
"def __bool__(self):\n return self.zero.defined and self.zero.value == 0.0",
"def iszero(self):\n return all((v == 0 for v in self.b))",
"def isZero(X:cpuByte)->bool:\r\n for position in range (cpuByte._size-1):\r\n if X._state[position]:\r\n return False # we've found a single bit where X deviates from 0\r\n return True",
"def has_zero(self):\n if self.min() <= 0 and self.max() >= 0:\n return True\n else:\n return False",
"def zero(self):\n for row in range(self._height): \n for col in range(self._width):\n if (self._val[row][col] == 0):\n return True\n return False",
"def isZeroPoly(p):\n\tif len(p) > 1:\n\t\treturn False\n\telse:\n\t\treturn p[0] == 0",
"def is_empty(self):\n return self.oil_meter[0] <= 0",
"def _check_shape_contain_zero(shp):\n if isinstance(shp, int):\n return shp == 0\n return F.shape_mul(shp) == 0",
"def is_zero(d):\n return 0.0 == d",
"def is_finite(self):\n R = self.base_ring()\n if R.is_finite() and R.order() == 1:\n return True\n return False",
"def is_empty(self):\n # Note that empty node must also be terminal node\n return self.is_terminal() and \\\n self.name == Symbol.EMPTY_SYMBOL_NAME",
"def northern(self):\n return (self.latitude >= 0)",
"def is_root(self):\n return self.node_type() == 0",
"def is_bare (self):\n # If there is no VNF\n if len([v for v in self.nfs]) == 0:\n fr_sum = sum([sum(1 for fr in i.ports.flowrules) for i in self.infras])\n # And there is no flowrule in the ports\n if fr_sum == 0:\n sg_sum = len([sg for sg in self.sg_hops])\n # And there is not SG hop\n if sg_sum == 0:\n return True\n return False",
"def empty(self):\n return self.tower is None",
"def _is_empty(self):\n return self.signal_list == []",
"def is_empty( self ):\n\t\treturn not self.guard.is_connected()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the Lie bracket `[x, y] = x y - y x` of `x` and `y`.
|
def bracket(self, x, y):
return x*y - y*x
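
A short self-contained check of the formula above, using made-up 2x2 NumPy matrices (not values from the source): the bracket is nonzero exactly when the operands fail to commute.

import numpy as np

def matrix_bracket(x, y):
    # Same formula as above, written as a free function for the demo.
    return x @ y - y @ x

a = np.array([[0, 1], [0, 0]])
b = np.array([[0, 0], [1, 0]])
print(matrix_bracket(a, b))       # [[1, 0], [0, -1]] -- non-commuting pair
print(matrix_bracket(a, 2 * a))   # zero matrix       -- commuting pair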
|
[
"def _coords(self, x, y):\n return y, x * 2",
"def x_and_y_to_xy(x, y):\n return flatten(zip(x,y))",
"def polygon_under_graph(x, y):\n return [(x[0], 0.), *zip(x, y), (x[-1], 0.)]",
"def stack_coordinates(\n x: np.ndarray,\n y: np.ndarray\n) -> np.ndarray:\n return np.vstack((x, y)).T",
"def LCS(X,Y) :\n\tif len(X) == 0 or len(Y) == 0 :\n\t\treturn [\"\"]\n\t#R = C = 0\n\txLen = len(X)\n\tyLen = len(Y)\n\trow1 = [['']]*(xLen+1)\n\trow2 = [['']]*(xLen+1)\n\ti = j = 0\n\tfor i in xrange(yLen) :\n\t\trow1 = row2\n\t\trow2 = [['']]*(xLen+1)\n\t\t#print row1,row2\n\t\tfor j in xrange(xLen) :\n\t\t\t#print i,j, sum(len(k) for k in row1)\n\t\t\t#print '\\n',[(x,eval(x)) for x in ['i','j','X[j]','Y[i]']]\n\t\t\tif X[j] != Y[i] :\n\t\t\t\trow2[j] = __longest__(row1[j],row2[j-1])\n\t\t\telse :\n\t\t\t\trow2[j] = __append__(row1[j],Y[i])\n\t\t\t#print [(x,eval(x)) for x in ['i','j','X[j]','Y[i]','row2[j]','row1[j]','row1','row2']]\n\t\t#print row2\n\treturn row2[xLen-1]",
"def graph_point(self, x, y):\n \n return (self.graph_x(x), self.graph_y(y))",
"def split(self, x=None, y=None): ###\n b = None\n if x == None and y == None:\n if self.internal == None:\n x = 0.5\n else:\n x = 0.5 * (self.internal.left + self.internal.right)\n if x != None and y != None:\n return tuple([b.split(x=x) for b in self.split(y=y)])\n if x != None:\n x = self.map(val=x, attr=\"x\")\n b = [self.copy(), self.copy()]\n b[0].right = x\n b[1].left = x\n if y != None:\n y = self.map(val=y, attr=\"y\")\n b = [self.copy(), self.copy()]\n b[0].bottom = y\n b[1].top = y\n if b != None:\n b = tuple(b)\n return b",
"def get_sol_coordinates():\n return 0, 0",
"def split_x_y(mylist, x, y):\r\n return",
"def cartesian_to_list(cartesian_coordinate):\n x, y = cartesian_coordinate\n\n return y, x",
"def location(s, (x,y)):\n\t\treturn s.matrix[x][y]",
"def tuple(self):\n return self.start.coordinates[0], self.start.coordinates[1], self.end.coordinates[0], self.end.coordinates[1]",
"def formLines(x,y):\r\n m = []\r\n c = []\r\n mpx = []\r\n mpy = []\r\n for i in range(len(x)):\r\n for j in range(i+1,len(y)):\r\n if (x[j]-x[i]) == 0:\r\n slope = 'inf'\r\n C = x[i]\r\n else:\r\n slope = (y[j]-y[i])/(x[j]-x[i])\r\n C = y[i] - (x[i]*(slope))\r\n m.append(slope)\r\n c.append(C)\r\n mx = (x[i] + x[j])/2\r\n my = (y[i] + y[j])/2\r\n mpx.append(mx)\r\n mpy.append(my)\r\n return m,c,mpx,mpy",
"def get_adjacent(x, y):\n return [(x + 1, y), (x + 1, y + 1), (x + 1, y - 1),\n (x, y - 1), (x, y + 1), (x - 1, y),\n (x - 1, y + 1), (x - 1, y - 1)]",
"def model_pos_to_view_pos(y, x):\n return y + 1, 3 + 2 * x",
"def int_pair(self):\n return tuple([int(self.x), int(self.y)])",
"def _yxgrid_to_table(y, x) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n iy = np.repeat(np.arange(y.shape[0]), y.shape[1])\n ix = np.hstack([np.arange(x.shape[1])] * x.shape[0])\n yv = np.ravel(y)\n xv = np.ravel(x)\n return iy, ix, yv, xv",
"def get2(self, *args) -> \"SbVec2f const &\":\n return _coin.SoMultiTextureCoordinateElement_get2(self, *args)",
"def pairs(\n x_coordinates: Iterable[float], y_coordinates: Iterable[float]\n) -> tuple[tuple[float, float], ...]:\n pairs = tuple(zip(x_coordinates, y_coordinates))\n return pairs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Returns the homset from ``self`` to ``Y`` in the category ``category``
|
def _Hom_(self, Y, category):
if category is not None and not category.is_subcategory(Rings()):
raise TypeError("%s is not a subcategory of Rings()"%category)
if Y not in Rings():
raise TypeError("%s is not a ring"%Y)
from sage.rings.homset import RingHomset
return RingHomset(self, Y, category = category)
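
A hedged usage sketch, assuming a running Sage session (so `Hom`, `ZZ` and `QQ` exist): the top-level `Hom` construction dispatches to this `_Hom_` method and returns the ring homset.

H = Hom(ZZ, QQ)   # dispatches to ZZ._Hom_(QQ, category=...)
print(H)          # prints something like: Set of Homomorphisms from Integer Ring to Rational Field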
|
[
"def head_category_set(self) -> FrozenSet[Category]:\n return self._subcategory_sets[self._head_index]",
"def subSetClasse(labeledSet):\n res_plus = ls.LabeledSet(labeledSet.getInputDimension())\n res_moins = ls.LabeledSet(labeledSet.getInputDimension())\n for i in range(labeledSet.size()):\n if labeledSet.getY(i) == 1:\n res_plus.addExample(labeledSet.getX(i), 1)\n else:\n res_moins.addExample(labeledSet.getX(i), -1) \n return res_plus, res_moins",
"def get_all_categories(self, pobject):\r\n return list(set(topic.help_category for topic in self.all()))",
"def categorie_c2g(self):\n return self.__categorie_c2g",
"def class_subset(X, y, c):\n\tsubset = X[y == c,]\t\n\treturn(subset)",
"def MajPred( self , y ) :\r\n \r\n import numpy as np\r\n \r\n if y is None or self._L is None : return\r\n # check for indexing mismatch? \r\n try : y[self._L] \r\n except Exception as e :\r\n raise ValueError( 'passed y cannot be indexed by CatTreeNode._L (%s)' % e )\r\n \r\n # search through unique items in y[L], getting counts and majority element. \r\n # implementation differs for lists and for numpy.ndarrays\r\n \r\n self._c = {} # empty dictionary for counts\r\n self._C = 0 \r\n if isinstance( y , list ) : \r\n u = set( y[self._L] )\r\n for i in u :\r\n self._c[i] = y[self._L].count(i)\r\n if self._c[i] > self._C : \r\n self._y = i\r\n self._C = self._c[i]\r\n elif isinstance( y , np.ndarray ) : \r\n u = np.unique( y[self._L] )\r\n for i in u :\r\n self._c[i] = len( np.where( y[self._L] == i ) )\r\n if self._c[i] > self._C : \r\n self._y = i\r\n self._C = self._c[i]\r\n else : \r\n raise ValueError( 'y is not a comprehensible object here (list, numpy.ndarray)' )\r\n \r\n # now, self._y is set as a majority predictor, unique item counts are set in self._c, \r\n # and we can (re)set self._C as the total coverage\r\n self._C = len( self._L )\r\n \r\n # set error for this majority prediction... note using np.nditer\r\n self._e = sum( 1 if y[i] != self._y else 0 for i in self._L ) # np.nditer(self._L) )\r\n \r\n # return error count\r\n return self._e",
"def subSetClasseAmeliore(labeledSet):\n res_plus = ls.LabeledSet(labeledSet.getInputDimension())\n res_moins = ls.LabeledSet(labeledSet.getInputDimension())\n ind_plus = np.where(labeledSet.y == 1)[0]\n ind_moins = np.where(labeledSet.y == -1)[0]\n res_plus.x = labeledSet.x[ind_plus, :]\n res_plus.y = labeledSet.y[ind_plus, :]\n res_moins.x = labeledSet.x[ind_moins, :]\n res_moins.y = labeledSet.y[ind_moins, :]\n res_plus.nb_examples = ind_plus.size\n res_moins.nb_examples = ind_moins.size\n return res_plus, res_moins",
"def get_category_specifics(self):\n results = False\n attribute = False\n shop_obj = self.env['sale.shop']\n connection_obj = self.env['ebayerp.osv']\n attribute_obj = self.env['product.attribute']\n attribute_val_obj = self.env['product.attribute.value']\n\n if self:\n if isinstance(self, int):\n ids = [self._ids]\n # if isinstance(self, long ):\n # ids = [ids]\n attr_set_obj = self\n siteid = attr_set_obj.shop_id.instance_id.site_id.site\n category_code = attr_set_obj.code\n if category_code:\n search_ebay_true = [attr_set_obj.shop_id.id]\n if search_ebay_true:\n leafcategory = ''\n inst_lnk = shop_obj.browse(search_ebay_true[0]).instance_id\n app_id = inst_lnk.app_id\n if inst_lnk.sandbox:\n server_url = \"http://open.api.sandbox.ebay.com/\"\n else:\n server_url = \"http://open.api.ebay.com/\"\n if app_id and server_url and siteid and category_code:\n concate_url = \"\"\" %sshopping?callname=GetCategoryInfo&appid=%s&siteid=%s&CategoryID=%s&version=743&responseencoding=XML\"\"\" % (\n server_url, app_id, siteid, category_code)\n try:\n urlopen = urllib.request.urlopen(concate_url)\n except Exception as e:\n urlopen = ''\n if urlopen:\n mystring = urlopen.read()\n if mystring:\n response = parseString(mystring)\n if response:\n if response.getElementsByTagName('Ack'):\n if response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Success':\n if response.getElementsByTagName('LeafCategory'):\n leafcategory = \\\n response.getElementsByTagName('LeafCategory')[0].childNodes[0].data\n if leafcategory == 'false':\n raise Warning(_(\"Category is not a Leaf Category\"))\n elif leafcategory == 'true':\n leafcategory = 'true'\n else:\n raise Warning(_(\"Category is Invalid on Current Site\"))\n elif response.getElementsByTagName('Ack')[0].childNodes[0].data == 'Failure':\n long_message = response.getElementsByTagName('LongMessage')[0].childNodes[\n 0].data\n raise Warning(_(\"%s\") % long_message)\n if leafcategory == 'true':\n results = connection_obj.call(inst_lnk, 'GetCategorySpecifics', category_code, siteid)\n for item in results:\n search_id = attribute_obj.search(\n [('attr_set_id', '=', self[0].id), ('attribute_code', '=', item)])\n if not search_id:\n var = True\n if results[item]:\n if results[item][0] == 'novariation':\n var = False\n att_id = attribute_obj.create({'attr_set_id': self[0].id, 'name': item.encode(\"utf-8\"),\n 'attribute_code': item.encode(\"utf-8\"),\n 'variation_enabled': var})\n if len(results[item]):\n for val in results[item]:\n att_val_id = attribute_val_obj.create(\n {'attribute_id': att_id.id, 'name': val, 'value': val})\n\n return True",
"def populate_categories(self):\n g = self.g\n self.grammaticalCategory = Class(kgl.GrammaticalCategory, graph=g)\n self.pos = Class(kgl['POS'], graph=g)\n\n category_dict = {}\n\n self.categories = category_dict\n\n for row in wikidata_grammatical_categories.iterrows():\n label = row[1]['entityLabel.value']\n wikidata_identifier = row[1]['entity.value']\n grammatical_form = self.hash(label, \"grammatical_category\")\n category_dict[label] = kgl[grammatical_form]\n g.add((kgl[grammatical_form], rdfs_label, Literal(label))) \n g.add((kgl[grammatical_form], sameAs, URIRef(wikidata_identifier)))\n \n\n # Wikidata is a horrible mess\n # Apparently some of the most beefy categories are not (in)direct\n # subclasses of \"grammatical categories\".\n # Unfortunately, the same applies with POS - which contain more cruft\n # than anything else.\n\n lexinfo_pos = {'verb', 'adposition', 'adjective', 'adverb', 'noun',\n 'determiner', 'article', 'particle', 'pronoun',\n 'symbol', 'suffix'}\n \n other_pos = {\"conjunction\", \"preposition\", \"postposition\", \"proverb\",\n \"prefix\", \"affix\", \"letter\", \"punctuation\", \"interjection\",\n \"propername\", 'phrase'}\n \n self.all_pos = other_pos.union(lexinfo_pos)\n\n for pos in lexinfo_pos:\n g.add((kgl[pos], sameAs, lexinfo[pos]))\n \n for pos in self.all_pos:\n g.add((kgl[pos], rdf_type, self.pos.identifier))\n\n\n extra_noun_categories = [\"countable\", \"uncountable\", \"irregular\",\n \"usually uncountable\", \"unattested plural\",\n \"uncertain plural\"]\n\n extra_verb_categories = [\"defective\"]\n\n extra_adjective_categories = [\"positive\", \"comparative\", \"superlative\",\n \"not comparable\", \"comparable-only\",\n \"generally not comparable\"]\n\n wikidata_grammatical_categories_list = \\\n wikidata_grammatical_categories['entityLabel.value'].to_list()\n\n for cat in extra_noun_categories + extra_verb_categories + \\\n extra_adjective_categories + \\\n wikidata_grammatical_categories_list:\n cat_id = self.add_category(cat)\n category_dict[cat] = cat_id",
"def _Hom_(self, codomain, cat=None):\n\n from .number_field import is_NumberFieldHomsetCodomain\n if is_NumberFieldHomsetCodomain(codomain):\n from . import morphism\n return morphism.RelativeNumberFieldHomset(self, codomain)\n else:\n raise TypeError",
"def my_category(self, cat):\n categories = Category.objects.all(name=cat)\n return categories",
"def getCatSet(self, word):\n cs = set()\n for c in self.catDict:\n if word in self.catDict[c][1]:\n if c not in cs:\n cs.add(c)\n return cs",
"def _prepare_category(self):\n if os.path.exists(cfg.GQA.CATEGORIES_FILE):\n print('load category file from %s' % (cfg.GQA.CATEGORIES_FILE))\n return pickle.load(open(cfg.GQA.CATEGORIES_FILE, 'rb'))\n elif 'train' not in self.name and not os.path.exists(cfg.GQA.CATEGORIES_FILE):\n assert False, \"File %s does not exist.\" % cfg.GQA.CATEGORIES_FILE\n categories = set()\n attributes = set()\n for image_id, scene_graph in self.scenes.items():\n for object_ in scene_graph['objects'].values():\n categories.add(object_['name'])\n attributes.update(object_['attributes'])\n if not os.path.exists(cfg.GQA.CATEGORIES_FILE):\n print('write category file into %s' % (cfg.GQA.CATEGORIES_FILE))\n pickle.dump([list(categories), list(attributes)], open(cfg.GQA.CATEGORIES_FILE, 'wb'))\n return list(categories), list(attributes)",
"def category (self):\n return self.__category",
"def get_categories(self):\n product_categories = {\n \"tsne_x\": self.x,\n \"tsne_y\": self.y,\n \"product\": self.labels,\n \"category_label\": self.kmeans.predict(np.column_stack((self.x, self.y))),\n \"tmp_sort\": self.labels,\n }\n product_categories = pd.DataFrame(data=product_categories)\n product_categories[\"tmp_sort\"] = product_categories[\"tmp_sort\"].astype(float)\n product_categories = product_categories.sort_values(by=\"tmp_sort\")\n del product_categories[\"tmp_sort\"]\n return product_categories",
"def get_all_category(self):\n categories = Category.objects.all()\n return categories",
"def _point_homset(self, *args, **kwds):\n from sage.schemes.affine.affine_homset import SchemeHomset_points_spec\n return SchemeHomset_points_spec(*args, **kwds)",
"def submenu(self, category):\n return [i for i in self if category in i['category']]",
"def y_set(y):\n set_ = set()\n for x in range(len(table)):\n if table[x][y] != 0:\n set_.add(table[x][y])\n\n return set_"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The monoid of the ideals of this ring.
|
def ideal_monoid(self):
try:
from sage.rings.ideal_monoid import IdealMonoid
return IdealMonoid(self)
except TypeError:
from sage.rings.noncommutative_ideals import IdealMonoid_nc
return IdealMonoid_nc(self)
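An illustrative call on a concrete commutative ring, as a sketch assuming a Sage session (ZZ and its ideals come from sage.all, not from this snippet).

from sage.all import ZZ

M = ZZ.ideal_monoid()             # the IdealMonoid of Integer Ring
I = ZZ.ideal(6)
J = ZZ.ideal(4)
assert I * J == ZZ.ideal(24)      # multiplication of principal ideals inside the monoid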
|
[
"def identity_morphism(self):\n return KenzoChainComplexMorphism(__idnt_mrph__(self._kenzo))",
"def one_from_one_basis(self):\n return self.monomial(self.one_basis()) #.",
"def binomial(self):\n return self._binomial",
"def identity_morphism(self):\n from sage.schemes.generic.morphism import SchemeMorphism_id\n return SchemeMorphism_id(self)",
"def as_ring_element(self):\n\n return self.parent().graded_ring()(self._rat)",
"def getRing(self):\n return PadicIntegerRing.getInstance(self.p)",
"def coordinate_ring(self):\n try:\n return self._coordinate_ring\n except AttributeError:\n raise ValueError(\"This scheme has no associated coordinated ring (defined).\")",
"def base_ring(self):\n return (self.functions()[0]).base_ring()",
"def monoid_generators(self):\n G = self.group_generators()\n from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets\n if G not in FiniteEnumeratedSets():\n raise NotImplementedError(\"currently only implemented for finitely generated groups\")\n from sage.sets.family import Family\n return Family(tuple(G) + tuple(~x for x in G))",
"def bottom_area(self):\n\t\treturn PI * power(self.r, 2)",
"def getPNJunction(self):\n return self.pn_junction",
"def dual_basis(self):\n return self.realization_of().dual().w()",
"def get_son(self):\n return self.left",
"def semi_perimeter(self):\n return (self.side_a + self.side_b + self.side_c) / 2.0",
"def base_morphism(self):\n try:\n return self._base_morphism\n except AttributeError:\n from sage.categories.schemes import Schemes\n from sage.schemes.generic.spec import SpecZ\n SCH = Schemes()\n if hasattr(self, '_base_scheme'):\n self._base_morphism = self.Hom(self._base_scheme, category=SCH).natural_map()\n elif hasattr(self, '_base_ring'):\n self._base_morphism = self.Hom(AffineScheme(self._base_ring), category=SCH).natural_map()\n else:\n self._base_morphism = self.Hom(SpecZ, category=SCH).natural_map()\n return self._base_morphism",
"def R_oo(self):\n return self.Roo",
"def dual(self):\n return self._dual",
"def get_semiperimeter(self):\n return (self.left_side + self.right_side + self.bottom_side) / 2",
"def inner(self):\n\n inner_array = nd.morphology.binary_erosion(self.bitmap)\n return Region(inner_array)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the characteristic of this ring.
|
def characteristic(self):
from sage.rings.infinity import infinity
from sage.rings.integer_ring import ZZ
order_1 = self.one().additive_order()
return ZZ.zero() if order_1 is infinity else order_1
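For concreteness, the additive order of 1 yields the expected values on a few standard rings; a sketch assuming a Sage session.

from sage.all import ZZ, GF, Zmod

assert ZZ.characteristic() == 0         # 1 has infinite additive order
assert GF(7).characteristic() == 7
assert Zmod(12).characteristic() == 12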
|
[
"def characteristic_polynomial(self):\n # TODO\n pass",
"def risk_characteristic(self):\n return self._risk_characteristic",
"def get_characteristic(self, uuid):\r\n for char in self.characteristics:\r\n if char.get_uuid() == uuid:\r\n return char\r\n\r\n return None",
"def getPart(self) -> \"int\":\n return _coin.SoConeDetail_getPart(self)",
"def characteristics(self):\n if not hasattr(self, '_characteristics'):\n self._characteristics = ()\n return self._characteristics",
"def get_strength(self):\n return self.__strength",
"def getPart(self) -> \"int\":\n return _coin.SoCylinderDetail_getPart(self)",
"def value(self) -> \"char\":\n return _coin.charp_value(self)",
"def get_flags(self):\r\n return self.properties.Get(BT_CHARACTERISTIC_IFACE, 'Flags')",
"def read_characteristic(self, handle):\n try:\n _LOGGER.debug(\"Reading %s\", handle)\n return self._conn.readCharacteristic(handle)\n except btle.BTLEException as ex:\n _LOGGER.error(\"Got exception from bluepy while making a request: %s\", ex)\n raise ex",
"def gattc_read_characteristic(\n self,\n characteristic: Union[_CharacteristicHandle, _CharacteristicTuple],\n /,\n ) -> bytes:\n ...",
"def getEnergy(self) -> float:\n ...",
"def value(self):\n return super(CompositeOutputDevice, self).value",
"def charge(self):\n return self.__charge",
"def getMeterReading(self):\n return self._MeterReading",
"def get_power(self):\r\n return self._power",
"def getData(self) -> \"uint16_t\":\n return _coin.SoType_getData(self)",
"def state(self):\n return self._data.get('temperature')",
"def get_reg_ctrl_hum(self):\n return self.ho & 0x07"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Quotient of a ring by a two-sided ideal.
|
def quotient_ring(self, I, names=None):
return self.quotient(I,names=names)
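A short sketch of the delegation, assuming a Sage session; the ring and ideal below are illustrative choices, not taken from the snippet.

from sage.all import QQ, PolynomialRing

R = PolynomialRing(QQ, 'x')
x = R.gen()
S = R.quotient_ring(R.ideal(x**2 + 1), names='i')   # forwards to R.quotient(I, names='i')
assert S.gen()**2 == -1                              # the image of x squares to -1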
|
[
"def discount(t,r):\r\n return (1+r)**(-t)",
"def _radius_eq23(th, th1):\n return np.sin(th1)/np.sin(th+th1)",
"def quotient_by_principal_ideal(self, f, names=None):\n from sage.rings.ideal import Ideal\n I = Ideal(f)\n if I.is_zero():\n return self\n f = I.gen()\n from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing\n return PolynomialQuotientRing(self, f, names)",
"def calculate_kelvin_term(particle_diameter, kelvin_diameter):\n\n return 10 ** (kelvin_diameter / particle_diameter)",
"def top_circumference(self):\n\t\treturn 2 * PI * self.r_2",
"def gRD(RD):\r\n q = 0.0057565\r\n pi = math.pi\r\n return 1 / math.sqrt(1 + 3 * q**2 * (RD**2)/(pi**2))",
"def bottom_circumference(self):\n\t\treturn 2 * PI * self.r_1",
"def std_rate_radius_m(tas_knots):\n # a standard rate turn is at 3 deg/sec, or a 1-minute to complete 180 degrees of turn\n turn_time_sec = 60.0 # seconds\n \n # compute the distance flown in the turn time (half circle)\n # meters nm/hr hr/sec sec m/nm\n distance_flown_m = tas_knots * (1.0/3600.0) * turn_time_sec * 1852.0\n \n # a half circle traces out half a circumference (2*pi*r/2) and is the same as the distance flown above\n return distance_flown_m / math.pi",
"def SpreadFactor(self): \n return 4.5",
"def get_front_wheel_radius():\n\t# I ran the following code\n\t# cozmo_drive_straight(robot, 3.14 * 2 * 50, 30)\n\t# and I counted 13 rotations of 120 deg (the wheels have three radial marks)\n\t# Thus, 13/3 rotations takes you pi * 2 * r * (13/3) = pi * 2 * 50 mm\n\t# so r = 50 * (3/13)\n\treturn (50 * 3) / 13",
"def deposit(tents):\n tent = (float(tents) * float(0.10))\n return round(tent, 2)",
"def denominator(self):\n try:\n return self._denom_ideal\n except AttributeError:\n pass\n self._denom_ideal = (self + self.number_field().unit_ideal())**(-1)\n return self._denom_ideal",
"def getCavityQ(self, double: float) -> float:\n ...",
"def circumference(self):\n\t\treturn 2 * PI * self.r",
"def _get_radius(self) -> \"double\" :\n return _core.Cylinder__get_radius(self)",
"def diffusion():\n return 5.1412512431",
"def final_amt(p, r, n, t):\r\n a = p*(1+r/n)**(n*t)\r\n return a",
"def _rate_of_spread(self):\n pass",
"def circle_ring(R,r):\n A = pi * (R**2 - r**2)\n return A"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extend this ring by one or several elements to create a polynomial ring, a power series ring, or an algebraic extension. This is a convenience method intended primarily for interactive use.
|
def __getitem__(self, arg):
def normalize_arg(arg):
if isinstance(arg, (tuple, list)):
# Allowing arbitrary iterables would create confusion, but we
# may want to support a few more.
return tuple(arg)
elif isinstance(arg, str):
return tuple(arg.split(','))
else:
return (arg,)
# 1. If arg is a list, try to return a power series ring.
if isinstance(arg, list):
if arg == []:
raise TypeError("power series rings must have at least one variable")
elif len(arg) == 1:
# R[["a,b"]], R[[(a,b)]]...
if isinstance(arg[0], list):
raise TypeError("expected R[...] or R[[...]], not R[[[...]]]")
elts = normalize_arg(arg[0])
else:
elts = normalize_arg(arg)
from sage.rings.power_series_ring import PowerSeriesRing
return PowerSeriesRing(self, elts)
if isinstance(arg, tuple):
from sage.categories.morphism import Morphism
if len(arg) == 2 and isinstance(arg[1], Morphism):
from sage.rings.polynomial.skew_polynomial_ring_constructor import SkewPolynomialRing
return SkewPolynomialRing(self, arg[1], names=arg[0])
# 2. Otherwise, if all specified elements are algebraic, try to
# return an algebraic extension
elts = normalize_arg(arg)
try:
minpolys = [a.minpoly() for a in elts]
except (AttributeError, NotImplementedError, ValueError, TypeError):
minpolys = None
if minpolys:
# how to pass in names?
# TODO: set up embeddings
names = tuple(_gen_names(elts))
try:
# Doing the extension all at once is best, if possible...
return self.extension(minpolys, names)
except (TypeError, ValueError):
# ...but we can also construct it iteratively
return reduce(lambda R, ext: R.extension(*ext), zip(minpolys, names), self)
    # 3. Otherwise, try to return a polynomial ring
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(self, elts)
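The branches above correspond to the familiar bracket shortcuts; a sketch assuming a Sage session, with the expected parents noted in the comments.

from sage.all import ZZ, QQ, sqrt

R1 = QQ['x']        # Univariate Polynomial Ring in x over Rational Field
R2 = QQ['x,y']      # Multivariate Polynomial Ring in x, y over Rational Field
P = ZZ[['t']]       # list argument: Power Series Ring in t over Integer Ring
K = QQ[sqrt(2)]     # algebraic element: a number field generated by sqrt(2)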
|
[
"def base_extend(self, R):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n\n if R.has_coerce_map_from(self.base_ring()):\n return PolynomialRing(R, names=self.variable_name(), sparse=self.is_sparse())\n else:\n raise TypeError(\"no such base extension\")",
"def polyAppend(texture=int, point=float, append=(), subdivision=int, name=\"string\", hole=bool, edge=int, constructionHistory=bool):\n pass",
"def base_extend(self, R):\n from sage.categories.commutative_rings import CommutativeRings\n if R in CommutativeRings():\n return AffineScheme(self.coordinate_ring().base_extend(R), self.base_ring())\n if not self.base_scheme() == R.base_scheme():\n raise ValueError('the new base scheme must be a scheme over the old base scheme')\n return AffineScheme(self.coordinate_ring().base_extend(new_base.coordinate_ring()),\n self.base_ring())",
"def extend(self, *args):\n return _wali.SemElem_extend(self, *args)",
"def pyextend(self, *args):\n #Deref smart pointer if needed\n if hasattr(args[1], '__deref__'):\n tmp = list(args)\n tmp[1] = tmp[1].__deref__()\n args = tuple(tmp)\n\n\n return _wali.PySemElem_pyextend(self, *args)",
"def extendBy(self, *args):\n return _coin.SbBox2d_extendBy(self, *args)",
"def extendCurve(extensionType=int, pointZ=\"string\", inputPoint=\"string\", distance=\"string\", curveOnSurface=bool, pointX=\"string\", object=bool, start=int, nodeState=int, range=bool, replaceOriginal=bool, name=\"string\", join=bool, pointY=\"string\", removeMultipleKnots=bool, caching=bool, constructionHistory=bool, extendMethod=int):\n pass",
"def _add_ring(self, ring, is_hole=False, **attrs):\r\n if (ring[0, :] == ring[-1, :]).all():\r\n # unclose\r\n ring = ring[:-1]\r\n for a in attrs:\r\n attrs[a] = attrs[a][:-1]\r\n if is_hole:\r\n # add a hole center - assuming hole is convex!\r\n self._holes = np.vstack((self._holes, ring.mean(axis=0)))\r\n segments = np.zeros((ring.shape[0], 2), dtype=np.int32)\r\n segments[:, 0] = np.arange(self._points.shape[0], self._points.shape[0] + ring.shape[0])\r\n segments[:-1, 1] = np.arange(self._points.shape[0] + 1, self._points.shape[0] + ring.shape[0])\r\n segments[-1, 1] = self._points.shape[0]\r\n self._ring_slices.append((self._points.shape[0], self._points.shape[0] + ring.shape[0]))\r\n self._points = np.vstack((self._points, ring))\r\n for a in attrs:\r\n self._attrs[a] = np.concatenate((self._attrs[a], attrs[a]))\r\n self._segments = np.vstack((self._segments, segments))",
"def polygens(base_ring, names=\"x\"):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n return PolynomialRing(base_ring, names).gens()",
"def change_ring(self, R):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n\n return PolynomialRing(R, names=self.variable_name(), sparse=self.is_sparse())",
"def extendBy(self, *args):\n return _coin.SbBox3d_extendBy(self, *args)",
"def extend(self, *args):\n return _wali.SemElemPtr_extend(self, *args)",
"def extend_variables(self, added_names, order = 'degrevlex'):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n\n if isinstance(added_names, str):\n added_names = added_names.split(',')\n return PolynomialRing(self.base_ring(), names = self.variable_names() + tuple(added_names), order = order)",
"def extendSurface(surfacesurface, extensionType=int, distance=\"string\", extendDirection=int, object=bool, nodeState=int, replaceOriginal=bool, name=\"string\", extendSide=int, join=bool, caching=bool, constructionHistory=bool, extendMethod=int):\n pass",
"def extend(self, list):",
"def specify_ring_extensions(self, i, basename):\n cython.declare(grps=list, label_list=list, grp=Group, grpc=Group, atom_type=list, atom_type_str=str, k=AtomType,\n p=str)\n\n grps = []\n label_list = []\n\n grp = deepcopy(self)\n grpc = deepcopy(self)\n grp.atoms[i].props['inRing'] = True\n grpc.atoms[i].props['inRing'] = False\n\n atom_type = grp.atoms[i].atomtype\n\n if len(atom_type) > 1:\n atom_type_str = ''\n for k in atom_type:\n label_list.append(k.label)\n for p in sorted(label_list):\n atom_type_str += p\n else:\n atom_type_str = atom_type[0].label\n\n grps.append((grp, grpc, basename + '_' + str(i + 1) + atom_type_str + '-inRing', 'ringExt', (i,)))\n\n return grps",
"def extendBy(self, *args) -> \"void\":\n return _coin.SbBox2d_extendBy(self, *args)",
"def polyAppendVertex(texture=int, point=float, append=(), vertex=int, name=\"string\", hole=bool, constructionHistory=bool):\n pass",
"def extendBy(self, *args) -> \"void\":\n return _coin.SbBox3d_extendBy(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r""" Return whether this element is a unit in the ring.
|
def is_unit(self):
if self.is_one() or (-self).is_one():
return True
if self.is_zero(): # now 0 != 1
return False
raise NotImplementedError
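The generic fallback only decides 0 and +/-1; concrete rings override it, as in this sketch assuming a Sage session.

from sage.all import ZZ, Zmod

assert ZZ(1).is_unit() and ZZ(-1).is_unit()
assert not ZZ(2).is_unit()            # ZZ provides its own is_unit
assert Zmod(9)(2).is_unit()           # a unit because gcd(2, 9) == 1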
|
[
"def has_unit(obj):\n return hasattr(obj, UNIT_ATTR_NAME)",
"def has_units(obj):\n return hasattr(obj, 'units')",
"def is_unit(ustr):\n ustr = backwards.bytes2unicode(ustr)\n if is_null_unit(ustr):\n return True\n try:\n as_unit(ustr)\n except ValueError:\n return False\n return True",
"def is_unitary(self) -> bool:\n return is_unitary(self.tensor(reshape_to_square_matrix=True))",
"def is_simple_unit(self, unit):\n if self.unit_denom or len(self.unit_numer) > 1:\n return False\n\n if not self.unit_numer:\n # Empty string historically means no unit\n return unit == ''\n\n return self.unit_numer[0] == unit",
"def is_in_unit_circle(self) -> bool:\n return (self.x**2 + self.y**2) <= 1",
"def check_attr_unit(self, element, attr, unit_list):\n if attr in element.attrib:\n unit = self.parse_length(element.get(attr), percent=True)[1]\n return unit in unit_list",
"def is_glozz_unit(anno):\n return isinstance(anno, educe.annotation.Unit)",
"def _is_units(units):\n # Must be a dict and all items must be a unit\n return (isinstance(units, dict)\n and not isinstance(units, MetaDict)\n and all(isinstance(units[key], u.UnitBase) for key in units))",
"def HasElementType(self) -> bool:",
"def is_S_unit(self,S):\n return self.prime_to_S_part(S).is_trivial()",
"def _get_areSymbolsForUnitDisplayed(self) -> \"bool\" :\n return _core.UnitAndValuePreferences__get_areSymbolsForUnitDisplayed(self)",
"def has_nutrient(self):\n return self._nutrient is not None",
"def verify_unit_name(self, name):\n return self.student.student_class.units.filter(name=name).exists()",
"def hasNutrient(self):\n return self.heldNutrient is not None",
"def is_unit_mine(self, unit: Unit) -> bool:\n return unit.owner == self.me",
"def is_canon(self):\n for data_key in self.units:\n mu = self.units[data_key]\n if mu.unit_string != mu.canonical_unit_string:\n return False\n\n return True",
"def is_us(self) -> bool:\n return self.area_code == \"0000\"",
"async def unit_exists(self, unit: str) -> bool:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method called when pressing the button to convert to .CSV and TFRecord format
|
def convertFiles(self):
try:
if self.ui.lineE4.text() != '' and self.ui.lineE5.text() != '':
if self._toCSV():
                if self._generarTFRecord():  # create the TensorFlow Record files
print('TFRecord creados con exito')
else:
print('algo salio mal al crear TFRecord')
else:
print('algo salio mal al crear CSV')
else:
print('No se puede inciar')
except Exception as ex:
print(ex)
|
[
"def _saveCSV( self ):",
"def test_export_csv_to_file(self):\n pass",
"def _write_tfrecords_file(self, annotations, path_to_tfrecords):",
"def _generarTFRecord(self):\n try:\n argslist = []\n mydir = str(os.path.join(os.getcwd(), 'tools'))\n dirTF = str(os.path.dirname(self.ui.lineE4.text()))\n\n for set in self.sets:\n #arg1 = str(os.environ['ENV1']) \n arg1 = 'python'\n arg2 = 'generate_tfrecord.py'\n arg3 = '--csv_input={}/{}_labels.csv'.format(dirTF, set)\n arg4 = '--output_path={}/{}.record'.format(dirTF, set)\n arg5 = '--image_dir={}'.format(self.ui.lineE5.text())\n argslist = [arg1, arg2, arg3, arg4, arg5]\n subprocess.call(argslist, cwd=mydir) # run\n \n self.statusBar().showMessage(\"TFRecord creados correctamente en: {}\".format(\n \t\t\t\t\t\t\t\t\t\t\t\t\t\t\tos.path.dirname(self.ui.lineE4.text())))\n return True\n\n except Exception as ex:\n print(ex)\n self.statusBar().showMessage(\"Error al crear TF Record\")\n return False",
"def test_csv_and_tfrecord(self):\n\n # Test model training on CSV data\n data_dir = os.path.join(self.root_data_dir, \"csv\")\n feature_config_path = os.path.join(self.root_data_dir, \"config\", self.feature_config_fname)\n\n csv_loss, csv_mrr = self.run_default_pipeline(\n data_dir=data_dir, data_format=\"csv\", feature_config_path=feature_config_path\n )\n\n # Check if the loss and accuracy on the test set is the same\n assert np.isclose(csv_loss, 0.56748, rtol=0.01)\n assert np.isclose(csv_mrr, 0.70396, rtol=0.01)\n\n # Test model training on TFRecord SequenceExample data\n data_dir = os.path.join(self.root_data_dir, \"tfrecord\")\n feature_config_path = os.path.join(self.root_data_dir, \"config\", self.feature_config_fname)\n\n tfrecord_loss, tfrecord_mrr = self.run_default_pipeline(\n data_dir=data_dir, data_format=\"tfrecord\", feature_config_path=feature_config_path\n )\n\n # Check if the loss and accuracy on the test set is the same\n assert np.isclose(tfrecord_loss, 0.56748, rtol=0.01)\n assert np.isclose(tfrecord_mrr, 0.70396, rtol=0.01)\n\n # Compare CSV and TFRecord loss and accuracies\n assert np.isclose(tfrecord_loss, csv_loss, rtol=0.01)\n assert np.isclose(tfrecord_mrr, csv_mrr, rtol=0.01)",
"def updateIntoCsv(self,filename,where):\n\t\tpass",
"def convert(file_uploaded):\r\n\r\n # Open Input CSV File\r\n input_file = open(file_uploaded, mode='r')\r\n csv_file = csv.DictReader(input_file)\r\n\r\n # Remove Existing File\r\n if os.path.exists(OUTPUT_FILE):\r\n os.remove(OUTPUT_FILE)\r\n\r\n # Open Output file\r\n output = open(OUTPUT_FILE, 'w')\r\n\r\n # Write Header\r\n output.write(\"date;paymode;info;payee;memo;amount;category;tags\\n\")\r\n\r\n # Parse out the Bank Statment\r\n for row in csv_file:\r\n # Detect Bank\r\n if csv_file.fieldnames[0] != \"Posted Account\":\r\n boi_line_parser(row, output)\r\n else:\r\n aib_line_parser(row, output)\r\n\r\n # Clean Up\r\n output.close()\r\n input_file.close()",
"def convert_custom_csv_to_tsv(input, output_path, label_col, text_col, id_col=None, skip_header=True,\r\n output_format=DEFAULT_OUT_FORMAT):\r\n convert_custom_input_to_tsv(input, \",\", output_path, label_col, text_col, id_col=id_col, skip_header=skip_header,\r\n output_format=output_format)\r\n return None",
"def process_csv(self, file_name: str):",
"def convert2tfrecord(self, output_path=None, data_type=\"train\"):\n feature_mapper = self.feature_map\n\n def parsing_loop(in_queue=None, out_queue=None):\n \"\"\"\n function to be executed within each parsing process.\n\n Args:\n in_queue: the queue used to store avazu data records as strings.\n out_queue: the queue used to store serialized tf.Examples as strings.\n \"\"\"\n while True: # loop.\n raw_record = in_queue.get() # read from in_queue.\n logging.debug('parsing_loop raw_example:{}'.format(raw_record))\n if raw_record == \"DONE\":\n # We were done here.\n break\n features = {} # dict for all feature columns and target column.\n # parse the record according to proto definitions.\n values = raw_record.rstrip('\\n').split(',')\n if len(values) != len(self.field_names) + 1:\n continue\n features = {self.idx_to_field_name[idx]: self._int64_feature(feature_mapper[self.idx_to_field_name[idx]][value]) for idx, value in enumerate(values)\n if self.idx_to_field_name[idx] != 'click' and value in feature_mapper[self.idx_to_field_name[idx]]}\n feature_values = {self.idx_to_field_name[idx]+':weight': self._float_feature(1) for idx, value in enumerate(values)\n if self.idx_to_field_name[idx] != 'click' and value in feature_mapper[self.idx_to_field_name[idx]]}\n\n features.update(feature_values)\n features.update({'target': self._float_feature(float(values[1]))})\n logging.debug('parsing_loop features:{}'.format(features))\n logging.debug('parsing_loop feature_values:{}'.format(feature_values))\n\n # create an instance of tf.Example.\n example = tf.train.Example(features=tf.train.Features(feature=features))\n # serialize the tf.Example to string.\n raw_example = example.SerializeToString()\n\n # write the serialized tf.Example out.\n out_queue.put(raw_example)\n\n def writing_loop(out_queue, out_file):\n \"\"\"\n function to be executed within the single writing process.\n\n Args:\n out_queue: the queue used to store serialized tf.Examples as strings.\n out_file: string, path to the TFRecord file for transformed tf.Example protos.\n \"\"\"\n writer = tf.io.TFRecordWriter(out_file) # writer for the output TFRecord file.\n sample_count = 0\n while True:\n raw_example = out_queue.get() # read from out_queue.\n logging.debug('writing_loop raw_example:{}'.format(raw_example))\n if raw_example == \"DONE\":\n break\n writer.write(raw_example) # write it out.\n sample_count += 1\n if not sample_count % 1000:\n logging.info('%s Processed %d examples' % (datetime.now(), sample_count))\n sys.stdout.flush()\n writer.close() # close the writer.\n logging.info('%s >>>> Processed %d examples <<<<' % (datetime.now(), sample_count))\n self.sample_cnt = sample_count\n sys.stdout.flush()\n\n in_queue = Queue() # queue for raw gdt training data records.\n out_queue = Queue() # queue for serialized tf.Examples.\n # start parsing processes.\n num_parsers = int(multiprocessing.cpu_count() - 2)\n parsers = []\n for i in range(num_parsers):\n p = Process(target=parsing_loop, args=(in_queue, out_queue))\n parsers.append(p)\n p.start()\n\n # start writing process.\n writer = Process(target=writing_loop, args=(out_queue, output_path))\n writer.start()\n logging.info('%s >>>> BEGIN to feed input file %s <<<<' % (datetime.now(), self.path))\n # read a record in.\n with open(self.path) as f:\n f.readline()\n pbar = tqdm(f, mininterval=1, smoothing=0.1)\n pbar.set_description('reading avazu dataset')\n line_num = 0\n train_cnt = 0\n test_cnt = 0\n for line in pbar:\n if line_num == 0:\n line_num += 1\n continue\n if data_type 
== \"train\":\n if \"141030\" in line.rstrip('\\n').split(',')[2]:\n test_cnt += 1\n continue\n train_cnt += 1\n else:\n if \"141030\" not in line.rstrip('\\n').split(',')[2]:\n continue\n in_queue.put(line) # write to in_queue.\n self.train_cnt = train_cnt\n self.test_cnt = test_cnt\n # terminate and wait for all parsing processes.\n for i in range(num_parsers):\n in_queue.put(\"DONE\")\n for i in range(num_parsers):\n parsers[i].join()\n\n # terminate and wait for the writing process.\n out_queue.put(\"DONE\")\n writer.join()\n logging.info('%s >>>> END of consuming input file %s <<<<' % (datetime.now(), self.path))\n sys.stdout.flush()",
"def set_fugro_csv_file_format(logger):\n\n logger.file_format = \"Fugro-csv\"\n logger.file_timestamp_embedded = True\n logger.first_col_data = \"Timestamp\"\n logger.file_ext = \"csv\"\n logger.file_delimiter = \",\"\n logger.num_headers = 3\n logger.channel_header_row = 2\n logger.units_header_row = 3\n\n return logger",
"def test_export_csv_in_job(self):\n pass",
"def main():\n filepath = \"input.csv\"\n delim = \";\"\n\n if len(sys.argv) > 1:\n filepath = sys.argv[1]\n if len(sys.argv) > 2:\n delim = \";\"\n\n conversion(filepath, delim, \"output.json\")",
"def cli(csvfile, check, config, total, convert, profile):\n\n convertor = CSVledger(config, profile)\n\n if total:\n convertor.convert_file(csvfile)\n print(convertor.get_totals())\n else:\n print(convertor.convert_file(csvfile))",
"def csv_to_vw(loc_csv, loc_output, train=True):\n start = datetime.now()\n print(\"\\nTurning %s into %s. Is_train_set? %s\"%(loc_csv,loc_output,train))\n \n with open(loc_output,\"wb\") as outfile:\n for e, row in enumerate( DictReader(open(loc_csv)) ):\n\t\n\t #Creating the features\n numerical_features = \"\"\n categorical_features = \"\"\n for k,v in row.items():\n if k not in [\"Label\",\"Id\"]:\n if \"I\" in k: # numerical feature, example: I5\n if len(str(v)) > 0: #check for empty values\n numerical_features += \" %s:%s\" % (k,v)\n if \"C\" in k: # categorical feature, example: C2\n if len(str(v)) > 0:\n categorical_features += \" %s\" % v\n\t\t\t \n\t #Creating the labels\t\t \n if train: #we care about labels\n if row['Label'] == \"1\":\n label = 1\n else:\n label = -1 #we set negative label to -1\n outfile.write( \"%s '%s |i%s |c%s\\n\" % (label,row['Id'],numerical_features,categorical_features) )\n\t\t\n else: #we dont care about labels\n outfile.write( \"1 '%s |i%s |c%s\\n\" % (row['Id'],numerical_features,categorical_features) )\n \n\t #Reporting progress\n if e % 1000000 == 0:\n print(\"%s\\t%s\"%(e, str(datetime.now() - start)))\n\n print(\"\\n %s Task execution time:\\n\\t%s\"%(e, str(datetime.now() - start)))",
"def preprocess_tsv(line,\n field_delim='\\t',\n num_fields=2,\n inputs_format='{0}',\n targets_format='{1}',\n field_names=None,\n use_quote_delim=False):\n def _format_part_with_field_numbers(part, field_values):\n found = re.findall(r'{(\\d+)}', part)\n if found:\n return field_values[int(found[0])]\n else:\n return part\n\n def _format_part_with_field_names(part, field_names, field_values):\n field_names_re = '|'.join(['{{({})}}'.format(x) for x in field_names])\n found = re.findall(field_names_re, part)\n if found:\n pos = field_names.index(''.join(found[0]))\n return field_values[int(pos)]\n else:\n return part\n\n def _format(format_string, field_names, field_values):\n if field_names is None:\n parts = [\n _format_part_with_field_numbers(p, field_values)\n for p in re.split(r'({\\d+})', format_string)\n ]\n else:\n field_names_re = '(' + '|'.join(['{{{}}}'.format(x) for x in field_names\n ]) + ')'\n parts = [\n _format_part_with_field_names(p, field_names, field_values)\n for p in re.split(field_names_re, format_string)\n ]\n return tf.strings.join(parts)\n\n field_values = tf.io.decode_csv(\n line,\n record_defaults=[''] *\n (num_fields if field_names is None else len(field_names)),\n field_delim=field_delim,\n use_quote_delim=use_quote_delim)\n return {\n 'inputs': _format(inputs_format, field_names, field_values),\n 'targets': _format(targets_format, field_names, field_values)\n }",
"def on_toCsv_clicked(self):\n pth = self.path.text()\n print(pth)\n\n # Save data as .csv\n conn = sqlite3.connect('stock.db')\n stockModel = dm.StockModel(conn)\n stockModel.to_csv(pth)",
"def test_des_to_csv(self):\n self.f.des_to_csv(self.f3_in,self.f3_out,self.sep2)\n self.assertTrue(cmp(self.f3_out,self.f3_ref,\"File produced have changed\"))",
"def make_csv_coder(schema):\n raw_feature_spec = get_raw_feature_spec(schema)\n parsing_schema = schema_utils.schema_from_feature_spec(raw_feature_spec)\n return tft_coders.CsvCoder(CSV_COLUMN_NAMES, parsing_schema)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method called when pressing the 'Generar TFRecord' (Generate TFRecord) button
|
def _generarTFRecord(self):
try:
argslist = []
mydir = str(os.path.join(os.getcwd(), 'tools'))
dirTF = str(os.path.dirname(self.ui.lineE4.text()))
for set in self.sets:
#arg1 = str(os.environ['ENV1'])
arg1 = 'python'
arg2 = 'generate_tfrecord.py'
arg3 = '--csv_input={}/{}_labels.csv'.format(dirTF, set)
arg4 = '--output_path={}/{}.record'.format(dirTF, set)
arg5 = '--image_dir={}'.format(self.ui.lineE5.text())
argslist = [arg1, arg2, arg3, arg4, arg5]
subprocess.call(argslist, cwd=mydir) # run
self.statusBar().showMessage("TFRecord creados correctamente en: {}".format(
os.path.dirname(self.ui.lineE4.text())))
return True
except Exception as ex:
print(ex)
self.statusBar().showMessage("Error al crear TF Record")
return False
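The subprocess call above amounts to running the conversion script by hand once per split; a hedged sketch with placeholder paths (the directories, the split names, and the tools/generate_tfrecord.py helper are assumptions standing in for the UI fields, not taken from the snippet).

import os
import subprocess

csv_dir = "/data/annotations"    # placeholder for os.path.dirname(self.ui.lineE4.text())
image_dir = "/data/images"       # placeholder for self.ui.lineE5.text()
for split in ("train", "test"):  # placeholder for self.sets
    args = [
        "python", "generate_tfrecord.py",
        "--csv_input={}/{}_labels.csv".format(csv_dir, split),
        "--output_path={}/{}.record".format(csv_dir, split),
        "--image_dir={}".format(image_dir),
    ]
    subprocess.call(args, cwd=os.path.join(os.getcwd(), "tools"))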
|
[
"def make_TF_instance(TF_rec):\n tf = TFInstance(protein_accession=TF_rec.name,\n name=TF_rec.name,\n description=TF_rec.description)\n tf.save()",
"def create_tfrecord(task_name, split, processor, tokenizer, pad_for_eval=False):\n if task_name != FLAGS.task_name and task_name == \"diagnostic\":\n # a corner case\n data_dir = os.path.join(os.path.dirname(FLAGS.data_dir), task_name)\n output_dir = os.path.join(os.path.dirname(FLAGS.output_dir), task_name)\n else:\n data_dir = FLAGS.data_dir\n output_dir = FLAGS.output_dir\n if not tf.io.gfile.exists(output_dir):\n tf.io.gfile.makedirs(output_dir)\n\n # Path to the tfrecord & meta data\n tok_basename = os.path.basename(FLAGS.tokenizer_path)\n file_base = \"{}.len-{}.{}.tfrecord\".format(\n tok_basename, FLAGS.max_seq_length, split)\n file_path = os.path.join(output_dir, file_base)\n meta_path = file_path.replace(\"tfrecord\", \"meta.json\")\n\n if (FLAGS.overwrite_data or not tf.io.gfile.exists(file_path)\n or not tf.io.gfile.exists(meta_path)):\n # Load examples\n if split == \"train\":\n examples = processor.get_train_examples(data_dir)\n elif split == \"dev\":\n examples = processor.get_dev_examples(data_dir)\n elif split == \"test\":\n examples = processor.get_test_examples(data_dir)\n else:\n raise NotImplementedError\n\n num_real_examples = len(examples)\n if split == \"train\" and FLAGS.shuffle_examples:\n np.random.shuffle(examples)\n if pad_for_eval:\n while len(examples) % FLAGS.eval_batch_size != 0:\n examples.append(classifier_utils.PaddingInputExample())\n num_examples = len(examples)\n\n meta_dict = {\"num_real_examples\": num_real_examples,\n \"num_examples\": num_examples}\n with tf.io.gfile.GFile(meta_path, \"w\") as fp:\n json.dump(meta_dict, fp, indent=4)\n else:\n with tf.io.gfile.GFile(meta_path, \"r\") as fp:\n meta_dict = json.load(fp)\n num_examples = meta_dict[\"num_examples\"]\n num_real_examples = meta_dict[\"num_real_examples\"]\n\n tf.logging.info(\"Num of %s samples: %d real / %d total.\", split,\n num_real_examples, num_examples)\n\n if FLAGS.overwrite_data or not tf.io.gfile.exists(file_path):\n tokenize_fn = tokenizer.convert_text_to_ids\n label_list = processor.get_labels()\n if task_name == \"sts-b\":\n file_based_convert_examples_to_features(\n examples, None, FLAGS.max_seq_length, tokenize_fn,\n file_path, FLAGS.num_passes)\n else:\n file_based_convert_examples_to_features(\n examples, label_list, FLAGS.max_seq_length, tokenize_fn,\n file_path, FLAGS.num_passes)\n else:\n tf.logging.info(\"Do not overwrite existing tfrecord %s.\", file_path)\n\n return num_examples, file_path",
"def generate_tfrecord(source_path):\n writer = tf.io.TFRecordWriter(os.path.join(source_path, \"train.tfrecords\"))\n img_dir = os.path.join(source_path, 'faces')\n for imageName in os.listdir(img_dir):\n image = Image.open(os.path.join(img_dir, imageName))\n image = image.resize((64, 64), Image.BILINEAR)\n image_raw = image.tobytes() # convert image to binary format\n example = tf.train.Example(features=tf.train.Features(feature={\n \"image_raw\": _bytes_feature(image_raw),\n }))\n writer.write(example.SerializeToString())\n writer.close()",
"def save_as_tfrecord(data, output, name):\n writer = tf.python_io.TFRecordWriter(name);\n for i in range(0, len(data)):\n inp = tf.train.Feature(float_list=tf.train.FloatList(value=data[i]));\n label = tf.train.Feature(float_list=tf.train.FloatList(value=output[i]));\n feature = {};\n feature['data'] = inp;\n feature['label'] = label;\n\n example = tf.train.Example(features=tf.train.Features(feature=feature));\n writer.write(example.SerializeToString());\n \n writer.close();",
"def gen_record(document_id, primary_doc, gen_links):\n pass",
"def _write_tfrecords_file(self, annotations, path_to_tfrecords):",
"def gen_tfrecord_from_file(out_dir, out_filename, bag_filename, timing_filename,\n flip=False, debug=False):\n packager = CNNPackager(flip)\n bag = rosbag.Bag(bag_filename)\n events_temporal_info = read_temporal_info(timing_filename)\n current_time = heapq.heappop(events_temporal_info)\n temporal_info_dict = dict()\n\n temporal_info_complete = False\n start_time = rospy.Time(bag.get_start_time())\n for topic, msg, t in bag.read_messages(topics=TOPIC_NAMES):\n if not temporal_info_complete and t > start_time + current_time[0]:\n # add the frame number anf timing label to frame dict\n temporal_info_dict[current_time[1]] = packager.get_img_frame_count()\n if len(events_temporal_info) > 0:\n current_time = heapq.heappop(events_temporal_info)\n else:\n temporal_info_complete = True\n if topic == TOPIC_NAMES[1]:\n packager.img_callback(msg)\n elif topic == TOPIC_NAMES[2]:\n packager.nao_aud_callback(msg)\n elif topic == TOPIC_NAMES[3]:\n packager.kinect_aud_callback(msg)\n\n # perform data pre-processing steps\n packager.format_output(debug, output_dir + out_filename)\n\n # generate TFRecord data\n ex = make_sequence_example(packager.get_img_stack(), img_dtype,\n packager.get_grs_stack(), grs_dtype,\n packager.get_pnt_stack(), opt_dtype,\n packager.get_nao_aud_stack(), aud_dtype,\n packager.get_kinect_aud_stack(), aud_dtype,\n temporal_info_dict, timing_filename)\n\n # write TFRecord data to file\n end_file = \".tfrecord\"\n if flip:\n end_file = \"_flip\" + end_file\n\n writer = tf.python_io.TFRecordWriter(os.path.join(out_dir, out_filename + end_file))\n writer.write(ex.SerializeToString())\n writer.close()\n\n packager.reset()\n bag.close()",
"def convert2tfrecord(self, output_path=None, data_type=\"train\"):\n feature_mapper = self.feature_map\n\n def parsing_loop(in_queue=None, out_queue=None):\n \"\"\"\n function to be executed within each parsing process.\n\n Args:\n in_queue: the queue used to store avazu data records as strings.\n out_queue: the queue used to store serialized tf.Examples as strings.\n \"\"\"\n while True: # loop.\n raw_record = in_queue.get() # read from in_queue.\n logging.debug('parsing_loop raw_example:{}'.format(raw_record))\n if raw_record == \"DONE\":\n # We were done here.\n break\n features = {} # dict for all feature columns and target column.\n # parse the record according to proto definitions.\n values = raw_record.rstrip('\\n').split(',')\n if len(values) != len(self.field_names) + 1:\n continue\n features = {self.idx_to_field_name[idx]: self._int64_feature(feature_mapper[self.idx_to_field_name[idx]][value]) for idx, value in enumerate(values)\n if self.idx_to_field_name[idx] != 'click' and value in feature_mapper[self.idx_to_field_name[idx]]}\n feature_values = {self.idx_to_field_name[idx]+':weight': self._float_feature(1) for idx, value in enumerate(values)\n if self.idx_to_field_name[idx] != 'click' and value in feature_mapper[self.idx_to_field_name[idx]]}\n\n features.update(feature_values)\n features.update({'target': self._float_feature(float(values[1]))})\n logging.debug('parsing_loop features:{}'.format(features))\n logging.debug('parsing_loop feature_values:{}'.format(feature_values))\n\n # create an instance of tf.Example.\n example = tf.train.Example(features=tf.train.Features(feature=features))\n # serialize the tf.Example to string.\n raw_example = example.SerializeToString()\n\n # write the serialized tf.Example out.\n out_queue.put(raw_example)\n\n def writing_loop(out_queue, out_file):\n \"\"\"\n function to be executed within the single writing process.\n\n Args:\n out_queue: the queue used to store serialized tf.Examples as strings.\n out_file: string, path to the TFRecord file for transformed tf.Example protos.\n \"\"\"\n writer = tf.io.TFRecordWriter(out_file) # writer for the output TFRecord file.\n sample_count = 0\n while True:\n raw_example = out_queue.get() # read from out_queue.\n logging.debug('writing_loop raw_example:{}'.format(raw_example))\n if raw_example == \"DONE\":\n break\n writer.write(raw_example) # write it out.\n sample_count += 1\n if not sample_count % 1000:\n logging.info('%s Processed %d examples' % (datetime.now(), sample_count))\n sys.stdout.flush()\n writer.close() # close the writer.\n logging.info('%s >>>> Processed %d examples <<<<' % (datetime.now(), sample_count))\n self.sample_cnt = sample_count\n sys.stdout.flush()\n\n in_queue = Queue() # queue for raw gdt training data records.\n out_queue = Queue() # queue for serialized tf.Examples.\n # start parsing processes.\n num_parsers = int(multiprocessing.cpu_count() - 2)\n parsers = []\n for i in range(num_parsers):\n p = Process(target=parsing_loop, args=(in_queue, out_queue))\n parsers.append(p)\n p.start()\n\n # start writing process.\n writer = Process(target=writing_loop, args=(out_queue, output_path))\n writer.start()\n logging.info('%s >>>> BEGIN to feed input file %s <<<<' % (datetime.now(), self.path))\n # read a record in.\n with open(self.path) as f:\n f.readline()\n pbar = tqdm(f, mininterval=1, smoothing=0.1)\n pbar.set_description('reading avazu dataset')\n line_num = 0\n train_cnt = 0\n test_cnt = 0\n for line in pbar:\n if line_num == 0:\n line_num += 1\n continue\n if data_type 
== \"train\":\n if \"141030\" in line.rstrip('\\n').split(',')[2]:\n test_cnt += 1\n continue\n train_cnt += 1\n else:\n if \"141030\" not in line.rstrip('\\n').split(',')[2]:\n continue\n in_queue.put(line) # write to in_queue.\n self.train_cnt = train_cnt\n self.test_cnt = test_cnt\n # terminate and wait for all parsing processes.\n for i in range(num_parsers):\n in_queue.put(\"DONE\")\n for i in range(num_parsers):\n parsers[i].join()\n\n # terminate and wait for the writing process.\n out_queue.put(\"DONE\")\n writer.join()\n logging.info('%s >>>> END of consuming input file %s <<<<' % (datetime.now(), self.path))\n sys.stdout.flush()",
"def writeTFrecords(tfrecords_filename, filenames, prediction_time):\n # intialize a file identifier\n subjectId = 0\n # process all filenames into a training and testing data -TF records\n for file in filenames:\n # numpy loadtxt for file with column names and formats\n print(file)\n data_cond = np.loadtxt(file,dtype={'names': ['Period', 'Block', 'Trial','Trial_id','x_ord','y_ord'], \n 'formats': ['S3', 'S7' ,'S6','i4', 'i4', 'i4']}, delimiter=\"\\t\",skiprows=1)\n # name to save TF records\n sName = file.replace('.txt','')\n saveName = sName.split(\"/\")\n # display current file being processed\n tfrecords_train_savename = \"data/tfrecords/\"+saveName[-1]+\"_train_\"+tfrecords_filename\n print(tfrecords_train_savename)\n tfrecords_test_savename = \"data/tfrecords/\"+saveName[-1]+\"_test_\"+tfrecords_filename\n # open recordwriters for training and testing data\n testWriter = tf.io.TFRecordWriter(tfrecords_test_savename+'.tfrecords')\n \n # process text to convert text labels to numerical indicators\n period = processText(data_cond['Period'],0)\n print(period.shape)\n block = processText(data_cond['Block'],0, period)\n [stim, foil, pos] = processText(data_cond['Trial'],1) \n # read input data\n x_ord = data_cond['x_ord']\n y_ord = data_cond['y_ord']\n trial_id = data_cond['Trial_id']\n \n # process input data to create dervied vectors\n x_diff = np.append(0.0,np.diff(x_ord))\n y_diff = np.append(0.0,np.diff(y_ord))\n thetas = np.arctan2(y_diff, x_diff)\n speed = np.sqrt((x_diff*x_diff) + (y_diff*y_diff))\n x_vel = speed * np.cos(thetas)\n y_vel = speed * np.sin(thetas)\n x_acc = np.append(0.0, np.diff(x_vel))\n y_acc = np.append(0.0, np.diff(y_vel))\n \n # store data from future in the same example to feed into algorithm\n out_x = np.append(x_ord[prediction_time:],[-1]*prediction_time)\n out_y = np.append(y_ord[prediction_time:],[-1]*prediction_time)\n\n out_xacc = np.append([0.0]*prediction_time, x_acc[0:(len(x_acc)-prediction_time)] )\n out_yacc = np.append([0.0]*prediction_time, y_acc[0:(len(y_acc)-prediction_time)] )\n\n out_xvel = np.append(x_vel[prediction_time:], [-1]*prediction_time)\n out_yvel = np.append(y_vel[prediction_time:], [-1]*prediction_time)\n \n subjectId = subjectId + 1\n trial_id_prev = 0\n timer = 0\n \n # generate an example for each time point\n prev_block = 0\n time_after_stim = np.array([],dtype=np.int32)\n prev_pos_arr = np.array([],dtype=np.int32)\n uniq_block = np.unique(block)\n prev_pos = 1\n\n for idx,trial_num in enumerate(trial_id):\n if trial_id_prev != trial_id[idx]:\n timer = 1\n trial_id_prev = trial_id[idx]\n if idx > 0:\n prev_pos = pos[idx-1]\n time_after_stim = np.append(time_after_stim,timer)\n prev_pos_arr = np.append(prev_pos_arr,prev_pos)\n timer = timer+1\n\n for curr_block in uniq_block:\n # open recordwriters for training and testing data\n blk_ids = np.where(block == curr_block)[0] \n trainWriter = tf.io.TFRecordWriter(tfrecords_train_savename+'_block_'+str(curr_block)+'.tfrecords')\n # print(np.shape(blk_ids), type(blk_ids))\n # generate example with features\n example = tf.train.Example(features=tf.train.Features(feature={\n 'Subject' : _int64_feature(np.repeat(subjectId,np.size(blk_ids)) ), # 1\n 'period' : _int64_feature(period[blk_ids]), # 2\n 'block' : _int64_feature(block[blk_ids]), # 3\n 'stim' : _int64_feature(stim[blk_ids]), # 4\n 'foilInd' : _int64_feature(foil[blk_ids]), # 5\n 'pos' : _int64_feature(pos[blk_ids]), # 6\n 'trial_id': _int64_feature(trial_id[blk_ids]), # 7\n 'x_ord' : _float_feature(x_ord[blk_ids]), # 8\n 'y_ord' 
: _float_feature(y_ord[blk_ids]), # 9\n 'x_vel' : _float_feature(x_vel[blk_ids]), # 10\n 'y_vel' : _float_feature(y_vel[blk_ids]), # 11\n 'x_acc' : _float_feature(x_acc[blk_ids]), # 12\n 'y_acc' : _float_feature(y_acc[blk_ids]), # 13\n 'out_x' : _float_feature(out_x[blk_ids]), # 14\n 'out_y' : _float_feature(out_y[blk_ids]), # 15\n 'out_xvel' : _float_feature(out_xvel[blk_ids]), # 16\n 'out_yvel' : _float_feature(out_yvel[blk_ids]), # 17\n 'out_xacc' : _float_feature(out_xacc[blk_ids]), # 18\n 'out_yacc' : _float_feature(out_yacc[blk_ids]), # 19\n 'time_after_stim' : _int64_feature(time_after_stim[blk_ids]), # 20\n 'prev_pos' : _int64_feature(prev_pos_arr[blk_ids]) # 21\n }))\n\n trainWriter.write(example.SerializeToString())\n testWriter.write(example.SerializeToString())\n trainWriter.close()\n\n testWriter.close()",
"def dataset_creator(in_file, outfile, feature_extractor, *args):\n df = pd.read_csv(in_file)\n id = 0\n with tf.python_io.TFRecordWriter(outfile+\".tfrecords\") as writer:\n \n for index, row in df.iterrows():\n if(index % 100 == 0):\n print(\"Digesting\",row['Datafile'])\n if(row['Label'] in classes):\n path = row['Datafile']\n data = pd.read_csv(data_path+path).values\n\n label = classes.index(row['Label'])\n subject = int(row['Subject'][-2:])\n extracted_featurelist = feature_extractor(data, args[0])\n\n serialized_example = serialize_example(extracted_featurelist, label, id, subject)\n id = id + 1\n writer.write(serialized_example)\n else:\n print(row['Label'],\"not in known classes!\")",
"def print_record(self):",
"def _create_metadata(self):\n\n # Creates model info.\n model_meta = _metadata_fb.ModelMetadataT()\n model_meta.name = self.model_info.name\n model_meta.description = self.model_info.description\n model_meta.version = self.model_info.version\n model_meta.author = \"TensorFlow Lite Model Maker\"\n model_meta.license = (\"Apache License. Version 2.0 \"\n \"http://www.apache.org/licenses/LICENSE-2.0.\")\n\n # Creates input info.\n input_meta = _metadata_fb.TensorMetadataT()\n input_meta.name = \"input_text\"\n input_meta.description = (\n \"Embedding vectors representing the input text to be classified. The \"\n \"input need to be converted from raw text to embedding vectors using \"\n \"the attached dictionary file.\")\n # Create the vocab file.\n vocab_file = _metadata_fb.AssociatedFileT()\n vocab_file.name = os.path.basename(self.associated_files[1])\n vocab_file.description = (\"Vocabulary file to convert natural language \"\n \"words to embedding vectors.\")\n vocab_file.type = _metadata_fb.AssociatedFileType.VOCABULARY\n\n # Create the RegexTokenizer.\n tokenizer = _metadata_fb.ProcessUnitT()\n tokenizer.optionsType = (\n _metadata_fb.ProcessUnitOptions.RegexTokenizerOptions)\n tokenizer.options = _metadata_fb.RegexTokenizerOptionsT()\n tokenizer.options.delimRegexPattern = self.model_info.delim_regex_pattern\n tokenizer.options.vocabFile = [vocab_file]\n\n input_meta.content = _metadata_fb.ContentT()\n input_meta.content.contentPropertiesType = (\n _metadata_fb.ContentProperties.FeatureProperties)\n input_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()\n input_meta.processUnits = [tokenizer]\n\n # Creates output info.\n output_meta = _metadata_fb.TensorMetadataT()\n output_meta.name = \"probability\"\n output_meta.description = \"Probabilities of the labels respectively.\"\n output_meta.content = _metadata_fb.ContentT()\n output_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()\n output_meta.content.contentPropertiesType = (\n _metadata_fb.ContentProperties.FeatureProperties)\n output_stats = _metadata_fb.StatsT()\n output_stats.max = [1.0]\n output_stats.min = [0.0]\n output_meta.stats = output_stats\n label_file = _metadata_fb.AssociatedFileT()\n label_file.name = os.path.basename(self.associated_files[0])\n label_file.description = (\"Labels for the categories that the model can \"\n \"classify.\")\n label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS\n output_meta.associatedFiles = [label_file]\n\n # Creates subgraph info.\n subgraph = _metadata_fb.SubGraphMetadataT()\n subgraph.inputTensorMetadata = [input_meta]\n subgraph.outputTensorMetadata = [output_meta]\n model_meta.subgraphMetadata = [subgraph]\n\n b = flatbuffers.Builder(0)\n b.Finish(\n model_meta.Pack(b),\n _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)\n self.metadata_buf = b.Output()",
"def makeTxt():\n print('start')\n model = KeyedVectors.load_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\LSTM-PICO-Detection-master\\\\other_spyder\\\\Extended embeddings\\\\2019-07-19-09-34-51-bigrams_FINAL.bin', binary=True)#, limit = 20 for tests\n model.save_word2vec_format('\\\\\\\\smbhome.uscs.susx.ac.uk\\\\ls612\\\\Documents\\\\Dissertation\\\\Data\\\\extended.txt', binary=False)\n print('done creating text files')",
"def convert_to_tfrecords_v2(self,filename, data_set, label):\n n=len(data_set)\n if data_set.shape[0] != n:\n raise ValueError('Images size %d does not match label size %d.' %(data_set.shape[0], n))\n rows = data_set.shape[1]\n cols = data_set.shape[2]\n depth = data_set.shape[3]\n\n with tf.io.TFRecordWriter(filename) as writer:\n for index in range(n):\n image_raw = data_set[index].tostring()\n l=int(label[index])\n example = tf.train.Example(\n features=tf.train.Features(\n feature={'height': self._int64_feature(rows),'width': self._int64_feature(cols),'depth': self._int64_feature(depth),'label': self._int64_feature(l),'image': self._bytes_feature(image_raw)}\n )\n )\n writer.write(example.SerializeToString())",
"def build_tfrecord_input(training=True, num_epochs=None):\n train_2012_filenames = gfile.Glob(os.path.join(\"/home/wangyang59/Data/ILSVRC2016_tf_kitti_2012_train_hist\", '*'))\n train_2015_filenames = gfile.Glob(os.path.join(\"/home/wangyang59/Data/ILSVRC2016_tf_kitti_2015_train_hist\", '*'))\n val_2015_filenames = gfile.Glob(os.path.join(\"/home/wangyang59/Data/ILSVRC2016_tf_kitti_2015_val_hist_fullsize\", '*'))\n \n if training:\n filenames = val_2015_filenames\n else:\n filenames = val_2015_filenames\n #filenames = filenames[:index]\n filename_queue = tf.train.string_input_producer(filenames, shuffle=False, num_epochs=num_epochs)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n features = {\"image1_raw\": tf.FixedLenFeature([1], tf.string),\n \"image2_raw\": tf.FixedLenFeature([1], tf.string),\n \"scene\": tf.FixedLenFeature([1], tf.string),\n \"img_size\": tf.FixedLenFeature([1], tf.string)}\n \n features = tf.parse_single_example(serialized_example, features=features)\n \n image1_buffer = tf.reshape(features[\"image1_raw\"], shape=[])\n image1 = tf.image.decode_jpeg(image1_buffer, channels=COLOR_CHAN)\n image1.set_shape([RESIZE_HEIGHT, RESIZE_WIDTH, COLOR_CHAN])\n image1 = tf.cast(image1, tf.float32) / 255.0\n \n image2_buffer = tf.reshape(features[\"image2_raw\"], shape=[])\n image2 = tf.image.decode_jpeg(image2_buffer, channels=COLOR_CHAN)\n image2.set_shape([RESIZE_HEIGHT, RESIZE_WIDTH, COLOR_CHAN])\n image2 = tf.cast(image2, tf.float32) /255.0\n \n file_name = features['scene']\n img_size = tf.decode_raw(features[\"img_size\"], tf.float32)\n img_size.set_shape([1, 2])\n \n if training:\n image_batch = tf.train.shuffle_batch(\n [image1, image2],\n FLAGS.batch_size,\n num_threads=FLAGS.batch_size,\n capacity=100 * FLAGS.batch_size,\n min_after_dequeue=50 * FLAGS.batch_size,\n enqueue_many=False)\n else:\n image_batch = tf.train.batch(\n [image1, image2, file_name, img_size],\n FLAGS.batch_size / FLAGS.num_gpus,\n #num_threads=FLAGS.batch_size / FLAGS.num_gpus,\n num_threads=1,\n capacity=10 * FLAGS.batch_size,\n #min_after_dequeue=5 * FLAGS.batch_size,\n enqueue_many=False)\n\n return image_batch",
"def create_genbank(fasta_file, UPLOAD_FOLDER, phage_id, payload):\n headers = payload.get_json()\n gb_file = os.path.join(UPLOAD_FOLDER, phage_id + \".gb\")\n genome = SeqIO.read(fasta_file, \"fasta\").seq\n genome = Seq(str(genome), IUPAC.unambiguous_dna)\n record = SeqRecord(genome, id='', name=headers[\"phageName\"], description=headers[\"source\"])\n ##FIXME\n record.annotations[\"AUTHORS\"] = \"Becker, L.W.\"\n record.annotations[\"Reference\"] = \"whole thing\"\n\n qualifiers = {}\n qualifiers[\"organism\"] = headers[\"source\"]\n qualifiers[\"mol_type\"] = headers[\"molType\"]\n qualifiers[\"isolation_source\"] = headers[\"isolationSource\"]\n qualifiers[\"lab_host\"] = headers[\"labHost\"]\n qualifiers[\"country\"] = headers[\"country\"]\n qualifiers[\"identified_by\"] = headers[\"identifiedBy\"]\n qualifiers[\"note\"] = headers[\"notes\"]\n feature = SeqFeature(FeatureLocation(start=0, end=len(genome)), type='source', qualifiers=qualifiers)\n record.features.append(feature)\n\n idNumber = 0\n for cds in Annotations.query.filter_by(phage_id=phage_id).order_by(Annotations.left).all():\n if (cds.function == \"@DELETED\" or cds.status == \"trnaDELETED\"):\n continue\n idNumber += 1\n if cds.strand == '-':\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = str(idNumber)\n if headers[\"includeNotes\"]:\n qualifiers[\"note\"] = cds.notes\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='gene', qualifiers=qualifiers)\n record.features.append(feature)\n if cds.status == \"tRNA\":\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"note\"] = cds.function\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='tRNA', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"codon_left\"] = [1]\n qualifiers[\"transl_table\"] = [11]\n pattern = re.compile(\"@(.*)##(.*)\")\n matches = pattern.search(cds.function)\n if matches:\n qualifiers[\"product\"] = matches.group(1)\n qualifiers[\"protein_id\"] = matches.group(2)\n else:\n qualifiers[\"product\"] = \"Hypothetical Protein\"\n qualifiers[\"protein_id\"] = \"unknown:\" + qualifiers[\"locus_tag\"]\n left = len(genome) - cds.right\n right = len(genome) - cds.left + 1\n qualifiers[\"translation\"] = Seq.translate(helper.get_sequence(genome, cds.strand, left, right), table=11)[0:-1]\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='CDS', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n if headers[\"includeNotes\"]:\n qualifiers[\"note\"] = cds.notes\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), id=cds.id[0:-1] + str(idNumber), type='gene', qualifiers=qualifiers)\n record.features.append(feature)\n if cds.status == \"tRNA\":\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"note\"] = cds.function\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), 
id=cds.id[0:-1] + str(idNumber), type='tRNA', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"codon_left\"] = [1]\n qualifiers[\"transl_table\"] = [11]\n pattern = re.compile(\"@(.*)##(.*)\")\n matches = pattern.search(cds.function)\n if matches:\n qualifiers[\"product\"] = matches.group(1)\n qualifiers[\"protein_id\"] = matches.group(2)\n else:\n qualifiers[\"product\"] = \"Hypothetical Protein\"\n qualifiers[\"protein_id\"] = \"unknown:\" + qualifiers[\"locus_tag\"]\n qualifiers[\"translation\"] = Seq.translate(helper.get_sequence(genome, cds.strand, cds.left - 1, cds.right), table=11)[0:-1]\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), id=cds.id[0:-1] + str(idNumber), type='CDS', qualifiers=qualifiers)\n record.features.append(feature)\n with open(gb_file, 'w') as genbank:\n SeqIO.write(record, genbank, 'genbank')\n new_lines = []\n with open (gb_file, 'r') as genbank:\n lines = genbank.readlines()\n for index, line in enumerate(lines):\n if index is 0:\n new_lines.append(line[0:-28] + \" linear \" + datetime.now().strftime('%d-%b-%Y').upper() + '\\n')\n elif index is 5 or index is 6:\n if headers[\"source\"] != \"\":\n new_lines.append(line[0:-2] + headers[\"source\"] + '\\n')\n elif index is 7:\n if headers[\"organism\"] != \"\":\n new_lines.append(line[0:-2] + headers[\"organism\"] + '\\n')\n new_lines.append(\"REFERENCE 1 (bases 1 to \" + str(len(genome)) + \")\\n\")\n long_line = \" AUTHORS \" + headers[\"authors\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n long_line = \" TITLE \" + headers[\"title\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n long_line = \" JOURNAL \" + headers[\"journal\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n else:\n new_lines.append(line)\n with open (gb_file, 'w') as genbank:\n genbank.writelines(new_lines)\n return gb_file",
"def write_data_to_tf(self, filename, tfrecord_name):\n writer = tf.python_io.TFRecordWriter(tfrecord_name)\n with open(filename) as fin_data:\n for line in fin_data:\n example = self.func(line)\n writer.write(example.SerializeToString())\n writer.close()",
"def _create_tfrecord(filenames, name, num_images):\n tfrecords_filename = path_resolver.resolve_data_path(TF_RECORD_LOC[name])\n (tfrecords_filename.parent).mkdir(parents=True, exist_ok=True)\n\n progress_bar = tqdm(filenames[:num_images])\n with tf.io.TFRecordWriter(str(tfrecords_filename)) as writer:\n for i, (img_path, label_index) in enumerate(progress_bar):\n img_jpeg = open(img_path, 'rb').read()\n img = np.array(Image.open(img_path))\n height = img.shape[0]\n width = img.shape[1]\n progress_bar.set_description(f\"{name} #{i}: {img_path}\")\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'label_index': _int64_feature(label_index),\n 'image_name': _bytes_feature(str.encode(os.path.basename(img_path))),\n 'image_jpeg': _bytes_feature(img_jpeg)}))\n writer.write(example.SerializeToString())\n return i + 1",
"def write_to_tfrecord(dataset_path):\n # Create TFRecords file:\n filename = join(dataset_path, 'dataset.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n\n # Convert (image, steering) pair to Tensorflow Example\n print(\"Convert {} to TFRecords...\".format(dataset_path))\n start_time = time.time()\n\n # Initialize dataset:\n dataset = Dataset(args[\"dataset\"])\n # Initialize progress bar status:\n i = 0\n N = dataset.N\n # Convert:\n for (images, steerings) in iter(dataset):\n for image, steering in zip(images, steerings):\n # Write example:\n example = _create_example(image, steering)\n writer.write(example.SerializeToString())\n # Update progress bar:\n i += 1\n show_progress_bar(\n i, 6*N,\n prefix = 'Progress:', suffix = 'Complete',\n length = 50\n )\n print(\"[Time Elapsed]: {:.2f} seconds\".format(time.time() - start_time))\n print(\"TFRecords generated.\")\n\n # Finally:\n writer.close()\n\n return filename"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
OK button (default): loads the specified file from the ~/Documentos/LKE_framework/object_detection/samples/configs/ folder
|
def cargarConfigs(self):
    try:
        # Build the path to the selected config file under samples/configs/
        self.dirModelConfig = os.path.join(OBJECTDETECTIONPATH, "samples/configs/{}".format(str(self.ui.lineE9.text())))
        print("Modelo NUEVO seleccionado: {}".format(str(self.dirModelConfig)))
        # Read the config file and show its contents in the text editor
        with open(self.dirModelConfig, 'r') as file:
            text = file.read()
        self.ui.textEdit1.setText(text)
        # Enable the download and cancel-download buttons
        self.ui.downModel.setEnabled(1)
        self.ui.pBCancelDown.setEnabled(1)
    except Exception as ex:
        print(ex)
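
For reference, a minimal standalone sketch of the same idea (read a pipeline config from samples/configs/ and return its text); the OBJECTDETECTIONPATH value and the helper name are assumptions for illustration, not part of the original UI class:

import os

# Assumed install location; the real value comes from the application's configuration.
OBJECTDETECTIONPATH = os.path.expanduser("~/Documentos/LKE_framework/object_detection")

def load_config_text(config_name):
    """Return the text of a pipeline config under samples/configs/, or None on error."""
    config_path = os.path.join(OBJECTDETECTIONPATH, "samples/configs", config_name)
    try:
        with open(config_path, "r") as fh:
            return fh.read()
    except OSError as ex:
        print(ex)
        return None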
|
[
"def test_object_detection(init_env, config):\n config_file = generate(config)\n run_all_steps(init_env, config_file)",
"def ensure_config_path() -> str:\n home = os.path.expanduser('~')\n mce_config_dir = os.path.join(home, '.mce')\n if not os.path.exists(mce_config_dir):\n print(f'creating config dir at {mce_config_dir}')\n os.mkdir(mce_config_dir, mode=0o755)\n model_path = os.path.join(mce_config_dir, \"models\")\n if not os.path.exists(model_path):\n os.mkdir(model_path, 0o755)\n print(f'copying models into {model_path}')\n model_file = os.path.join(\n mce.DEEPSTREAM_MODELS_ROOT, \"Primary_Detector/resnet10.caffemodel\")\n proto_file = os.path.join(\n mce.DEEPSTREAM_MODELS_ROOT, \"Primary_Detector/resnet10.prototxt\")\n label_file_path = os.path.join( # why not label_file?\n mce.DEEPSTREAM_MODELS_ROOT, \"Primary_Detector/labels.txt\")\n int8_calib_file = os.path.join(\n mce.DEEPSTREAM_MODELS_ROOT, \"Primary_Detector/cal_trt.bin\")\n for f in (model_file, proto_file, label_file_path, int8_calib_file):\n target = os.path.join(model_path, os.path.basename(f))\n # if it's not fixed by deepstream 5.0...\n # if os.access(f, os.W_OK):\n # sys.stderr.write(\n # f\"WARNING: {f} is writable. This is a security risk.\")\n if not os.path.exists(target):\n print(f'cpoying {f} to {target}')\n shutil.copy(f, target)\n os.chmod(target, 0o644)\n return mce_config_dir",
"def test_config():\n check_model_exist()\n test_suite = InferenceTest()\n test_suite.load_config(model_path=\"./resnet50_quant/resnet50_quant\")\n test_suite.config_test()",
"def test_get_configs_from_multiple_files(self):\n temp_dir = self.get_temp_dir()\n\n # Write model config file.\n model_config_path = os.path.join(temp_dir, \"model.config\")\n model = model_pb2.DetectionModel()\n model.faster_rcnn.num_classes = 10\n _write_config(model, model_config_path)\n\n # Write train config file.\n train_config_path = os.path.join(temp_dir, \"train.config\")\n train_config = train_config = train_pb2.TrainConfig()\n train_config.batch_size = 32\n _write_config(train_config, train_config_path)\n\n # Write train input config file.\n train_input_config_path = os.path.join(temp_dir, \"train_input.config\")\n train_input_config = input_reader_pb2.InputReader()\n train_input_config.label_map_path = \"path/to/label_map\"\n _write_config(train_input_config, train_input_config_path)\n\n # Write eval config file.\n eval_config_path = os.path.join(temp_dir, \"eval.config\")\n eval_config = eval_pb2.EvalConfig()\n eval_config.num_examples = 20\n _write_config(eval_config, eval_config_path)\n\n # Write eval input config file.\n eval_input_config_path = os.path.join(temp_dir, \"eval_input.config\")\n eval_input_config = input_reader_pb2.InputReader()\n eval_input_config.label_map_path = \"path/to/another/label_map\"\n _write_config(eval_input_config, eval_input_config_path)\n\n configs = config_util.get_configs_from_multiple_files(\n model_config_path=model_config_path,\n train_config_path=train_config_path,\n train_input_config_path=train_input_config_path,\n eval_config_path=eval_config_path,\n eval_input_config_path=eval_input_config_path)\n self.assertProtoEquals(model, configs[\"model\"])\n self.assertProtoEquals(train_config, configs[\"train_config\"])\n self.assertProtoEquals(train_input_config,\n configs[\"train_input_config\"])\n self.assertProtoEquals(eval_config, configs[\"eval_config\"])\n self.assertProtoEquals(eval_input_config, configs[\"eval_input_configs\"][0])",
"def _checkModelConfig(self):\n if (self.modelConfig.__eq__('')):\n print('Debe cargar primero el archivo de configuración')\n self.statusBar().showMessage('Debe cargar primero el archivo de configuración')\n return False\n else:\n return True #true porque no esta vacio",
"def test_correct_file_load_supplied_engine_success(self):\n MODEL_NAME, SAMPLE_PACKAGE = SAMPLE_PACKAGES['md']\n self.loader.load_fdp_to_db(SAMPLE_PACKAGE, config.get_engine())\n self.cm = model_registry.ModelRegistry()\n self.assertGreater(len(list(self.cm.list_models())), 0, 'no dataset was loaded')",
"def _read_config(self):\n self.tomldoc = toml.loads(self.path.read_text())\n if not self.tomldoc[\"storage_root\"]:\n path = Path.home() / \"planetarypy_data\"\n path.mkdir(exist_ok=True)\n self.tomldoc[\"storage_root\"] = str(path)\n self.storage_root = path\n self.save()\n else:\n self.storage_root = Path(self.tomldoc[\"storage_root\"])",
"def test_setup_merged_samples(self):\n flist = find_samples(j_doe_00_05)\n setup_merged_samples(flist, **{'dry_run':False})\n with open(os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3-bcbb-config.yaml\")) as fh:\n conf = yaml.load(fh)\n self.assertEqual(conf[\"details\"][0][\"files\"][0], os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3_B002BBBXX_TGACCA_L001_R1_001.fastq.gz\"))",
"def use_samples_config():\n os.environ[\"TS_COLAB_CONFIG_DIR\"] = str(\n PROJECT_ROOT / \"tests\" / \".config\" / \"samples\"\n )",
"def test_unscanned_save_creates_file(files_dir):\n swords_dir = pathlib.Path(SWORDS_DIR)\n asset_dir = data.recursive_load_asset_dir(swords_dir)\n\n data.recursive_save_asset_dir(asset_dir)\n\n # Check if the file got saved.\n config_file = swords_dir.joinpath(data.load_scan_save.CONFIG_FILE_NAME)\n assert config_file.is_file()",
"def produce_model_config(self):\n file_template = open('[PATH_TO_BE_MODIFIED]/faster_resnet_101_template.config', 'r')\n file_output = open(self.outputModelPath.text()+'/faster_resnet_101.config', 'w')\n\n for line in file_template.readlines():\n if '[NUM_CLASSES]' in line.strip():\n file_output.write(' num_classes: '+str(self.num_classes)+'\\n')\n elif '[MIN_DIMENSION]' in line.strip():\n file_output.write(' min_dimension: '+self.minImageDimText.text()+'\\n')\n elif '[MAX_DIMENSION]' in line.strip():\n file_output.write(' max_dimension: '+self.maxImageDimText.text()+'\\n')\n elif '[FINE_TUNE_CHECKPOINT]' in line.strip():\n file_output.write(' fine_tune_checkpoint: \"'+self.pretrainedModel.text()+'\"\\n')\n elif '[TRAIN_INPUT_PATH]' in line.strip():\n file_output.write(' input_path: \"'+self.trainingData.text()+'\"\\n')\n elif '[EVAL_INPUT_PATH]' in line.strip():\n file_output.write(' input_path: \"[PATH_TO_BE_MODIFIED]/val.record\"\\n')\n elif '[LABEL_MAP_PATH]' in line.strip():\n file_output.write(' label_map_path: \"'+self.mapTextForMod.text()+'\"\\n')\n else:\n file_output.write(line)\n\n file_template.close()\n file_output.close()\n\n msgBox = QMessageBox()\n msgBox.setIcon(msgBox.Information)\n msgBox.setText(\"A config file 'faster_resnet_101.config' is automatically \\n\"+\n \"created under directory '\"+self.outputModelPath.text()+\"'.\\n\"+\n \"Please don't delete it. You still need it for exporting your\\n\"+\n \"final model for inference.\")\n msgBox.exec_()",
"def train_from_cfg_file(config: Dict[str, Dict[str, Any]]) -> None:\n # Print configuration file title\n print(\"[\" + (\"=\" * (len(config[\"title\"]) + 18)) + \"]\")\n print(f\"[======== {config['title']} ========]\")\n\n # Get date\n d = date.today().isoformat()\n\n # Create results directory\n for dn in [\"./results\", \"./results/models\", \"./results/logs\"]:\n if not os.path.exists(dn):\n os.makedirs(dn)\n\n # Deserialize configuration file\n parameters = config[\"params\"]\n epochs = config[\"run\"][\"epochs\"]\n ensemble = config[\"run\"][\"ensemble\"]\n datasets = config[\"datasets\"]\n\n # Iterate through datasets\n for dataset in datasets:\n\n if datasets[dataset]:\n print(f\"[==> {dataset}\")\n\n # Prepare output directory\n if not os.path.exists(f\"results/models/{dataset}\"):\n os.makedirs(f\"results/models/{dataset}\")\n\n if dataset == \"families\":\n\n for famid, fn in CATALOG[dataset].items():\n\n # Create basename for files\n basename = f\"model-famid-{famid}-{parameters['hidden-neurons']}-{parameters['optimizer']}-{parameters['lc-layer']}-{d}\"\n\n # Prepare prefix\n if not os.path.exists(f\"results/models/{dataset}/famid{famid}\"):\n os.makedirs(f\"results/models/{dataset}/famid{famid}\")\n prefix = f\"results/models/{dataset}/famid{famid}/{basename}\"\n\n # Prepare log file\n log = f\"results/logs/{basename}.log\"\n\n train_ensemble(fn, prefix, log, parameters, epochs, ensemble)\n\n else:\n # Create basename for files\n basename = f\"model-{dataset}-{parameters['hidden-neurons']}-{parameters['optimizer']}-{parameters['lc-layer']}-{d}\"\n if 'without' in config['title']:\n t = config['title']\n basename += f\"-{t[t.find('without'):]}\"\n\n # Prepare output prefix\n prefix = f\"results/models/{dataset}/{basename}\"\n\n # Prepare log file\n log = f\"results/logs/{basename}.log\"\n\n # Training DiabNet for dataset\n train_ensemble(\n CATALOG[dataset], prefix, log, parameters, epochs, ensemble\n )\n\n print(\"[\" + (\"=\" * (len(config[\"title\"]) + 18)) + \"]\")",
"def open_configuration(self):\n msg, fil = RegistryHelpers.GetFilenameFromUserQT(self, RegistryKey='kluster',\n Title='Open a kluster configuration file',\n AppName='klustersave', bMulti=False, bSave=False,\n fFilter='Kluster configuration (*.kfc)')\n if fil:\n if os.path.exists(fil):\n self.vessview_window.clear_sensors()\n with open(fil, 'r') as json_fil:\n self.xyzrph = json.load(json_fil)\n self.load_from_existing_xyzrph()\n else:\n print('Unable to find file: {}'.format(fil))\n else:\n print('Open cancelled')",
"def test_load_upload(self):\n pass",
"def load_data(config):\n # Download if not already done\n url = 'http://cs231n.stanford.edu/'\n fname = 'tiny-imagenet-200.zip'\n md5_sum = '90528d7ca1a48142e341f4ef8d21d0de'\n fpath = _maybe_download(url, fname, md5_sum)\n\n # Extract content if not already done\n main_dir = _maybe_extract(fpath, \"Tiny-Imagenet\")\n train_dir = os.path.join(main_dir, \"train\")\n test_dir = os.path.join(main_dir, \"test\")\n val_dir = os.path.join(main_dir, \"val\")\n\n # Load wnids.txt\n with open(os.path.join(main_dir, \"wnids.txt\")) as f:\n globals()['labels'] = f.read().splitlines()\n\n with open(os.path.join(main_dir, \"words.txt\"), 'r') as fp:\n reader = csv.reader(fp, delimiter='\\t', quotechar='\"')\n # filename, class, bb, bb, bb, bb\n words = [row for row in reader]\n wnid2word = {}\n for wnid, word in words:\n wnid2word[wnid] = word\n\n globals()['wnid2word'] = wnid2word\n\n # Get labeled training data\n pickle_fpath = os.path.join(train_dir, \"data.pickle\")\n if not os.path.exists(pickle_fpath):\n # Get train data\n x_train, y_train = _get_train_data(train_dir, \"train_img_paths\")\n x_test = _get_test_data(test_dir, \"test_img_paths\")\n x_val, y_val = _get_val_data(val_dir, \"val_img_paths\")\n\n data = {'x_train': x_train, 'y_train': y_train,\n 'x_val': x_val, 'y_val': y_val,\n 'x_test': x_test,\n 'train_img_paths': globals()['train_img_paths'],\n 'test_img_paths': globals()['test_img_paths'],\n 'val_img_paths': globals()['val_img_paths']}\n\n # Store data as pickle to speed up later calls\n with open(pickle_fpath, 'wb') as f:\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n with open(pickle_fpath, 'rb') as f:\n data = pickle.load(f)\n\n perm = np.random.permutation(len(data['x_train']))\n data['x_train'] = data['x_train'][perm]\n data['y_train'] = data['y_train'][perm]\n\n if K.image_dim_ordering() == 'th':\n data['x_train'] = data['x_train'].transpose(0, 2, 3, 1)\n data['x_test'] = data['x_test'].transpose(0, 2, 3, 1)\n\n return data",
"def test_find_samples_from_file(self):\n with open(os.path.join(j_doe_00_05, \"P001_101_index3-bcbb-config.yaml\"), \"w\") as fh:\n fh.write(\"\\n\")\n flist = find_samples(j_doe_00_05, sample=os.path.join(j_doe_00_05, \"samples.txt\"))\n validate_sample_directories(flist, j_doe_00_05)\n self.assertEqual(len(flist),2)\n os.unlink(os.path.join(j_doe_00_05, \"P001_101_index3-bcbb-config.yaml\"))",
"def config():\n model = None\n while model not in {\"NOTE\", \"NOTE_DURATION\"}:\n model = input(\"NOTE or NOTE_DURATION? (type one of the options in all caps): \")\n\n train = None\n while train not in {\"y\", \"n\"}:\n train = input(\"Do you want to train and save? (y/n): \")\n\n load = None\n while load not in {\"y\", \"n\"}:\n load = input(\"Do you want to load and generate? (only say yes if you said yes in the previous question, or have trained before) (y/n): \")\n\n if train == \"y\":\n composer = None\n while composer not in {\"Bach\", \"Mozart\", \"Beethoven\", \"Scarlatti\", \"Chopin\", \"Liszt\"}:\n composer = input(\"Pick a composer: Bach, Mozart, Beethoven, Scarlatti, Chopin, Liszt: \")\n\n one_track = \"n\"\n if composer != \"Scarlatti\":\n one_track = input(\"OneTrack (y/n): \")\n\n note_gen_epochs = int(input(\"How many NoteGen epochs? (int): \"))\n duration_gen_epochs = None\n if model == \"NOTE_DURATION\":\n duration_gen_epochs = int(input(\"How many DurationGen epochs? (int): \"))\n\n if one_track == \"y\":\n file_path_training_data = \"./OneTrackData/\" + composer\n file_path_save_data = \"./Dict Data/\" + \"OneTrack\" + composer\n file_path_save_weights = \"./Trained Weights/\" + model + \"_\" + \"OneTrack\" + composer + \",{},{}\".format(note_gen_epochs, duration_gen_epochs)\n file_path_read_weights = \"./Trained Weights/\" + model + \"_\" + \"OneTrack\" + composer + \",{},{}\".format(note_gen_epochs, duration_gen_epochs)\n else:\n file_path_training_data = \"./data/\" + composer\n file_path_save_data = \"./Dict Data/\" + \"MultiTrack\" + composer\n file_path_save_weights = \"./Trained Weights/\" + model + \"_\" + \"MultiTrack\" + composer + \",{},{}\".format(note_gen_epochs,\n duration_gen_epochs)\n file_path_read_weights = \"./Trained Weights/\" + model + \"_\" + \"MultiTrack\" + composer + \",{},{}\".format(note_gen_epochs,\n duration_gen_epochs)\n else:\n file_path_training_data = None\n note_gen_epochs = None\n duration_gen_epochs = None\n file_path_save_weights = None\n if load == 'n':\n exit()\n else:\n possible_files = os.listdir(\"./Trained Weights\")\n files_index = int(input(\"choose one number: \\n\" + \"\\n\".join([\"{} {}\".format(i, elm) for i, elm in enumerate(possible_files)]) + \"\\n\"))\n file_path_read_weights = \"./Trained Weights/\" + possible_files[files_index]\n\n possible_files = os.listdir(\"./Dict Data\")\n files_index = int(input(\n \"choose one number: \\n\" + \"\\n\".join([\"{} {}\".format(i, elm) for i, elm in enumerate(possible_files)]) + \"\\n\"))\n file_path_save_data = \"./Dict Data/\" + possible_files[files_index]\n\n return model, file_path_training_data, file_path_save_data, file_path_save_weights, file_path_read_weights, note_gen_epochs, duration_gen_epochs, train, load",
"def setup_test_config():\n json_path = data_path('example_moastro.json')\n print json_path\n assert os.path.exists(json_path) == True\n os.putenv('MOASTROCONFIG', json_path)",
"def initialize_config(self):\n\n def _logic(utterance: str) -> bool:\n \"\"\"\n Logic to be used by the logic-micromodel.\n \"\"\"\n return \"test\" in utterance.lower()\n\n configs = [\n {\n \"model_type\": \"svm\",\n \"name\": \"test_svm\",\n \"model_path\": os.path.join(self.model_path, \"test_svm\"),\n \"setup_args\": {\n \"training_data_path\": os.path.join(\n self.data_path, \"dog_vs_cat.json\"\n ),\n },\n },\n {\n \"model_type\": \"logic\",\n \"name\": \"test_logic\",\n \"model_path\": os.path.join(self.model_path, \"test_logic\"),\n \"setup_args\": {\"logic_func\": _logic},\n },\n {\n \"model_type\": \"bert_query\",\n \"name\": \"test_bert_query\",\n \"model_path\": os.path.join(self.model_path, \"test_bert_query\"),\n \"setup_args\": {\n \"threshold\": 0.8,\n \"seed\": [\n \"This is a test\",\n \"Arya is a hungry cat.\",\n ],\n \"infer_config\": {\n \"k\": 2,\n \"segment_config\": {\"window_size\": 5, \"step_size\": 3},\n },\n },\n },\n ]\n return configs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
download model: look up in the '/src/model4download.csv' file the download URL specified for the model, according to the preloaded configuration file
|
def downloadModel(self):
    self.ui.downModel.setEnabled(0)
    m4d = os.path.join(os.getcwd(), "src/models4download.csv")
    flag = False
    url = None
    try:
        # Open the list of downloadable models and look up the selected config
        with open(m4d, 'r') as csvFile:
            reader = csv.reader(csvFile)
            for row in reader:
                if self.ui.lineE9.text() in row:
                    print(row[1])
                    url = row[1]
                    flag = True
        self.thread3.url = url  # pass the url to the download thread (thread 3)
        self.url = url  # keep the url in the main window
    except Exception as ex:
        print(ex)
        flag = False
    if not flag:
        self.statusBar().showMessage("No se puede iniciar la descarga")
        self.ui.downModel.setEnabled(1)
    else:
        try:
            # launch the thread that downloads the model
            self.thread3.start()
        except Exception as ex:
            print(ex)
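
A self-contained sketch of just the CSV lookup step, assuming models4download.csv maps a config name (first column) to a download URL (second column) as the code above suggests; the function name and default path handling are illustrative:

import csv
import os

def find_model_url(config_name, csv_path=None):
    """Return the download URL listed for config_name in models4download.csv, or None."""
    if csv_path is None:
        csv_path = os.path.join(os.getcwd(), "src/models4download.csv")
    try:
        with open(csv_path, "r", newline="") as csv_file:
            for row in csv.reader(csv_file):
                if config_name in row:
                    return row[1]  # second column holds the download URL
    except OSError as ex:
        print(ex)
    return None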
|
[
"def downloadModel(self):\n \"\"\" If user select multi row, only data from currentRow is downloaded and loaded into moose \"\"\"\n selectedRow = self.resultsPanel.currentRow()\n modelId = self.resultsPanel.item(selectedRow, 0).text()\n modelSBML = str(self.client.service.getModelSBMLById(modelId)).encode(\"utf-8\")\n self.filePath = os.path.join(config.settings[config.KEY_LOCAL_DEMOS_DIR], str(modelId)+'.xml')\n f = open(str(self.filePath), 'w')\n f.write(modelSBML)\n self.close()\n \n type_sbml = 'SBML'\n filters = {'SBML(*.xml)': type_sbml}\n filepath,filter_ = QtGui.QFileDialog.getSaveFileNameAndFilter(None,'Save File',modelId,';;'.join(filters))\n if filepath:\n if str(filepath).rfind('.') != -1:\n filepath = filepath[:str(filepath).rfind('.')]\n if str(filter_).rfind('.') != -1:\n extension = filter_[str(filter_).rfind('.'):len(filter_)-1]\n self.filePath = str(filepath+extension)\n\n if filters[str(filter_)] == 'SBML':\n f = open(str(self.filePath), 'w')\n f.write(modelSBML)\n f.close()",
"def cargar_modelo(nombre_modelo):\n modelo_read=Sequential()\n\n try:\n modelo_read.load_weights(nombre_modelo,by_name=True)\n return modelo_read\n except:\n return None",
"def downloading_source_csv(self, version):\n self.print_shell('Downloading url and filenames ... ')\n self.bucket.download_blob(bucket_name=BUCKET_NAME,\n source_blob_name='data_source/datos_fuentes_'+version+'.csv',\n destination_file_name=\"temp/datos_fuentes.csv\")\n self.handler = Data_Extractor(csv_urls=\"temp/datos_fuentes.csv\")\n self.print_shell('url and names downloaded')",
"def download_inf_cadastral(self):\n file_name = \"cad_fi.csv\"\n url = \"{}/{}\".format(URL_CADASTRAL_DIARIO, file_name)\n local_file = \"{}/{}\".format(CSV_FILES_DIR, file_name)\n\n if os.path.exists(local_file):\n log.debug(\"Arquivo cadastral '%s' ja existe localmente\", file_name)\n self.filename = local_file\n else:\n log.debug(\"Tentando baixar arquivo: %s\", url)\n res = download_file(url, local_file)\n if res.status_code == 404:\n log.debug(\"Arquivo nao encontrado no site da cvm\")\n msg(\n \"red\",\n \"Erro: Arquivo cadastral encontrado no site da CVM. {}\".format(url),\n 1,\n )\n elif res.status_code == 200:\n log.debug(\"Arquivo baixado com sucesso: %s\", file_name)\n self.filename = local_file",
"def download_model():\n # path = '/home/tomas/code/tomasaltilio/Food_Detective/ResNET_acc32'\n path = 'gs://food-models-le-wagon/ResNET_acc32/'\n model = models.load_model(path)\n return model",
"def cargarConfigs(self):\n try:\n\n self.dirModelConfig = os.path.join(OBJECTDETECTIONPATH, \"samples/configs/{}\".format(str(self.ui.lineE9.text())))\n\n print(\"Modelo NUEVO seleccionado: {}\".format(str(self.dirModelConfig)))\n\n file = open(self.dirModelConfig, 'r')\n with file:\n text = file.read()\n self.ui.textEdit1.setText(text)\n\n self.ui.downModel.setEnabled(1)\n self.ui.pBCancelDown.setEnabled(1)\n except Exception as ex:\n print(ex)",
"def model(model, directory):\n return pandas.read_csv(csv_path(directory, model))",
"def download_model(model_date, model_name):\n\n model_file = model_name + '.tar.gz'\n url = os.path.join('http://download.tensorflow.org/models/object_detection/tf2',\n model_date,\n model_file)\n\n # Download model\n urllib.request.urlretrieve(url, model_file)\n\n # Untar and clean\n tar = tarfile.open(model_file)\n tar.extractall()\n tar.close()\n os.remove(model_file)",
"def download_pojo(model,path=\"\", get_jar=True):\n java = H2OConnection.get( \"Models.java/\"+model.model_id )\n\n # HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means.\n # TODO: clients should extract Java class name from header.\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n pojoname = regex.sub(\"_\",model.model_id)\n\n filepath = path + \"/\" + pojoname + \".java\"\n print(\"Filepath: {}\".format(filepath))\n if path == \"\": print(java.text)\n else:\n with open(filepath, 'wb') as f:\n f.write(java.text.encode(\"utf-8\"))\n if get_jar and path!=\"\":\n url = H2OConnection.make_url(\"h2o-genmodel.jar\")\n filename = path + \"/\" + \"h2o-genmodel.jar\"\n response = urlopen()(url)\n with open(filename, \"wb\") as f:\n f.write(response.read())",
"def _convert_demand(self):\n\n dic_TABLA_HABIL = list(reader_csv('', TABLA_HABIL, self._ose_dir))\n dic_tabla_no_habil = list(reader_csv('', TABLA_NO_HABIL, self._ose_dir))\n dic_tabla_duracion = list(reader_csv('', BLOCK_LENGTH, self._ose_dir))\n\n # TODO: Replace directory and file name below with correct one\n # If we integrate Ameba code we can import libraries with correct names\n \"\"\" READER SING\"\"\"\n dic_ind_1_SING = list(reader_csv(os.path.join(DIR_OSE_SING,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_1_SING, self._ose_dir))\n dic_ind_2_SING = list(reader_csv(os.path.join(DIR_OSE_SING,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_2_SING, self._ose_dir))\n\n dic_veg_1_SING = list(reader_csv(os.path.join(DIR_OSE_SING,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_1_SING, self._ose_dir))\n dic_veg_2_SING = list(reader_csv(os.path.join(DIR_OSE_SING,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_2_SING, self._ose_dir))\n dic_veg_3_SING = list(reader_csv(os.path.join(DIR_OSE_SING,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_3_SING, self._ose_dir))\n \"\"\" READER SIC\"\"\"\n if self._model in ['Ope','ope','OPE']:\n dic_ind_1_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_OPE_1_SIC, self._ose_dir))\n dic_ind_2_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_OPE_2_SIC, self._ose_dir))\n\n dic_veg_1_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPE_1_SIC, self._ose_dir))\n dic_veg_2_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPE_2_SIC, self._ose_dir))\n dic_veg_3_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPE_3_SIC, self._ose_dir))\n else: # if self._model in ['Opt','opt','OPT']:\n dic_ind_1_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_OPT_1_SIC, self._ose_dir))\n dic_ind_2_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_IND), FILE_OSE_IND_OPT_2_SIC, self._ose_dir))\n\n dic_veg_1_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPT_1_SIC, self._ose_dir))\n dic_veg_2_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPT_2_SIC, self._ose_dir))\n dic_veg_3_SIC = list(reader_csv(os.path.join(DIR_OSE_SIC,DIR_OSE_DEM,DIR_OSE_VEG), FILE_OSE_VEG_OPT_3_SIC, self._ose_dir))\n\n \"\"\" \"\"\"\n\n \"\"\" Find the initial and last year of every file\"\"\"\n dic_ind_1_SING_max_year = self._get_max_year(dic_ind_1_SING,OSE_IND_YEAR)\n dic_ind_2_SING_max_year = self._get_max_year(dic_ind_2_SING,OSE_IND_YEAR)\n dic_veg_1_SING_max_year = self._get_max_year(dic_veg_1_SING,OSE_VEG_YEAR)\n dic_veg_2_SING_max_year = self._get_max_year(dic_veg_2_SING,OSE_VEG_YEAR)\n dic_veg_3_SING_max_year = self._get_max_year(dic_veg_3_SING,OSE_VEG_YEAR)\n\n dic_ind_1_SING_min_year = self._get_min_year(dic_ind_1_SING,OSE_IND_YEAR)\n dic_ind_2_SING_min_year = self._get_min_year(dic_ind_2_SING,OSE_IND_YEAR)\n dic_veg_1_SING_min_year = self._get_min_year(dic_veg_1_SING,OSE_VEG_YEAR)\n dic_veg_2_SING_min_year = self._get_min_year(dic_veg_2_SING,OSE_VEG_YEAR)\n dic_veg_3_SING_min_year = self._get_min_year(dic_veg_3_SING,OSE_VEG_YEAR)\n\n dic_ind_1_SIC_max_year = self._get_max_year(dic_ind_1_SIC,OSE_IND_YEAR)\n dic_ind_2_SIC_max_year = self._get_max_year(dic_ind_2_SIC,OSE_IND_YEAR)\n dic_veg_1_SIC_max_year = self._get_max_year(dic_veg_1_SIC,OSE_VEG_YEAR)\n dic_veg_2_SIC_max_year = self._get_max_year(dic_veg_2_SIC,OSE_VEG_YEAR)\n 
dic_veg_3_SIC_max_year = self._get_max_year(dic_veg_3_SIC,OSE_VEG_YEAR)\n\n dic_ind_1_SIC_min_year = self._get_min_year(dic_ind_1_SIC,OSE_IND_YEAR)\n dic_ind_2_SIC_min_year = self._get_min_year(dic_ind_2_SIC,OSE_IND_YEAR)\n dic_veg_1_SIC_min_year = self._get_min_year(dic_veg_1_SIC,OSE_VEG_YEAR)\n dic_veg_2_SIC_min_year = self._get_min_year(dic_veg_2_SIC,OSE_VEG_YEAR)\n dic_veg_3_SIC_min_year = self._get_min_year(dic_veg_3_SIC,OSE_VEG_YEAR)\n\n\n dem_factor_ind_SING = SearchDemandFactor(MAX_BLOCK, dic_ind_1_SING_min_year, dic_ind_1_SING_max_year, dic_ind_1_SING, [OSE_IND_YEAR, OSE_IND_BAR, OSE_IND_BLOCK], OSE_MONTHS_1)\n energy_ind_SING = SearchEnergy(dic_ind_2_SING_min_year,dic_ind_2_SING_max_year, dic_ind_2_SING, [OSE_IND_YEAR, OSE_IND_BAR], OSE_MONTHS_1)\n\n dem_factor_veg_SING = SearchDemandFactor(MAX_BLOCK, dic_veg_1_SING_min_year, dic_veg_1_SING_max_year, dic_veg_1_SING, [OSE_VEG_YEAR, OSE_VEG_BAR, OSE_VEG_BLOCK], OSE_MONTHS_1)\n energy_factor_veg_SING = SearchEnergy(dic_veg_2_SING_min_year,dic_veg_2_SING_max_year, dic_veg_2_SING, [OSE_VEG_YEAR, OSE_VEG_BAR], OSE_MONTHS_1)\n energy_veg_SING = SearchYearEnergy(dic_veg_3_SING_min_year, dic_veg_3_SING_max_year, dic_veg_3_SING, OSE_MONTHS_1)\n\n dem_factor_ind_SIC = SearchDemandFactor(MAX_BLOCK, dic_ind_1_SIC_min_year, dic_ind_1_SIC_max_year, dic_ind_1_SIC, [OSE_IND_YEAR, OSE_IND_BAR, OSE_IND_BLOCK], OSE_MONTHS_2)\n energy_ind_SIC = SearchEnergy(dic_ind_2_SIC_min_year,dic_ind_2_SIC_max_year, dic_ind_2_SIC, [OSE_IND_YEAR, OSE_IND_BAR], OSE_MONTHS_1)\n\n dem_factor_veg_SIC = SearchDemandFactor(MAX_BLOCK, dic_veg_1_SIC_min_year, dic_veg_1_SIC_max_year, dic_veg_1_SIC, [OSE_VEG_YEAR, OSE_VEG_BAR, OSE_VEG_BLOCK], OSE_MONTHS_2)\n energy_factor_veg_SIC = SearchEnergy(dic_veg_2_SIC_min_year,dic_veg_2_SIC_max_year, dic_veg_2_SIC, [OSE_VEG_YEAR, OSE_VEG_BAR], OSE_MONTHS_1)\n energy_veg_SIC = SearchYearEnergy(dic_veg_3_SIC_min_year, dic_veg_3_SIC_max_year, dic_veg_3_SIC, OSE_MONTHS_1)\n\n\n \"\"\" demand profile duration\"\"\"\n demand = self.__block_length_dates(int(self._year_ose), dic_tabla_duracion)\n\n \"\"\" STAGE & BLOCK GENERATOR\"\"\"\n block_distribution_year = self.__block_distribution(demand)\n block_distribution = []\n for years in range(int(self._year_ini),int(self._year_end)+1):\n for block in block_distribution_year:\n block_distribution.append(copy.deepcopy(block))\n\n block_distribution[-1].update({TIME_AMEBA : block_distribution[-1][TIME_AMEBA].replace(year=years)})\n delta = years-int(self._year_ini)\n block_distribution[-1].update({STAGE_AMEBA : int(block_distribution[-1][STAGE_AMEBA])+(12*delta)})\n\n \"\"\" CHECK IF DIRECTORY EXIST \"\"\"\n directory = os.path.join(self._ameba_dir,DIR_AMEBA_DEM)\n check_directory(directory)\n\n writer_block = writer_csv('block_distribution.csv', COLUMNS_BLOCK, os.path.join(self._ameba_dir,DIR_AMEBA_DEM))\n writer_block.writeheader()\n\n for block in block_distribution:\n block.update({TIME_AMEBA: self._date_time(block[TIME_AMEBA],block[TIME_AMEBA].year)})\n block.pop(SCENARIO_AMEBA)\n\n writer_block.writerow(block)\n\n \"\"\" SIC AND SING BAR LIST\"\"\"\n bar_ind_SING=[]\n for row in dic_ind_2_SING:\n if row[COLUMNS_OSE_IND_2[0]]==self._year_ini:\n bar_ind_SING.append({NAME_AMEBA:row[COLUMNS_OSE_IND_2[1]]})\n bar_veg_SING=[]\n for row in dic_veg_2_SING:\n if row[COLUMNS_OSE_VEG_2[0]]==self._year_ini:\n bar_veg_SING.append({NAME_AMEBA:row[COLUMNS_OSE_VEG_2[1]]})\n bar_ind_SIC=[]\n for row in dic_ind_2_SIC:\n if row[COLUMNS_OSE_IND_2[0]]==self._year_ini:\n 
bar_ind_SIC.append({NAME_AMEBA:row[COLUMNS_OSE_IND_2[1]]})\n bar_veg_SIC=[]\n for row in dic_veg_2_SIC:\n if row[COLUMNS_OSE_VEG_2[0]]==self._year_ini:\n bar_veg_SIC.append({NAME_AMEBA:row[COLUMNS_OSE_VEG_2[1]]})\n\n \"\"\" genera lista para todos los años\"\"\"\n dem=[]\n i=0\n for years in range(int(self._year_ini),int(self._year_end)+1):\n for element in demand:\n dem.append(copy.deepcopy(element))\n dem[i].update({ TIME_AMEBA:element.copy()[TIME_AMEBA].replace(year=years )})\n i+=1\n\n\n \"\"\" MAIN PART\"\"\"\n dec_num = 1\n\n for element in dem:\n year = int(element[TIME_AMEBA].year)\n block = int(element[BLOCK_AMEBA])\n month = MONTH_INDEX[int(element[STAGE_AMEBA])]\n\n if month > 8:\n year = year - 1\n\n year_ind1_sic = year\n year_ind2_sic = year\n year_ind1_sing = year\n year_ind2_sing = year\n year_veg1_sic = year\n year_veg2_sic = year\n year_veg3_sic = year\n year_veg1_sing = year\n year_veg2_sing = year\n year_veg3_sing = year\n\n if year_ind1_sing < dic_ind_1_SING_min_year:\n year_ind1_sing = dic_ind_1_SING_min_year\n if year_ind2_sing < dic_ind_2_SING_min_year:\n year_ind2_sing = dic_ind_2_SING_min_year\n\n if year_veg1_sing < dic_veg_1_SING_min_year:\n year_veg1_sing = dic_veg_1_SING_min_year\n if year_veg2_sing < dic_veg_2_SING_min_year:\n year_veg2_sing = dic_veg_2_SING_min_year\n if year_veg3_sing < dic_veg_3_SING_min_year:\n year_veg3_sing = dic_veg_3_SING_min_year\n\n if year_ind1_sic < dic_ind_1_SIC_min_year:\n year_ind1_sic = dic_ind_1_SIC_min_year\n if year_ind2_sic < dic_ind_2_SIC_min_year:\n year_ind2_sic = dic_ind_2_SIC_min_year\n\n if year_veg1_sic < dic_veg_1_SIC_min_year:\n year_veg1_sic = dic_veg_1_SIC_min_year\n if year_veg2_sic < dic_veg_2_SIC_min_year:\n year_veg2_sic = dic_veg_2_SIC_min_year\n if year_veg3_sic < dic_veg_3_SIC_min_year:\n year_veg3_sic = dic_veg_3_SIC_min_year\n\n for name_ind in bar_ind_SING:\n name = name_ind[NAME_AMEBA]\n value_dem_factor = float(dem_factor_ind_SING.get_demand_factor(year_ind1_sing, block, name)[month])\n value_ene = float(energy_ind_SING.get_energy(year_ind2_sing, name)[month])\n value = value_dem_factor*value_ene*1000/MONTH_HRS[month]\n\n element.update({str(remove(name_ind[NAME_AMEBA]))+'_ind': round(value,dec_num)})\n for name_veg in bar_veg_SING:\n name = name_veg[NAME_AMEBA]\n value_dem_factor = float(dem_factor_veg_SING.get_demand_factor(year_veg1_sing, block, name)[month])\n value_ene_factor = float(energy_factor_veg_SING.get_energy(year_veg2_sing, name)[month])\n value_ene = float(energy_veg_SING.get_energy(year_veg3_sing)[month])\n value = (value_dem_factor*value_ene_factor*1000/MONTH_HRS[month])*value_ene\n element.update({str(remove(name_veg['name']))+'_veg': round(value,dec_num)})\n\n for name_ind in bar_ind_SIC:\n name = name_ind[NAME_AMEBA]\n value_dem_factor = float(dem_factor_ind_SIC.get_demand_factor(year_ind1_sic, block, name)[month])\n value_ene = float(energy_ind_SIC.get_energy(year_ind2_sic, name)[month])\n value = value_dem_factor*value_ene*1000/MONTH_HRS[month]\n\n element.update({str(remove(name_ind[NAME_AMEBA]))+'_ind': round(value,dec_num)})\n for name_veg in bar_veg_SIC:\n name = name_veg[NAME_AMEBA]\n\n value_dem_factor = float(dem_factor_veg_SIC.get_demand_factor(year_veg1_sic, block, name)[month])\n value_ene_factor = float(energy_factor_veg_SIC.get_energy(year_veg2_sic, name)[month])\n value_ene = float(energy_veg_SIC.get_energy(year_veg3_sic)[month])\n\n value = (value_dem_factor*value_ene_factor*1000/MONTH_HRS[month])*value_ene\n element.update({str(remove(name_veg['name']))+'_veg': 
round(value,dec_num)})\n element.update({TIME_AMEBA: self._date_time(element[TIME_AMEBA],element[TIME_AMEBA].year)})\n\n columns=dem[0].keys()\n columns.insert(0, columns.pop(columns.index(TIME_AMEBA)))\n columns.insert(1, columns.pop(columns.index('scenario')))\n\n columns.pop(columns.index('block'))\n columns.pop(columns.index(STAGE_AMEBA))\n\n \"\"\" CHECK IF DIRECTORY EXIST \"\"\"\n directory = os.path.join(self._ameba_dir,DIR_AMEBA_DEM)\n check_directory(directory)\n\n writer = writer_csv(FILE_AMEBA, columns, os.path.join(self._ameba_dir,DIR_AMEBA_DEM))\n writer.writeheader()\n\n for element in dem:\n element.pop(BLOCK_AMEBA)\n element.pop(STAGE_AMEBA)\n writer.writerow(element)",
"def _download_model(self, lang_code, version):\n \n model_name = '{}-{}'.format(lang_code, version)\n model_path_cloud = os.path.join(self.cloud_path, '{}.zip'.format(model_name))\n model_path_local = os.path.join(self.disk_path, '{}.zip'.format(model_name))\n \n # Download and extract models for provided language. \n self._download_and_extract_lang_model(model_path_cloud, model_path_local) \n self.metadata.read(os.path.join(self.disk_path,lang_code+\"-\"+str(version),\"metadata.json\"))\n \n # Download Facebook embeddings based on the metadata read from the model\n self._download_embeddings(self.metadata.embeddings_remote_link, self.metadata.embeddings_file_name)\n sys.stdout.write(\"\\n\")",
"def download_model_files(args):\n\n log(args, f\"Downloading model files\")\n\n dstpath = pathlib.Path('tutorials', 'models')\n url = 'https://github.com/{repo}/raw/main/src/abcgan/models/{name}'\n\n download_files(args, url, persist.dir_path, dstpath)",
"def cancelDonwload(self):\n if self.thread3.isRunning():\n try:\n print(\"Hilo activado y listo para detener\")\n self.ui.downModel.setEnabled(1)\n self.ui.progressBar.setValue(0)\n\n modelsDir = str(os.path.join(os.getcwd(), \"models\")) # se guarda en carpeta models\n filename = os.path.join(modelsDir, os.path.basename(self.url))\n os.remove(filename)\n self.thread3.terminate()\n self.ui.downModel.setEnabled(1)\n\n except Exception as ex:\n print(ex)\n print('!error descargar modelo')\n else:\n print(\"Hilo inactivo\")",
"def maybe_download():\r\n\r\n print(\"Downloading Inception 5h Model ...\")\r\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_model(model_id, file_format=\"json\", save=True, path=\".\"):\n\n if save:\n response = requests.get(\"http://bigg.ucsd.edu/static/models/%s.%s\" % (model_id, file_format), stream=True)\n response.raise_for_status()\n with open(os.path.join(path, \"%s.%s\" % (model_id, file_format)), \"wb\") as model_file:\n for block in response.iter_content(1024):\n model_file.write(block)\n else:\n response = requests.get(\"http://bigg.ucsd.edu/static/models/%s.json\" % model_id, stream=True)\n response.raise_for_status()\n return model_from_dict(response.json())",
"def descargarArchivo(self):\r\n try:\r\n nom_arch=self.dicValoresCompleto[\"nom_arch\"]\r\n except Exception:\r\n mens=unicode(\"Error. El campo nom_arch no está entre los nombres de los campos\",\"utf-8\")\r\n return\r\n subDir=self.oUtiles.oUtilidades.uneSubDir(self.listaSubDirDescargas)\r\n nom_arch=self.oUtiles.dTrabajos + subDir + nom_arch\r\n \r\n ############\r\n #primero comprueba que el archivo no haya sido descargado ya,\r\n #en tal caso no hace falta que vuelva a descargarArchivose del servidor\r\n if str(self.ui.bttDescargar.text())==\"Ver archivo\":\r\n #el archivo ya se descargo y existe\r\n ext_arch=os.path.splitext(nom_arch)[1]\r\n if ext_arch==self.toUtf8(\".pdf\"):\r\n if self.oUtiles.lector_pdf==None:\r\n QtGui.QMessageBox.information(self,\"Problema\",\"No ha configurado el programa para leer archivos PDF.\" ,1)\r\n return\r\n else:\r\n programa=self.oUtiles.lector_pdf\r\n #programa=self.toUtf8(\"C:/Program Files (x86)/Adobe/Reader 9.0/Reader/AcroRd32.exe\")\r\n if sys.platform=='linux2':\r\n orden=[programa,nom_arch]\r\n else:\r\n programa=self.toUtf8(programa)\r\n programa=self.toUtf8(\"\\\"\") + programa + self.toUtf8(\"\\\"\")\r\n archivo=self.toUtf8(\"\\\"\") + nom_arch + self.toUtf8(\"\\\"\")\r\n orden=programa + self.toUtf8(\" \") + archivo\r\n\r\n try:\r\n \r\n subprocess.call(orden)\r\n except Exception,e:\r\n mens=self.toUtf8(\"No se pudo mostrar el archivo. Probablemente la ruta o el nombre debe tener acentos o eñes. Tambien es posible que no este correcta la ruta al programa que muestra los archivos PDF, en el fichero dirTrabajos.txt (\" + self.oUtiles.lector_pdf + \"). Debe abrilo ud. manualmente\")\r\n QtGui.QMessageBox.information(self,\"Problema\",mens ,1)\r\n else:\r\n dlg=ctrMuestraImg(self.oUtiles)\r\n dlg.muestraImagen(nom_arch)\r\n dlg.muestraValores(self.dicMostrar)\r\n dlg.exec_()\r\n return \r\n\r\n #############\r\n\r\n\r\n# #primero comprueba que el archivo no haya sido descargado ya,\r\n# #en tal caso no hace falta que vuelva a descargarArchivose del servidor\r\n# if str(self.ui.bttDescargar.text())==\"Ver archivo\":\r\n# #el archivo ya se descargo y existe\r\n# ext_arch=os.path.splitext(nom_arch)[1]\r\n# if ext_arch==self.toUtf8(\".pdf\"):\r\n# if self.oUtiles.lector_pdf==None:\r\n# QtGui.QMessageBox.information(self,\"Problema\",\"No ha configurado el programa para leer archivos PDF.\" ,1)\r\n# return\r\n# else:\r\n# programa=self.oUtiles.lector_pdf\r\n# #programa=self.toUtf8(\"C:/Program Files (x86)/Adobe/Reader 9.0/Reader/AcroRd32.exe\")\r\n# programa=self.toUtf8(programa)\r\n# programa=self.toUtf8(\"\\\"\") + programa + self.toUtf8(\"\\\"\")\r\n# archivo=self.toUtf8(\"\\\"\") + nom_arch + self.toUtf8(\"\\\"\")\r\n# orden=programa + self.toUtf8(\" \") + archivo\r\n# try:\r\n# subprocess.call(orden)\r\n# except Exception,e:\r\n# mens=self.toUtf8(\"No se pudo mostrar el archivo. Probablemente la ruta o el nombre debe tener acentos o eñes. Tambien es posible que no este correcta la ruta al programa que muestra los archivos PDF, en el fichero dirTrabajos.txt (\" + self.oUtiles.lector_pdf + \"). Debe abrilo ud. 
manualmente\")\r\n# QtGui.QMessageBox.information(self,\"Problema\",mens ,1)\r\n# else:\r\n# dlg=ctrMuestraImg(self.oUtiles)\r\n# dlg.muestraImagen(nom_arch)\r\n# dlg.muestraValores(self.dicMostrar)\r\n# dlg.exec_()\r\n# return \r\n# \r\n if os.path.exists(nom_arch):\r\n mens=\"El archivo ya estaba descargado en: \" + nom_arch\r\n self.ui.lbEstado.setText(mens)\r\n self.ui.bttDescargar.setText(\"Ver archivo\")\r\n return\r\n \r\n #la imagen no habia sido descargada\r\n #compruebo que los directorios existen y si no los creo\r\n rr=self.oUtiles.oUtilidades.creaDir(self.oUtiles.dTrabajos,self.listaSubDirDescargas,True)#devuelve Exception si no va bien\r\n if isinstance(rr,Exception):\r\n self.ui.lbEstado.setText(rr.message)\r\n return#no hace falta dar mensajes, ya se ha avisado de lo\r\n #que pasa en la funcion creaDirImagenes\r\n \r\n self.ui.lbEstado.setText(\"Recuperando de la base de datos. Espere ...\")\r\n lvCondWhere=[]\r\n lCamposCondWhere=[]\r\n idd=self.dicValoresCompleto.get(\"id\")\r\n if idd !=None:\r\n lvCondWhere.append(idd)\r\n lCamposCondWhere.append(\"id\")\r\n gid=self.dicValoresCompleto.get(\"gid\")\r\n if gid !=None:\r\n lvCondWhere.append(gid)\r\n lCamposCondWhere.append(\"gid\")\r\n lvCondWhere.append(self.oUtiles.id_trabajo)\r\n lCamposCondWhere.append(\"id_trabajo\")\r\n condWhere=self.oUtiles.oConsultasPg.oGeneraExpresionesPsycopg2.generaWhere(lCamposCondWhere, \"and\")\r\n resp=self.oUtiles.oArchivos.descargaYgrabaArchivo(self.oUtiles.oConsultasPg,self.nomTabla,\"archivo\",condWhere,lvCondWhere,nom_arch)\r\n if isinstance(resp, Exception):\r\n mens=resp.message\r\n else:\r\n mens=\"Archivo descargado en: \" + nom_arch\r\n self.ui.bttDescargar.setText(\"Ver archivo\")\r\n\r\n self.ui.lbEstado.setText(mens)",
"def csv_file_download_with_stream():\n idPARSING_DSF = int(request.args.get('pdsf_id', 0))\n if idPARSING_DSF != 0:\n pdsf = services.estimator.pdsf_file_info(idPARSING_DSF)\n else:\n return redirect(\"/my_task\")\n\n filename = pdsf[\"ParsingFile\"]\n fname = filename.split(\"/\")[-1]\n temp_df = pd.read_csv(filename, encoding='utf-8')\n\n # 그 결과를 앞서 만든 IO stream에 저장\n output_stream = StringIO()\n\n temp_df.to_csv(output_stream, index=False, encoding='utf-8')\n response = Response(\n output_stream.getvalue(),\n mimetype='text/csv; charset=utf-8',\n content_type='application/octet-stream',\n )\n\n response.headers[\"Content-Disposition\"] = f\"attachment; filename={fname}\".encode('utf-8')\n\n return response",
"def generer_csv_aleatoires(self, path):\n file = open(path + \"/voeux.\"+self.nom, \"w\")\n\n fieldnames = [\"num\"] + [\"oblig\"+str(i) for i in range(1,self.optimizer.Parameters.nbMaxUEObligatoires + 1)] + [\"cons\"+str(i) for i in range(1,self.optimizer.Parameters.nbMaxUEConseillees + 1)]\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n effectif = random.randint(self.effectifMin, self.effectifMax)\n # self.set_effectif(effectif)\n # print(self.Proportions)\n\n s = np.random.multinomial(effectif, self.Proportions, size=1)[0]\n\n s = [v for i in range(len(s)) for v in [i+1]*s[i]]\n random.shuffle(s)\n\n id_rel = 0\n for i in range(len(s)):\n current_nb_ue = s[i]\n if current_nb_ue != 0:\n id_rel += 1\n L_Oblig, L_Cons = self.constituer_voeu(current_nb_ue)\n csvLine = dict()\n csvLine[\"num\"] = id_rel\n for o in range(len(L_Oblig)):\n csvLine[\"oblig\"+str(o+1)] = L_Oblig[o]\n for c in range(len(L_Cons)):\n csvLine[\"cons\"+str(c+1)] = L_Cons[c]\n writer.writerow(csvLine)\n\n\n file.close()",
"def act_func_load_model(self):\n # Open QFileDialog\n dialog = QtWidgets.QFileDialog(caption=\"Load model\")\n dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)\n dialog.setNameFilter(\"Sparse model files (*.pp *.txt)\")\n if not dialog.exec_():\n return\n filename = dialog.selectedFiles()\n self.process.sparse_model_file = filename\n\n # Set mode\n self.build_model_mode = False\n self.mode_qlabel.setText(\"Mode: Use existing model\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
cancel the download of the selected model in the ~/models folder
|
def cancelDonwload(self):
    if self.thread3.isRunning():
        try:
            print("Hilo activado y listo para detener")
            # Reset the UI: re-enable the download button and clear the progress bar
            self.ui.downModel.setEnabled(1)
            self.ui.progressBar.setValue(0)
            # Downloads are stored in the local "models" folder; remove the partial file
            modelsDir = str(os.path.join(os.getcwd(), "models"))
            filename = os.path.join(modelsDir, os.path.basename(self.url))
            os.remove(filename)
            # Stop the download thread
            self.thread3.terminate()
        except Exception as ex:
            print(ex)
            print('!error descargar modelo')
    else:
        print("Hilo inactivo")
|
[
"def cancel(self):\n url = self._path.format(self.custom_model_id, self.custom_model_version_id)\n self._client.delete(url)",
"def test_model_select(self, modelo):\n self.assertNotEqual(modelo, '', note=\"El modelo no debe estar vacío.\")",
"def reset_context():\n global _model\n _model = None",
"def unload_model(model_name):\n del backend_globals.loaded_model[model_name]",
"def del_model( modelName ): # FIXME: Freezes Python, DO NOT USE!\n # delete_model : gazebo_msgs/DeleteModel\n del_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel) # model spawner\n # rospy.wait_for_service('gazebo/delete_model') # Wait for the model loader to be ready \n # FREEZES EITHER WAY\n ref=del_model_prox(modelName) # Remove from Gazebo",
"def act_func_load_model(self):\n # Open QFileDialog\n dialog = QtWidgets.QFileDialog(caption=\"Load model\")\n dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)\n dialog.setNameFilter(\"Sparse model files (*.pp *.txt)\")\n if not dialog.exec_():\n return\n filename = dialog.selectedFiles()\n self.process.sparse_model_file = filename\n\n # Set mode\n self.build_model_mode = False\n self.mode_qlabel.setText(\"Mode: Use existing model\")",
"def unload_model(self, model_name):\n raise_error(\"Not implemented yet\")",
"def load_model(self):\n filename = filedialog.askopenfilename()\n if filename:\n self.model_path = filename\n self.reload()",
"def on_clear_clicked(self, obj):\n self.book_model.clear()\n self.book.clear()",
"def delete_model(self, request, obj):\n # handle something here\n obj.delete()",
"def remove_selected(self):\n\n if not self.selected:\n required_field_empty_warning(self, \"Select item for removal.\")\n\n # on (row, 0) placed entity ID\n model_id = int(self.table_widget.item(self.selected[0], 0).text())\n\n if not DeleteDialog(\n \"item with ID = {0}\".format(model_id), self.model.__tablename__\n ).exec_() == QDialog.Accepted:\n return\n\n session = db.get_session()\n session.query(self.model).filter(self.model.id == model_id).delete()\n session.commit()\n self.show_table(self.model)",
"def _select_model(self):\n available_models = self.params[\"task\"][self.task]\n models_list = list(available_models.keys())\n models_list.append(\"Return to task selection\")\n \n # prompt CLI models options\n terminal_menu = TerminalMenu(models_list, preview_command=self._preview_model, \n preview_size=0.75)\n menu_entry_index = terminal_menu.show()\n\n self.model = models_list[menu_entry_index]\n self.processor = self.params[\"task\"][self.task][self.model][\"model_processor\"]\n self.live_runner = self.params[\"task\"][self.task][self.model][\"live_runner\"]\n # go back and select another task, model, etc - prompt user-input\n if self.model == \"Return to task selection\":\n self.user_input()",
"def test_delete_model_by_name(self):\n pass",
"def cmd_new_scene(self, **kwargs):\n self.canvas.delete(\"all\")\n self.models = list()",
"def dlg_open_file(self, **kwargs):\n path = askopenfilename(filetypes=((\"Obj Model\", \"*.obj\"),\n (\"All files\", \"*.*\")))\n self.cmd_load_model(path)",
"def action_cancel(self):\n context = self._context or {}\n for inv_brw in self.browse():\n if not inv_brw.wh_muni_id:\n super(AccountInvoice, self).action_cancel()\n else:\n raise exceptions.except_orm(\n _(\"Error!\"),\n _(\"No puede cancelar una factura que no tiene\"\n \"Documento de retención municipal. Primero se debe cancelar la\"\n \"factura el documento de retención municipal y luego puedes\"\n \"cancelar esta factura.\"))\n return True",
"def desactiver(self):\n self.est_activee = False",
"def reset_objects(self):\n self.objects.choices = []",
"def test_missing_model(self):\n del self.data['model']\n self.assertInvalid()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
allows checking whether the configuration file has been loaded
|
def _checkModelConfig(self):
    if self.modelConfig == '':
        print('Debe cargar primero el archivo de configuración')
        self.statusBar().showMessage('Debe cargar primero el archivo de configuración')
        return False
    else:
        return True  # True because the config path is not empty
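
A minimal sketch of the same guard pattern outside the UI class, with the Qt status bar replaced by a plain callback; the function and parameter names are illustrative assumptions:

def check_model_config(model_config, notify=print):
    """Return True if a configuration file path has been loaded; otherwise notify and return False."""
    if not model_config:
        notify('Debe cargar primero el archivo de configuración')
        return False
    return True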
|
[
"def check_configuration(file):\n return os.path.isfile(file)",
"def _check_file(self):\n if not os.path.exists(self.file_path):\n return False\n self._migrate()\n config = configparser.RawConfigParser()\n config.read(self.file_path)\n try:\n config.get(\n escape_for_ini('keyring-setting'),\n escape_for_ini('password reference'),\n )\n except (configparser.NoSectionError, configparser.NoOptionError):\n return False\n return True",
"def settings_exist(self): \n path = os.getcwd()\n print(path)\n if os._exists(os.path.join(path, 'pomgr.settings.json')):\n return True\n else:\n return False",
"def check_settings(self):\n #check file existence\n if not os.path.isfile(self.settings()):\n print('Missing file : %s was not retrieved.' % self.settings())\n sys.exit(1)\n\n check_settings_file(self)",
"def check_config(name):\n return os.path.isfile(JUMBODIR + name + '/jumbo_config')",
"def _check_file(self):\n if not os.path.exists(self.file_path):\n return False\n self._migrate()\n config = configparser.RawConfigParser()\n config.read(self.file_path)\n try:\n config.get(\n escape_for_ini('keyring-setting'), escape_for_ini('password reference')\n )\n except (configparser.NoSectionError, configparser.NoOptionError):\n return False\n try:\n self._check_scheme(config)\n except AttributeError:\n # accept a missing scheme\n return True\n return self._check_version(config)",
"def have_configs(self):\n self.assert_(self.config.has_option('PDC','passwd'))\n self.assert_(self.config.has_option('PDC','principal'))\n self.assert_(self.config.has_option('PDC','host'))\n self.assert_(self.config.has_option('PDC','smrl2_dir'))\n self.assertTrue(self.config.has_option('PDC','user'))",
"def verifyConfiguration(self):",
"def is_config(filename):\n filename = os.path.basename(filename)\n if filename in [\"server_config\"]:\n return True\n return False",
"def check_config( self ) :\n\n self._logger.info( 'exercising execution engine...' )\n\n return True",
"def check_config(self):\n check_paper, check_name, check_source, check_folder = self.what_to_check()\n\n if check_paper:\n #does the papers dict exist?\n if not 'papers' in self.config:\n print 'please ensure that your settings.conf has the variable \"papers\"'\n return False\n\n #is papers dict emty?\n elif self.config['papers'] is None:\n print 'please ensure that your settings.conf has no empty variables'\n return False\n\n #does the paper from the argument exist in the papers dict?\n elif self.args.paper not in self.config['papers'].keys() \\\n and self.args.paper[:-5] not in self.config['papers'].keys():\n print 'The paper you want to track does not exist in the settings.conf file'\n return False\n\n #special case: if no paper is specified but only one is in the config file - track it.\n elif len(self.config['papers']) > 1:\n print 'Please specify the paper you are working on by either having only one entry' \\\n ' in the papers variable or using an argument'\n return False\n\n #check only if not overwritten in command line\n if check_name:\n #does the name variable exist in config?\n if not 'name' in self.config:\n print 'please ensure that your settings.conf has the variable \"name\"'\n return False\n\n #is the name variable empty?\n elif self.config['name'] is None:\n print 'please ensure that your settings.conf has no empty variables'\n return False\n\n #check only if not overwritten in command line\n if check_folder:\n #does the variable exist?\n if not 'folder_name' in self.config:\n print 'please ensure that your settings.conf has the variable \"folder_name\"'\n return False\n\n #is the variable empty?\n elif self.config['folder_name'] is None:\n print 'please ensure that your settings.conf has no empty variables'\n return False\n\n self.set_settings(check_paper, check_name, check_source, check_folder)\n\n #the following can only get checked with existent settings - otherwise too much redundancy\n\n #does the source folder exist?\n if not os.path.exists(self.settings[\"source\"]):\n print 'Your source folder seems to be nonexistent'\n return False\n\n #does the document exist?\n elif not os.path.exists(os.path.join(self.settings[\"source\"], self.settings[\"paper\"] + '.docx')) \\\n and not os.path.exists(os.path.join(self.settings[\"source\"], self.settings[\"paper\"])):\n print 'Please ensure that a .docx with your specified name exists'\n return False\n\n #print os.path.join(self.settings[\"source\"], self.settings[\"paper\"] + '.docx')\n\n return True",
"def cfg_avail(self) -> None:\n if os.path.isfile(self.cfg_path):\n LOGGER.info(\"Configuration file available\")\n else:\n raise ConfigFileNotFoundError()",
"def test_getConfig_but_there_is_no_config_file(self):\n if os.path.isfile(config_address):\n os.remove(config_address)\n self.assertEqual(getConfig()['general'], defaults)",
"def __check_config_and_continue(self):\n archive_type = self.config['Archive']['archive_type']\n # Prompt for original db file for EF or AA\n if archive_type in [\"EF\", \"AA\"]:\n if not (self.config.has_option('Archive', 'original_db_file')) or \\\n self.config['Archive']['original_db_file'] == \"\":\n path_to_original_db = input(\">> Full path to the original database file \"\n \"(this will be copied into the working path and loaded into MySQL):\\n\")\n self.config['Archive']['original_db_file'] = path_to_original_db\n return True\n else:\n return False",
"def test_getConfig_when_there_is_a_config_file(self):\n generateConfig()\n self.assertEqual(getConfig()['general'], defaults)",
"def test_configuration_loaded(self):\n dripconfig.load()\n assert 'doinhdch' in dripconfig.configuration",
"def test_read_config(self):\n config = _read_config({'store_config': True,\n 'fp': os.getcwd()})\n self.assertEqual(len(config), 5)",
"def test_settingsExamplePresent(self):\n self.assertTrue(\n os.path.isfile(settings_file),\n '{} is missing!'.format(settings_file)\n )",
"def checkforparamsfile():\n if os.path.isfile(FILENAME) and os.stat(FILENAME).st_size > 0:\n openparams()\n return\n else:\n saveparams()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
export the computational graph so it can be used with our data; a specific training checkpoint number can be passed as a parameter, or by default the last correctly saved one is taken. To export the computational graph after training, run the following from tensorflow/models/research/: python3 export_inference_graph.py \ --input_type image_tensor \ --pipeline_config_path /home/gustavo/Documentos/LKE_framework/projects/mesas/training/ssd_mobilenet_v1_pets.config \ --trained_checkpoint_prefix /home/gustavo/Documentos/LKE_framework/projects/mesas/training/model.ckpt-2898 \ --output_directory /home/gustavo/Documentos/LKE_framework/projects/mesas/training/mesas_graph
|
    def exportGraph(self):
        try:
            if self._checkModelConfig():
                num_check = ''
                b = 0  # if b == 1 the data is valid and the graph can be exported
                # choose the checkpoint according to the selected radio button
                if self.ui.rb_lastCheck.isChecked():
                    num_check = self._findlastcheckp()  # look up the latest checkpoint
                    if num_check == -1:  # returns -1 when the lookup fails
                        b = 0
                    else:
                        print('checkpoint = {}'.format(num_check))
                        b = 1
                if self.ui.rb_manualCheck.isChecked():
                    if str(self.ui.lineE_checkpoint.text()) == '':
                        self.statusBar().showMessage('Enter a valid checkpoint number')
                        print('Enter a valid checkpoint number')
                        b = 0
                    else:
                        num_check = self.ui.lineE_checkpoint.text()
                        print('checkpoint = {}'.format(num_check))
                        # check whether the checkpoint number is valid;
                        # if it is not, report it; if it is, the flag is set to 1
                        b = self._validarCheckpoint(num_check)
                arg1 = 'python'
                arg2 = 'export_inference_graph.py'
                arg3 = '--input_type image_tensor'
                arg4 = '--pipeline_config_path {}/projects/{}/training/{}'.format(os.getcwd(),
                    self.nameProject, self.modelConfig)
                # if the flag == 1, run the export command
                if b == 1:
                    self.exportfiles = '{}_graph_{}'.format(self.nameProject, num_check)
                    arg5 = '--trained_checkpoint_prefix {}/projects/{}/training/model.ckpt-{}'.format(
                        os.getcwd(), self.nameProject, num_check)
                    arg6 = '--output_directory {}/projects/{}/training/{}'.format(
                        os.getcwd(), self.nameProject, self.exportfiles)
                    path = os.path.join(os.getcwd(), 'projects/{}/training/{}'.format(
                        self.nameProject, self.exportfiles))
                    command = arg1 + ' ' + OBJECTDETECTIONPATH + '/' + arg2 + ' ' + arg3 + ' ' + arg4 + ' ' + arg5 + ' ' + arg6
                    self.statusBar().showMessage('Valid checkpoint')
                    self._exportar(path, command)
                else:
                    print('Cannot start the export')
                    self.statusBar().showMessage('Error: try a valid checkpoint')
        except Exception as ex:
            print(ex)
            self.statusBar().showMessage('Error while exporting')
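    # Editorial note (not part of the original source): the _exportar helper
    # called above is not shown in this snippet. The method below is a minimal
    # hypothetical sketch of what it could do, namely run the assembled
    # export_inference_graph.py command in a subprocess and report the outcome;
    # everything beyond the call signature used above is an assumption.
    def _exportar(self, path, command):
        import subprocess
        os.makedirs(path, exist_ok=True)  # make sure the output directory exists
        result = subprocess.run(command.split(), capture_output=True, text=True)
        if result.returncode == 0:
            self.statusBar().showMessage('Export finished: {}'.format(path))
        else:
            print(result.stderr)
            self.statusBar().showMessage('Export failed')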
|
[
"def export_frozen_inference_graph(\n checkpoint_path, pipeline_config_path, output_dir\n):\n # Import here because they are sooooo slow\n sys.path.append(etac.TF_OBJECT_DETECTION_DIR)\n from google.protobuf import text_format\n from object_detection import exporter # pylint: disable=import-error\n\n # pylint: disable=import-error\n from object_detection.protos import pipeline_pb2\n\n # Load pipeline config\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n with tf1.gfile.GFile(pipeline_config_path, \"r\") as f:\n text_format.Merge(f.read(), pipeline_config)\n\n # Export inference graph\n exporter.export_inference_graph(\n \"image_tensor\",\n pipeline_config,\n checkpoint_path,\n output_dir,\n input_shape=None,\n )",
"def protobuf_from_checkpoint(model_name, checkpoint_location, export_location):\n\n im_input = tf.placeholder(shape=(None, 299, 299, 3), dtype=tf.float32, name=\"Model_Input\")\n\n if model_name == \"InceptionV3\":\n from architectures.inception_v3 import inception_v3, inception_v3_arg_scope\n model = inception_v3\n model_scope = inception_v3_arg_scope\n output_node_names = [\"Model_Input\", \"Model_Output\"]\n\n\n elif model_name == \"InceptionResnetV2\":\n from architectures.inseption_resnet_v2 import inception_resnet_v2, inception_resnet_v2_arg_scope\n model = inception_resnet_v2\n model_scope = inception_resnet_v2_arg_scope\n output_node_names = [\"Model_Input\", \"InceptionResnetV2/Logits/Dropout/Identity\"]\n\n\n else:\n raise NotImplemented(\"The only supported models are [\\\"InceptionV3\\\", \\\"InceptionResnetV2\\\"]\")\n\n\n\n slim = tf.contrib.slim\n\n with tf.Session() as sess:\n with slim.arg_scope(model_scope()):\n if model_name == \"InceptionV3\":\n logits, terminals = model(im_input, is_training=False, create_aux_logits=True, num_classes=1001)\n output = tf.squeeze(terminals['AvgPool_1a'], axis=[1, 2], name='Model_Output')\n elif model_name == \"InceptionResnetV2\":\n logits, terminals = model(im_input, is_training=False, create_aux_logits=True)\n\n saver = tf.train.Saver()\n\n # Restore variables from disk.\n saver.restore(sess, checkpoint_location)\n sess.graph.as_default()\n print(\"Model restored.\")\n\n # Write graph to tensorboard\n writer = tf.summary.FileWriter(\"./tf_summary\", graph=sess.graph)\n\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess, # The session is used to retrieve the weights\n tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes\n output_node_names # The output node names are used to select the usefull nodes\n )\n\n with tf.gfile.GFile(export_location, \"wb\") as f:\n f.write(output_graph_def.SerializeToString())",
"def Network_gen(inputs, topo, outputs, name, input_size, output_size):\n\n # Creates a new file\n topo_done = open(\"%s.vhd\" %name, \"w+\")\n\n topo_string = 'import pandas as pd \\nimport math \\n \\ndef main(): \\n'\n \n in_size = input_size\n out_size = output_size\n\n input_list = []\n for i in range(1, inputs):\n # Gero tambem uma lista com os nomes das entradas\n # Gero a primeira camada da minha rede, a camada de inputs\n inputs_list.append('input_%s' %i)\n topo_string = topo_string + '\\n'\n\n topo_string = topo_string + ');'\n \n\n \n for layer in range(len(topo)):\n # Gero cada camada da topologia\n layer_nodes = topo[layer]\n\n for node in range(layer_nodes):\n topo_string = topo_string + ''",
"def _process_graph(graph: tf.Graph) -> List[str]:\n all_nodes = [x.name for x in graph.as_graph_def().node]\n print(\"############\")\n print(all_nodes)\n nodes = [x for x in all_nodes if x in POSSIBLE_OUTPUT_NODES | MODEL_CONSTANTS]\n print(\"List of nodes to export for brain TODO(oleguer put name here)\")\n print(\"############\")\n print(nodes)\n print(\"############\")\n for n in nodes:\n print(\"\\t\" + n)\n return nodes",
"def WriteInferenceGraph(self):\n inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')\n tf.gfile.MakeDirs(inference_graph_dir)\n tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)\n\n cfg = self.model_registry.GetParams(self._model_name, 'Test')\n if (issubclass(cfg.cls, base_model.MultiTaskModel) and\n not FLAGS.model_task_name):\n tf.logging.info('Cannot write inference graphs for multi-task model '\n 'when model_task_name is not specified.')\n return\n try:\n filename_prefix = 'inference'\n if FLAGS.model_task_name:\n filename_prefix = '%s_inference' % FLAGS.model_task_name\n filename_prefix = os.path.join(inference_graph_dir, filename_prefix)\n # Standard inference graph.\n self.inference_graph_exporter.InferenceGraphExporter.Export(\n model_cfg=cfg,\n model_task_name=FLAGS.model_task_name,\n export_path=filename_prefix + '.pbtxt')\n except NotImplementedError as e:\n tf.logging.error('Cannot write inference graph: %s', e)\n\n # TPU inference graph. Not all models support it so fail silently.\n try:\n self.inference_graph_exporter.InferenceGraphExporter.Export(\n model_cfg=cfg,\n model_task_name=FLAGS.model_task_name,\n device_options=self.inference_graph_exporter.InferenceDeviceOptions(\n device='tpu',\n retain_device_placement=False,\n var_options='ON_DEVICE',\n gen_init_op=True,\n dtype_override=None),\n export_path=filename_prefix + '_tpu.pbtxt')\n except Exception as e: # pylint: disable=broad-except\n tf.logging.info('Error exporting TPU inference graph: %s' % e)",
"def export_protobuf_graph(protobuf_model_path, export_location):\n with tf.Session() as persisted_sess:\n print(\"load graph\")\n with tf.gfile.GFile(protobuf_model_path,'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n persisted_sess.graph.as_default()\n tf.import_graph_def(graph_def, name='')\n writer = tf.summary.FileWriter(export_location, graph=persisted_sess.graph)",
"def save_paddle_inference_model(\n self,\n executor,\n scope,\n program,\n feeded_vars,\n target_vars,\n output_path,\n day,\n pass_id,\n hadoop_fs_name,\n hadoop_fs_ugi,\n hadoop_home=\"$HADOOP_HOME\",\n save_combine=True,\n ):\n day = str(day)\n pass_id = str(pass_id)\n model_name = \"inference_model\"\n # pull dense before save\n self.pull_all_dense_params(scope, program)\n if fleet.worker_index() == 0:\n with fluid.scope_guard(scope):\n if save_combine:\n paddle.static.io.save_inference_model(\n model_name,\n feeded_vars,\n target_vars,\n executor,\n program=program.clone(),\n )\n else:\n paddle.static.io.save_inference_model(\n model_name,\n feeded_vars,\n target_vars,\n executor,\n program=program.clone(),\n )\n\n configs = {\n \"fs.default.name\": hadoop_fs_name,\n \"hadoop.job.ugi\": hadoop_fs_ugi,\n }\n client = HDFSClient(hadoop_home, configs)\n\n if pass_id == \"-1\":\n dest = f\"{output_path}/{day}/base/dnn_plugin/\"\n else:\n dest = \"{}/{}/delta-{}/dnn_plugin/\".format(\n output_path,\n day,\n pass_id,\n )\n if not client.is_exist(dest):\n client.makedirs(dest)\n\n client.upload(model_name, dest, multi_processes=5, overwrite=True)\n\n fleet._role_maker._barrier_worker()",
"def main(hidden, width, epochs, tb, name):\r\n \r\n #Load training and validation data\r\n trainIn=np.loadtxt(\"fullTrainInput.txt\",delimiter=\"\\t\",skiprows=1)\r\n trainOut=np.loadtxt(\"fullTrainOutput.txt\",delimiter=\"\\t\",skiprows=1)\r\n valIn=np.loadtxt(\"fullValidateInput.txt\",delimiter=\"\\t\",skiprows=0)\r\n valOut=np.loadtxt(\"fullValidateOutput.txt\",delimiter=\"\\t\",skiprows=0)\r\n\r\n\r\n #Normalize the training and output data and collect the values used to do so\r\n normInfo, data = normalizeData(trainIn, trainOut, valIn, valOut) \r\n \r\n graph1=tf.Graph()\r\n with graph1.as_default():\r\n \r\n #Create the iterators used for training and validation\r\n #Path for tensorboard to save data\r\n if(tb is not None):\r\n STORE_PATH = os.path.join(os.getcwd(),tb)\r\n \r\n #hyper paramaters\r\n batch_size=128 \r\n learning_rate=0.001\r\n \r\n #dropout paramaters\r\n dropoutPercent=0.0\r\n rate=tf.placeholder(dtype=tf.float32, shape=(), name=\"rate\")\r\n \r\n #size of data\r\n train_size=len(trainIn[:,1])\r\n val_size=len(valIn[:,1])\r\n data_size=train_size+val_size\r\n \r\n #setup data pipelines\r\n (x_input, y_output, handle, training_iterator, validation_iterator) = build_input_pipeline(\r\n data, batch_size)\r\n\r\n #Create the neural network\r\n neural_net, logits = createNeuralNet(width, hidden, x_input, rate)\r\n\r\n #Print a network summary\r\n neural_net.summary()\r\n\r\n #Create the percent difference metric\r\n percentErr = percentError(normInfo[0][0], normInfo[0][1], y_output, logits)\r\n\r\n #Create the loss function and optimizer\r\n loss, train_op = setupOptimization(normInfo[0][0], normInfo[0][1], learning_rate, y_output, logits)\r\n\r\n\r\n init_op= tf.group(tf.global_variables_initializer(),\r\n tf.local_variables_initializer())\r\n\r\n #merge outputs for tensorboard\r\n if(tb is not None):\r\n merged = tf.summary.merge_all()\r\n\r\n with tf.Session(graph=graph1) as sess:\r\n if(tb is not None):\r\n writer = tf.summary.FileWriter(STORE_PATH, sess.graph) #Tensorboard writer\r\n \r\n sess.run(init_op)\r\n\r\n train_handle = sess.run(training_iterator.string_handle())\r\n validate_handle = sess.run(validation_iterator.string_handle())\r\n \r\n steps=ceil(train_size/batch_size) #Number of batches to get through all the data\r\n\r\n for j in range(epochs):\r\n averageLoss=0\r\n averageError=0\r\n\r\n #Run the training cycle\r\n for i in range(steps):\r\n loss_value, error_value, _ = sess.run([loss, percentErr, train_op],\r\n feed_dict={handle: train_handle, rate: dropoutPercent})\r\n\r\n averageLoss+=loss_value\r\n averageError+=error_value\r\n\r\n print(\"Epoch: {:>3d} Training loss: {:.5f} Training Error: {:.3f}\".format(\r\n j+1, averageLoss/steps, averageError/steps))\r\n\r\n #Run the validation cycle\r\n valid_iters=1 #Numer of runs through the validation data. 
Note:\r\n #adjusting this value will scale the output to \r\n #Tensorboard by the same amount\r\n\r\n averageLoss=0\r\n averageError=0\r\n if(tb is not None): #when writing to tensorboard\r\n for i in range(valid_iters):\r\n loss_value, error_value, summary = sess.run([loss, percentErr, merged],\r\n feed_dict={handle: validate_handle, rate: 0.0})\r\n averageLoss+=loss_value\r\n averageError+=error_value\r\n\r\n writer.add_summary(summary, j+1)\r\n else: #when not writing to tensorboard\r\n for i in range(valid_iters):\r\n loss_value, error_value = sess.run([loss, percentErr],\r\n feed_dict={handle: validate_handle, rate: 0.0})\r\n averageLoss+=loss_value\r\n averageError+=error_value\r\n \r\n print(\"Validation loss: {:.5f} Validation Percent Error: {:.3f} Iterations: {}\".format(\r\n averageLoss/valid_iters, averageError/valid_iters, valid_iters))\r\n \r\n #save the network\r\n if(name is not None):\r\n saver = tf.train.Saver()\r\n print('\\nSaving...')\r\n saver.save(sess, \"./\"+name)",
"def main(\n source: str,\n destination: str,\n checkpoint: str = \"pretrained/checkpoints/raft-sintel.ckpt\",\n ext: Optional[str] = None,\n overwrite: bool = False,\n iters: int = 24,\n visualize: bool = True,\n):\n destination = Path(destination)\n destination.mkdir(parents=True, exist_ok=overwrite)\n\n dataset = FlowInferenceDataset(source, ext=ext)\n dataloader = DataLoader(dataset, batch_size=1, num_workers=4)\n device = (\n torch.device(\"cuda\", 0) if torch.cuda.is_available() else torch.device(\"cpu\")\n )\n\n model = RAFT.load_from_checkpoint(checkpoint)\n model.to(device)\n\n for i, (img0, img1) in tqdm(enumerate(dataloader), total=len(dataset)):\n img0, img1 = img0.to(device), img1.to(device)\n\n padder = InputPadder(img0.shape)\n padded0, padded1 = padder.pad(img0, img1)\n _, flow = model(padded0, padded1, iters=iters, test_mode=True)\n\n assert flow.shape[0] == 1\n flow = padder.unpad(flow)[0]\n \n flow_raw_file = destination / f\"{i:06d}.flo\"\n optical_flow.write(flow_raw_file, flow)\n\n if visualize:\n img0 = img0[0] / 255.0\n img1 = img1[0] / 255.0\n flow_rgb = optical_flow.flow2rgb(flow)\n flow_rgb_file = flow_raw_file.with_suffix(\".png\")\n torchvision.utils.save_image([img0, img1, flow_rgb], flow_rgb_file)",
"def generate_tf_model(graph):\n\n # generate tensorflow model and export to out_file\n\n # with __dict__ we can see the content of the class\n logging.debug(graph.__dict__)\n\n # model_spec contains some info about the model\n for key, value in graph.model_spec.items():\n logging.debug(key)\n logging.debug(value)\n\n network_name = graph.model_spec['name']\n\n filename = get_database( 'benchmark', 'graphs' ,'tf2', network_name+'.pb')\n logging.debug(\"Stored to: %s\" % filename)",
"def inference():\r\n interpreter = MNN.Interpreter(\"../model/yolofastest.mnn\")\r\n interpreter.setCacheFile('.tempcache')\r\n config = {}\r\n # config['precision'] = 'low'\r\n\r\n # # create session\r\n # runtimeinfo, exists = MNN.Interpreter.createRuntime((config,))\r\n # print(runtimeinfo, exists)\r\n # session = interpreter.createSession(config, runtimeinfo)\r\n session = interpreter.createSession(config)\r\n\r\n # show session info\r\n # print('memory_info: %fMB' % interpreter.getSessionInfo(session, 0))\r\n # print('flops_info: %fM' % interpreter.getSessionInfo(session, 1))\r\n # print('backend_info: %d' % interpreter.getSessionInfo(session, 2))\r\n\r\n input_tensor = interpreter.getSessionInput(session)\r\n image = cv2.imread(\"../1.jpg\")\r\n # cv2 read as bgr format\r\n # image = image[..., ::-1]\r\n # change to rgb format\r\n image = cv2.resize(image, (INPUT_SIZE, INPUT_SIZE))\r\n # #resize to mobile_net tensor size\r\n # image = image - (103.94, 116.78, 123.68)\r\n # image = image * (0.017, 0.017, 0.017)\r\n # #preprocess it\r\n # image = image.transpose((2, 0, 1))\r\n # #change numpy data type as np.float32 to match tensor's format\r\n # image = image.astype(np.float32)\r\n # cv2 read shape is NHWC, Tensor's need is NCHW,transpose it\r\n tmp_input = MNN.Tensor((INPUT_SIZE, INPUT_SIZE, 3), MNN.Halide_Type_Float, \\\r\n image, MNN.Tensor_DimensionType_Tensorflow)\r\n input_tensor.copyFrom(tmp_input)\r\n interpreter.runSession(session)\r\n\r\n scores = \"layer125-conv\"\r\n scores2 = \"layer115-conv\"\r\n\r\n output_tensor0 = interpreter.getSessionOutput(session, scores)\r\n output_tensor1 = interpreter.getSessionOutput(session, scores2)\r\n\r\n # constuct a tmp tensor and copy/convert in case output_tensor is nc4hw4\r\n # tmp_output = MNN.Tensor((1, 1001), MNN.Halide_Type_Float, np.ones([1, 1001]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n tmp_output0 = MNN.Tensor((1, 75, 20, 20), MNN.Halide_Type_Float, \\\r\n np.ones([1, 75, 20, 20]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n tmp_output1 = MNN.Tensor((1, 75, 10, 10), MNN.Halide_Type_Float, \\\r\n np.ones([1, 75, 10, 10]).astype(np.float32), MNN.Tensor_DimensionType_Tensorflow)\r\n\r\n output_tensor0.copyToHostTensor(tmp_output0)\r\n output_tensor1.copyToHostTensor(tmp_output1)\r\n print(\"expect 983\")\r\n print(\"output belong to class: {}\".format(np.argmax(tmp_output0.getData())))\r\n print(\"output belong to class: {}\".format(np.argmax(tmp_output1.getData())))",
"def export_graph(filename, graph, num_obs, num_int):\n # Sample observational dataset\n data_obs = graph.sample(batch_size=num_obs, as_array=True)\n # Sample interventional dataset\n data_int = []\n for var_idx in range(graph.num_latents, graph.num_vars):\n var = graph.variables[var_idx]\n values = np.random.randint(var.prob_dist.num_categs, size=(num_int,))\n int_sample = graph.sample(interventions={var.name: values},\n batch_size=num_int,\n as_array=True)\n data_int.append(int_sample)\n # Stack all data\n data_int = np.stack(data_int, axis=0)\n data_obs = data_obs.astype(np.uint8)\n data_int = data_int.astype(np.uint8)\n adj_matrix = graph.adj_matrix\n # If the graph has latent variable, remove them from the dataset\n latents = graph.latents\n if graph.num_latents > 0:\n data_obs = data_obs[:, graph.num_latents:]\n data_int = data_int[:, :, graph.num_latents:]\n adj_matrix = adj_matrix[graph.num_latents:, graph.num_latents:]\n latents = latents - graph.num_latents # Correcting indices\n # Export and visualize\n np.savez_compressed(filename, data_obs=data_obs, data_int=data_int,\n adj_matrix=adj_matrix,\n latents=latents)\n if graph.num_vars <= 100:\n for i, v in enumerate(graph.variables):\n v.name = r\"$X_{%i}$\" % (i+1)\n visualize_graph(graph,\n filename=filename+\".pdf\",\n figsize=(8, 8),\n layout=\"graphviz\")",
"def process_graphs(args):\n os.makedirs(args.output_folder, exist_ok=True)\n\n for graph_type in args.graph_type:\n for graph_idx in range(args.num_graphs):\n seed = args.seed+graph_idx\n graph = create_graph(num_vars=args.num_vars,\n num_categs=args.num_categs,\n edge_prob=args.edge_prob,\n graph_type=graph_type,\n num_latents=args.num_latents,\n deterministic=args.deterministic,\n seed=seed)\n name = 'graph_%s_%i_%i' % (graph_type, args.num_vars, seed)\n if args.num_latents > 0:\n name += '_l%i' % (args.num_latents)\n export_graph(filename=os.path.join(args.output_folder, name),\n graph=graph,\n num_obs=args.num_obs,\n num_int=args.num_int)",
"def export_model(self):\n mode = utils.INFER\n graph = tf.Graph()\n with graph.as_default():\n infer_model = self.build_export_model()\n infer_model.sess = tf.Session(config=self.session_conf)\n infer_model.saver = tf.train.Saver()\n\n model_path = self.get_model_path(mode)\n infer_model.saver.restore(infer_model.sess, save_path=model_path)\n\n to_saved_model(self.config, infer_model.sess, infer_model.export_inputs,\n infer_model.output_dict)",
"def main(_):\n # Fix directories\n if tf.gfile.Exists(FLAGS.log_dir):\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\n tf.gfile.MakeDirs(FLAGS.log_dir)\n\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n n_samples = mnist.train.num_examples\n\n # Start training\n print \"Starting Session\"\n with tf.Session() as sess:\n # Instantiate Network\n vae = VAE()\n\n # Create a saver\n saver = tf.train.Saver(tf.all_variables())\n\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n\n # Run through the epochs\n for epoch in range(FLAGS.epochs):\n avg_cost = 0.\n total_batch = n_samples / FLAGS.batch_size\n\n # Loop over batches\n for i in range(total_batch):\n batch_x, _ = mnist.train.next_batch(FLAGS.batch_size)\n cost, _ = sess.run([vae.loss_val, vae.train_op], feed_dict={vae.X: batch_x})\n avg_cost += cost / (n_samples * FLAGS.batch_size)\n\n # Display step\n if epoch % FLAGS.display_step == 0:\n print \"Epoch:\", epoch, \" \" * 4, \"Average Cost:\", avg_cost\n checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, epoch)\n\n # Generate Reconstructed Pictures\n if FLAGS.z_dim > 2:\n x_sample = mnist.test.next_batch(FLAGS.batch_size)[0]\n x_reconstruct = vae.reconstruct(sess, x_sample)\n\n plt.figure(figsize=(8, 12))\n for i in range(5):\n plt.subplot(5, 2, 2 * i + 1)\n plt.imshow(x_sample[i+10].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Test input\")\n plt.colorbar()\n plt.subplot(5, 2, 2 * i + 2)\n plt.imshow(x_reconstruct[i+10].reshape(28, 28), vmin=0, vmax=1)\n plt.title(\"Reconstruction\")\n plt.colorbar()\n plt.tight_layout()\n plt.show()\n else:\n nx = ny = 20\n x_values = np.linspace(-3, 3, nx)\n y_values = np.linspace(-3, 3, ny)\n canvas = np.empty((28 * ny, 28 * nx))\n for i, yi in enumerate(x_values):\n for j, xi in enumerate(y_values):\n z_mu = np.tile(np.array([[xi, yi]]), (FLAGS.batch_size, 1))\n x_mean = vae.generate(sess, z_mu)\n canvas[(nx-i-1)*28:(nx-i)*28, j*28:(j+1)*28] = x_mean[0].reshape(28, 28)\n\n plt.figure(figsize=(8, 10))\n Xi, Yi = np.meshgrid(x_values, y_values)\n plt.imshow(canvas, origin=\"upper\")\n plt.tight_layout()\n plt.show()",
"def torch_to_onnx(\n self, output_fpath: str, model: Module, network_metadata: NetworkMetadata\n ):\n # Currently does not support exporting GPU models to onnx.\n device = model.device\n tokenizer = GPT2Tokenizer.from_pretrained(network_metadata.variant)\n input_ids = torch.tensor(\n [\n tokenizer.encode(\n \"Here is some text to encode Hello World\", add_special_tokens=True\n )\n ]\n ).to(device)\n\n gpt2_model = GPT2TorchFile.TorchModule(\n model.transformer, model.lm_head, model.config\n )\n\n inputs = GPT2ModelTRTConfig.get_input_dims(network_metadata)[\n GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME\n ]\n outputs = GPT2ModelTRTConfig.get_output_dims(network_metadata)[\n GPT2ModelTRTConfig.NETWORK_DECODER_SEGMENT_NAME\n ]\n\n # Exports to ONNX\n opt_args={}\n\n version_major = int((torch.__version__).split('.')[0])\n version_minor = int((torch.__version__).split('.')[1])\n if version_major < 1 or (version_major == 1 and version_minor < 11):\n opt_args['use_external_data_format'] = True\n if not network_metadata.other.kv_cache:\n # This code allows for huggingface compatible torch class to use onnx exporter\n # This code regulates the number of output = 1 if non kv-cache mode is used.\n # Otherwise it will automatically output key value pairs\n old_forward = gpt2_model.forward\n def _export_forward(input_ids, **kwargs):\n result = old_forward(input_ids, use_cache = False, **kwargs)\n return result[0]\n gpt2_model.forward = _export_forward\n\n torch.onnx.export(\n gpt2_model,\n input_ids,\n output_fpath,\n opset_version=13,\n do_constant_folding=True,\n input_names=inputs.get_names(),\n output_names=outputs.get_names(),\n dynamic_axes={\n **inputs.get_torch_dynamic_axis_encoding(),\n **outputs.get_torch_dynamic_axis_encoding(),\n },\n training=torch.onnx.TrainingMode.EVAL,\n **opt_args\n )\n else:\n decoder_output = gpt2_model(input_ids, use_cache = True)\n past_key_values = decoder_output[1]\n\n # Exporting the kv cache engine\n old_forward = gpt2_model.forward\n def _export_forward(input_ids, past_key_values, **kwargs):\n result = old_forward(input_ids, past_key_values=past_key_values, use_cache=True, **kwargs)\n return (result[0], result[1])\n gpt2_model.forward = _export_forward\n\n torch.onnx.export(\n gpt2_model,\n (input_ids, past_key_values),\n output_fpath,\n opset_version=13,\n do_constant_folding=True,\n input_names=inputs.get_names(),\n output_names=outputs.get_names(),\n dynamic_axes={\n **inputs.get_torch_dynamic_axis_encoding(),\n **outputs.get_torch_dynamic_axis_encoding(),\n },\n training=torch.onnx.TrainingMode.EVAL,\n **opt_args\n )\n\n return GPT2ONNXFile(output_fpath, network_metadata)",
"def plastic_package_graph():\n G = gt.Graph(directed=True)\n G.add_vertex(12)\n vid = G.new_vertex_property(\"string\")\n G.vertex_properties[\"id\"] = vid\n G.vp.id[0] = 'Farm'\n G.vp.id[1] = 'Packaging'\n G.vp.id[2] = 'Oil rig'\n G.vp.id[3] = 'Oil refinery'\n G.vp.id[4] = 'Stock 1'\n G.vp.id[5] = 'Production'\n G.vp.id[6] = 'Consumption'\n G.vp.id[7] = 'Waste'\n G.vp.id[8] = 'Burn'\n G.vp.id[9] = 'Recycling'\n G.vp.id[10] = 'Stock 2'\n G.vp.id[11] = 'Waste 2'\n flow = G.new_edge_property(\"object\")\n eid = G.new_edge_property(\"int\") # need a persistent edge id, because graph-tool can reindex the edges\n G.edge_properties[\"flow\"] = flow\n G.edge_properties[\"eid\"] = eid\n e = G.add_edge(G.vertex(0), G.vertex(1))\n G.ep.flow[e] = {'amount': 95,\n 'composition': {'cucumber': 0.3158, 'milk': 0.6842}}\n G.ep.eid[e] = 0\n e = G.add_edge(G.vertex(2), G.vertex(3))\n G.ep.flow[e] = {'amount': 20, 'composition': {'crude oil': 1.0}}\n G.ep.eid[e] = 1\n e = G.add_edge(G.vertex(3), G.vertex(4))\n G.ep.flow[e] = {'amount': 16, 'composition': {'petrol': 1.0}}\n G.ep.eid[e] = 2\n e = G.add_edge(G.vertex(3), G.vertex(5))\n G.ep.flow[e] = {'amount': 4, 'composition': {'plastic': 1.0}}\n G.ep.eid[e] = 3\n e = G.add_edge(G.vertex(5), G.vertex(1))\n G.ep.flow[e] = {'amount': 5, 'composition': {'plastic': 1.0}}\n G.ep.eid[e] = 4\n e = G.add_edge(G.vertex(1), G.vertex(6))\n G.ep.flow[e] = {'amount': 100,\n 'composition': {'plastic': 0.05, 'cucumber': 0.3,\n 'milk': 0.65}}\n G.ep.eid[e] = 5\n e = G.add_edge(G.vertex(6), G.vertex(7))\n G.ep.flow[e] = {'amount': 75, 'composition': {'human waste': 1.0}}\n G.ep.eid[e] = 6\n e = G.add_edge(G.vertex(6), G.vertex(8))\n G.ep.flow[e] = {'amount': 3, 'composition': {'plastic': 1.0}}\n G.ep.eid[e] = 7\n e = G.add_edge(G.vertex(6), G.vertex(9))\n G.ep.flow[e] = {'amount': 2, 'composition': {'plastic': 1.0}}\n G.ep.eid[e] = 8\n e = G.add_edge(G.vertex(9), G.vertex(10))\n G.ep.flow[e] = {'amount': 1, 'composition': {'waste': 1.0}}\n G.ep.eid[e] = 9\n e = G.add_edge(G.vertex(9), G.vertex(5))\n G.ep.flow[e] = {'amount': 1, 'composition': {'plastic': 1.0}}\n G.ep.eid[e] = 10\n e = G.add_edge(G.vertex(6), G.vertex(11))\n G.ep.flow[e] = {'amount': 20, 'composition': {'other waste': 1.0}}\n G.ep.eid[e] = 11\n split = _split_flows(G)\n return split",
"def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(infile), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')",
"def encode_data(data, labels, model, file_name= \"encoded.hdf5\"):\r\n\r\n model.summary()\r\n print(data.shape)\r\n\r\n print(\"Imagens processadas\")\r\n outputs = model.predict(data, batch_size=128, verbose=1)\r\n print(outputs.shape)\r\n \r\n f = h5py.File(file_name, mode ='w')\r\n f.create_dataset(\"encoded\", outputs.shape, np.float32)\r\n f[\"encoded\"][...] = outputs\r\n f.create_dataset(\"labels\", (outputs.shape[0],7), np.uint8)\r\n f[\"labels\"][...] = labels\r\n f.close()\r\n print('Finalizado')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
validate whether the checkpoint number exists
|
    def _validarCheckpoint(self, num_check):
        b = 0  # stays 0 unless the checkpoint file is found
        dirCheckpoint = os.path.join(os.getcwd(), 'projects/{}/training/'.format(self.nameProject))
        for root, dirs, files in os.walk(dirCheckpoint):
            for file_name in files:
                indexstr = file_name.find('model.ckpt-{}.meta'.format(num_check))
                if indexstr != -1:  # the file name matches, so the checkpoint exists
                    print('Found {}'.format('model.ckpt-{}.meta'.format(num_check)))
                    return 1  # return 1 to report that it exists
        return b
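    # Editorial note (not part of the original source): the same existence test
    # can be written without walking the directory tree by probing the .meta
    # file directly. This sketch assumes the same projects/<name>/training/
    # layout used above and is offered only for comparison.
    def _validarCheckpointDirect(self, num_check):
        meta_file = os.path.join(os.getcwd(), 'projects', self.nameProject, 'training',
                                 'model.ckpt-{}.meta'.format(num_check))
        return 1 if os.path.isfile(meta_file) else 0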
|
[
"def test_invalid_input_checkpoint_step(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input '\n '`checkpoint_step` is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(checkpoint_step=invalid_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint_step` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint_step` must be bigger than or equal to `1`.',\n msg=msg2\n )",
"def create_notebook_checkpoint(self, nb, path):\n self.log.info(\"Airavata.checkpounts.create_notebook_checkpoint: ('%s')\", path)",
"def test_invalid_run(self):\n probe_run = 123321\n self.assertTrue(probe_run not in RUNS)\n self.assertFalse(utils.valid_run(probe_run))",
"def do_check(self, title_and_filename):\n try:\n t_n = shlex.split(title_and_filename)\n recorder.createCheckpoint(t_n[0], t_n[1])\n except:\n print \"invalid argument\"",
"def test_checkpoint_empty(self):\n\n consumer = ELDConsumer(Queue(), 60, log_nutrition=True)\n document = Document('is not', { }, attributes={ 'timestamp': 10 })\n checkpoint = consumer._checkpoint(document)\n self.assertEqual({ }, checkpoint)",
"def _DoSanityCheck(self, prefix):\n if not self._sanity_checks:\n return\n reader = tf.train.NewCheckpointReader(prefix)\n checks = collections.defaultdict(lambda: [])\n for variables, rule in self._sanity_checks:\n for v in variables:\n key = _VarKey(v)\n checks[key].append(rule)\n for key, rules in checks.items():\n value = reader.get_tensor(key)\n for rule in rules:\n if not rule.Check(value):\n # TODO(zhifengc): Maybe should return an explicit signal\n # so that the caller (the controller loop) can Restore()\n # the latest checkpoint before raise the error.\n msg = f\"Checkpoint sanity check failed: {prefix} {key} {rule}\\n\"\n # Also saves the error message into a file.\n file_io.write_string_to_file(\"{}.failed\".format(prefix), msg)\n raise tf.errors.AbortedError(None, None, msg)",
"def create_checkpoint(self) -> Checkpoint:\n pass",
"def add_checkpoint(self, checkpoint_id, name, checkpoint_number):\n\n self._checkpoints.append(\n (self._model.get_item('checkpoint', \n checkpoint_id, True, True), name))\n\n index = 0\n for item in self._open:\n if item[1] == checkpoint_number:\n self._open[index] = (name, checkpoint_number)\n return\n elif item[1] < checkpoint_number:\n index += 1\n else:\n break\n self._open.insert(index, (name, checkpoint_number))",
"def delete_checkpoint(self, notebook_id, checkpoint_id):\n doc = self.collection.find_one( { '_id' : notebook_id })\n if doc:\n if 'ipynb_chkpt' in doc:\n self.collection.update( { '_id' : notebook_id },\n { '$unset' : { 'ipynb_chkpt' : 1,\n 'chkpt_created' : 1}})\n else:\n raise web.HTTPError(404,\n u'Notebook checkpoint does not exist: %s' % notebook_id)\n else:\n raise web.HTTPError(404,\n u'Notebook %s does not exist' % notebook_id)",
"def check_valid_data():\n # check final information\n if self.train_number < 1 or not self.start_station\\\n or not self.end_station:\n print_error_and_exit('data')\n # check if the start point is the same as end point\n if self.start_station == self.end_station\\\n or self.start_station == self.end_station.alter_station:\n print_error_and_exit('end')",
"def get_checkpoint(self, sequence_id: int) -> Checkpoint:\n pass",
"def testPostRestoreCheckpointExistence(self):\n self.assertTrue(os.path.isfile(self.checkpoint_path))\n tune.run(\n \"PG\",\n name=\"TuneRestoreTest\",\n stop={\"training_iteration\": 2},\n checkpoint_config=CheckpointConfig(\n num_to_keep=1,\n checkpoint_frequency=1,\n ),\n restore=self.checkpoint_parent,\n config={\n \"env\": \"CartPole-v0\",\n \"framework\": \"tf\",\n },\n )\n self.assertTrue(os.path.isfile(self.checkpoint_path))",
"def has_checkpoint():\n checkpoint_dir = get_checkpoint_dir()\n if not pathmgr.exists(checkpoint_dir):\n return False\n return any(_NAME_PREFIX in f for f in pathmgr.ls(checkpoint_dir))",
"def test_default(self):\n default_num_to_keep = 20\n num_epochs = 30\n target = list(range(num_epochs - default_num_to_keep, num_epochs))\n\n checkpointer = Checkpointer(serialization_dir=self.TEST_DIR)\n\n for e in range(num_epochs):\n checkpointer.save_checkpoint(epoch=e,\n model_state={\"epoch\": e},\n training_states={\"epoch\": e},\n is_best_so_far=False)\n models, training = self.retrieve_and_delete_saved()\n assert models == training == target",
"def test_create_checkpoint_scale(self):\n\n consumer = ELDConsumer(Queue(), 60)\n with open(os.path.join(os.path.dirname(__file__), 'corpus.json'), 'r') as f:\n lines = f.readlines()\n tweets = [ json.loads(line) for line in lines ]\n documents = consumer._to_documents(tweets)\n document = Document.concatenate(*documents, tokenizer=consumer.tokenizer)\n checkpoint = consumer._checkpoint(document)\n self.assertLessEqual(0, min(checkpoint.values()))\n self.assertEqual(1, max(checkpoint.values()))",
"def check_version(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n if (row['Version'] is not None) and (not isinstance(row['Version'], float)) and (\n not isinstance(row['Version'], int)):\n check += 1\n error = \"Version number not a valid entry, see row %s in file\" % (row_index + 4)\n error_details.append(error)",
"def check_step_file(filename, steplist):\n #checks file for existing data\n #and returns number of runs left to do\n #for each # of does in steplist\n runs = {}\n for step in steplist:\n runs[step] = 0\n if not 'cuda' in filename:\n raise Exception(filename)\n\n try:\n with open(filename, 'r') as file:\n lines = [line.strip() for line in file.readlines()]\n for line in lines:\n try:\n vals = line.split(',')\n if len(vals) == 2:\n vals = [float(v) for v in vals]\n runs[vals[0]] += 1\n except:\n pass\n return runs\n except:\n return runs",
"def set_checkpoint(self, name=\"\"):\n\n if self._num_checkpoints == self._max_checkpoints:\n self._checkpoints.pop(0)\n self._num_checkpoints -= 1\n\n self._checkpoints.append((self.copy(), name))\n self._num_checkpoints += 1",
"def test_validate_nwb_error(simple3_nwb: Path) -> None:\n validation_result = validate(simple3_nwb)\n assert len([i for i in validation_result if i.severity]) > 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given nonsequential nodes, elements, boundary elements containing homogeneous displacements in [1 .. n_space_dimensions], and an update_ratio in (0, 1), returns the nodes in updated positions.
|
def smooth_neighbor_nonweighted(*, nodes, elements, boundary, update_ratio):
assert update_ratio > 0.0 and update_ratio < 1.0
displacements = dict() # empty prior to update
boundary_keys = boundary.keys()
elements_wo_element_number = tuple([x[1:] for x in elements])
adj = adjacencies_upper_diagonal(xs=elements_wo_element_number)
# loop over all nodes in mesh
for node_key, node_values in nodes.items():
update = []
connected_node_labels = tuple(
y[0] if y[0] != int(node_key) else y[1]
for y in tuple(filter(lambda x: int(node_key) in x, adj))
)
if node_key in boundary_keys:
            # node with at least one fixed dof
            # number of space dimensions at this node
            node_nsd = len(node_values)
            # assume all dof at the node are active (non-fixed) as the default
            dof_fixity = [item for item in repeat(False, node_nsd)]
            node_dof_fixed = boundary[node_key]
            for item in node_dof_fixed:
                dof_fixity[item - 1] = True  # flip to be a fixed dof, 0-index Python
for i, fixed in enumerate(dof_fixity):
if not fixed:
# dof is not fixed
# position of subject node
p_subject = node_values[i]
# positions for degree of freedom i for connected nodes qs
qs = [nodes[str(k)][i] for k in connected_node_labels]
num_connections = len(qs)
delta = (1.0 / num_connections) * sum(qs) - p_subject
delta = delta * update_ratio
else:
# dof is fixed
delta = 0.0
# for both fixed and not fixed, append
update.append(delta)
displacements[node_key] = tuple(update)
else:
# fully unconstrained node, all dof are active, no dof are fixed
p_subject = node_values
np_p_subject = np.array(p_subject)
qs = [nodes[str(k)] for k in connected_node_labels]
num_connections = len(qs)
np_qs = np.array(qs)
sum_np_qs = sum(np_qs)
deltas = (1.0 / num_connections) * sum_np_qs - np_p_subject
deltas = deltas * update_ratio
displacements[node_key] = tuple(deltas)
return displacements
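# Editorial usage sketch (not part of the original module): a toy three-triangle
# fan with the outer corners fully fixed and one interior node free. It assumes
# numpy, itertools.repeat, and the adjacencies_upper_diagonal helper used above
# are importable from this module; the coordinates and element numbering are
# illustrative only. Note that the function returns per-node displacement deltas,
# which are added to the input coordinates to obtain the smoothed positions.
if __name__ == "__main__":
    toy_nodes = {"1": (0.0, 0.0), "2": (1.0, 0.0), "3": (0.5, 1.0), "4": (0.4, 0.2)}
    toy_elements = ((1, 1, 2, 4), (2, 2, 3, 4), (3, 3, 1, 4))  # (elem_id, n1, n2, n3)
    toy_boundary = {"1": (1, 2), "2": (1, 2), "3": (1, 2)}  # both dof fixed at corners
    deltas = smooth_neighbor_nonweighted(
        nodes=toy_nodes, elements=toy_elements, boundary=toy_boundary, update_ratio=0.1
    )
    # apply the returned displacements to get the updated (smoothed) positions
    smoothed = {
        key: tuple(p + d for p, d in zip(toy_nodes[key], deltas[key])) for key in toy_nodes
    }
    print(smoothed)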
|
[
"def _divideElement(self, elemID, nPerElement, maxElemId, keysNotToCopy=[]): \n if len(self.Modes)>0:\n raise Exception('Cannot divide graph when mode data is present')\n if len(self.Motions)>0:\n raise Exception('Cannot divide graph when motion data is present')\n\n\n maxNodeId=np.max([n.ID for n in self.Nodes])\n e = self.getElement(elemID)\n newElems = []\n if len(e.nodes)==2:\n n1=e.nodes[0]\n n2=e.nodes[1]\n subNodes=[n1]\n for iSub in range(1,nPerElement):\n maxNodeId += 1\n #data_dict = n1.data.copy()\n data_dict = dict()\n fact = float(iSub)/nPerElement\n # Interpolating position\n x = n1.x*(1-fact)+n2.x*fact\n y = n1.y*(1-fact)+n2.y*fact\n z = n1.z*(1-fact)+n2.z*fact\n # Interpolating data (only if floats)\n for k,v in n1.data.items():\n if k not in keysNotToCopy:\n try:\n data_dict[k] = n1.data[k]*(1-fact) + n2.data[k]*fact\n except:\n data_dict[k] = n1.data[k]\n ni = Node(maxNodeId, x, y, z, **data_dict)\n subNodes.append(ni)\n self.addNode(ni)\n subNodes+=[n2]\n e.nodes =subNodes[0:2]\n e.nodeIDs=[e.ID for e in e.nodes]\n for i in range(1,nPerElement):\n maxElemId+=1\n elem_dict = e.data.copy()\n # Creating extra properties if necessary\n if e.propIDs is not None:\n if all(e.propIDs==e.propIDs[0]):\n # No need to create a new property\n propIDs=e.propIDs\n propset=e.propset\n else:\n raise NotImplementedError('Division of element with different properties on both ends. TODO add new property.')\n elem= Element(maxElemId, [subNodes[i].ID, subNodes[i+1].ID], propset=propset, propIDs=propIDs, **elem_dict )\n newElems.append(elem)\n return newElems",
"def interpolated_displacements(self):\n xvals = []\n yvals = []\n nat_space = np.linspace(-1, 1)\n for elem in self.elements:\n elem_nodes = self.conn[elem.num][:elem.num_points]\n elem_coords = self.coords[elem_nodes]\n elem_disp = self._disp[elem_nodes]\n for xi in nat_space:\n xvals.append(elem.mapping(xi, elem_coords))\n yvals.append(np.dot(elem_disp, elem.shape(xi)))\n return xvals, yvals",
"def updateNodes(self, p_id):\n \n prev_face_recog_rate = self.face_recognition_rate\n \n# init_I_priors = self.r_bn.cpt(self.I)[:]\n\n # Erase I and F\n self.r_bn.erase(self.I)\n self.r_bn.erase(self.F) \n \n # Change and add nodes\n # Face node\n self.face_node = gum.LabelizedVariable(\"F\",\"Face\",0)\n\n for counter in range(0, len(self.i_labels)):\n self.face_node.addLabel(self.i_labels[counter]) \n self.F = self.r_bn.add(self.face_node)\n self.node_ids[\"F\"] = self.F\n # Identity node\n self.identity_node = gum.LabelizedVariable(\"I\",\"Identity\",0)\n for counter in range(0, len(self.i_labels)):\n self.identity_node.addLabel(self.i_labels[counter]) \n self.I = self.r_bn.add(self.identity_node) \n self.node_ids[\"I\"] = self.I\n \n self.addArcs()\n \n # Change CPT\n updated_cpt_I = []\n \n # copy previous likelihoods back into the network for G, A, H, and T\n self.r_bn.cpt(self.G)[:-1] = [i[1] for i in self.cpt_matrix]\n self.r_bn.cpt(self.A)[:-1] = [i[2] for i in self.cpt_matrix]\n self.r_bn.cpt(self.H)[:-1] = [i[3] for i in self.cpt_matrix]\n self.r_bn.cpt(self.T)[:-1] = [i[4] for i in self.cpt_matrix]\n\n for counter in range(0, len(self.i_labels)):\n if counter < len(self.i_labels) - 1:\n \n # THIS UPDATES ALL LIKELIHOODS TO BE (IF NO ONLINE LEARNING): P(F=f|I=i) = face_recognition_rate^weight_F if f=i, P(F=f|I=i) = ((1 - face_recognition_rate)/(num_people-1))^weight_F if f!=i\n # BUT IT DOESN'T PERFORM AS GOOD AS UPDATING AS IN 'ELSE' CONDITION\n # if (self.update_prob_unknown_method == \"none\" and counter == self.i_labels.index(self.unknown_var)) or self.update_prob_method == \"none\" or (self.update_partial_params is not None and \"F\" not in self.update_partial_params):\n \n # the below method UPDATES ONLY UNKNOWN LIKELIHOOD TO BE (IF NO ONLINE LEARNING): P(F=f|I=i) = face_recognition_rate^weight_F if f=i, P(F=f|I=i) = ((1 - face_recognition_rate)/(num_people-1))^weight_F if f!=i\n if (self.update_prob_unknown_method == \"none\" or self.update_prob_method == \"none\") and counter == self.i_labels.index(self.unknown_var):\n\n li_f = [self.applyWeight((1 - self.face_recognition_rate)/(len(self.i_labels)-1),self.weights[0]) for x in range(0, len(self.i_labels))]\n li_f[counter] = self.applyWeight(self.face_recognition_rate, self.weights[0])\n \n norm_li_f = self.normaliseSum(li_f)\n self.cpt_matrix[counter][0] = norm_li_f[:]\n else:\n # If the user is never seen before, update the likelihood of that user to =(1-face_recognition_rate)/(num-people-1)\n if self.occurrences[counter][0] == 0:\n for ff in range(0, len(self.i_labels)-1):\n if np.isclose(self.cpt_matrix[counter][0][ff], (1-prev_face_recog_rate)/(len(self.i_labels)-2)):\n self.cpt_matrix[counter][0][ff] = (1-self.face_recognition_rate)/(len(self.i_labels)-1)\n updated_cpt_F = self.cpt_matrix[counter][0][:]\n else:\n # if the user is previously seen, then update the likelihoods by computing the original likelihood by multiplying with occurrence \n # and then adding the new user likelihood, then normalising. \n # i.e. 
P(F=f|I=i)_total = [P(F=f|I=i)*num_occurrence(f)].append(1-face_recognition_rate)/(num_people-1)) and normalise \n \n if self.update_prob_method == \"avg\":\n occur = self.occurrences[counter][0] + 1\n else: #self.update_prob_method == \"sum\" or self.update_prob_method == \"evidence\" or self.update_prob_method == \"none\":\n occur = self.occurrences[counter][2] + 1\n \n updated_cpt_F = [i*occur for i in self.cpt_matrix[counter][0] ]\n updated_cpt_F = np.append(updated_cpt_F, [(1-self.face_recognition_rate)/(len(self.i_labels)-1)]) \n updated_cpt_F = self.normaliseSum(updated_cpt_F)\n self.cpt_matrix[counter][0] = updated_cpt_F[:]\n else:\n self.addLikelihoods(counter)\n self.r_bn.cpt(self.F)[:] = [i[0] for i in self.cpt_matrix]\n \n # update P(I)\n self.r_bn.cpt(self.I)[:] = self.updatePriorI()",
"def pythran_itrtr_old(pos, pos_nds, A, row_order, dim_ar, t, def_itr, rep_nd_brd_start, k, height, width, grav_multiplier):\n\n t_func_start = time()\n\n dt = t/def_itr\n\n nbr_nds = pos_nds.shape[0]\n nbr_pts = pos.shape[0]\n\n max_iter = 500\n ctr = 0\n\n center = np.array((width/2, height/2))\n # grav_multiplier = 10\n\n \n t_loop_start = time()\n \n while True: \n # t_itr_start = time()\n \n delta_nds = pos_nds[:, np.newaxis, :] - pos_nds[np.newaxis, :, :]\n \n # calculate distances based on whether to use node borders or not\n if t < dt * def_itr * rep_nd_brd_start:\n\n # print('repellant node borders now')\n distance, both_ovlp_cnt = pythran_dist_old(pos, row_order, nbr_nds, nbr_pts)\n # print('both_ovlp: ', both_ovlp_cnt)\n \n \n else: \n # print('nodes as points')\n distance = np.sqrt(np.sum(delta_nds**2, axis = -1))\n \n distance[distance == 0] = 1\n # print('distance: ', distance)\n t_last_point_calc_itr = time()\n \n # t_dist_calced = time()\n\n force_ar = (k * k / distance**2) - A * distance / k\n displacement = (delta_nds * force_ar[:, :, None]).sum(axis=1)\n\n # t_displacement_done = time()\n \n # ------------- repellant borders, could be functionalized\n # why do i not get division by 0 error here? \n\n dispx1 = np.copy(displacement[:,0]) + (k*10)**2/(pos_nds[:,0] - dim_ar[:,0]/2)**2\n dispx2 = dispx1 - (k*10)**2/(width - (pos_nds[:,0] + dim_ar[:,0]/2))**2\n\n dispy1 = np.copy(displacement[:,1]) + (k*10)**2/(pos_nds[:,1] - dim_ar[:,1]/2)**2\n dispy2 = dispy1 - (k*10)**2/(height - (pos_nds[:,1] + dim_ar[:,1]/2))**2\n\n displacement = np.concatenate([dispx2[:,None], dispy2[:,None]], axis = 1)\n\n # t_repellent_borders_done = time()\n # -------- gravity\n\n center_vec = center - pos_nds\n\n sum_vec = np.abs(np.sum(center_vec, axis =1))\n # prevent division by 0 error\n sum_vec[sum_vec == 0] = 1\n \n gravity_vec = (center_vec/sum_vec[:,None])*grav_multiplier\n displacement = displacement + gravity_vec\n \n # t_grav_done = time()\n \n # --------------- delta calcs\n\n length = np.sqrt(np.sum(displacement**2, axis = -1))\n length = np.where(length < 0.01, 0.1, length)\n\n len_ar = t/length\n delta_pos = displacement * len_ar[:,None]\n \n # t_deltas_done = time()\n \n # ---------- update node positions\n # print('pos_nds v2: ', pos_nds)\n # print('delta_pos: ', delta_pos)\n \n pos_nds += delta_pos\n \n # print('pos_nds v3: ', pos_nds)\n\n delta_pos_xtnd = np.hstack([delta_pos]*4).reshape((nbr_pts, 2))\n pos += delta_pos_xtnd\n \n # debugging test\n # if math.isnan(pos[0][0]):\n # break\n \n # max iterations limit\n ctr +=1\n \n # t_updates_done = time()\n \n # t_itr_ttl = t_updates_done - t_itr_start\n # t_dist_calced_prd = t_dist_calced - t_itr_start\n # t_displacement_done_prd = t_displacement_done - t_dist_calced\n # t_repellent_borders_done_prd = t_repellent_borders_done - t_displacement_done\n # t_grav_done_prd = t_grav_done - t_repellent_borders_done\n # t_deltas_done_prd = t_deltas_done - t_grav_done\n # t_updates_done_prd = t_updates_done - t_deltas_done\n \n \n\n # print('-------------------------')\n # print('t_dist_calced_prd: ', t_dist_calced_prd, round(t_dist_calced_prd/t_itr_ttl,3))\n # print('t_displacement_done_prd: ', t_displacement_done_prd, round(t_displacement_done_prd/t_itr_ttl,3))\n # print('t_repellent_borders_done_prd: ', t_repellent_borders_done_prd, round(t_repellent_borders_done_prd/t_itr_ttl,3))\n # print('t_grav_done_prd: ', t_grav_done_prd, round(t_grav_done_prd/t_itr_ttl, 3))\n # print('t_deltas_done_prd: ', t_deltas_done_prd, 
round(t_deltas_done_prd/t_itr_ttl, 3))\n # print('t_updates_done_prd: ', t_updates_done_prd, round(t_updates_done_prd/t_itr_ttl, 3))\n # print(ctr)\n \n if ctr == max_iter:\n break\n \n # see if any nodes violate boundaries\n\n canvas_boundaries_crossed = 0\n \n min_x = np.min(pos[:,0])\n max_x = np.max(pos[:,0])\n\n min_y = np.min(pos[:,1])\n max_y = np.max(pos[:,1])\n \n if min_x < 0 or min_y < 0 or max_x > width or max_y > height: \n canvas_boundaries_crossed = 1\n\n # reduce temperature in first phase (no node borders)\n if t > (dt * def_itr * rep_nd_brd_start):\n t -= dt\n\n # reduce temp in second phase if nodes don't overlap and boundaries not \n else: \n if both_ovlp_cnt == nbr_nds and canvas_boundaries_crossed == 0:\n t -= dt\n\n if t < 0:\n \n t_func_end = time()\n t_func_duration = t_func_end - t_func_start\n t_point_period = t_last_point_calc_itr - t_func_start\n t_border_period = t_func_end - t_last_point_calc_itr\n \n print('iterations required: ', ctr)\n print('point period part: ', t_point_period, t_point_period/t_func_duration)\n print('border period: ', t_border_period, t_border_period/t_func_duration)\n \n break\n \n return pos_nds, pos, ctr",
"def update_reference_points(self):\n\n # No commen reference point\n if len(self)==0:\n return\n\n self.intersection_matrix = Intersection_matrix(len(self))\n self.reset_reference_points()\n now = time.time()\n self.update_boundary_reference_points()\n delta_t = time.time() - now\n # print(\"calculation time for num_obs={} is: {}ms\".format(\n # len(self), delta_t))\n\n obs_reference_size = np.zeros(len(self))\n\n for ii in range(len(self)):\n obs_reference_size[ii] = self[ii].get_reference_length()\n\n for ii in range(len(self)):\n # Boundaries have constant center\n if self[ii].is_boundary:\n continue\n \n distances = np.zeros(len(self))\n for jj in range(len(self)):\n if ii==jj:\n # DistanceMatrix[ii, jj] = \\\n distances[jj] = -1\n else:\n # DistanceMatrix[ii, jj] =\n distances[jj] = self.get_distance(ii, jj)\n\n weights = get_reference_weight(distances, obs_reference_size)\n\n # print('weights', np.round(weights, 2))\n if np.sum(weights):\n reference_point = np.zeros(self[ii].dim)\n for jj in range(len(self)):\n if ii==jj or weights[jj]:\n continue\n \n ref = self.get_boundary_reference_point(ii, jj)\n reference_point = (reference_point \n + self[ii].transform_global2relative(ref)*weights[jj])\n self[ii].set_reference_point(reference_point, in_global_frame=False)\n \n else:\n self[ii].set_reference_point(np.zeros(self[ii].dim), in_global_frame=False)\n \n # TODO: create a more smooth transition between 'free' obstacles and 'clustered ones'\n # TODO: include the 'extended' hull as a deformation parameter\n\n # import pdb; pdb.set_trace() ##### DEBUG ##### \n # Combine reference points of obstacles in each cluster\n intersecting_obs = get_intersection_cluster(self.intersection_matrix, self)\n # self.assign_sibling_groups(intersecting_obs)\n\n get_single_reference_point(self, intersecting_obs, self.intersection_matrix)\n\n if False: # NOT USED ANYMORE (REMOVE)\n # for cluster_intersecting in intersecting_obs:\n weight_obs = np.zeros(len(cluster_intersecting))\n ref_points_obs = np.zeros((self.dim, len(cluster_intersecting)))\n\n for oo, ii in zip(cluster_intersecting, range(len(cluster_intersecting))):\n ref_points_obs[:, ii] = self[oo].center_position\n weight_obs[ii] = self[oo].get_reference_length()\n\n weight_obs = weight_obs/np.sum(weight_obs)\n \n ref_point = np.sum(ref_points_obs * np.tile(weight_obs, (self.dim, 1)), axis=1)\n \n for oo in cluster_intersecting:\n self[oo].set_reference_point(ref_point, in_global_frame=True)\n\n # Indicate that no obstacle has moved (since last reference-point search).\n self.reset_obstacles_have_moved()",
"def __get_repulsive_force(self, robot_cell, robot_map):\n circle = filled_midpoint_circle(robot_cell.x, robot_cell.y, self.__radius_obs)\n closest_obstacles = [None] * self.__max_obs\n min_dists = [inf] * self.__max_obs\n for point in circle:\n if robot_map.is_in_bound(point) and robot_map.grid[point.x][point.y] >= 0.75:\n dist = hypot(robot_cell.x - point.x, robot_cell.y - point.y)\n for i in range(self.__max_obs):\n if dist < min_dists[i]:\n for ii in range(self.__max_obs - 1, i + 2, -1):\n min_dists[ii] = min_dists[ii - 1]\n closest_obstacles[ii] = closest_obstacles[ii - 1]\n min_dists[i] = dist\n closest_obstacles[i] = point\n break\n result = {'x': 0, 'y': 0}\n for obstacle in closest_obstacles:\n if obstacle != None:\n dist = hypot(robot_cell.x - obstacle.x, robot_cell.y - obstacle.y)\n rep_factor = min(0.9, abs(self.__radius_obs - dist) / self.__radius_obs)\n length = -2 * log10(1 - rep_factor) * self.__weight_rep\n dx = obstacle.x - robot_cell.x\n dy = obstacle.y - robot_cell.y\n angle = atan2(dy, dx)\n result['x'] += -length * cos(angle)\n result['y'] += -length * sin(angle)\n return result",
"def divideElements(self, nPerElement, excludeDataKey='', excludeDataList=[], method='append', keysNotToCopy=[]): \n maxNodeId=np.max([n.ID for n in self.Nodes])\n maxElemId=np.max([e.ID for e in self.Elements])\n\n if nPerElement<=0:\n raise Exception('nPerElement should be more than 0')\n\n newElements=[]\n for ie in np.arange(len(self.Elements)): # cannot enumerate since length increases\n elemID = self.Elements[ie].ID\n if method=='insert':\n newElements+=[self.getElement(elemID)] # newElements contains\n if (len(excludeDataKey)>0 and self.Elements[ie].data[excludeDataKey] not in excludeDataList) or len(excludeDataKey)==0:\n elems = self._divideElement(elemID, nPerElement, maxElemId, keysNotToCopy)\n maxElemId+=len(elems)\n newElements+=elems\n else:\n print('Not dividing element with ID {}, based on key `{}` with value `{}`'.format(elemID, excludeDataKey,self.Elements[ie].data[excludeDataKey]))\n # Adding elements at the end\n if method=='append':\n pass\n elif method=='insert':\n self.Elements=[] # We clear all elements\n else:\n raise NotImplementedError('Element Insertions')\n\n for e in newElements:\n self.addElement(e)\n\n # Trigger, remove precomputed values related to connectivity:\n self.connecticityHasChanged()\n\n return self",
"def pythran_itrtr(pos, pos_nds, A, row_order, dim_ar, t, def_itr, rep_nd_brd_start, k, height, width, grav_multiplier):\n\n t_func_start = time()\n\n dt = t/def_itr\n\n nbr_nds = pos_nds.shape[0]\n nbr_pts = pos.shape[0]\n\n max_iter = 500\n ctr = 0\n\n center = np.array((width/2, height/2))\n # grav_multiplier = 10\n\n \n # t_loop_start = time()\n \n while True: \n t_itr_start = time()\n \n delta_nds = pos_nds[:, np.newaxis, :] - pos_nds[np.newaxis, :, :]\n \n # calculate distances based on whether to use node borders or not\n if t < dt * def_itr * rep_nd_brd_start:\n\n # print('repellant node borders now')\n distance, both_ovlp_cnt = pythran_dist(pos, nbr_nds, nbr_pts)\n # print('both_ovlp: ', both_ovlp_cnt)\n \n \n else: \n # print('nodes as points')\n distance = np.sqrt(np.sum(delta_nds**2, axis = -1))\n \n distance[distance == 0] = 1\n # print('distance: ', distance)\n t_last_point_calc_itr = time()\n \n # t_dist_calced = time()\n\n force_ar = (k * k / distance**2) - A * distance / k\n displacement = (delta_nds * force_ar[:, :, None]).sum(axis=1)\n\n # t_displacement_done = time()\n \n # ------------- repellant borders, could be functionalized\n # why do i not get division by 0 error here? \n\n dispx1 = np.copy(displacement[:,0]) + (k*10)**2/(pos_nds[:,0] - dim_ar[:,0]/2)**2\n dispx2 = dispx1 - (k*10)**2/(width - (pos_nds[:,0] + dim_ar[:,0]/2))**2\n\n dispy1 = np.copy(displacement[:,1]) + (k*10)**2/(pos_nds[:,1] - dim_ar[:,1]/2)**2\n dispy2 = dispy1 - (k*10)**2/(height - (pos_nds[:,1] + dim_ar[:,1]/2))**2\n\n displacement = np.concatenate([dispx2[:,None], dispy2[:,None]], axis = 1)\n\n # t_repellent_borders_done = time()\n # -------- gravity\n\n center_vec = center - pos_nds\n\n sum_vec = np.abs(np.sum(center_vec, axis =1))\n # prevent division by 0 error\n sum_vec[sum_vec == 0] = 1\n \n gravity_vec = (center_vec/sum_vec[:,None])*grav_multiplier\n displacement = displacement + gravity_vec\n \n # t_grav_done = time()\n \n # --------------- delta calcs\n\n length = np.sqrt(np.sum(displacement**2, axis = -1))\n length = np.where(length < 0.01, 0.1, length)\n\n len_ar = t/length\n delta_pos = displacement * len_ar[:,None]\n \n # t_deltas_done = time()\n \n # ---------- update node positions\n # print('pos_nds v2: ', pos_nds)\n # print('delta_pos: ', delta_pos)\n \n pos_nds += delta_pos\n \n # print('pos_nds v3: ', pos_nds)\n\n delta_pos_xtnd = np.hstack([delta_pos]*4).reshape((nbr_pts, 2))\n pos += delta_pos_xtnd\n \n # debugging test\n # if math.isnan(pos[0][0]):\n # break\n \n # max iterations limit\n ctr +=1\n \n # t_updates_done = time()\n \n # t_itr_ttl = t_updates_done - t_itr_start\n # t_dist_calced_prd = t_dist_calced - t_itr_start\n # t_displacement_done_prd = t_displacement_done - t_dist_calced\n # t_repellent_borders_done_prd = t_repellent_borders_done - t_displacement_done\n # t_grav_done_prd = t_grav_done - t_repellent_borders_done\n # t_deltas_done_prd = t_deltas_done - t_grav_done\n # t_updates_done_prd = t_updates_done - t_deltas_done\n \n \n\n # print('-------------------------')\n # print('t_dist_calced_prd: ', t_dist_calced_prd, round(t_dist_calced_prd/t_itr_ttl,3))\n # print('t_displacement_done_prd: ', t_displacement_done_prd, round(t_displacement_done_prd/t_itr_ttl,3))\n # print('t_repellent_borders_done_prd: ', t_repellent_borders_done_prd, round(t_repellent_borders_done_prd/t_itr_ttl,3))\n # print('t_grav_done_prd: ', t_grav_done_prd, round(t_grav_done_prd/t_itr_ttl, 3))\n # print('t_deltas_done_prd: ', t_deltas_done_prd, round(t_deltas_done_prd/t_itr_ttl, 3))\n # 
print('t_updates_done_prd: ', t_updates_done_prd, round(t_updates_done_prd/t_itr_ttl, 3))\n # print(ctr)\n \n if ctr == max_iter:\n break\n \n # see if any nodes violate boundaries\n\n canvas_boundaries_crossed = 0\n \n min_x = np.min(pos[:,0])\n max_x = np.max(pos[:,0])\n\n min_y = np.min(pos[:,1])\n max_y = np.max(pos[:,1])\n \n if min_x < 0 or min_y < 0 or max_x > width or max_y > height: \n canvas_boundaries_crossed = 1\n\n # reduce temperature in first phase (no node borders)\n if t > (dt * def_itr * rep_nd_brd_start):\n t -= dt\n\n # reduce temp in second phase if nodes don't overlap and boundaries not \n else: \n if both_ovlp_cnt == nbr_nds and canvas_boundaries_crossed == 0:\n t -= dt\n\n if t < 0:\n t_func_end = time()\n t_func_duration = t_func_end - t_func_start\n t_point_period = t_last_point_calc_itr - t_func_start\n t_border_period = t_func_end - t_last_point_calc_itr\n print('iterations required: ', ctr)\n print('point period part: ', t_point_period, t_point_period/t_func_duration)\n print('border period: ', t_border_period, t_border_period/t_func_duration)\n \n break\n \n return pos_nds, pos, ctr",
"def expand_nodes_to_equations(nods, dof_names, all_dof_names):\n dpn = len(all_dof_names)\n nc = len(dof_names)\n\n eq = nm.empty(len(nods) * nc, dtype=nm.int32)\n for ii, dof in enumerate(dof_names):\n idof = all_dof_names.index(dof)\n eq[ii::nc] = dpn * nods + idof\n return eq",
"def fix_nodes_plane(self):\n # get the a1,a2,a3,a4 in terms of XYZ to transform xi to XYZ rep.\n a1 = np.array([2,-1,-1,0]); a2=np.array([-1,2,-1,0]); a3=np.array([-1,-1,2,0]); a4=np.array([0,0,0,1])\n repmat = np.array([self.X,self.Y,self.Z])\n repmat = np.transpose(repmat)\n repmat = np.linalg.inv(repmat) # [X|Y|Z]a = [ ]\n a1xyz = np.dot(repmat,a1); a2xyz=np.dot(repmat,a2); a3xyz=np.dot(repmat,a3); a4xyz=np.dot(repmat,a4)\n print(\"########### DEBUG ###########\")\n print(\"a1 = \"+str(a1xyz))\n print(\"a2 = \"+str(a2xyz))\n print(\"a3 = \"+str(a3xyz))\n print(\"a4 = \"+str(a4xyz))\n print(\"##############################\")\n for seg in self.segments:\n # find xi in XYZ rep\n n = a1xyz*xi[0] + a2xyz*xi[1] + a3xyz*xi[2] + a4xyz*xi[3]\n p0 = np.array([0.,0.,0.])\n for tmp_node in seg.vertices:\n p0 += np.array(tmp_node.coords)\n p0 /= len(seg.vertices)\n for iv in range(len(seg.vertices)):\n p1 = np.array(seg.vertices[iv].coords)\n tmp = p1-p0\n t = -(xi[0]*tmp[0]+xi[1]*tmp[1]+xi[2]*tmp[2])/(np.sum(xi**2))\n seg.vertices[iv].coords = list(p1+xi*t)\n print(\"############ DEBUG - fix_nodes_plane #############\")\n print(\"old = \"+str(p1))\n print(\"new = \"+str(seg.vertices[iv].coords))\n print(\"##################################################\")",
"def _RefinePositions(self, nodes):\n \n # Create a map from old to new positions\n pp1 = nodes.nodes()\n pp2 = stentpoints3d.get_subpixel_positions(self._vol, np.array(pp1))\n M = {}\n for i in range(pp2.shape[0]):\n M[pp1[i]] = tuple(pp2[i].flat)\n \n # Make a copy, replacing the node locations\n newnodes = stentgraph.StentGraph()\n for n1 in nodes.nodes():\n newnodes.add_node(M[n1], **nodes.node[n1])\n for n1, n2 in nodes.edges():\n newnodes.add_edge(M[n1], M[n2], **nodes.edge[n1][n2])\n \n # Refine paths to subpixel positions\n for n1, n2 in newnodes.edges():\n path = newnodes.edge[n1][n2]['path']\n newpath = stentpoints3d.get_subpixel_positions(self._vol, path)\n newnodes.edge[n1][n2]['path'] = newpath\n assert n1 == tuple(newpath[0].flat) or n1 == tuple(newpath[-1].flat)\n \n return newnodes",
"def get_gsom_node_array_with_new_feature_vectors(gsom_nodemap, gsom_list, labels, input_database, centroids, global_centroid):\n frame_list = []\n no_of_nodes = len(gsom_list)\n print(\"no of nodes in gsom: \" + str(no_of_nodes))\n\n for x in range(no_of_nodes):\n gsom_node_weights = gsom_list[x]\n # print(\"\\nNode:\" + str(x))\n for key, node in gsom_nodemap.items():\n if (len(node.get_mapped_labels()) > 0):\n if (gsom_node_weights.tolist() == node.recurrent_weights[0].tolist()):\n updated_weights = []\n grade = []\n for frame in node.get_mapped_labels():\n prev_feature_vector = input_database[0][int(frame)].tolist()\n\n contsant = calculate_const_for_frame(\n global_centroid,\n centroids[labels[x]],\n gsom_node_weights,\n prev_feature_vector[0]\n )\n\n updated_weights.append(\n [contsant * val for val in prev_feature_vector[0]]\n )\n grade.append(contsant)\n\n frame_list.append([key, node, labels[x], node.get_mapped_labels(), updated_weights, grade])\n break\n return frame_list",
"def expand_nodes_to_dofs(nods, n_dof_per_node):\n dofs = nm.repeat(nods, n_dof_per_node)\n dofs.shape = (nods.shape[0], n_dof_per_node)\n\n idof = nm.arange(n_dof_per_node, dtype=nm.int32)\n\n dofs = n_dof_per_node * dofs + idof\n\n return dofs",
"def apply_bc(self):\n nsize = len(self._nodes)\n ncount = 0\n for node in self._nodes:\n for dof in range(3):\n i = nsize*dof + ncount\n if not node._fixed[dof]:\n # not fixed: apply load to right hand side vector\n self._R[i] = node._r[dof]\n else:\n # is fixed: apply displacement and set corresponding equations to identity\n self._R[i] = node._u[dof]\n self._K[i].fill(0)\n self._K[i,i] = 1\n # TODO: apply suture constraints\n ncount = ncount + 1",
"def fill_domain(boundary_nodes, domain_conditions, num_domain_nodes, x_nodes=100, y_nodes=100, autosave=False):\n # Check if nodes have been previously generated\n h = hash(tuple(boundary_nodes)) + x_nodes + y_nodes# + sum(hash(cond) for cond in domain_conditions)\n if not os.path.exists(f\"node_positions/{h}\"):\n os.makedirs(f\"node_positions/{h}\")\n else:\n try:\n nodes = np.load(f\"node_positions/{h}/{num_domain_nodes}nodes.npy\")\n # cut_outs = np.load(f\"node_positions/{h}/{num_domain_nodes}cut_outs.npy\")\n print(\"Node positions loaded\")\n return nodes, None\n except FileNotFoundError:\n pass\n\n print(\"Generating nodes\")\n\n x_min, x_max, y_min, y_max = np.min(boundary_nodes.real), np.max(boundary_nodes.real), np.min(boundary_nodes.imag), np.max(boundary_nodes.imag)\n x_potentials = np.linspace(x_min, x_max, x_nodes+2)[1:-1]\n y_potentials = np.linspace(y_min, y_max, y_nodes+2)[1:-1]\n x, y = np.meshgrid(x_potentials, y_potentials)\n potentials = x.ravel() + y.ravel() * 1j\n\n cut_outs = np.array([], dtype=np.complex128)\n\n for condition in domain_conditions:\n cut_outs = np.concatenate((cut_outs, potentials[np.logical_not(condition(potentials))]))\n potentials = potentials[condition(potentials)]\n print(potentials.size)\n\n plt.scatter(potentials.real, potentials.imag, s=3)\n plt.show()\n nodes = np.array([], dtype=np.complex128)\n\n for i in range(num_domain_nodes):\n print(i)\n ds = np.zeros_like(potentials, dtype=np.float64)\n\n # vectorize this\n max_dist = -1\n k = 0\n\n for j in range(potentials.size):\n # ds[j] = np.min(np.abs(np.concatenate((nodes, boundary_nodes)) - potentials[j]))\n dist = np.min(np.abs(np.concatenate((nodes, boundary_nodes)) - potentials[j]))\n if dist > max_dist:\n max_dist = dist\n k = j\n\n # k = np.argmax(ds)\n nodes = np.append(nodes, potentials[k])\n cartesians = np.delete(potentials, k)\n\n if autosave:\n if (i+1) % autosave == 0:\n np.save(f\"node_positions/{h}/{i+1}nodes.npy\", nodes)\n\n np.save(f\"node_positions/{h}/{num_domain_nodes}nodes.npy\", nodes)\n np.save(f\"node_positions/{h}/{num_domain_nodes}cut_outs.npy\", cut_outs)\n\n return nodes, cut_outs",
"def moveNodesToGoodPosition(movenodes):\n pass",
"def gravity2(Np, pos_arr, epsilon_0, mass_arr, softening, velocity):\r\n p = 0\r\n N_new = np.size(mass_arr)\r\n acc = np.zeros(pos_arr.shape)\r\n # for p in range(0, Np):\r\n # rs = pos_arr[p] - pos_arr # array of relative position vectors\r\n while p < N_new:\r\n # for p in range(N_new):\r\n velocity, mass_arr, pos_arr, N_new = collisions(N_new, p, epsilon_0, mass_arr, velocity, pos_arr)\r\n rs = pos_arr[p] - pos_arr\r\n r2 = (rs ** 2).sum(axis=1) + softening**2\r\n ir3 = -1 * np.divide(mass_arr * np.ones_like(r2), np.sqrt(r2) * r2, out=np.zeros_like(r2), where=r2 != 0)\r\n acc[p, :] = (ir3[:, np.newaxis] * rs).sum(axis=0)\r\n p += 1\r\n indx = np.where(acc[:, 0] != 0)\r\n acc = np.reshape(acc[indx, :], [N_new, 3])\r\n return acc, velocity, mass_arr",
"def nodalSum2(val,elems,tol):\n print \"!!!!nodalSum2!!!!\"\n val[:] = normalize(val)\n import timer\n from pyformex.lib import misc\n t = timer.Timer()\n nodes = unique(elems)\n t.reset()\n [ averageDirectionsOneNode(val,where(elems==i),tol) for i in nodes ]\n ## for i in nodes:\n ## wi = where(elems==i)\n ## k = val[wi]\n ## #averageDirection(k,tol)\n ## misc.averageDirection(k,tol)\n ## val[wi] = k\n print \"TIME %s \\n\" % t.seconds()",
"def misplaced_tile_heuristic(nodes, possible_moves):\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load all required Cisco SNMP tables.
|
def _load_snmp_tables(self):
self._logger.info("Start loading MIB tables:")
self._if_table = self._snmp.get_table("IF-MIB", "ifDescr")
        self._logger.info("ifDescr table loaded")
self._logger.info("MIB Tables loaded successfully")
|
[
"def readAllTables():\n pidMap = SCOS.MIB.readTable(\"pid.dat\")\n picMap = SCOS.MIB.readTable(\"pic.dat\")\n tpcfMap = SCOS.MIB.readTable(\"tpcf.dat\")\n pcfMap = SCOS.MIB.readTable(\"pcf.dat\")\n plfMap = SCOS.MIB.readTable(\"plf.dat\", uniqueKeys=False)\n ccfMap = SCOS.MIB.readTable(\"ccf.dat\")\n cpcMap = SCOS.MIB.readTable(\"cpc.dat\")\n cdfMap = SCOS.MIB.readTable(\"cdf.dat\", uniqueKeys=False)\n return (pidMap, picMap, tpcfMap, pcfMap, plfMap, ccfMap, cpcMap, cdfMap)",
"def initialize_tables(self):\n self.initialize_users_table()\n self.initialize_sessions_table()\n self.initialize_queries_table()\n self.initialize_parsetrees_table()",
"def _get_entity_table(self):\n self._raw_physical_indexes = EntityQualiMibTable(self._snmp)\n\n index_list = self._raw_physical_indexes.raw_entity_indexes\n try:\n index_list.sort(key=lambda k: int(k), reverse=True)\n except ValueError:\n self._logger.error(\"Failed to load snmp entity table!\", exc_info=1)\n raise GeneralAutoloadError(\"Failed to load snmp entity table.\")\n for key in index_list:\n entity = self._raw_physical_indexes.get(key)\n if \"port\" in entity.entity_class:\n if self.port_exclude_pattern:\n invalid_port = self.port_exclude_pattern.search(\n entity.name\n ) or self.port_exclude_pattern.search(entity.description)\n if invalid_port:\n continue\n self._load_port(self.ENTITY_PORT(entity))\n elif \"powersupply\" in entity.entity_class.lower():\n self._load_power_port(self.ENTITY_POWER_PORT(entity))",
"def _load(self):\n self.get_table()\n self._get_pole()\n self._get_winners()",
"def init_ots_table():\n fc_storage_type = conf_utils.get_comm_config(consts.COMM_CONF_KEY_FC_STORAGE_TYPE)\n if fc_storage_type != consts.STORAGETYPE.OTS:\n return\n\n tables = [consts.NAMESPACE.PROCESS, consts.NAMESPACE.TRANSFER]\n for table in tables:\n try:\n ots = ots_storage.OTS(table, \"test\")\n ots.init_tb()\n except Exception as e:\n print(e)",
"def load_block_table():\n global UCDBlocks\n f = open(os.path.join(os.path.dirname(__file__), BLOCK_FILE), 'rb')\n UCDBlocks = load(f)\n f.close()",
"def load_all_schema(self):\n raise NotImplementedError",
"def _load_interfaces(self):\n self._load_devices()\n try:\n r = self.call_api(endpoint=\"/ports?columns=port_id,device_id,ifName\")\n self.interfaces = json.loads(r.text, object_pairs_hook=AttrDict)\n except requests.exceptions.HTTPError as err:\n raise LibrenmsException(\"Cannot load librenms interfaces into memory: %s\" % err)",
"def _load_coremaps(self, conn):\n\t\t# Calculate coremap\n\t\tcore_map = {}\n\t\tfor coord, chip in self.chips.iteritems():\n\t\t\tcore_map[coord] = sum(1<<c.core_id for c in chip.cores.itervalues())\n\t\tdata = spinnaker_app.core_map_struct_pack(core_map)\n\t\taddr = spinnaker_app.core_map_sdram_addr()\n\t\t\n\t\t# Load onto system\n\t\tfor (x,y), chip in self.chips.iteritems():\n\t\t\tconn.selected_cpu_coords = (x,y,0)\n\t\t\tself._write_mem_with_retry(conn, addr, scp.TYPE_BYTE, data)",
"def load_process_table(self):\n if self.procs is None:\n f = open(self.proctable_path)\n lines = f.readlines()\n f.close()\n self.procs = {}\n for l in lines[1:]:\n l = l.strip().split()\n self.procs[int(l[self.ID])] = {\n \"product\": int (l[self.PRODUCT]),\n \"product_energy\": float(l[self.PRODUCT_ENERGY]),\n \"time\": float(l[self.TIME]),\n }",
"def basicsetup(testcase):\n testcase.connection = util.generatedatabase(\":memory:\")\n for app in [\"Core\",\"Anime\",\"AnimeLife\"]:\n module,config = util.loadapp(app)\n util.loadtables(module,config,testcase.connection)\n\n populate_tables(testcase)",
"def test_all_tables(self):\n job = base_job.Job(os.path.join(os.getcwd(), 'my_sql_all.json'))\n job.connect_to_database()\n base_tables_to_keep = ['books', 'cities', 'states']\n self.assertEqual(base_tables_to_keep, mysql_worker.get_tables(job))",
"def _load_syscall_table(self):\n\n # Grab and format ABI\n abi = self.project.loader.main_bin.abi.replace(\"ELFOSABI_\",\"\")\n\n # If it's Linux, treat it as SYSV for now\n abi = abi.replace(\"LINUX\",\"SYSV\")\n\n # Determine correct module to load\n module = \"larissa.SimProcedures.Syscalls.{0}.{1}_{2}\".format(abi, self.project.loader.main_bin.arch, self.project.loader.main_bin.bits)\n \n try:\n module = importlib.import_module(module)\n except ImportError:\n logger.error(\"Unknown syscall table for current architecture.\")\n return\n\n # Grab the table\n self.syscall_table = module.syscall_table",
"def init_statistics_tables(self):\r\n self.initDB('job_stats.db3')",
"def refreshTables(self):\n self.send_refreshTables()\n self.recv_refreshTables()",
"def load_location_tables(self):\n if not self._loading_session:\n filename, _ = QFileDialog.getOpenFileName(self.main, caption='Load Location Tables', filter='*.json')\n else:\n filename = self._session['location_tables']\n if filename == '':\n return\n\n if os.path.isfile(filename):\n print(f\"Loading {filename}.\")\n self.app.setOverrideCursor(Qt.WaitCursor)\n try:\n self.location_tables = LocationTable(filename, self.main)\n except:\n self.collection = None\n message = QMessageBox(0, 'Error', 'Could not load the location tables. '\n 'Verify that the file format is correct.')\n message.exec()\n self.app.restoreOverrideCursor()\n\n self._session['location_tables'] = filename",
"def load(self):\n inventory_settings = InventorySettings(**config.SETTINGS.inventory.settings)\n self.nautobot = pynautobot.api(url=inventory_settings.address, token=inventory_settings.token)\n if not inventory_settings.verify_ssl:\n self.nautobot.http_session.verify = False\n else:\n self.nautobot.http_session.verify = True\n\n self._check_nautobot_version()\n\n sites = {}\n device_names = []\n\n results = self.nornir.run(task=query_device_info_from_nautobot)\n\n for device_name, items in results.items():\n if items[0].failed:\n continue\n\n result = items[0].result\n nb_device = result[\"device\"]\n site_name = nb_device[\"site\"].get(\"slug\")\n\n if site_name not in sites.keys():\n site = self.site(name=site_name, remote_id=nb_device[\"site\"].get(\"id\"))\n sites[site_name] = site\n self.add(site)\n else:\n site = sites[site_name]\n\n device = self.device(name=device_name, site_name=site_name, remote_id=nb_device[\"id\"])\n\n if nb_device[\"primary_ip\"]:\n device.primary_ip = nb_device[\"primary_ip\"].get(\"address\")\n\n device = self.apply_model_flag(device, nb_device)\n self.add(device)\n\n # Load Prefix and Vlan per site\n for site in self.get_all(self.site):\n self.load_nautobot_prefix(site)\n self.load_nautobot_vlan(site)\n\n # Load interfaces and IP addresses for each devices\n devices = self.get_all(self.device)\n for device in devices:\n site = sites[device.site_name]\n device_names.append(device.name)\n self.load_nautobot_device(site=site, device=device)\n\n # Load Cabling\n for site in self.get_all(self.site):\n self.load_nautobot_cable(site=site, device_names=device_names)",
"def fetch_statistics(self):\n self.appl_db.connect(self.appl_db.APPL_DB)\n self.counters_db.connect(self.counters_db.COUNTERS_DB)\n self.nat_statistics_list = []\n\n nat_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, \"NAT_TABLE:*\")\n if nat_table_keys:\n for i in nat_table_keys:\n nat_entry = re.split(':', i, maxsplit=1)[-1].strip()\n if nat_entry:\n exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_NAT:{}'.format(nat_entry))\n\n if not exists:\n continue\n\n nat_keys = re.split(':', nat_entry)\n nat_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TABLE:{}'.format(nat_entry))\n\n ip_protocol = \"all\"\n source = \"---\"\n destination = \"---\"\n\n if nat_values['nat_type'] == \"snat\":\n source = nat_keys[0]\n else:\n destination = nat_keys[0]\n\n counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_NAT:{}'.format(nat_entry)) \n packets = counter_entry['NAT_TRANSLATIONS_PKTS']\n byte = counter_entry['NAT_TRANSLATIONS_BYTES']\n\n self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,))\n\n napt_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, \"NAPT_TABLE:*\")\n if napt_table_keys:\n for i in napt_table_keys:\n napt_entry = re.split(':', i, maxsplit=1)[-1].strip()\n if napt_entry:\n exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_NAPT:{}'.format(napt_entry))\n\n if not exists:\n continue\n\n napt_keys = re.split(':', napt_entry)\n napt_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TABLE:{}'.format(napt_entry))\n\n ip_protocol = napt_keys[0].lower()\n source = \"---\"\n destination = \"---\"\n\n if napt_values['nat_type'] == \"snat\":\n source = napt_keys[1] + ':' + napt_keys[2]\n else:\n destination = napt_keys[1] + ':' + napt_keys[2]\n\n counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_NAPT:{}'.format(napt_entry))\n packets = counter_entry['NAT_TRANSLATIONS_PKTS']\n byte = counter_entry['NAT_TRANSLATIONS_BYTES']\n\n self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,))\n\n nat_twice_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, \"NAT_TWICE_TABLE:*\")\n if nat_twice_table_keys:\n for i in nat_twice_table_keys:\n nat_twice_entry = re.split(':', i, maxsplit=1)[-1].strip()\n if nat_twice_entry:\n exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAT:{}'.format(nat_twice_entry))\n\n if not exists:\n continue\n\n nat_twice_keys = re.split(':', nat_twice_entry)\n nat_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TWICE_TABLE:{}'.format(nat_twice_entry))\n\n ip_protocol = \"all\"\n\n source = nat_twice_keys[0]\n destination = nat_twice_keys[1]\n\n counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAT:{}'.format(nat_twice_entry))\n packets = counter_entry['NAT_TRANSLATIONS_PKTS']\n byte = counter_entry['NAT_TRANSLATIONS_BYTES']\n\n self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,))\n\n napt_twice_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, \"NAPT_TWICE_TABLE:*\")\n if napt_twice_table_keys:\n for i in napt_twice_table_keys:\n napt_twice_entry = re.split(':', i, maxsplit=1)[-1].strip()\n if napt_twice_entry:\n exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAPT:{}'.format(napt_twice_entry))\n\n if not exists:\n continue\n\n napt_twice_keys = re.split(':', napt_twice_entry)\n napt_twice_values = 
self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TWICE_TABLE:{}'.format(napt_twice_entry))\n\n ip_protocol = napt_twice_keys[0].lower()\n\n source = napt_twice_keys[1] + ':' + napt_twice_keys[2]\n destination = napt_twice_keys[3] + ':' + napt_twice_keys[4]\n\n counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAPT:{}'.format(napt_twice_entry))\n packets = counter_entry['NAT_TRANSLATIONS_PKTS']\n byte = counter_entry['NAT_TRANSLATIONS_BYTES']\n\n self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,))\n\n self.nat_statistics_list.sort(key = lambda x: x[0])\n return",
"def init_from_db(self, db):\n\n with db.session.begin():\n for name, table in self.tables.items():\n table.init_from_table(getattr(db, name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that update_if_dirty works. We use the last_modified field as a proxy for knowing whether or not we actually did a save.
|
def test_update_if_dirty(self):
create_tables(TestModel)
x = TestModel()
x.my_int = 3
x.my_string = "foo"
x.save()
last_mod = x.last_modified
django_util.update_if_dirty(x, my_int=3, my_string="bar")
assert_not_equal(x.last_modified, last_mod)
last_mod = x.last_modified
django_util.update_if_dirty(x, my_int=3, my_string="bar")
assert_equal(x.last_modified, last_mod)
x.delete()
|
[
"def test_save(self):\n instance1 = BaseModel()\n attr_updated_before_save = instance1.updated_at\n instance1.save()\n attr_updated_after_save = instance1.updated_at\n self.assertNotEqual(attr_updated_before_save, attr_updated_after_save)",
"def test_that_save_func_update_update_at_attr(self):\n b = BaseModel()\n b.save()\n self.assertNotEqual(b.created_at, b.updated_at)\n self.assertGreater(b.updated_at.microsecond,\n b.created_at.microsecond)",
"def check_modified(self):\n return bool(self._modified)",
"def test_project_last_modified(self):\n old = get_project_by_id(self.pid).last_updated\n modify_filter(fid=self.fid)\n new = get_project_by_id(self.pid).last_updated\n self.assertNotEqual(old, new)",
"def update_with_defaults(obj, data):\n for key, value in data.items():\n setfield(obj, key, value)\n if getattr(obj, '_is_dirty', None):\n logger.debug(obj._is_dirty)\n obj.save(update_fields=obj._dirty_fields)\n del obj._is_dirty\n del obj._dirty_fields\n return True",
"def test_xml_status_dirty_modified(self):\n svn_output = self.SVN_STATUS_XML_DIRTY_MODIFIED\n is_dirty = SvnRepository.xml_status_is_dirty(\n svn_output)\n self.assertTrue(is_dirty)",
"def test_no_update_fresh_data_single(self):\n w = Weather.objects.get(pk=6)\n w.last_modified = self.CURRENT_TIME\n w.save()\n weather = Weather.objects.retrieve_weather_object(city='Azusa', state='CA')\n self.assertEqual(w.last_modified, weather.last_modified)",
"def test_updated_at(self):\n self.base.save()\n self.assertTrue(self.base.created_at != self.base.updated_at)",
"def is_dirty(self):\n for _, prop in self._props.items():\n if prop.is_dirty:\n return True\n return False",
"def test_update_not_saved(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n\n user._update()",
"def has_changes(self):\n return self._repo.is_dirty()",
"def test_update_not_saved(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n\n user._update_base_attributes()",
"def is_allowed_update_for(self, instance):\n return self._is_allowed_for(instance, 'update')",
"def getDirty(self) -> \"SbBool\":\n return _coin.SoField_getDirty(self)",
"def test_update_not_saved(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n\n user._update_personal_contact_infos()",
"def repo_is_dirty(self):\n output = subprocess.check_output(['git', 'status', '--porcelain'], cwd=self.repo_dir)\n\n return self.MODIFIED_FILE_REGEX.search(output.decode('utf-8')) is not None",
"def test_update_not_saved(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n\n user._update_password()",
"def test_full_update_checker_result(self):\n pass",
"def test_xml_status_dirty_added(self):\n svn_output = self.SVN_STATUS_XML_DIRTY_ADDED\n is_dirty = SvnRepository.xml_status_is_dirty(\n svn_output)\n self.assertTrue(is_dirty)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute and return the armstrong number
|
def armstrong_number(number):
total = 0
num_len = len(number)
for i in range(len(number)):
total += int(number[i]) ** num_len
return total
|
[
"def get_armstrong_value(num):\n num = str(num)\n length = len(num)\n armstrong_value = 0\n for char in num:\n armstrong_value += int(char)**length\n return armstrong_value",
"def final_amt(p, r, n, t):\r\n a = p*(1+r/n)**(n*t)\r\n return a",
"def avhrr( redchan, nirchan ):\n\tif( nirchan < 0 or redchan < 0 ):\n\t\tresult = -1.0\n\telse:\n\t\tresult = (( 0.035+ 0.545*nirchan - 0.32*redchan) / 10000.0 )\n\treturn result",
"def dispersiveLJ(self, r):\n\t\treturn -1/pow(r,6)",
"def get_strenght(self):\n return 10 - self.get_agility()",
"def BuildNumber(self) -> int:",
"def get_generous_next_payout(lambs_given):\n return (2 * lambs_given)",
"def final_amt(p, r, n, t):\n\n a = p * (1 + r/100) ** (n*t)\n return a # This is new, and makes the function fruitful.",
"def reward(self):\n return self._r_sum",
"def calc_shield_recharge(block_count):\n return block_count * 5.5",
"def discount(t,r):\r\n return (1+r)**(-t)",
"def calc_r0(self):\n raise NotImplementedError",
"def lj_potential(r):\n\tr_6 = (1.0 / r) ** 6\n\treturn r_6 ** 2 - 2 * r_6",
"def a_realization(self):\n return self.powersum()",
"def annuity_payment(r, n, pv):\n return r * pv / (1 - pow(1 + r, -n))",
"def atomic_number(self) -> int:\n return self.random.randint(1, 119)",
"def _calc_rmr(self) -> float:\n\n WEIGHT_FACTOR = 10\n HEIGHT_FACTOR = 6.25\n AGE_FACTOR = -5\n MALE_CONSTANT = 5\n FEMALE_CONSTANT = -161\n\n if self._gender == 'M':\n gender_constant = MALE_CONSTANT\n else:\n gender_constant = FEMALE_CONSTANT\n\n return (WEIGHT_FACTOR * self._weight + HEIGHT_FACTOR * self._height\n + AGE_FACTOR * self._age + gender_constant)",
"def standaardprijs(afstandKM):\n return 0.0",
"def UpdateReward(picked_arm):\n counts[picked_arm]+=1\n\n arm_count=counts[picked_arm]\n arm_value=values[picked_arm]\n true_prob=true_prob_values[picked_arm]\n \n reward=CalculateReward(true_prob)\n values[picked_arm]=arm_value*((arm_count- 1)/arm_count) + reward/arm_count\n \n return(values[picked_arm])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs the component importer. Project should not be in a transaction. acm_files is a list of acm or xml AVM component model files, which will be imported into the project.
|
def run_importer(project, lib_root, acm_file):
print 'Running Component Importer'
importer = win32com.client.DispatchEx("MGA.Interpreter.CyPhyComponentImporter")
importer.Initialize(project) # initialize GMEConsole and MGAGateway
    project.BeginTransactionInNewTerr() # a transaction is required by the ImportFile call
    importer.ImportFile(project, lib_root, acm_file) # import the components defined in the given ACM file
project.CommitTransaction() # save imported component in the project
|
[
"def import_jobs(rd_client, projects, files):\n for file_path in files:\n job = open(file_path, 'r').read()\n print(\"Importing %s\" % file_path)\n response = rd_client.import_job(\n job, fmt=\"yaml\",project=projects,dupeOption=\"update\"\n )\n if response['failed'] is not None:\n print(\"Import %s failed.\" % file_path)\n print(respinse['failed'])\n sys.exit(1)\n print(\"Impoerted %s successfully.\" % file_path)",
"def auvdeployment_import(files):\n\n print(\"MESSAGE: Starting auvdeployment import\")\n auvdeployment = {}\n\n netcdf = NetCDFParser(open(files['netcdf'], \"rb\"))\n track_parser = TrackParser(open(files['track'], \"r\"))\n image_subfolder = files['image']\n\n # now start going through and creating the data\n auvdeployment['mission_aim'] = \"Generic Description.\"\n auvdeployment['min_depth'] = 14000\n auvdeployment['max_depth'] = 0\n\n auvdeployment['start_time_stamp'] = datetime.datetime.now()\n auvdeployment['end_time_stamp'] = datetime.datetime.now()\n\n # create the left-colour camera object\n # we don't normally give out the right mono\n # images...\n leftcamera = {}\n\n leftcamera['name'] = \"Left Colour\"\n leftcamera['angle'] = \"Downward\"\n\n first_image = None\n last_image = None\n\n lat_lim = LimitTracker('latitude')\n lon_lim = LimitTracker('longitude')\n\n print(\"First readings from netcdf file.\")\n earlier_seabird = netcdf.next()\n later_seabird = netcdf.next()\n\n # now we get to the images... (and related data)\n print(\"Begin parsing images.\")\n\n first_image = None\n last_image = None\n image_list = []\n # campaign_name = auvdeployment.campaign.short_name\n # deployment_name = auvdeployment.short_name\n count = 0\n for row in track_parser:\n count += 1\n current_image = {}\n image_name = os.path.splitext(row['leftimage'])[0] + \".tif\"\n\n image_datetime = datetime.datetime.strptime(os.path.splitext(image_name)[0], \"PR_%Y%m%d_%H%M%S_%f_LC16\")\n image_datetime = image_datetime.replace(tzinfo=tzutc())\n current_image['date_time'] = str(image_datetime)\n current_image['position'] = \"POINT ({0} {1})\".format(row['longitude'], row['latitude'])\n current_image['latitude'] = row['latitude']\n current_image['longitude'] = row['longitude']\n\n depth = float(row['depth'])\n current_image['depth'] = row['depth']\n # quickly calculate limit info\n\n if depth > float(auvdeployment['max_depth']):\n auvdeployment['max_depth'] = str(depth)\n\n if depth < float(auvdeployment['min_depth']):\n auvdeployment['min_depth'] = str(depth)\n\n lat_lim.check(row)\n lon_lim.check(row)\n\n # calculate image locations and create thumbnail\n current_image['image_path'] = os.path.join(image_subfolder, image_name)\n\n # get the extra measurements from the seabird data\n while image_datetime > later_seabird['date_time'] and not netcdf.isFinished():\n later_seabird, earlier_seabird = earlier_seabird, netcdf.next()\n\n # find which is closer - could use interpolation instead\n if (later_seabird['date_time'] - image_datetime) > (image_datetime - earlier_seabird['date_time']):\n closer_seabird = earlier_seabird\n else:\n closer_seabird = later_seabird\n\n current_image['temperature'] = closer_seabird['temperature']\n current_image['salinity'] = closer_seabird['salinity']\n current_image['roll'] = row['roll']\n current_image['pitch'] = row['pitch']\n current_image['yaw'] = row['heading']\n current_image['altitude'] = row['altitude']\n current_image['camera'] = leftcamera['name']\n current_image['camera_angle'] = leftcamera['angle']\n\n image_list.append(current_image)\n\n # we need first and last to get start/end points and times\n last_image = current_image\n if first_image is None:\n first_image = current_image\n\n # now save the actual min/max depth as well as start/end times and\n # start position and end position\n\n print 'done with ', count, 'images'\n auvdeployment['start_time_stamp'] = first_image['date_time']\n auvdeployment['end_time_stamp'] = last_image['date_time']\n\n 
auvdeployment['start_position'] = first_image['position']\n auvdeployment['end_position'] = last_image['position']\n\n auvdeployment['transect_shape'] = 'POLYGON(({0} {2}, {0} {3}, {1} {3}, {1} {2}, {0} {2} ))'.format(lon_lim.minimum, lon_lim.maximum, lat_lim.minimum, lat_lim.maximum)\n\n return auvdeployment, image_list",
"def test_AvivImporter():\n\n filenames = [\n 'sampledata/01-CD-Aviv62DS/CSA/CSA.CD',\n 'sampledata/01-CD-Aviv62DS/CSA/blank.CD',\n 'sampledata/01-CD-Aviv62DS/PEP-average/4RNSX.001',\n ]\n assert spp.Importer(filenames[0])\n\n for filename in filenames:\n assert spp.AvivImporter(filename)",
"def run():\n # add subfolders into path\n cmd_folder = os.path.realpath(\n os.path.abspath(\n os.path.split(\n inspect.getfile(\n inspect.currentframe()))[0]))\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n\n cmd_subfolder = pjoin(cmd_folder, \"utilities\")\n if cmd_subfolder not in sys.path:\n sys.path.insert(0, cmd_subfolder)\n\n # get input parameters from toolbox interface\n dem = arcpy.GetParameterAsText(0)\n veg = arcpy.GetParameterAsText(1)\n remap = arcpy.GetParameterAsText(2)\n output_folder = arcpy.GetParameterAsText(3)\n fdi = arcpy.GetParameter(4)\n extent = arcpy.GetParameter(5)\n\n dem_sr = arcpy.Describe(dem).spatialReference\n arcpy.AddMessage(\"DEM's spatial reference type is {0}\".format(dem_sr.type))\n\n if dem_sr.type == \"Projected\":\n # find effective AOI based on the input parameters\n mask = find_aoi(extent, dem, veg)\n\n try:\n # calculate the BAL for the effective AOI\n bal_calc(veg, dem, fdi, output_folder, remap, mask)\n arcpy.AddMessage(\"Successfully completed BAL calculation!\")\n except Exception as err:\n # Report any exceptions back\n arcpy.AddError(err)\n\n else:\n arcpy.AddError(\"To go ahead, the DEM needs to be projected first\")",
"def _import_alembic(self, alembic_file, valid_tag_info, nodes=None, parent=None):\n\n if valid_tag_info:\n res = alembic.import_alembic(\n project=self._project, alembic_file=alembic_file, mode='import', nodes=nodes, parent=parent)\n else:\n res = alembic.import_alembic(project=self._project, alembic_file=alembic_file, mode='import', parent=parent)\n\n return res",
"def import_project(self,project_dir):\n # Check that target directory exists\n project_dir = os.path.abspath(project_dir)\n # Check that project doesn't already exist\n project_name = os.path.basename(project_dir)\n project_metadata = self.load_project_metadata()\n if project_name in [p['Project'] for p in project_metadata] or \\\n utils.AnalysisProject(project_name,\n os.path.join(self.analysis_dir,\n project_name)).exists:\n raise Exception(\"Project called '%s' already exists\" %\n project_name)\n # Load target as a project\n project = utils.AnalysisProject(project_name,project_dir)\n # Rsync the project directory\n print \"Importing project directory contents for '%s'\" % project_name\n try:\n excludes = ['--exclude=tmp.*',\n '--exclude=qc_report.*']\n rsync = applications.general.rsync(project_dir,\n self.analysis_dir,\n extra_options=excludes)\n print \"Running %s\" % rsync\n status = rsync.run_subprocess(log=self.log_path('import_project.rsync.log'))\n except Exception as ex:\n logging.error(\"Exception importing project: %s\" % ex)\n raise ex\n if status != 0:\n raise Exception(\"Failed to import project from %s (status %s)\" %\n (project_dir,status))\n # Update the projects.info metadata file\n print \"Updating projects.info file with imported project\"\n project_metadata = self.load_project_metadata()\n sample_names = [s.name for s in project.samples]\n project_metadata.add_project(project_name,\n sample_names,\n user=project.info.user,\n library_type=project.info.library_type,\n single_cell_platform=project.info.single_cell_platform,\n organism=project.info.organism,\n PI=project.info.PI,\n comments=project.info.comments)\n project_metadata.save()\n # Report\n print \"Projects now in metadata file:\"\n for p in project_metadata:\n print \"- %s\" % p['Project']\n # Update the QC report\n try:\n project = self.get_analysis_projects(pattern=project_name)[0]\n except Exception as ex:\n logging.error(\"Exception when trying to acquire project %s: %s\"\n % (project_name,ex))\n return\n if project.qc is None:\n print \"No QC for %s\" % project_name\n else:\n if project.qc.verify():\n try:\n project.qc_report()\n print \"Updated QC report for %s\" % project_name\n except Exception, ex:\n logging.error(\"import_project: failed to generate QC \"\n \"report for %s\" % project_name)",
"def add_components(cls, project_path):\n old_path = os.getcwd()\n os.chdir(project_path)\n # print(\"begin mvn clean package\"+absoluteProjectPath)\n # subprocess.call([\"mvn\", \"clean\", \"package\"], shell = True)\n # print(\"end mvn clean package\")\n print(\"Veuillez deployer l'ear\")\n log = input(\"Saisissez le chemin vers le fichier de log : \")\n f = open(log, \"r\")\n content = f.read()\n f.close()\n os.chdir(old_path)\n for path in cls.componentsFilePath:\n Search4Ejb.parse_log(content, path)\n WebMigration.update_jndi()",
"def import_ensembles(amoptd):\n\n logger.info(\"Importing ensembles from directory: {0}\".format(amoptd['ensembles']))\n\n ensembles = glob.glob(os.path.join(amoptd['ensembles'], '*.pdb'))\n if not len(ensembles):\n msg = \"Cannot import ensembles from the directory: {0}\".format(amoptd['ensembles'])\n exit_util.exit_error(msg)\n amoptd['ensembles'] = ensembles\n\n # get the data on the ensemble\n ensembles_data = []\n for e in ensembles:\n d = {}\n d['name'] = os.path.splitext(os.path.basename(e))[0]\n d['ensemble_pdb'] = e\n\n # Get data on the models\n hierarchy = iotbx.pdb.pdb_input(file_name=e).construct_hierarchy()\n d['subcluster_num_models'] = len(hierarchy.models())\n d['num_residues'] = len(hierarchy.models()[0].chains()[0].residue_groups())\n d['ensemble_num_atoms'] = len(hierarchy.models()[0].atoms())\n\n ensembles_data.append(d)\n\n amoptd['ensembles_data'] = ensembles_data\n\n return ensembles",
"def main(para, files):\n for filename in files:\n convertUSFMToAccordance(filename, para)",
"def processAllICATFiles( self ):\n\n # Now check the directory given to see if there are any xml ingest files in there \n print ''\n EDVerbose.DEBUG( strftime(\"%Y-%m-%d %H:%M:%S\") + \" *** EDPluginControlDLSArchiverv10.processAllICATFiles : processing all 'icat' files if any.\" ) \n print ''\n \n edICATPlugin = self.loadPlugin( 'EDPluginExecICATIngesterv10' )\n \n from XSDataExecICATIngesterv10 import XSDataInputPluginExecICATIngester\n from XSDataExecICATIngesterv10 import XSDataResultPluginExecICATIngester\n \n # build the plugin input\n xsDataPluginExecICATIngester = XSDataInputPluginExecICATIngester() \n \n xsDataPluginExecICATIngester.setXmlArchiveDirectory( self.getDataInput().getArchiveLocation() )\n \n xsDataPluginExecICATIngester.setXmlSearchDir( self.getDataInput().getDropZonePath() ) \n #xsDataPluginExecICATIngester.setXmlFailedtoIngestDirectory( self.getDataInput().getFailedtoIngestDirectory().getPath().getValue() ) \n #xsDataPluginExecICATIngester.setXmlIngestFileName( XSDataFile( XSDataString( fullname ) ) ) \n \n edICATPlugin.setDataInput( xsDataPluginExecICATIngester )\n \n # now run the plugin\n edICATPlugin.connectSUCCESS( self.doSuccessICAT )\n edICATPlugin.connectFAILURE( self.doFailureICAT )\n edICATPlugin.executeSynchronous()\n \n # retrieve the plugin result\n xsDataResultPluginExecICATIngester = edICATPlugin.getDataOutput()\n \n #xsDataResultPluginExecICATIngester.getSessionID().getValue()\n \n return",
"def loadAssemblyFromFile(file):\r\n if file.__contains__(\".FCStd\"):\r\n\r\n FreeCAD.open(file)\r\n doc = App.activeDocument()\r\n doc = FreeCAD.ActiveDocument\r\n objs = FreeCAD.ActiveDocument.Objects\r\n return doc, objs\r\n\r\n if file.__contains__(\".STEP\") or file.__contains__(\".step\"):\r\n Import.open(file)\r\n doc = App.activeDocument()\r\n doc = FreeCAD.ActiveDocument\r\n objs = FreeCAD.ActiveDocument.Objects\r\n return doc, objs",
"def _compile_and_upload_seqc(self):\n futures = []\n with self.session.set_transaction(), ThreadPoolExecutor() as executor:\n # Compile sequencer code for all AWGs in parallel.\n for awg_core, awg_string in self.sequencer_code_mcc.values():\n future_seqc = executor.submit(\n awg_core.load_sequencer_program,\n awg_string\n )\n futures.append(future_seqc)\n\n # Wait until all compilations are finished and check if there are\n # errors.\n for future in as_completed(futures):\n try:\n _ = future.result()\n except CoreError as e:\n print(\"Sequencer code compilation error\", e)",
"def process(self, files):\n self.track_versions(files)\n astrodriz_params = [\"-n\", \"1\"]\n assoc = self.assoc_files(files)\n if assoc:\n self.run_stage1(*assoc)\n if self.stage2:\n args = astrodriz_params + assoc\n self.run_stage2(*args)\n return\n unassoc = self.unassoc_files(files)\n if unassoc:\n self.run_stage1(*unassoc)\n if self.stage2:\n args = astrodriz_params + unassoc\n self.run_stage2(*args)\n return",
"def load_components_files(self, system_directory):\n components_glob = glob.iglob(\n os.path.join(system_directory, '*', 'component.yaml')\n )\n self.components = {}\n self.justification_mapping = {}\n for component_yaml_path in components_glob:\n component_dir_path = os.path.split(component_yaml_path)[0]\n component_key = os.path.split(component_dir_path)[-1]\n component = Component(component_directory=component_dir_path)\n utils.merge_justification(\n self.justification_mapping, component.justification_mapping\n )\n self.components[component_key] = component",
"def run(self):\n\n self.start_process()\n cwd = os.getcwd()\n\n db_file_obj = Database.db_find_by_id(self.file_id)\n Database.db_gui_insert_newtype(db_file_obj['Name'].split(\".\")[-1])\n output_obj = self.check_cuckoo(db_file_obj['location'])\n\n for module in self.modules:\n if module in self.modules_ignore:\n continue\n\n #location main python file in modules folder on system\n location_of_module = '{0}/modules/{1}/{1}.py'.format(cwd, module)\n module_dir = '{0}/modules/{1}/'.format(cwd, module)\n\n os.chdir(module_dir)\n p = subprocess.Popen(['python', \"{0}.py\".format(module), db_file_obj['location']], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n os.chdir(cwd)\n stdoutdata, stderrdata = p.communicate()\n\n #if we get error data the module 'failed'\n module_passed = True\n if stderrdata:\n module_passed = False\n self.modules[module] = module_passed\n\n output = self.processData(stdoutdata)\n output_obj[module] = output\n\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())\n\n output_obj = self.check_cuckoo(output_obj)\n\n self.finish_process()\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())",
"def main():\n\n parser = argparse.ArgumentParser(\n description='Perform initial loading of build database from manifests'\n )\n parser.add_argument('-c', '--config', dest='add_proj_config',\n help='Configuration file for build database loader',\n default='build_db_loader_conf.ini')\n\n args = parser.parse_args()\n\n # Check configuration file information\n add_proj_config = configparser.ConfigParser()\n add_proj_config.read(args.add_proj_config)\n\n if any(key not in add_proj_config for key in ['build_db', 'repos']):\n print(\n f'Invalid or unable to read config file {args.add_proj_config}'\n )\n sys.exit(1)\n\n db_info = add_proj_config['build_db']\n db_required_keys = ['db_uri', 'username', 'password']\n\n if any(key not in db_info for key in db_required_keys):\n print(\n f'One of the following DB keys is missing in the config file:\\n'\n f' {\", \".join(db_required_keys)}'\n )\n sys.exit(1)\n\n repo_info = add_proj_config['repos']\n repo_required_keys = ['manifest_dir', 'manifest_url', 'repo_basedir']\n\n if any(key not in repo_info for key in repo_required_keys):\n print(\n f'One of the following repo keys is missing in the '\n f'config file:\\n {\", \".join(repo_required_keys)}'\n )\n sys.exit(1)\n\n # Now run through all the manifests in build-manifests and update\n # the database with new project documents\n add_projects = AddProject(db_info, repo_info)\n last_manifest = [] # Start from beginning\n manifest_repo = repo_info['manifest_dir']\n\n print('Checking out/updating the build-manifests repo...')\n cbutil_git.checkout_repo(manifest_repo, repo_info['manifest_url'])\n\n manifest_walker = cbutil_git.ManifestWalker(manifest_repo, last_manifest)\n\n for commit_info, manifest_xml in manifest_walker.walk():\n try:\n manifest_info = add_projects.get_manifest_info(manifest_xml)\n except mf_parse.InvalidManifest as exc:\n # If the file is not an XML file, simply move to next one\n print(f'{commit_info[0]}: {exc}, skipping...')\n continue\n\n add_projects.update_project_documents(manifest_info)",
"def execfiles(self, inputfiles):\n\n if len(self.arglist['inputfiles']) > 0:\n for filename in inputfiles:\n # Read input file: make dataparent, get child from load() ##-\n datain = DataParent(config = self.config)\n self.datain = datain.load(filename)\n # Call start - run and call end\n self.runstart(self.datain,self.arglist)\n self.run()\n self.runend(self.dataout)\n # Write output file\n self.dataout.save()\n self.log.info('Execute: Saved result %s' % self.dataout.filename)\n else:\n # Warning - no input file\n self.log.warn('Execute: Missing input File')",
"def run(self):\n print(f\"* Make {self.name} IDL file *\")\n\n print(f\"*** Validate and generate {self.name} libraries ***\")\n self.make_idl_file()\n\n print(f\"*** Copy {self.idl_file_from_path} to {self.idl_file_to_path} ***\")\n shutil.copy(self.idl_file_from_path, self.idl_file_to_path)\n\n if not self.keep_all:\n print(f\"*** Cleanup {self.name} files ***\")\n self.delete_files()\n\n print(f\"*** Done generating {self.name} IDL file ***\")",
"def import_dir(self, dirname):\n\t\tprint \"importing\",dirname\n\t\tdir = os.listdir(dirname)\n\t\tfor file in dir:\n\t\t\tfullpath = os.path.join(dirname,file)\n\t\t\tif os.path.isdir(fullpath):\n\t\t\t\tself.import_dir(fullpath)\n\n\t\t\telif os.path.splitext(fullpath)[1].lower() in tag.supported_extensions:\n\t\t\t\ttags = tag.read_tags(fullpath)\n\t\t\t\tsql = \"insert into best values (?,?,?,?,?,?,?,?)\"\n\t\t\t\targs = self.get_args(tags, fullpath)\n\t\t\t\tself.conn.execute(sql, args)\n\n\t\tself.conn.commit()",
"def do_bundle_import(mc, args):\n total_reqs = collections.OrderedDict()\n for filename in args.filename:\n local_path = None\n if os.path.isfile(filename):\n _file = filename\n local_path = os.path.dirname(os.path.abspath(filename))\n else:\n print(\"Bundle file '{0}' does not exist, attempting to download\"\n \"\".format(filename))\n _file = utils.to_url(\n filename,\n base_url=args.murano_repo_url,\n path='bundles/',\n extension='.bundle',\n )\n\n try:\n bundle_file = utils.Bundle.from_file(_file)\n except Exception as e:\n print(\"Failed to create bundle for '{0}', reason: {1}\".format(\n filename, e))\n continue\n\n data = {\"is_public\": args.is_public}\n\n try:\n for package in bundle_file.packages(\n base_url=args.murano_repo_url, path=local_path):\n\n requirements = package.requirements(\n base_url=args.murano_repo_url,\n path=local_path,\n )\n total_reqs.update(requirements)\n except Exception:\n print(\"Can't parse bundle contents\")\n continue\n\n imported_list = []\n\n for name, dep_package in total_reqs.items():\n image_specs = dep_package.images()\n if image_specs:\n print(\"Inspecting required images\")\n try:\n imgs = utils.ensure_images(\n glance_client=mc.glance_client,\n image_specs=image_specs,\n base_url=args.murano_repo_url,\n local_path=local_path,\n is_package_public=args.is_public)\n for img in imgs:\n print(\"Added {0}, {1} image\".format(\n img['name'], img['id']))\n except Exception as e:\n print(\"Error {0} occurred while installing \"\n \"images for {1}\".format(e, name))\n try:\n imported_package = _handle_package_exists(\n mc, data, dep_package, args.exists_action)\n if imported_package:\n imported_list.append(imported_package)\n except exceptions.CommandError:\n raise\n except Exception as e:\n print(\"Error {0} occurred while \"\n \"installing package {1}\".format(e, name))\n if imported_list:\n _print_package_list(imported_list)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
calls the set_focus method once after a 0.5 second interval. The focus must be returned after a short interval, so this method is necessary.
|
def set_focus_real(self):
Clock.schedule_once(self.set_focus, 0.5)
|
[
"def setFocus():\n pass",
"def _on_focus_changed(self, old, new):\n self._update_focus_widget()",
"def focus_change(self, func):\r\n return self._subscribe(\"focus_change\", func)",
"def set_focused(self):\n self.has_keyboard_focus = True",
"def set_focus(self):\n self.logger.info(f\"Set focus on element: {self.selectors}\")\n self.element.set_focus()",
"def setFocus(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return False",
"def focus10():\n move_instrument('Small KB Forces', 'focus_10um', wait=True)",
"def set_focus_on_answer(self):\n self.answer_entry.focus()",
"def focus(self, warp: bool = True) -> None:",
"def focus5():\n move_instrument('Small KB Forces', 'focus_5um', wait=True)",
"def force_focus(self):\n\n\t\tself.lift()\n\t\tself.focus_force()\n\t\tself.attributes(\"-topmost\", True)\n\t\tself.grab_set()",
"def _update_focus_widget(self):\n fd = focus_registry.focused_declaration()\n self.declaration.focused_widget = fd",
"def focus(self):\n self.node.focus()",
"def focus(self):\n cls = type(self)\n if not cls.can_focus:\n raise TypeError(\"cannot focus %s.%s widgets\"\n % (cls.__module__, cls.__name__))\n self._wrapper.focus()",
"def enable_focus_mode(self) -> None:\n self._set_configuration(ffoc=\"ON\")",
"def focus_buffer(self, results_buffer, active_buffer):\n\n results_has_focus = results_buffer.id() == active_buffer.id()\n\n if not results_has_focus:\n self.window.focus_view(results_buffer)",
"def start_microscope_focus(self):\n self.logger.info('Starting the microscope focus free run')\n self.cameras['camera_fiber'].stop_camera()\n self.electronics.fiber_led = 0\n self.electronics.side_led = 0\n self.electronics.top_led = 1\n\n self.electronics.laser_power = 0\n\n self.config['camera_microscope'].update(self.config['microscope_focus'])\n self.cameras['camera_microscope'].stop_camera()\n self.cameras['camera_microscope'].configure(self.config['camera_microscope'])\n self.cameras['camera_microscope'].start_free_run()",
"def OnKillFocus(self, event):\n self.hasFocus = False\n self.DrawFocusIndicator(False)",
"def user32_SetFocus(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
closes the GUI window
|
def close_window(self):
Window.close()
|
[
"def close_window(_):\n root.destroy()",
"def close_window(window):\r\n window.destroy()",
"def close(self):\n\n Dialog.close(self)\n gui.no_modal_dialog=True",
"def doQuit(self):\n\n self.mainWin2.destroy()",
"def shutdown_gui(self):\n Gtk.main_quit()",
"def closeWindowCallback(self, event):\n\t\tself.EndModal(self.status)",
"def close_board(self):\n self.ctrl.close()\n return",
"def quit(self):\r\n self.root.quit()\r\n self.root.destroy()",
"def closeEvent(self, event):\n\n # Remove the viewer widget from the main GUI and exit.\n self.parent_gui.display_widget(None, display=False)\n self.close()",
"def close_window(window):\n xkill('-id', window)",
"def close_cells_window(self):\n try:\n self.cells_window.destroy()\n self.cell_button.configure(state='normal')\n del self.cells_window\n except AttributeError:\n pass",
"def OnCloseWindow(self):\n pass",
"def close_window(self):\n if self.task_mngr.is_task_remaining():\n if self.error_occurred:\n response = self.msg_win.show_question_msg(\n \"Warning\",\n \"Tasks are still running, however it seems a task has errors or stalled. \"\n \"Close the window?\"\n )\n else:\n response = self.msg_win.show_question_msg(\n \"Warning\",\n \"Tasks are still running. Are you sure you want to close the window?\"\n )\n if response:\n self.task_mngr.stop_tasks()\n self.close()\n else:\n self.close()",
"def window_close(self):\n if self._worker.isRunning():\n self._worker.terminate()\n\n self._save_prefs()",
"def closing_widget(self):\n pass",
"def close_defects_window(self):\n try:\n self.defects_window.destroy()\n self.defect_button.configure(state='normal')\n del self.defects_window\n except AttributeError:\n pass",
"def auto_exit(self):\n # self.window.quit()\n self.window.destroy()\n self.plot_states()",
"def closeSession(self):\n self.hide()",
"def ev_windowclose(self, event: WindowEvent) -> None:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
draw graph of test result when a device button is clicked
|
def draw_graph(self, dev, txt):
"""x-axis reps bias_volt and y-axis reps cont_curr."""
if txt != '':
self.firstbox.device.text = "Summary of: " + dev
            with open(tst.get_path(), 'r') as f:
                s = f.read()
bias_v = []
cont_i = []
if len(txt) != 1:
                i1 = s.find(dev) if s.find(dev) != -1 else s.find(dev[0].upper() + dev[1])
final_bias_v = tst.get_device(dev).get_stat2()
i2 = s.find(str(final_bias_v), i1)
arr = s[i1:i2].split(',')
i_bias_v = 1
i_cont_i = 3
while i_cont_i < len(arr):
bias_v.append(float(arr[i_bias_v]))
cont_i.append(float(arr[i_cont_i][:arr[i_cont_i].find('\n')])*10**11)
i_bias_v += 3
i_cont_i += 3
##if I need to implement button functionality for columns and rows, add if conditions like "if len(txt) == 1"
if len(self.firstbox.real_graph.plots) == 1:
self.firstbox.real_graph.remove_plot(self.firstbox.real_graph.plots[0])
self.plot = MeshLinePlot(color=[1,1,1,1])
self.firstbox.real_graph.add_plot(self.plot)
self.plot.points = []
        for x, y in zip(bias_v, cont_i):
            self.plot.points.append((x, y))
|
[
"def drawTestOutput(p_test, Y_test, weight_test, xmin, xmax, num_bins, node, class_tag): #class_tag = ['tth', 'ttb', 'ttc']\n plt.figure()\n\n\n d_bins = (xmax-xmin) / float(num_bins)\n\n nNode=flavorTransfor(node)\n x_l=0.\n x_h=1.\n # the histogram of the data\n #test_sig\n evt_tot, cat = Y_test.shape\n \n for i_c in range(cat):\n ns, bins, patches = plt.hist(p_test[Y_test[:, i_c]==1][:, nNode], num_bins, range=(x_l, x_h), normed=1, histtype='step', weights=weight_test[Y_test[:, i_c]==1] )\n\n plt.grid(True)\n plt.legend(class_tag, loc='best')\n plt.title(node+\" node\")\n plt.savefig(\"RNNOutput4Class_\"+node+\".png\")\n\n return",
"def print_result_graphs(self):\n for i in range(self.approximation.dimension):\n self.pointwise_plot(i)",
"def test_device(self, txt):\r\n \"\"\"appropriate button.\"\"\"\r\n try:\r\n # Manual testing mode\r\n if not tst.get_auto():\r\n if ((txt == '') | (len(txt) != 2)):\r\n popup.give_warning(\"please insert correct information.\")\r\n elif (not((ord(txt[0]) in range(65, 72))|(ord(txt[0]) in range(97,104)))&(int(txt[1]) in range(1, 9))):\r\n popup.give_warning(\"please insert correct information.\")\r\n else:\r\n self.device.text = \"Device being tested: \" + txt\r\n test_device(txt)\r\n # Automatic testing mode\r\n if tst.get_auto():\r\n if self.device.text[-2:] == \"G8\":\r\n popup.give_warning(\"please press finish button if you have finished testing \" +\r\n \"or press reset button if you wish to test another chip.\")\r\n elif self.device.text == \"device being tested: \":\r\n test_device(\"A1\")\r\n self.test_result.a.a1.text = str(tst.get_devices()[0].get_stat1()) + ' ' + str(tst.get_devices()[0].get_stat2())\r\n self.device.text = \"Device being tested: A1\" \r\n else:\r\n print self.device.text\r\n txt = self.device.text[-2:-1] + str(int(self.device.text[-1])+1) if int(self.device.text[-1]) < 8 else str(unichr(ord(self.device.text[-2:-1])+1)) + '1'\r\n test_device(txt)\r\n self.device.text = \"Device being tested: \" + txt\r\n except:\r\n popup.give_warning(\"please insert correct information.\")\r\n \r\n if (txt == \"A1\") | (txt == \"a1\"):\r\n self.test_result.a.a1.text = str(tst.get_devices()[0].get_stat1()) + ' ' + str(tst.get_devices()[0].get_stat2())\r\n elif (txt == \"A2\") | (txt == \"a2\"): \r\n self.test_result.a.a2.text = str(tst.get_devices()[1].get_stat1()) + ' ' + str(tst.get_devices()[1].get_stat2())\r\n elif (txt == \"A3\") | (txt == \"a3\"): \r\n self.test_result.a.a3.text = str(tst.get_devices()[2].get_stat1()) + ' ' + str(tst.get_devices()[2].get_stat2())\r\n elif (txt == \"A4\") | (txt == \"a4\"): \r\n self.test_result.a.a4.text = str(tst.get_devices()[3].get_stat1()) + ' ' + str(tst.get_devices()[3].get_stat2())\r\n elif (txt == \"A5\") | (txt == \"a5\"): \r\n self.test_result.a.a5.text = str(tst.get_devices()[4].get_stat1()) + ' ' + str(tst.get_devices()[4].get_stat2())\r\n elif (txt == \"A6\") | (txt == \"a6\"): \r\n self.test_result.a.a6.text = str(tst.get_devices()[5].get_stat1()) + ' ' + str(tst.get_devices()[5].get_stat2())\r\n elif (txt == \"A7\") | (txt == \"a7\"): \r\n self.test_result.a.a7.text = str(tst.get_devices()[6].get_stat1()) + ' ' + str(tst.get_devices()[6].get_stat2())\r\n elif (txt == \"A8\") | (txt == \"a8\"): \r\n self.test_result.a.a8.text = str(tst.get_devices()[7].get_stat1()) + ' ' + str(tst.get_devices()[7].get_stat2())\r\n elif (txt == \"B1\") | (txt == \"b1\"): \r\n self.test_result.b.b1.text = str(tst.get_devices()[8].get_stat1()) + ' ' + str(tst.get_devices()[8].get_stat2())\r\n elif (txt == \"B2\") | (txt == \"b2\"): \r\n self.test_result.b.b2.text = str(tst.get_devices()[9].get_stat1()) + ' ' + str(tst.get_devices()[9].get_stat2())\r\n elif (txt == \"B3\") | (txt == \"b3\"): \r\n self.test_result.b.b3.text = str(tst.get_devices()[10].get_stat1()) + ' ' + str(tst.get_devices()[10].get_stat2())\r\n elif (txt == \"B4\") | (txt == \"b4\"): \r\n self.test_result.b.b4.text = str(tst.get_devices()[11].get_stat1()) + ' ' + str(tst.get_devices()[11].get_stat2())\r\n elif (txt == \"B5\") | (txt == \"b5\"): \r\n self.test_result.b.b5.text = str(tst.get_devices()[12].get_stat1()) + ' ' + str(tst.get_devices()[12].get_stat2())\r\n elif (txt == \"B6\") | (txt == \"b6\"): \r\n self.test_result.b.b6.text = str(tst.get_devices()[13].get_stat1()) 
+ ' ' + str(tst.get_devices()[13].get_stat2())\r\n elif (txt == \"B7\") | (txt == \"b7\"): \r\n self.test_result.b.b7.text = str(tst.get_devices()[14].get_stat1()) + ' ' + str(tst.get_devices()[14].get_stat2())\r\n elif (txt == \"B8\") | (txt == \"b8\"): \r\n self.test_result.b.b8.text = str(tst.get_devices()[15].get_stat1()) + ' ' + str(tst.get_devices()[15].get_stat2())\r\n elif (txt == \"C1\") | (txt == \"c1\"): \r\n self.test_result.c.c1.text = str(tst.get_devices()[16].get_stat1()) + ' ' + str(tst.get_devices()[16].get_stat2())\r\n elif (txt == \"C2\") | (txt == \"c2\"): \r\n self.test_result.c.c2.text = str(tst.get_devices()[17].get_stat1()) + ' ' + str(tst.get_devices()[17].get_stat2())\r\n elif (txt == \"C3\") | (txt == \"c3\"): \r\n self.test_result.c.c3.text = str(tst.get_devices()[18].get_stat1()) + ' ' + str(tst.get_devices()[18].get_stat2())\r\n elif (txt == \"C4\") | (txt == \"c4\"): \r\n self.test_result.c.c4.text = str(tst.get_devices()[19].get_stat1()) + ' ' + str(tst.get_devices()[19].get_stat2())\r\n elif (txt == \"C5\") | (txt == \"c5\"): \r\n self.test_result.c.c5.text = str(tst.get_devices()[20].get_stat1()) + ' ' + str(tst.get_devices()[20].get_stat2())\r\n elif (txt == \"C6\") | (txt == \"c6\"): \r\n self.test_result.c.c6.text = str(tst.get_devices()[21].get_stat1()) + ' ' + str(tst.get_devices()[21].get_stat2())\r\n elif (txt == \"C7\") | (txt == \"c7\"): \r\n self.test_result.c.c7.text = str(tst.get_devices()[22].get_stat1()) + ' ' + str(tst.get_devices()[22].get_stat2())\r\n elif (txt == \"C8\") | (txt == \"c8\"): \r\n self.test_result.c.c8.text = str(tst.get_devices()[23].get_stat1()) + ' ' + str(tst.get_devices()[23].get_stat2())\r\n elif (txt == \"D1\") | (txt == \"d1\"): \r\n self.test_result.d.d1.text = str(tst.get_devices()[24].get_stat1()) + ' ' + str(tst.get_devices()[24].get_stat2())\r\n elif (txt == \"D2\") | (txt == \"d2\"): \r\n self.test_result.d.d2.text = str(tst.get_devices()[25].get_stat1()) + ' ' + str(tst.get_devices()[25].get_stat2())\r\n elif (txt == \"D3\") | (txt == \"d3\"): \r\n self.test_result.d.d3.text = str(tst.get_devices()[26].get_stat1()) + ' ' + str(tst.get_devices()[26].get_stat2())\r\n elif (txt == \"D4\") | (txt == \"d4\"): \r\n self.test_result.d.d4.text = str(tst.get_devices()[27].get_stat1()) + ' ' + str(tst.get_devices()[27].get_stat2())\r\n elif (txt == \"D5\") | (txt == \"d5\"): \r\n self.test_result.d.d5.text = str(tst.get_devices()[28].get_stat1()) + ' ' + str(tst.get_devices()[28].get_stat2())\r\n elif (txt == \"D6\") | (txt == \"d6\"): \r\n self.test_result.d.d6.text = str(tst.get_devices()[29].get_stat1()) + ' ' + str(tst.get_devices()[29].get_stat2())\r\n elif (txt == \"D7\") | (txt == \"d7\"): \r\n self.test_result.d.d7.text = str(tst.get_devices()[30].get_stat1()) + ' ' + str(tst.get_devices()[30].get_stat2())\r\n elif (txt == \"D8\") | (txt == \"d8\"): \r\n self.test_result.d.d8.text = str(tst.get_devices()[31].get_stat1()) + ' ' + str(tst.get_devices()[31].get_stat2())\r\n elif (txt == \"E1\") | (txt == \"e1\"): \r\n self.test_result.e.e1.text = str(tst.get_devices()[32].get_stat1()) + ' ' + str(tst.get_devices()[32].get_stat2())\r\n elif (txt == \"E2\") | (txt == \"e2\"): \r\n self.test_result.e.e2.text = str(tst.get_devices()[33].get_stat1()) + ' ' + str(tst.get_devices()[33].get_stat2())\r\n elif (txt == \"E3\") | (txt == \"e3\"): \r\n self.test_result.e.e3.text = str(tst.get_devices()[34].get_stat1()) + ' ' + str(tst.get_devices()[34].get_stat2())\r\n elif (txt == \"E4\") | (txt == \"e4\"): \r\n 
self.test_result.e.e4.text = str(tst.get_devices()[35].get_stat1()) + ' ' + str(tst.get_devices()[35].get_stat2())\r\n elif (txt == \"E5\") | (txt == \"e5\"): \r\n self.test_result.e.e5.text = str(tst.get_devices()[36].get_stat1()) + ' ' + str(tst.get_devices()[36].get_stat2())\r\n elif (txt == \"E6\") | (txt == \"e6\"): \r\n self.test_result.e.e6.text = str(tst.get_devices()[37].get_stat1()) + ' ' + str(tst.get_devices()[37].get_stat2())\r\n elif (txt == \"E7\") | (txt == \"e7\"): \r\n self.test_result.e.e7.text = str(tst.get_devices()[38].get_stat1()) + ' ' + str(tst.get_devices()[38].get_stat2())\r\n elif (txt == \"E8\") | (txt == \"e8\"): \r\n self.test_result.e.e8.text = str(tst.get_devices()[39].get_stat1()) + ' ' + str(tst.get_devices()[39].get_stat2())\r\n elif (txt == \"F1\") | (txt == \"f1\"): \r\n self.test_result.f.f1.text = str(tst.get_devices()[40].get_stat1()) + ' ' + str(tst.get_devices()[40].get_stat2())\r\n elif (txt == \"F2\") | (txt == \"f2\"): \r\n self.test_result.f.f2.text = str(tst.get_devices()[41].get_stat1()) + ' ' + str(tst.get_devices()[41].get_stat2())\r\n elif (txt == \"F3\") | (txt == \"f3\"): \r\n self.test_result.f.f3.text = str(tst.get_devices()[42].get_stat1()) + ' ' + str(tst.get_devices()[42].get_stat2())\r\n elif (txt == \"F4\") | (txt == \"f4\"): \r\n self.test_result.f.f4.text = str(tst.get_devices()[43].get_stat1()) + ' ' + str(tst.get_devices()[43].get_stat2())\r\n elif (txt == \"F5\") | (txt == \"f5\"): \r\n self.test_result.f.f5.text = str(tst.get_devices()[44].get_stat1()) + ' ' + str(tst.get_devices()[44].get_stat2())\r\n elif (txt == \"F6\") | (txt == \"f6\"): \r\n self.test_result.f.f6.text = str(tst.get_devices()[45].get_stat1()) + ' ' + str(tst.get_devices()[45].get_stat2())\r\n elif (txt == \"F7\") | (txt == \"f7\"): \r\n self.test_result.f.f7.text = str(tst.get_devices()[46].get_stat1()) + ' ' + str(tst.get_devices()[46].get_stat2())\r\n elif (txt == \"F8\") | (txt == \"f8\"): \r\n self.test_result.f.f8.text = str(tst.get_devices()[47].get_stat1()) + ' ' + str(tst.get_devices()[47].get_stat2())\r\n elif (txt == \"G1\") | (txt == \"g1\"): \r\n self.test_result.g.g1.text = str(tst.get_devices()[48].get_stat1()) + ' ' + str(tst.get_devices()[48].get_stat2())\r\n elif (txt == \"G2\") | (txt == \"g2\"): \r\n self.test_result.g.g2.text = str(tst.get_devices()[49].get_stat1()) + ' ' + str(tst.get_devices()[49].get_stat2())\r\n elif (txt == \"G3\") | (txt == \"g3\"): \r\n self.test_result.g.g3.text = str(tst.get_devices()[50].get_stat1()) + ' ' + str(tst.get_devices()[50].get_stat2())\r\n elif (txt == \"G4\") | (txt == \"g4\"): \r\n self.test_result.g.g4.text = str(tst.get_devices()[51].get_stat1()) + ' ' + str(tst.get_devices()[51].get_stat2())\r\n elif (txt == \"G5\") | (txt == \"g5\"): \r\n self.test_result.g.g5.text = str(tst.get_devices()[52].get_stat1()) + ' ' + str(tst.get_devices()[52].get_stat2())\r\n elif (txt == \"G6\") | (txt == \"g6\"): \r\n self.test_result.g.g6.text = str(tst.get_devices()[53].get_stat1()) + ' ' + str(tst.get_devices()[53].get_stat2())\r\n elif (txt == \"G7\") | (txt == \"g7\"): \r\n self.test_result.g.g7.text = str(tst.get_devices()[54].get_stat1()) + ' ' + str(tst.get_devices()[54].get_stat2())\r\n elif (txt == \"G8\") | (txt == \"g8\"): \r\n self.test_result.g.g8.text = str(tst.get_devices()[55].get_stat1()) + ' ' + str(tst.get_devices()[55].get_stat2())",
"def btn_func_display(self):\n #2D-to-3D conversion\n keypoint_pos = self.process.convert_2d_to_3d([self.scenes[self._count].labels])\n #transform points to origins of respective scene\n self.process.transform_points(keypoint_pos, [self.scenes[self._count].labels])\n #visualize the labeled keypoints in scene\n obj = []\n if not self.process.scene_kpts==[]:\n obj = self.process.scene_kpts[0].transpose()\n self.process.visualize_points_in_scene(self.scenes[self._count].mesh, obj)",
"def plot_test_results(self): \r\n self.svm_test_results = pd.read_json(path.deployment_svm_test_results)\r\n self.svm_test_results.sort_index(inplace=True)\r\n print('\\nPlotting test results...')\r\n self.pdf_object = PdfPages(path.deployment_results_plots)\r\n print('\\t|--Plotting SVM test results...')\r\n self.plot_probabilities(dataframe=self.svm_test_results,\r\n name='SVM', exponential=False)\r\n self.svm_test_results = pd.read_json(path.deployment_svm_test_results)\r\n self.svm_test_results.sort_index(inplace=True)\r\n self.plot_probabilities(dataframe=self.svm_test_results,\r\n name='SVM EMA', exponential=True) \r\n print('\\nPlotted results saved to {}'.format(path.deployment_results_plots))\r\n self.svm_test_results = pd.read_json(path.deployment_svm_test_results)\r\n self.svm_test_results.sort_index(inplace=True) \r\n self.create_chart_data(dataframe=self.svm_test_results)\r\n print('\\nChart data saved to {}'.format(path.deployment_chart_data))\r\n self.pdf_object.close()",
"def do_outputs(self):\n self.guess.displayBoard()\n self.draw.drawJumper()",
"def graphic_window(self):",
"def predict_plot_test(self):\r\n\r\n sc_preds = self.model.predict(self.x_val)\r\n #print(preds[-10:])\r\n y_org = self.test_set[ : , -1]\r\n self.scaler.fit(y_org.reshape(-1,1))\r\n preds = self.scaler.inverse_transform(sc_preds[: , -1].reshape(-1,1))\r\n\r\n #print(preds[-10:])\r\n\r\n plt.plot(preds,label = \"predicted\", color = 'orange')\r\n plt.plot(y_org[self.window_size:], label = \"real\", color = 'blue')\r\n plt.title(\"Test phase - predicting the test results\")\r\n plt.legend()\r\n plt.show()",
"def visualize(self, observation, action):\n raise NotImplementedError",
"def testDrawTraversal(self):\n g=self.g\n self.examples.load_by_name(g, \"tinkerpop-modern\")\n traversal=g.E().hasLabel(\"created\").toList()\n gviz=GremlinDraw.show_graph_traversal(g, traversal, \"software\")\n self.check_draw(gviz)",
"def visual_state_action(self):\n\n q_dict = self.q_table_dict\n plt.figure(dpi=220, figsize=(7, 7))\n ax = plt.axes()\n ax.set(xlim=[0, 10], ylim=[0, 10])\n\n ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) # 设置x主坐标间隔 1\n ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) # 设置y主坐标间隔 1\n ax.grid(True, linestyle=\"-\", color=\"0.6\", linewidth=\"1\")\n # ax.scatter(8.5, 7.5)\n\n keys = sorted(q_dict.keys())\n x, y, i = 0.5, 9.5, 1\n for key in keys:\n # print(\"key: \" + str(key))\n while key[0]*10 + key[1] != i - 1:\n i = i + 1\n x = x + 1\n if x == 10.5:\n x = 0.5\n y = y - 1\n\n if key == self.goal_state:\n ax.scatter(x, y)\n i = i + 1\n x = x + 1\n continue\n\n if np.average(q_dict[key]) == 0:\n i = i + 1\n x = x + 1\n if x == 10.5:\n x = 0.5\n y = y - 1\n continue\n\n if q_dict[key].index(np.max(q_dict[key])) == 0:\n plt.annotate('', xy=(x - 0.5, y), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\", color='red'))\n else:\n plt.annotate('', xy=(x - 0.5, y), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n if q_dict[key].index(np.max(q_dict[key])) == 1:\n plt.annotate('', xy=(x, y + 0.5), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\", color='red'))\n else:\n plt.annotate('', xy=(x, y + 0.5), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n if q_dict[key].index(np.max(q_dict[key])) == 2:\n plt.annotate('', xy=(x + 0.5, y), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\", color='red'))\n else:\n plt.annotate('', xy=(x + 0.5, y), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n if q_dict[key].index(np.max(q_dict[key])) == 3:\n plt.annotate('', xy=(x, y - 0.5), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\", color='red'))\n else:\n plt.annotate('', xy=(x, y - 0.5), xytext=(x, y),\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"))\n\n x = x + 1\n if x == 10.5:\n x = 0.5\n y = y - 1\n i = i + 1\n\n # 设置刻度标记的大小\n plt.tick_params(axis='both', labelsize=10)\n\n plt.show()",
"def click1(self):\n\n global r_info\n global sigma_info\n global b_info\n global user_r_entry\n global user_sigma_entry\n global user_b_entry\n global graphframe\n\n try:\n r_info = user_r_entry.get() #This obtains the user input for r\n sigma_info = user_sigma_entry.get() #This obtains the user input for sigma\n b_info = user_b_entry.get() #This obtains the user input for b\n \n print (\"\")\n print (\"==============================================\")\n print (\" SYSTEM REPORT \") \n \n timeinit = time.process_time() #start timer to get execution time\n \n print (\"User entered: \\n r = %f \\n sigma = %f \\n b = %f \\n\" \n % (r_info, sigma_info, b_info)) #Printing System reports in kernel\n \n print (\"The randomised initial values are \\n X0 = %f \\n Y0 = %f \\n Z0 = %f \\n \" \n %(self.X_0, self.Y_0, self.Z_0))#Printing System reports in kernel\n \n xyz = [self.X_0, self.Y_0, self.Z_0] #initialises xyz in a list using the initial values\n \n for _ in range(self.STEP): #iterates up till the STEP size then applies RK4\n k_0 = self.__lorenz(xyz)\n k_1 = self.__lorenz([x + k * self.DT / 2 for x, k in zip(xyz, k_0)])\n k_2 = self.__lorenz([x + k * self.DT / 2 for x, k in zip(xyz, k_1)])\n k_3 = self.__lorenz([x + k * self.DT for x, k in zip(xyz, k_2)])\n for i in range(3):\n xyz[i] += (k_0[i] + 2 * k_1[i] + 2 * k_2[i] + k_3[i]) \\\n * self.DT / 6.0\n self.res[i].append(xyz[i])\n \n fig = Figure() \n ax = Axes3D(fig)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n \n self.canvas = FigureCanvasTkAgg(fig, master = graphframe) #This embeds the graph into Tkinter, places this with its master in graphframe\n ax.plot(self.res[0], self.res[1], self.res[2], color=\"blue\", lw=1) \n \n self.canvas.draw() #main app that draws and embeds the graph onto tkinter app\n self.canvas.get_tk_widget().grid(row = 0, column = 0) #.grid places the object on the window.\n \n timeend = time.process_time() #timer for end of time\n timer = timeend - timeinit #this obtians the time taken to excecute\n print (\"Time taken to execute: %f seconds \" % timer)\n print (\"\")\n print (\" END OF REPORT \")\n print (\"==============================================\")\n\n\n except tk.TclError:\n print (messagebox.showinfo(\"Invalid!\", \"Please Enter Valid Inputs (i.e. integers or floats)\"))\n print (\"\")",
"def drawTransitionResults(self):\n # Get the data, it should be the last element of the list indexed -1 (last element)\n result = self.game_obj.player.statistics.round_results[-1]\n # Set labels for general tab\n self.post_results_label_hours_bid_on.setText(number_to_string(8760 / self.game_obj.bidRounds, \"hours\"))\n self.post_results_label_demand.setText(number_to_string(result[\"demand\"], \"MW\"))\n self.post_results_label_system_price.setText(number_to_string(result[\"system_price\"], \"NOK/MW\"))\n self.post_results_label_used_capacity.setText(\"{}/{} MW\".format(result[\"sold_amount\"], result[\"bid_amount\"]))\n self.post_results_label_production.setText(\n number_to_string(result[\"sold_amount\"] * 8760 / self.game_obj.bidRounds, \"GWh\")) # /1000 to get GWh\n self.post_results_label_profits.setText(number_to_string(result[\"profits\"], \"MNOK\"))\n self.post_results_label_revenue.setText(number_to_string(result[\"revenue\"], \"MNOK\"))\n self.post_results_label_costs.setText(number_to_string(result[\"cost\"], \"MNOK\"))\n self.post_results_label_administrative_costs.setText(number_to_string(result[\"administrative_cost\"], \"MNOK\"))\n self.post_results_label_operational_costs.setText(number_to_string(result[\"operational_cost\"], \"MNOK\"))\n self.post_results_label_taxes.setText(number_to_string(result[\"taxes\"], \"MNOK\"))\n self.post_results_label_emissions.setText(\n number_to_string(result[\"emissions\"], \"TON CO<sub>2</sub>eq\"))\n self.post_results_label_gas_price.setText(number_to_string(result[\"gas_price\"], \"NOK/MWh\"))\n self.post_results_label_gas_fuel.setText(number_to_string(result[\"gas_fuel\"], \"GWh\"))\n self.post_results_label_gas_production.setText(number_to_string(result[\"player_gas_production\"], \"GWh\"))\n self.post_results_label_coal_price.setText(number_to_string(result[\"coal_price\"], \"NOK/MWh\"))\n self.post_results_label_coal_fuel.setText(\n number_to_string(result[\"coal_fuel\"], \"GWh\"))\n self.post_results_label_coal_production.setText(number_to_string(result[\"player_coal_production\"], \"GWh\"))\n # Set labels for source tab\n self.total_production = result[\"total_pv_production\"] + result[\"total_gas_production\"] + result[\n \"total_coal_production\"]\n # Calculate player market shares for sources\n if result[\"total_pv_production\"] == 0:\n self.player_pv_market_share = 0\n else:\n self.player_pv_market_share = result[\"player_pv_production\"] / result[\"total_pv_production\"]\n if result[\"total_gas_production\"] == 0:\n self.player_gas_market_share = 0\n else:\n self.player_gas_market_share = result[\"player_gas_production\"] / result[\"total_gas_production\"]\n if result[\"total_coal_production\"] == 0:\n self.player_coal_market_share = 0\n else:\n self.player_coal_market_share = result[\"player_coal_production\"] / result[\"total_coal_production\"]\n # Calculate the source market shares\n if self.total_production == 0:\n self.pv_market_share = 0\n self.gas_market_share = 0\n self.coal_market_share = 0\n else:\n self.pv_market_share = result[\"total_pv_production\"] / self.total_production\n self.gas_market_share = result[\"total_gas_production\"] / self.total_production\n self.coal_market_share = result[\"total_coal_production\"] / self.total_production\n # Set players production label\n self.post_results_label_your_production_pv.setText(number_to_string(result[\"player_pv_production\"], \"GWh\"))\n self.post_results_label_your_production_gas.setText(\n number_to_string(result[\"player_gas_production\"], \"GWh\"))\n 
self.post_results_label_your_production_coal.setText(\n number_to_string(result[\"player_coal_production\"], \"GWh\"))\n # Set total production label\n self.post_results_label_total_production_pv.setText(number_to_string(result[\"total_pv_production\"], \"GWh\"))\n self.post_results_label_total_production_gas.setText(\n number_to_string(result[\"total_gas_production\"], \"GWh\"))\n self.post_results_label_total_production_coal.setText(\n number_to_string(result[\"total_coal_production\"], \"GWh\"))\n # Set players market share label\n self.post_results_label_your_market_share_pv.setText(number_to_string(self.player_pv_market_share, \"%\"))\n self.post_results_label_your_market_share_gas.setText(number_to_string(self.player_gas_market_share, \"%\"))\n self.post_results_label_your_market_share_coal.setText(number_to_string(self.player_coal_market_share, \"%\"))\n # Set source market share label (formatting to \"-\" if zero)\n self.post_results_label_source_market_share_pv.setText(number_to_string(self.pv_market_share, \"%\"))\n self.post_results_label_source_market_share_gas.setText(number_to_string(self.gas_market_share, \"%\"))\n self.post_results_label_source_market_share_coal.setText(number_to_string(self.coal_market_share, \"%\"))\n # Draw the demand plot for the general tab\n bids = dict_bids_to_bids_object_list(result[\"own_bids\"])\n bids.extend(dict_bids_to_bids_object_list(result[\"other_bids\"]))\n real_demand = [result[\"demand_curve_fixed\"], result[\"demand_curve_variable\"]]\n self.plot_post_results_demand_graph(bids, result[\"system_price\"], real_demand)\n # Draw the pie chart for the sources tab\n self.plot_post_round_results_sources_graph()\n # Print all the bids\n elements = len(result[\"own_bids\"][\"plant\"])\n #font = QtGui.QFont()\n #font.setPointSize(12)\n if elements == 0:\n self.post_results_empty = QtWidgets.QLabel(self.page_post_round)\n self.post_results_empty.setFont(self.fonts[\"tiny_text\"])\n self.post_results_verticalLayout_emptyList.addWidget(self.post_results_empty)\n self.post_results_empty.setText(\"You did not send any bids this round\")\n return\n # Creating empty lists for every column in the bid list\n self.post_results_widget_name = [None] * elements\n self.post_results_widget_price = [None] * elements\n self.post_results_widget_amount = [None] * elements\n self.post_results_widget_producer_surplus = [None] * elements\n self.post_results_widget_revenues = [None] * elements\n self.post_results_widget_operational_costs = [None] * elements\n self.post_results_widget_taxes = [None] * elements\n self.post_results_widget_emissions = [None] * elements\n for row in range(elements):\n # Plant name\n self.post_results_widget_name[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_name[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_name[row], row + 1, 0, 1, 1)\n self.post_results_widget_name[row].setText(self.game_obj.player.getPlantName(result[\"own_bids\"][\"plant\"][row]))\n # Price\n self.post_results_widget_price[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_price[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_price[row], row + 1, 1, 1, 1)\n self.post_results_widget_price[row].setText(number_to_string(result[\"own_bids\"][\"price\"][row], \"NOK/MW\"))\n # Amount\n self.post_results_widget_amount[row] = QtWidgets.QLabel(self.page_post_round)\n 
self.post_results_widget_amount[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_amount[row], row + 1, 2, 1, 1)\n self.post_results_widget_amount[row].setText(\n \"{}/{} MW\".format(result[\"own_bids\"][\"actual_amount\"][row], result[\"own_bids\"][\"amount\"][row]))\n # Producer_surplus\n self.post_results_widget_producer_surplus[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_producer_surplus[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_producer_surplus[row], row + 1, 3, 1, 1)\n self.post_results_widget_producer_surplus[row].setText(number_to_string(result[\"own_bids\"][\"producer_surplus\"][row], \"MNOK\"))\n # Revenues\n self.post_results_widget_revenues[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_revenues[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_revenues[row], row + 1, 4, 1, 1)\n self.post_results_widget_revenues[row].setText(number_to_string(result[\"own_bids\"][\"revenues\"][row], \"MNOK\"))\n # Operational costs\n self.post_results_widget_operational_costs[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_operational_costs[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_operational_costs[row], row + 1, 5, 1, 1)\n self.post_results_widget_operational_costs[row].setText(number_to_string(result[\"own_bids\"][\"operational_costs\"][row], \"MNOK\"))\n # Taxes\n self.post_results_widget_taxes[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_taxes[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_taxes[row], row + 1, 6, 1, 1)\n self.post_results_widget_taxes[row].setText(number_to_string(result[\"own_bids\"][\"taxes\"][row], \"MNOK\"))\n # Emissions\n self.post_results_widget_emissions[row] = QtWidgets.QLabel(self.page_post_round)\n self.post_results_widget_emissions[row].setFont(self.fonts[\"tiny_text\"])\n self.post_results_gridLayout.addWidget(self.post_results_widget_emissions[row], row + 1, 7, 1, 1)\n self.post_results_widget_emissions[row].setText(\n number_to_string(result[\"own_bids\"][\"emissions\"][row], \"TON CO<sub>2</sub>eq\"))",
"def baseline():\n global state\n draw, image = clear_screen()\n\n data[\"new_user\"] = False\n data[\"tests\"] = {\"0\": None}\n data[\"number_of_tests\"] = -1\n update_data()\n\n update_buttons(draw, image, \"Start\", \"Start\", \"Record a \", \"baseline.\")\n state = \"Test\"",
"def run():\n axis = True\n coord = [50, 0]\n tft.fill(TFT.BLACK)\n while True:\n \n coord[int(axis)] = int(pot.read()/8)\n print(coord, [x.value() for x in buttons])\n if buttons[0].value() == 0:\n print('button0')\n axis = not axis\n \n if buttons[1].value() == 0:\n print('button1')\n # break\n\n tft.pixel(coord, TFT.RED)",
"def test_measure_button(self):\n measure_button = MeasureButton(0, 0, 20, 20)\n self.assertEqual((255, 0, 0), measure_button.return_color())\n self.assertFalse(measure_button.active)\n measure_button.clicked()\n self.assertEqual((0, 255, 0), measure_button.return_color())\n self.assertTrue(measure_button.active)\n measure_button.clicked()\n self.assertEqual((255, 0, 0), measure_button.return_color())\n self.assertFalse(measure_button.active)",
"def output_graphs(results):",
"def plot_bo(self, n_test=500):\n self.tracker.plot_bo(n_test)",
"def drawActogram(self):\n try:\n sender = ''.join([x for x in self.sender().text()\n if x.isnumeric()])\n print('plotting ' + sender)\n sender = int(sender)-1\n\n time_ = []\n status = []\n\n with open(self.name[sender].text(), 'rb') as f:\n for buff in iter(lambda: f.read(8), b''):\n anteroom_tuple = struct.unpack('=If', buff)\n time_.append(anteroom_tuple[0])\n status.append(anteroom_tuple[1])\n\n time_ = np.asarray(time_)/(24*3600) + 719163 - 5/24\n status = np.asarray(status)\n\n days = np.floor(time_)\n x = (time_ - days) * 24\n y = status + (days[-1] - days)\n\n self.win = pg.GraphicsWindow()\n pg.setConfigOptions(antialias=True)\n self.p1 = self.win.addPlot()\n\n for i in range(int(days[0]), int(days[-1]) + 1):\n self.p1.plot(x[days == i], y[days == i], pen='r')\n self.p1.plot(x[days == i-1] + 24, # double-plot\n y[days == i-1] + 1, pen='r')\n self.p1.plot(x[days == int(days[-1])] + 24, # double-plot\n y[days == int(days[-1])] + 1, pen='r') # last day\n \n # Set axis layout\n self.xax = self.p1.getAxis('bottom')\n self.xax.setTickSpacing(24, 2)\n self.yax = self.p1.getAxis('left') \n self.p1.showGrid(x=True, y=True)\n\n except FileNotFoundError:\n print('No file')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
take tester's choice of device and put the test result into the
|
def test_device(self, txt):
"""appropriate button."""
try:
# Manual testing mode
if not tst.get_auto():
if ((txt == '') | (len(txt) != 2)):
popup.give_warning("please insert correct information.")
elif (not((ord(txt[0]) in range(65, 72))|(ord(txt[0]) in range(97,104)))&(int(txt[1]) in range(1, 9))):
popup.give_warning("please insert correct information.")
else:
self.device.text = "Device being tested: " + txt
test_device(txt)
# Automatic testing mode
if tst.get_auto():
if self.device.text[-2:] == "G8":
popup.give_warning("please press finish button if you have finished testing " +
"or press reset button if you wish to test another chip.")
elif self.device.text == "device being tested: ":
test_device("A1")
self.test_result.a.a1.text = str(tst.get_devices()[0].get_stat1()) + ' ' + str(tst.get_devices()[0].get_stat2())
self.device.text = "Device being tested: A1"
else:
                    print(self.device.text)
                    # Advance to the next device: bump the digit, or move to the next row letter after 8.
                    txt = self.device.text[-2:-1] + str(int(self.device.text[-1])+1) if int(self.device.text[-1]) < 8 else chr(ord(self.device.text[-2:-1])+1) + '1'
test_device(txt)
self.device.text = "Device being tested: " + txt
        except Exception:
popup.give_warning("please insert correct information.")
if (txt == "A1") | (txt == "a1"):
self.test_result.a.a1.text = str(tst.get_devices()[0].get_stat1()) + ' ' + str(tst.get_devices()[0].get_stat2())
elif (txt == "A2") | (txt == "a2"):
self.test_result.a.a2.text = str(tst.get_devices()[1].get_stat1()) + ' ' + str(tst.get_devices()[1].get_stat2())
elif (txt == "A3") | (txt == "a3"):
self.test_result.a.a3.text = str(tst.get_devices()[2].get_stat1()) + ' ' + str(tst.get_devices()[2].get_stat2())
elif (txt == "A4") | (txt == "a4"):
self.test_result.a.a4.text = str(tst.get_devices()[3].get_stat1()) + ' ' + str(tst.get_devices()[3].get_stat2())
elif (txt == "A5") | (txt == "a5"):
self.test_result.a.a5.text = str(tst.get_devices()[4].get_stat1()) + ' ' + str(tst.get_devices()[4].get_stat2())
elif (txt == "A6") | (txt == "a6"):
self.test_result.a.a6.text = str(tst.get_devices()[5].get_stat1()) + ' ' + str(tst.get_devices()[5].get_stat2())
elif (txt == "A7") | (txt == "a7"):
self.test_result.a.a7.text = str(tst.get_devices()[6].get_stat1()) + ' ' + str(tst.get_devices()[6].get_stat2())
elif (txt == "A8") | (txt == "a8"):
self.test_result.a.a8.text = str(tst.get_devices()[7].get_stat1()) + ' ' + str(tst.get_devices()[7].get_stat2())
elif (txt == "B1") | (txt == "b1"):
self.test_result.b.b1.text = str(tst.get_devices()[8].get_stat1()) + ' ' + str(tst.get_devices()[8].get_stat2())
elif (txt == "B2") | (txt == "b2"):
self.test_result.b.b2.text = str(tst.get_devices()[9].get_stat1()) + ' ' + str(tst.get_devices()[9].get_stat2())
elif (txt == "B3") | (txt == "b3"):
self.test_result.b.b3.text = str(tst.get_devices()[10].get_stat1()) + ' ' + str(tst.get_devices()[10].get_stat2())
elif (txt == "B4") | (txt == "b4"):
self.test_result.b.b4.text = str(tst.get_devices()[11].get_stat1()) + ' ' + str(tst.get_devices()[11].get_stat2())
elif (txt == "B5") | (txt == "b5"):
self.test_result.b.b5.text = str(tst.get_devices()[12].get_stat1()) + ' ' + str(tst.get_devices()[12].get_stat2())
elif (txt == "B6") | (txt == "b6"):
self.test_result.b.b6.text = str(tst.get_devices()[13].get_stat1()) + ' ' + str(tst.get_devices()[13].get_stat2())
elif (txt == "B7") | (txt == "b7"):
self.test_result.b.b7.text = str(tst.get_devices()[14].get_stat1()) + ' ' + str(tst.get_devices()[14].get_stat2())
elif (txt == "B8") | (txt == "b8"):
self.test_result.b.b8.text = str(tst.get_devices()[15].get_stat1()) + ' ' + str(tst.get_devices()[15].get_stat2())
elif (txt == "C1") | (txt == "c1"):
self.test_result.c.c1.text = str(tst.get_devices()[16].get_stat1()) + ' ' + str(tst.get_devices()[16].get_stat2())
elif (txt == "C2") | (txt == "c2"):
self.test_result.c.c2.text = str(tst.get_devices()[17].get_stat1()) + ' ' + str(tst.get_devices()[17].get_stat2())
elif (txt == "C3") | (txt == "c3"):
self.test_result.c.c3.text = str(tst.get_devices()[18].get_stat1()) + ' ' + str(tst.get_devices()[18].get_stat2())
elif (txt == "C4") | (txt == "c4"):
self.test_result.c.c4.text = str(tst.get_devices()[19].get_stat1()) + ' ' + str(tst.get_devices()[19].get_stat2())
elif (txt == "C5") | (txt == "c5"):
self.test_result.c.c5.text = str(tst.get_devices()[20].get_stat1()) + ' ' + str(tst.get_devices()[20].get_stat2())
elif (txt == "C6") | (txt == "c6"):
self.test_result.c.c6.text = str(tst.get_devices()[21].get_stat1()) + ' ' + str(tst.get_devices()[21].get_stat2())
elif (txt == "C7") | (txt == "c7"):
self.test_result.c.c7.text = str(tst.get_devices()[22].get_stat1()) + ' ' + str(tst.get_devices()[22].get_stat2())
elif (txt == "C8") | (txt == "c8"):
self.test_result.c.c8.text = str(tst.get_devices()[23].get_stat1()) + ' ' + str(tst.get_devices()[23].get_stat2())
elif (txt == "D1") | (txt == "d1"):
self.test_result.d.d1.text = str(tst.get_devices()[24].get_stat1()) + ' ' + str(tst.get_devices()[24].get_stat2())
elif (txt == "D2") | (txt == "d2"):
self.test_result.d.d2.text = str(tst.get_devices()[25].get_stat1()) + ' ' + str(tst.get_devices()[25].get_stat2())
elif (txt == "D3") | (txt == "d3"):
self.test_result.d.d3.text = str(tst.get_devices()[26].get_stat1()) + ' ' + str(tst.get_devices()[26].get_stat2())
elif (txt == "D4") | (txt == "d4"):
self.test_result.d.d4.text = str(tst.get_devices()[27].get_stat1()) + ' ' + str(tst.get_devices()[27].get_stat2())
elif (txt == "D5") | (txt == "d5"):
self.test_result.d.d5.text = str(tst.get_devices()[28].get_stat1()) + ' ' + str(tst.get_devices()[28].get_stat2())
elif (txt == "D6") | (txt == "d6"):
self.test_result.d.d6.text = str(tst.get_devices()[29].get_stat1()) + ' ' + str(tst.get_devices()[29].get_stat2())
elif (txt == "D7") | (txt == "d7"):
self.test_result.d.d7.text = str(tst.get_devices()[30].get_stat1()) + ' ' + str(tst.get_devices()[30].get_stat2())
elif (txt == "D8") | (txt == "d8"):
self.test_result.d.d8.text = str(tst.get_devices()[31].get_stat1()) + ' ' + str(tst.get_devices()[31].get_stat2())
elif (txt == "E1") | (txt == "e1"):
self.test_result.e.e1.text = str(tst.get_devices()[32].get_stat1()) + ' ' + str(tst.get_devices()[32].get_stat2())
elif (txt == "E2") | (txt == "e2"):
self.test_result.e.e2.text = str(tst.get_devices()[33].get_stat1()) + ' ' + str(tst.get_devices()[33].get_stat2())
elif (txt == "E3") | (txt == "e3"):
self.test_result.e.e3.text = str(tst.get_devices()[34].get_stat1()) + ' ' + str(tst.get_devices()[34].get_stat2())
elif (txt == "E4") | (txt == "e4"):
self.test_result.e.e4.text = str(tst.get_devices()[35].get_stat1()) + ' ' + str(tst.get_devices()[35].get_stat2())
elif (txt == "E5") | (txt == "e5"):
self.test_result.e.e5.text = str(tst.get_devices()[36].get_stat1()) + ' ' + str(tst.get_devices()[36].get_stat2())
elif (txt == "E6") | (txt == "e6"):
self.test_result.e.e6.text = str(tst.get_devices()[37].get_stat1()) + ' ' + str(tst.get_devices()[37].get_stat2())
elif (txt == "E7") | (txt == "e7"):
self.test_result.e.e7.text = str(tst.get_devices()[38].get_stat1()) + ' ' + str(tst.get_devices()[38].get_stat2())
elif (txt == "E8") | (txt == "e8"):
self.test_result.e.e8.text = str(tst.get_devices()[39].get_stat1()) + ' ' + str(tst.get_devices()[39].get_stat2())
elif (txt == "F1") | (txt == "f1"):
self.test_result.f.f1.text = str(tst.get_devices()[40].get_stat1()) + ' ' + str(tst.get_devices()[40].get_stat2())
elif (txt == "F2") | (txt == "f2"):
self.test_result.f.f2.text = str(tst.get_devices()[41].get_stat1()) + ' ' + str(tst.get_devices()[41].get_stat2())
elif (txt == "F3") | (txt == "f3"):
self.test_result.f.f3.text = str(tst.get_devices()[42].get_stat1()) + ' ' + str(tst.get_devices()[42].get_stat2())
elif (txt == "F4") | (txt == "f4"):
self.test_result.f.f4.text = str(tst.get_devices()[43].get_stat1()) + ' ' + str(tst.get_devices()[43].get_stat2())
elif (txt == "F5") | (txt == "f5"):
self.test_result.f.f5.text = str(tst.get_devices()[44].get_stat1()) + ' ' + str(tst.get_devices()[44].get_stat2())
elif (txt == "F6") | (txt == "f6"):
self.test_result.f.f6.text = str(tst.get_devices()[45].get_stat1()) + ' ' + str(tst.get_devices()[45].get_stat2())
elif (txt == "F7") | (txt == "f7"):
self.test_result.f.f7.text = str(tst.get_devices()[46].get_stat1()) + ' ' + str(tst.get_devices()[46].get_stat2())
elif (txt == "F8") | (txt == "f8"):
self.test_result.f.f8.text = str(tst.get_devices()[47].get_stat1()) + ' ' + str(tst.get_devices()[47].get_stat2())
elif (txt == "G1") | (txt == "g1"):
self.test_result.g.g1.text = str(tst.get_devices()[48].get_stat1()) + ' ' + str(tst.get_devices()[48].get_stat2())
elif (txt == "G2") | (txt == "g2"):
self.test_result.g.g2.text = str(tst.get_devices()[49].get_stat1()) + ' ' + str(tst.get_devices()[49].get_stat2())
elif (txt == "G3") | (txt == "g3"):
self.test_result.g.g3.text = str(tst.get_devices()[50].get_stat1()) + ' ' + str(tst.get_devices()[50].get_stat2())
elif (txt == "G4") | (txt == "g4"):
self.test_result.g.g4.text = str(tst.get_devices()[51].get_stat1()) + ' ' + str(tst.get_devices()[51].get_stat2())
elif (txt == "G5") | (txt == "g5"):
self.test_result.g.g5.text = str(tst.get_devices()[52].get_stat1()) + ' ' + str(tst.get_devices()[52].get_stat2())
elif (txt == "G6") | (txt == "g6"):
self.test_result.g.g6.text = str(tst.get_devices()[53].get_stat1()) + ' ' + str(tst.get_devices()[53].get_stat2())
elif (txt == "G7") | (txt == "g7"):
self.test_result.g.g7.text = str(tst.get_devices()[54].get_stat1()) + ' ' + str(tst.get_devices()[54].get_stat2())
elif (txt == "G8") | (txt == "g8"):
self.test_result.g.g8.text = str(tst.get_devices()[55].get_stat1()) + ' ' + str(tst.get_devices()[55].get_stat2())
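
The long elif chain above is equivalent to an arithmetic index lookup. A minimal sketch, assuming device labels always run A1 through G8 and the result widgets follow matching lowercase names (for example self.test_result.b.b3); device_index and show_result are illustrative names, not part of the original module:

def device_index(label):
    # "A1" -> 0, "b3" -> 10, "G8" -> 55: rows A..G with eight devices per row.
    return (ord(label[0].upper()) - ord('A')) * 8 + (int(label[1]) - 1)

def show_result(self, label):
    # Resolve e.g. "B3" or "b3" to the widget self.test_result.b.b3 and fill it in.
    dev = tst.get_devices()[device_index(label)]
    row = getattr(self.test_result, label[0].lower())
    cell = getattr(row, label[0].lower() + label[1])
    cell.text = str(dev.get_stat1()) + ' ' + str(dev.get_stat2())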
|
[
"def create_testbed_device_instance(self, dev_name_info, hint):\n testbed_dev = None\n if hint == \"AP\":\n testbed_dev = TestBedAP(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"AP\"\n if hint == \"STA\":\n testbed_dev = TestBedSTA(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"STA\"\n if hint == \"DUT\":\n testbed_dev = DUT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"DUT\"\n if hint == \"SNIFFER\":\n testbed_dev = Sniffer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"SNIFFER\"\n if hint == \"PCENDPOINT\":\n testbed_dev = PCEndpoint(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"PCENDPOINT\"\n if hint == \"APCONFIG\":\n testbed_dev = APConfig(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"APCONFIG\"\n if hint == \"RADIUSSERVER\":\n testbed_dev = RadiusServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"RADIUSSERVER\"\n if hint == \"OSUSERVER\":\n testbed_dev = OSUServer(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"OSUSERVER\"\n if hint == \"ATTENUATOR\":\n testbed_dev = Attenuator(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"ATTENUATOR\"\n if hint == \"POWERSWITCH\":\n testbed_dev = PowerSwitch(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"POWERSWITCH\"\n if hint == \"WFAEMT\":\n testbed_dev = WFAEMT(self.prog_name)\n testbed_dev.dev_name = dev_name_info\n testbed_dev.dev_type = \"WFAEMT\"\n return testbed_dev",
"def test_questionnaire_device_good(self):\n with mock.patch('builtins.input', mock.MagicMock(return_value=\"SNEK!\")):\n assert bs.questionnaire_device() == \"SNEK!\"",
"def test(device, logger, result, case_pass, case_fail, **kwargs):\n logger.info(\"Start TC audio_playback_check.\")\n logger.info(\"Try to list ALSA devices.\")\n r, o = device.execute_adb_shell_cmd(\"alsa_aplay -l\")\n if r != 0:\n #command not executed successfully\n case_fail(result, \"List command failed.\", logger, logs=o)\n else:\n if len(o) == 0:\n #TODO: Findout why below error msg won't print via stdout and stderr\n err_msg = \"aplay: device_list:268: no soundcards found...\"\n case_fail(result, err_msg, logger)\n else:\n o = o[1:]\n for l in o:\n logger.debug(\"ALSA_list>>%s\", l)\n case_pass(result, \"ALSA_list detected sound cards.\",\n logger, logs=o)\n return",
"def set_testbed_device(self):\n curr_node = self.ill.head\n while curr_node is not None:\n if curr_node.tag == \"TESTBED\":\n # curr_node.data is a dictionary \n dev_info_str = (curr_node.data.keys())[0]\n\n for key in TestScriptElementType.script_testbed_devices:\n\n searchObj = re.search(key, dev_info_str, re.I)\n if searchObj:\n if TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_DUT:\n self.set_dut_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_CONTROL_AGENT_DUT:\n self.set_dut_dev(dev_info_str, curr_node, \"NOTVAR\")\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_CONTROL_AGENT_STA or TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_CONTROL_AGENT_AP:\n if TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_CONTROL_AGENT_STA:\n self.set_testbed_ap_sta_dev(dev_info_str, curr_node, \"STA\", \"NOTVAR\")\n else:\n self.set_testbed_ap_sta_dev(dev_info_str, curr_node, \"AP\", \"NOTVAR\")\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_TESTBEDSTA or TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_TESTBEDAP:\n if TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_TESTBEDSTA:\n self.set_testbed_ap_sta_dev(dev_info_str, curr_node, \"STA\", \"\")\n else:\n self.set_testbed_ap_sta_dev(dev_info_str, curr_node, \"AP\", \"\")\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFAEMT:\n self.set_wfa_emt_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_EMT_CONTROL_AGNET:\n self.set_wfa_emt_dev(dev_info_str, curr_node, \"NOTVAR\")\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.POWERSWITCH:\n self.set_pwr_swt_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_APCONFIGSERVER:\n self.set_apconfig_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_PCEDNPOINT:\n self.set_pce_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_SNIFFER:\n self.set_sniffer_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_RADIUSSERVER:\n self.set_rad_svr_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_CONTROL_AGENT_OSUSERVER:\n self.set_osu_svr_dev(dev_info_str, curr_node)\n elif TestScriptElementType.script_testbed_devices[key] == TestScriptElementType.WFA_ATTENUATOR:\n self.set_atten_dev(dev_info_str, curr_node)\n else:\n raise SyntaxError\n break\n if curr_node.tag == \"DISPLAYNAME\":\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.alias == (curr_node.data.keys())[0]:\n tbd.displayname = (curr_node.data.values())[0]\n break\n\n curr_node = curr_node.next",
"def test_one_emulator(mock_tools, android_sdk):\n mock_tools.subprocess.check_output.return_value = devices_result(\"one_emulator\")\n\n assert android_sdk.devices() == {\n \"emulator-5554\": {\n \"name\": \"Android SDK built for x86\",\n \"authorized\": True,\n },\n }",
"def test_bluetoothctl_device_random(self):\n\n with open(\"tests/fixtures/generic/bluetoothctl_device_random.out\", \"r\") as f:\n output = f.read()\n\n actual = parse(output, quiet=True)\n\n self.assertIsNotNone(actual)\n self.assertIsNotNone(actual[0])\n\n expected = {\n \"address\": \"DF:1C:C3:B4:1A:1F\",\n \"is_random\": True,\n \"name\": \"M585/M590\",\n \"alias\": \"M585/M590\",\n \"appearance\": \"0x03c2\",\n \"icon\": \"input-mouse\",\n \"paired\": \"yes\",\n \"bonded\": \"yes\",\n \"trusted\": \"no\",\n \"blocked\": \"no\",\n \"connected\": \"no\",\n \"legacy_pairing\": \"no\",\n \"uuids\": [\n \"Generic Access Profile (00001800-0000-1000-8000-00805f9b34fb)\",\n \"Generic Attribute Profile (00001801-0000-1000-8000-00805f9b34fb)\",\n \"Device Information (0000180a-0000-1000-8000-00805f9b34fb)\",\n \"Battery Service (0000180f-0000-1000-8000-00805f9b34fb)\",\n \"Human Interface Device (00001812-0000-1000-8000-00805f9b34fb)\",\n \"Vendor specific (00010000-0000-1000-8000-011f2000046d)\"\n ],\n \"modalias\": \"usb:v046DpB01Bd0011\"\n }\n\n if actual:\n for k, v in expected.items():\n self.assertEqual(v, actual[0][k], f\"Device regex failed on {k}\")",
"def choose_device(cls, devices):\n return devices[0]",
"def test4_SingleObservationSelectByIntent(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test4\")\n self.inpms += \".test4\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n #Titan\n if self.ismms:\n #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n\n else:\n record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n #record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (selectbyIntent)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with selectbyIntent\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n #\"\"\"Zero 
spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n\n #\"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Zero spacing of spw 3 with scalebychan, selectbyintent\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n return sjran",
"def test_bluetoothctl_device(self):\n\n with open(\"tests/fixtures/generic/bluetoothctl_device.out\", \"r\") as f:\n output = f.read()\n\n actual = parse(output, quiet=True)\n\n self.assertIsNotNone(actual)\n self.assertIsNotNone(actual[0])\n\n expected = {\n \"address\": \"EB:06:EF:62:B3:19\",\n \"is_public\": True,\n \"name\": \"TaoTronics TT-BH026\",\n \"alias\": \"TaoTronics TT-BH026\",\n \"class\": \"0x00240404\",\n \"icon\": \"audio-headset\",\n \"paired\": \"no\",\n \"bonded\": \"no\",\n \"trusted\": \"no\",\n \"blocked\": \"no\",\n \"connected\": \"no\",\n \"legacy_pairing\": \"no\",\n \"uuids\": [\n \"Advanced Audio Distribu.. (0000110d-0000-1000-8000-00805f9b34fb)\",\n \"Audio Sink (0000110b-0000-1000-8000-00805f9b34fb)\",\n \"A/V Remote Control (0000110e-0000-1000-8000-00805f9b34fb)\",\n \"A/V Remote Control Cont.. (0000110f-0000-1000-8000-00805f9b34fb)\",\n \"Handsfree (0000111e-0000-1000-8000-00805f9b34fb)\",\n \"Headset (00001108-0000-1000-8000-00805f9b34fb)\",\n \"Headset HS (00001131-0000-1000-8000-00805f9b34fb)\"\n ],\n \"rssi\": -52,\n \"txpower\": 4\n }\n\n if actual:\n for k, v in expected.items():\n self.assertEqual(v, actual[0][k], f\"Device regex failed on {k}\")",
"def test_drivers(devices, expected, current_actor_context):\n current_actor_context.feed(PCIDevices(devices=devices))\n current_actor_context.run()\n if expected:\n assert not current_actor_context.consume(Report)\n else:\n assert current_actor_context.consume(Report)",
"def test_a_register_device_for_loan_license(self):\n self.status.register(self.status.DEVICEID1, self.status.DEVICENAME1)",
"def test_device_option(build_command):\n options = build_command.parse_options([\"-d\", \"myphone\"])\n\n assert options == {\"udid\": \"myphone\", \"update\": False}",
"def device_test(self):\n # Create a MPI packet object\n mpi_packet = MPI()\n mpi_packet.descriptor = MPI.MPI_BASE_CMD_DESCRIPTOR\n \n # Set to idle payload\n field_len = 0x02\n field_desc = 0x05\n mpi_packet.payload = [field_len, field_desc]\n\n # Payload length \n mpi_packet.payload_len = len(mpi_packet.payload)\n \n # Build imu ping command in bytes\n command = mpi_packet.build()\n \n # Send byte packet to microstrain imu \n self.ser.write(command)\n \n # Read output from the imu adter sleeping for 2 ms\n sleep(0.002)\n reply = self.ser.read(16)\n \n if reply[7] == \"\\x00\":\n print \" Device Built-In Test (BIT) successful!\"\n print \" BIT Error Flags : \"\n print \" Byte 1 : \", '0x' + reply[10].encode('hex') \n print \" Byte 2 : \", '0x' + reply[11].encode('hex')\n print \" Byte 3 : \", '0x' + reply[12].encode('hex')\n print \" Byte 4 : \", '0x' + reply[13].encode('hex')\n else:\n print \" Command unsuccessful\"\n err = '0x' + reply[7].encode('hex')\n print \" Error Code : \", err\n print \" Error Message : \", MPI.MPI_ACK_NACK_ERROR[err]\n \n return",
"def test_display_output(self, test_input, expected):\n dataklazz = UserAgentInfo(**test_input)\n assert dataklazz.display() == expected",
"def test_read_device_info(self):\n with self.plc:\n name, version = self.plc.read_device_info()\n self.assertEqual(name, \"TestServer\")\n self.assertEqual(version.build, 3)",
"def randSwitchPort(rand, inportdic, outportdic):\n if 'all' == rand:\n #Random switch SUT in port\n inporttype = random.choice(list(inportdic.keys()))\n inport = inportdic[inporttype]\n #Random switch SUT out port\n outporttype = random.choice(list(outportdic.keys()))\n outport = outportdic[outporttype]\n cmd_dut = ''.join('ci' + inport + 'o' + outport)\n log.logger.info(\"Set the DUT port is: %s\" % cmd_dut)\n #Set Switch output\n switchport = \"\".join(re.findall(r\"\\d\",outporttype))\n cmd_sw = ''.join('ci'+switchport+'oall')\n #log.logger.info(\"Set the Switch port is: %s\" % cmd_sw)\n elif 'input' == rand:\n # Random switch SUT in port, output default is first port\n inporttype = random.choice(list(inportdic.keys()))\n inport = inportdic[inporttype]\n cmd_dut = ''.join('ci' + inport + 'o' + outportdic['HDMI1'])\n log.logger.info(\"The DUT IN port is: %s\" % inport)\n #No output switch\n log.logger.info(\"OUTput has no change.\")\n # Set Switch output\n cmd_sw = ''\n elif 'output' == rand:\n #No switch SUT in port, input is the default\n log.logger.info(\"INput has no change.\")\n outporttype = random.choice(list(outportdic.keys()))\n outport = outportdic[outporttype]\n cmd_dut = ''.join('ci' + inportdic['HDMI1'] + 'o' + outport)\n log.logger.info(\"The DUT OUT port is: %s\" % outport)\n #Set Switch output\n switchport = \"\".join(re.findall(r\"\\d\",outporttype))\n cmd_sw = ''.join('ci'+switchport+'oall')\n #log.logger.info(\"Set the Switch port is: %s\" % cmd_sw)\n else:\n cmd_dut = ''.join('ci' + inportdic['HDMI1'] + 'o' + outportdic['HDMI1'])\n #Set Switch output\n switchport = \"\".join(re.findall(r\"\\d\",'HDMI1'))\n cmd_sw = ''.join('ci'+switchport+'oall')\n return cmd_dut, cmd_sw, outport, outporttype, inporttype",
"def print_menu_and_get_device(device_type):\n\n devices = None\n if device_type == \"sink\":\n devices = pulse.get_sinks()\n print(\"Available Pulse Audio sinks:\")\n elif device_type == \"source\":\n devices = pulse.get_sources()\n print(\"Available Pulse Audio sources:\")\n else:\n raise ValueError(\"device_type must be either sink or source\")\n for index, device in enumerate(devices):\n print(\"\\t{index}: {active_indicator}{name}\".format(\n index=index,\n active_indicator=\"(active default) \" if device[\"active\"] else \"\",\n name=device[\"device_name\"]))\n valid_input = False\n selection = None\n while not valid_input:\n selection = input(\"? \")\n valid_input = is_int(selection) and 0 <= int(selection) < len(devices)\n selection = int(selection)\n return devices[selection]",
"async def test_get_actions(hass: HomeAssistant, device_ias) -> None:\n\n ieee_address = str(device_ias[0].ieee)\n\n ha_device_registry = dr.async_get(hass)\n reg_device = ha_device_registry.async_get_device(\n identifiers={(DOMAIN, ieee_address)}\n )\n ha_entity_registry = er.async_get(hass)\n siren_level_select = ha_entity_registry.async_get(\n \"select.fakemanufacturer_fakemodel_default_siren_level\"\n )\n siren_tone_select = ha_entity_registry.async_get(\n \"select.fakemanufacturer_fakemodel_default_siren_tone\"\n )\n strobe_level_select = ha_entity_registry.async_get(\n \"select.fakemanufacturer_fakemodel_default_strobe_level\"\n )\n strobe_select = ha_entity_registry.async_get(\n \"select.fakemanufacturer_fakemodel_default_strobe\"\n )\n\n actions = await async_get_device_automations(\n hass, DeviceAutomationType.ACTION, reg_device.id\n )\n\n expected_actions = [\n {\n \"domain\": DOMAIN,\n \"type\": \"squawk\",\n \"device_id\": reg_device.id,\n \"metadata\": {},\n },\n {\"domain\": DOMAIN, \"type\": \"warn\", \"device_id\": reg_device.id, \"metadata\": {}},\n ]\n expected_actions.extend(\n [\n {\n \"domain\": Platform.SELECT,\n \"type\": action,\n \"device_id\": reg_device.id,\n \"entity_id\": entity_id,\n \"metadata\": {\"secondary\": True},\n }\n for action in [\n \"select_first\",\n \"select_last\",\n \"select_next\",\n \"select_option\",\n \"select_previous\",\n ]\n for entity_id in [\n siren_level_select.id,\n siren_tone_select.id,\n strobe_level_select.id,\n strobe_select.id,\n ]\n ]\n )\n\n assert actions == unordered(expected_actions)",
"def test_400005_owner_create_multi_service_order_different_devices(self):\n self.logger.info(\".... Start test_400005_owner_create_multi_service_order_different_devices ....\")\n try:\n with allure.step(\"teststep5: get provider id\"):\n provider_name = self.config.getItem('h5', 'name')\n table = 'bus_provider'\n condition = (\"name\", provider_name)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_condition(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n provider_id = select_result[0][0]\n\n with allure.step(\"teststep6: get spu id\"):\n table = 'bus_spu'\n condition = (\"provider_id\", provider_id)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_condition(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n spu_id = select_result[0][0]\n\n with allure.step(\"teststep7: get sku id\"):\n sku_name = self.config.getItem('sku', 'single_count')\n table = 'bus_sku'\n condition = (\"name\", sku_name)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_condition(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n sku_id = select_result[0][0]\n\n with allure.step(\"teststep8: get owner feature\"):\n table = 'mem_features'\n condition = (\"member_id = '{}' and features_name = '{}'\".format(self.member_id, \"本人\"))\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_conditions(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n owner_feautreid = select_result[0][0]\n\n with allure.step(\"teststep9: get devices id\"):\n table = 'iot_releationship'\n condition = (\"iot_device_name\", self.devicename)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_condition(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n condition2 = (\"iot_device_name\", self.devicename2)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition2))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition2))\n select_result2 = self.mysql.execute_select_condition(table, condition2)\n allure.attach(\"query result\", str(select_result2))\n self.logger.info(\"query result: {0}\".format(select_result2))\n devices_ids = []\n device_id = ''\n device_id2 = ''\n if select_result and select_result2:\n device_id = select_result[0][0]\n device_id2 = select_result2[0][0]\n 
devices_ids.append(select_result[0][0])\n devices_ids.append(select_result2[0][0])\n\n with allure.step(\"teststep10: subscribe service order create.\"):\n self.mqttclient2.loopstart()\n time.sleep(5)\n self.mqttclient2.loopstop()\n topic = \"/{0}/{1}/{2}\".format(self.productkey, self.devicename, self.order_create)\n topic2 = \"/{0}/{1}/{2}\".format(self.productkey2, self.devicename2, self.order_create)\n self.logger.info(\"topic: {0}\".format(topic))\n self.mqttclient2.subscribe(topic2, 1)\n self.mqttclient2.loopstart()\n self.mqttclient.subscribe(topic, 1)\n self.mqttclient.loopstart()\n self.mqttclient.clear()\n self.mqttclient2.clear()\n start_time = int(time.time())\n\n with allure.step(\"teststep11: create service orders\"):\n with allure.step(\"初始化HTTP客户端。\"):\n h5_port = self.config.getItem('h5', 'port')\n baseurl = '{0}://{1}:{2}'.format(self.sv_protocol, self.sv_host, h5_port)\n allure.attach(\"baseurl\", str(baseurl))\n self.logger.info(\"baseurl: \" + baseurl)\n httpclient1 = HTTPClient(baseurl)\n with allure.step(\"连接H5主页\"):\n r_homeindex = h5_home_index(httpclient1, self.member_id, self.token, self.logger)\n allure.attach(\"homeindex\", str(r_homeindex))\n self.logger.info(\"homeindex: \" + str(r_homeindex))\n assert not r_homeindex\n with allure.step(\"本人申请下单\"):\n r_applyresult1 = h5_shopping_apply_result(httpclient1, provider_id, spu_id, sku_id,\n [owner_feautreid], \"2010-2-4\", \"2038-02-11\",\n self.logger)\n allure.attach(\"apply result\", str(r_applyresult1))\n self.logger.info(\"apply result: \" + str(r_applyresult1))\n assert r_applyresult1\n with allure.step(\"获取服务单号\"):\n r_orderlist = get_myservice_order_list(self.httpclient, self.member_id, 0, 10, 3,\n timestamp=get_timestamp(), logger=self.logger)\n self.logger.info(\"service order list: \" + str(r_orderlist))\n service_order_id = r_orderlist[0][\"service_order_id\"]\n\n end_time = int(time.time())\n during = end_time - start_time\n while (not self.mqttclient.rcv_msg or not self.mqttclient2.rcv_msg) and during < 60:\n sleep(5)\n end_time = int(time.time())\n during = end_time - start_time\n self.mqttclient.loopstop()\n self.mqttclient.unsubscribe(topic)\n self.mqttclient2.loopstop()\n self.mqttclient2.unsubscribe(topic2)\n if self.mqttclient.rcv_msg:\n msg = self.mqttclient.rcv_msg.pop()\n payload = json.loads(msg.payload, encoding='utf-8')\n self.logger.info(\"device1 message payload: {}\".format(payload))\n assert payload['data']['service_order_id'] == str(service_order_id)\n else:\n self.logger.info(\"Fail: Cannot get the create service order message from device1.\")\n assert False\n if self.mqttclient2.rcv_msg:\n msg = self.mqttclient2.rcv_msg.pop()\n payload = json.loads(msg.payload, encoding='utf-8')\n self.logger.info(\"device2 message payload: {}\".format(payload))\n assert payload['data']['service_order_id'] == str(service_order_id)\n else:\n self.logger.info(\"Fail: Cannot get the create service order message from device2.\")\n assert False\n self.logger.info(\"MQTT receive service order create finished.\")\n\n with allure.step(\"teststep12: publish service order report.\"):\n for i in range(4):\n self.logger.info(\"\")\n self.logger.info(\"Publish service order report {} times.\".format(i))\n iot_publish_ServiceOrderReport(self.mqttclient, self.productkey, self.devicename, service_order_id,\n device_id, 1, 1, logger=self.logger)\n sleep(3)\n iot_publish_ServiceOrderReport(self.mqttclient2, self.productkey2, self.devicename2, service_order_id,\n device_id2, 0, 1, logger=self.logger)\n sleep(3)\n\n 
sleep(10)\n with allure.step(\"teststep13: get recognize record.\"):\n records = get_recognized_record_list(self.httpclient, self.member_id, 0, 10, timestamp=get_timestamp(),\n logger=self.logger)\n self.logger.info(\"Recognize records: {0}\".format(records))\n assert len(records['data']) == 8\n\n with allure.step(\"teststep14: get service order status.\"):\n r_orderlist = get_myservice_order_list(self.httpclient, self.member_id, 0, 10, 3, timestamp=get_timestamp(),\n logger=self.logger)\n self.logger.info(\"Service order list: {0}\".format(r_orderlist))\n assert r_orderlist[0]['already_count'] == 8\n assert r_orderlist[0]['state'] == 1\n\n with allure.step(\"teststep15: subscribe service order close.\"):\n topic = \"/{0}/{1}/{2}\".format(self.productkey, self.devicename, self.order_close)\n topic2 = \"/{0}/{1}/{2}\".format(self.productkey2, self.devicename2, self.order_close)\n self.logger.info(\"topic: {0}\".format(topic))\n self.mqttclient.subscribe(topic, 1)\n self.mqttclient.loopstart()\n self.logger.info(\"topic: {0}\".format(topic2))\n self.mqttclient2.subscribe(topic2, 1)\n self.mqttclient2.loopstart()\n self.mqttclient.clear()\n self.mqttclient2.clear()\n start_time = int(time.time())\n\n with allure.step(\"teststep16: close service orders\"):\n table = 'bus_order'\n condition = (\"service_order_id\", service_order_id)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n select_result = self.mysql.execute_select_condition(table, condition)\n allure.attach(\"query result\", str(select_result))\n self.logger.info(\"query result: {0}\".format(select_result))\n order_id = select_result[0][0]\n close_result = h5_order_delete(httpclient1, provider_id, spu_id, sku_id, order_id)\n allure.attach(\"close order result\", str(close_result))\n self.logger.info(\"close order result: {0}\".format(close_result))\n assert close_result\n\n end_time = int(time.time())\n during = end_time - start_time\n while (not self.mqttclient.rcv_msg) and (not self.mqttclient2.rcv_msg) and during < 60:\n sleep(5)\n end_time = int(time.time())\n during = end_time - start_time\n self.mqttclient.loopstop()\n self.mqttclient.unsubscribe(topic)\n self.mqttclient2.loopstop()\n self.mqttclient2.unsubscribe(topic2)\n if self.mqttclient.rcv_msg:\n msg = self.mqttclient.rcv_msg.pop()\n payload = json.loads(msg.payload, encoding='utf-8')\n self.logger.info(\"device1 message payload: {}\".format(payload))\n assert payload['action_id'] == '202'\n assert payload['data']['service_order_id'] == str(service_order_id)\n else:\n self.logger.error(\"Failed:device1 has not received iot message\")\n assert False\n if self.mqttclient2.rcv_msg:\n msg = self.mqttclient2.rcv_msg.pop()\n payload = json.loads(msg.payload, encoding='utf-8')\n self.logger.info(\"device2 message payload: {}\".format(payload))\n assert payload['action_id'] == '202'\n assert payload['data']['service_order_id'] == str(service_order_id)\n else:\n self.logger.error(\"Failed:device2 has not received iot message\")\n assert False\n self.logger.info(\"MQTT receive service order close finished.\")\n\n with allure.step(\"teststep17: get service order status.\"):\n r_orderlist = get_myservice_order_list(self.httpclient, self.member_id, 0, 10, 3, timestamp=get_timestamp(),\n logger=self.logger)\n self.logger.info(\"Service order Status: {0}\".format(r_orderlist))\n assert r_orderlist[0]['already_count'] == 8\n assert r_orderlist[0]['state'] 
== 2\n # iot时间请求时间同步接口已去掉。\n # with allure.step(\"teststep18: subscribe sync time.\"):\n # self.mqttclient.clear()\n # self.mqttclient2.clear()\n # payload = iot_publish_SyncTime(self.mqttclient, self.productkey, self.devicename, 1, logger=self.logger)\n # self.logger.info(\"device1 time sync message payload: {}\".format(payload))\n # payload2 = iot_publish_SyncTime(self.mqttclient2, self.productkey2, self.devicename2, 1, logger=self.logger)\n # self.logger.info(\"device2 time sync message payload: {}\".format(payload2))\n # assert payload['action_id'] == '204'\n # assert payload2['action_id'] == '204'\n except Exception as e:\n allure.attach(\"Exception: \", \"{}\".format(e))\n self.logger.error(\"Error: exception occur: \")\n self.logger.error(e)\n assert False\n finally:\n with allure.step(\"teststep: delete mem order records\"):\n table = 'mem_order_record'\n condition = (\"member_id\", self.member_id)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n delete_result = self.mysql.execute_delete_condition(table, condition)\n allure.attach(\"delete result\", str(delete_result))\n self.logger.info(\"delete result: {0}\".format(delete_result))\n with allure.step(\"teststep: delete service order records\"):\n table = 'bus_service_order'\n condition = (\"member_id\", self.member_id)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n delete_result = self.mysql.execute_delete_condition(table, condition)\n allure.attach(\"delete result\", str(delete_result))\n self.logger.info(\"delete result: {0}\".format(delete_result))\n with allure.step(\"teststep: delete bus service order records\"):\n table = 'bus_order'\n condition = (\"member_id\", self.member_id)\n allure.attach(\"table name and condition\", \"{0},{1}\".format(table, condition))\n self.logger.info(\"\")\n self.logger.info(\"table: {0}, condition: {1}\".format(table, condition))\n delete_result = self.mysql.execute_delete_condition(table, condition)\n allure.attach(\"delete result\", str(delete_result))\n self.logger.info(\"delete result: {0}\".format(delete_result))\n self.logger.info(\".... End test_400005_owner_create_multi_service_order_different_devices ....\")\n self.logger.info(\"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets up logging for a test of a chip.
|
def setup_logging():
"""it is called by start_test() and enables creation of separate logs for consecutive testing."""
if tst.get_log() != "":
tst.get_log().removeHandler(tst.get_hdlr1())
tst.get_data().removeHandler(tst.get_hdlr2())
log_fn = "{}{}_{}_{}_{}_log.txt".format(logs_folder_path, tst.get_name(), now, bias_step, cont_volt)
data_fn = "{}{}_{}_{}_{}_data.txt".format(logs_folder_path, tst.get_name(), now, bias_step, cont_volt)
hdlr1 = logging.FileHandler(log_fn)
hdlr1.setLevel(logging.INFO)
hdlr2 = logging.FileHandler(data_fn)
hdlr1.setFormatter(formatter)
hdlr2.setFormatter(formatter)
tst.get_log().addHandler(hdlr1)
tst.get_data().addHandler(hdlr2)
tst.set_hdlr1(hdlr1)
tst.set_hdlr2(hdlr2)
tst.set_path(data_fn)
|
[
"def startTest(self, event):\r\n self._setupLoghandler()",
"def setup_logging():\n client = logging.Client()\n client.get_default_handler()\n client.setup_logging()",
"def setup_logging(self):\n logfile = self.configuration['options'].get('logfile', None)\n if logfile and isinstance(logfile, basestring):\n ch = logging.FileHandler(logfile)\n\n level = self.configuration['options'].get('loglevel', None)\n if not level:\n level = 'INFO'\n\n ch.setLevel({\n 'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR,\n 'CRITICAL': logging.CRITICAL,\n }.get(level, logging.INFO))\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)",
"def setUp(self):\n #cbrandom.toggleDebugMode(True)",
"def setUp(cls):\n logger.log_info(TAG, 'BaseLayer setUp')\n \n # Initialize and read the configuration\n TestConfig.init_config()\n\n # Create a test run dir\n TestConfig.output_dir = utils.create_time_stamped_dir(path='./output', prefix='test_run')\n # Set up the logger\n logger.init_log_to_file(TestConfig.output_dir)",
"def _set_logging(self):\n logging.basicConfig(**self.settings[\"general\"][\"logging\"])\n log.info(\n \"Setting logging config: {!r}\".format(self.settings[\"general\"][\"logging\"])\n )",
"def setup(self):\n from Utilities.movoto.logger import MLogger\n self._mlogger = MLogger().getLogger(*self._args, **self._kw)",
"def setUp(self):\n with mock.patch('recipy.log.open_or_create_db', open_or_create_test_db):\n log_init()",
"def setup_logging():\n rotate_cfg = {\n \"filename\": cfg[\"log_file\"],\n \"maxBytes\": 1024*1000,\n \"backupCount\": 5\n }\n rotate_fmt = \"%(asctime)s %(levelname)-8s %(message)s\"\n console_fmt = \"%(levelname)-8s %(message)s\"\n\n if cfg[\"debug\"]:\n level = logging.DEBUG\n else:\n level = logging.INFO\n\n logger = logging.getLogger()\n logger.setLevel(level)\n\n rotate = logging.handlers.RotatingFileHandler(**rotate_cfg)\n rotate.setFormatter(logging.Formatter(rotate_fmt))\n logger.addHandler(rotate)\n\n console = logging.StreamHandler()\n console.setLevel(level)\n console.setFormatter(logging.Formatter(console_fmt))\n logger.addHandler(console)",
"def test_defaultlog():\n\tlgr = simplelog.make_logger()\n\tlgr.info (\"what?\")",
"def setUp(self):\n profiler.FunctionLogger.clear_data()",
"def setUp(self):\n SelTestBase.setUp(self)\n self.addDevice()",
"def setup_logger():\n log = logging.getLogger('contrail_vrouter_provisioning')\n log.setLevel(logging.DEBUG)\n # create rotating file handler which logs even debug messages\n fh = logging.handlers.RotatingFileHandler(LOG_FILENAME,\n maxBytes=64*1024,\n backupCount=2)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # create formatter and add it to the handlers\n formatter = logging.Formatter(\n '[%(asctime)s %(name)s(%(lineno)s) %(levelname)s]: %(message)s',\n datefmt='%a %b %d %H:%M:%S %Y')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n log.addHandler(fh)\n log.addHandler(ch)\n\n return log",
"def test_fixture_setup(fixtureWithSetup):\n rlog(\"here\")",
"def setUp(self):\n # Initialize SpiderDetect object\n self.spider_obj = SpiderDetect(test=True)\n\n # Mock parsed log file data\n self.data = {\n \"1.1.1.1\":{\n \"count\": 1,\n \"get\": [\"random_get\"] * 1,\n \"unique_get\": [\"random_get\"] * 1,\n \"ua\": [\"360spider\"] * 1,\n \"ep_time\": [1000560492302] * 1,\n \"status_code\": [404] * 1\n }\n }",
"def test_logging_def_cfg(self):\n self.dbgfunc()\n with ctx.nested(U.Chdir(self.tmpdir()), U.tmpenv('CRAWL_CONF', None)):\n # reset any logger that has been initialized\n CrawlConfig.log(close=True)\n CrawlConfig.get_config(reset=True, soft=True)\n\n logpath = os.path.basename(self.logpath())\n d = copy.deepcopy(self.cdict)\n d['crawler']['filename'] = self.default_cfname\n d['crawler']['logpath'] = logpath\n self.write_cfg_file(self.default_cfname, d)\n os.chmod(self.default_cfname, 0644)\n\n # now ask for a default logger\n l = CrawlConfig.log(\"frooble test log\")\n\n # and check that it has the right handler\n self.expected(1, len(l.handlers))\n self.expected(os.path.abspath(logpath),\n l.handlers[0].stream.name)\n self.expected(5*1000*1000, l.handlers[0].maxBytes)\n self.expected(5, l.handlers[0].backupCount)",
"def _setup_logging(args):\n\n if args.logconf is None:\n level = (50 - (10 * args.verbose)) \n logging.basicConfig(format=LOG_FORMAT, level=level)\n logging.getLogger(TSV2NICECXMODULE).setLevel(level)\n logger.setLevel(level)\n return\n # logconf was set use that file\n logging.config.fileConfig(args.logconf, disable_existing_loggers=False)",
"def _init_logger(self):\n #self._logger = logger_factory.make_logger(__name__)",
"def testBasic(self):\n with TestLog.StdoutCapture(self.outputFilename):\n log.configure()\n log.log(log.getDefaultLogger(), log.INFO, \"This is INFO\")\n log.info(u\"This is unicode INFO\")\n log.trace(\"This is TRACE\")\n log.debug(\"This is DEBUG\")\n log.warn(\"This is WARN\")\n log.error(\"This is ERROR\")\n log.fatal(\"This is FATAL\")\n log.critical(\"This is CRITICAL\")\n log.warning(\"Format %d %g %s\", 3, 2.71828, \"foo\")\n self.check(\"\"\"\nroot INFO: This is INFO\nroot INFO: This is unicode INFO\nroot WARN: This is WARN\nroot ERROR: This is ERROR\nroot FATAL: This is FATAL\nroot FATAL: This is CRITICAL\nroot WARN: Format 3 2.71828 foo\n\"\"\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Display the bullets to the screen.
|
def display_bullets(self):
pygame.draw.rect(self.screen, self.settings.bullet_color, self.bullet)
|
[
"def _draw_bullets(self, window):\n for bullet in self._bullets:\n if not self._is_visible(bullet['rect']):\n bullet['visible'] = False\n if bullet['visible']:\n pygame.draw.rect(window, bullet['color'], bullet['rect'])\n self._bullets = [\n bullet for bullet in self._bullets if bullet['visible']]",
"def show(self,win):\n # display bullet\n # -------------\n if self.yPos > 0:\n win.addstr(self.yPos,self.xPos,\"+\")\n win.refresh()",
"def update_bullets(bullets):\n # renews bullets positions\n bullets.update()\n\t# Removing out of screen bullets\n for bullet in bullets.copy(): # search in copy but delete in bullets\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)",
"def bullets(elements):\n for name in elements:\n print(\"*\", name)",
"def add_bullet(self):\n self.game_objects.append(Bullet(self.player.heading, self.player.position))",
"def update_bullets(bullets):\n #Update bullet positions.\n bullets.update()\n \n #Get rid of bullets that have disappeared.\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)",
"def on_draw(self):\n self.clear()\n self.arch.draw()\n self.bullet.draw()\n\tfps_display.draw()",
"def shoot(self, direction):\n\t\tself.facing = direction\n\t\tbullet = game_items.Bullet(self)\n\t\tself.bullets_sprite_list.add(bullet)\n\t\tself.game.all_sprite_list.add(bullet)",
"def bullet(self, spacing):\n return f'{spacing}* '",
"def _create_bullet(self, size, velocity, color):\n shape = pygame.Rect(self._rect.centerx,\n self._rect.centery, size[0], size[1])\n bullet = {'velocity': velocity, 'rect': shape,\n 'color': color, 'visible': True}\n self._bullets.append(bullet)",
"def test_bullets(self) -> None:\n assert OUTPUT.body[0][1][0] == [\n \"--\\tbullet no indent\",\n \"\\t--\\tbullet indent 1\",\n \"\\t\\t--\\tbullet indent 2\",\n ]",
"def display_graphics(self):\n\n # Blit the background\n self.dis.blit(statistics_menu, (0, 0))\n\n # Blit the leaderboard\n self.dis.blit(self.get_leaderboard(), (DISPLAY_X / 2 - self.lb_image_width / 2, self.leaderboard_y))\n\n # Set bold to True for this font (temporarily)\n bahnschrift_font_small.set_bold(True)\n\n # Blit the header items\n self.dis.blit(bahnschrift_font_small.render(\"Position\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2), self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"XP\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 150, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Level\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 300, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Bases\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 450, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Time\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 600, self.leaderboard_y - self.lb_header_offset))\n self.dis.blit(bahnschrift_font_small.render(\"Date\", True, COLOR_WHITE), ((DISPLAY_X / 2 - self.lb_image_width / 2) + 750, self.leaderboard_y - self.lb_header_offset))\n\n # Set bold to False for this font\n bahnschrift_font_small.set_bold(False)\n\n # Blit the button onto the display\n self.dis.blit(self.get_button(), (self.button_x, self.button_y))",
"def update(self):\n #Updating the bullets position when it's angled or not and killing it if its off the screen\n if self.angled:\n if self.rect.x < 900 and self.rect.x >0:\n # Moves the bullet by its velocity\n self.rect.x += self.velx \n self.rect.y += self.vely\n else:\n self.kill()\n else:\n if self.rect.x < 900 and self.rect.x >0:\n # Moves the bullet by its velocity\n self.rect.x += self.velx \n self.rect.y += self.vely \n else:\n self.kill()",
"def listLoop(self):\n while self.state == \"LIST\":\n self.listBG = pygame.transform.smoothscale(pygame.image.load('assets/QuotesBlank.png').convert_alpha(), (self.width,self.height))\n self.screen.blit(self.listBG, (0, 0))\n #BUTTONS\n #pygame.draw.rect(self.screen, (255, 0, 0), pygame.Rect(450, 500, 100, 100), 3)\n #SCREEN WORDS\n myfont = pygame.font.Font('assets/BRLNSDB.TTF', 40)\n #text = myfont.render((self.AllQuotes.quoteDict[0][0]), True, (0,0,0))\n #print(self.AllQuotes.pagelist[0][0][0])\n\n ptext.draw(self.AllQuotes.pagelist[0][0][0],(140,23),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][0][1],(140,115),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][0][2],(440,115),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][1][0],(140,228),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][1][1],(140,320),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][1][2],(440,320),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][2][0],(140,433),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][2][1],(140,525),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][2][2],(440,525),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][3][0],(561,23),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][3][1],(561,115),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][3][2],(861,115),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][4][0],(561,228),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][4][1],(561,320),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][4][2],(861,320),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n\n ptext.draw(self.AllQuotes.pagelist[0][5][0],(561,433),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][5][1],(561,525),width = 400,align=\"left\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n ptext.draw(self.AllQuotes.pagelist[0][5][2],(861,525),width = 400,align=\"right\", color = (12,24,45),fontsize=40,fontname='assets/sunshine.ttf')\n #MOUSE\n pygame.mouse.set_visible(True)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n #BUTTON REPLACEMENT (WITH COORDINATES)\n if event.type == pygame.MOUSEBUTTONDOWN:\n temp=pygame.mouse.get_pos()\n print(temp)\n if((temp[0]>=892 and temp[0]<=951) and (temp[1]>=620 and temp[1]<=675)):\n self.state = \"LIST2\"\n self.mainLoop()\n if((temp[0]>=147 
and temp[0]<=217) and (temp[1]>=646 and temp[1]<=675)):\n self.state = \"MENU\"\n self.mainLoop()\n pygame.display.flip()",
"def update(self):\n # Bullet position update\"\"\"\n self.y -= self.settings.bullet_speed\n # Updating the position of the bullet rectangle\"\"\"\n self.rect.y = self.y",
"def draw():\n\n # Make the background white\n screen.clear()\n screen.fill((255, 255, 255))\n\n # Draw the actors\n apple.draw()\n orange.draw()\n pineapple.draw()\n bomb.draw()\n\n # TODO: Show instructions, message and points",
"def off_screen(self, width):\r\n for bullet in self.p1_bullet:\r\n if bullet.is_off_screen(width):\r\n bullet.alive = False\r\n for bullet in self.p2_bullet:\r\n if bullet.is_off_screen(width):\r\n bullet.alive = False",
"def bullet(text, level=1):\n return '{0:s}* {1:s}'.format(' ' * (level - 1), text.strip())",
"def draw_entities(self, entities: List[EntityImage]):\r\n self._screen.blits(entities)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test for install-hook subcommand
|
def test_install_hook(self, _, install_hook):
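        # install_hook is supplied as a mock by a patch decorator; invoke the subcommand and verify its
        # output, exit code, and the LintConfig it was called with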
result = self.cli.invoke(cli.cli, ["install-hook"])
expected_path = os.path.join(u"/hür", u"dur", hooks.COMMIT_MSG_HOOK_DST_PATH)
expected = u"Successfully installed gitlint commit-msg hook in {0}\n".format(expected_path)
self.assertEqual(result.output, expected)
self.assertEqual(result.exit_code, 0)
expected_config = config.LintConfig()
expected_config.target = os.path.realpath(os.getcwd())
install_hook.assert_called_once_with(expected_config)
|
[
"def test_install_hook_negative(self, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n install_hook.assert_called_once_with(expected_config)",
"def test_install_hook_target(self, _, install_hook):\n # Specified target\n result = self.cli.invoke(cli.cli, [\"--target\", self.SAMPLES_DIR, \"install-hook\"])\n expected_path = os.path.join(u\"/hür\", u\"dur\", hooks.COMMIT_MSG_HOOK_DST_PATH)\n expected = \"Successfully installed gitlint commit-msg hook in %s\\n\" % expected_path\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.output, expected)\n\n expected_config = config.LintConfig()\n expected_config.target = self.SAMPLES_DIR\n install_hook.assert_called_once_with(expected_config)",
"def test_post_installs(self):\n pass",
"def test_uninstall_hook_negative(self, uninstall_hook):\n result = self.cli.invoke(cli.cli, [\"uninstall-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n uninstall_hook.assert_called_once_with(expected_config)",
"def on_install(self):\n pass",
"def test_lfs_proper_install_detection(orchestra: OrchestraShim, monkeypatch):\n monkeypatch.setenv(\"HOME\", \"/tmp\")\n lfs._lfs_install_checked = False\n orchestra(\"install\", \"-b\", \"component_C\", should_fail=True)",
"def testPackageInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_INSTALLED)\n\n self.assertTrue(setup_common.PackageInstalled(\"fake_package\"))",
"def myhook(self, args):\n print(\"myhook spec\")",
"def setup_command(command_subclass):\n orig_run = command_subclass.run\n def custom_run(self):\n\n pre_install() \n orig_run(self)\n post_install()\n\n command_subclass.run = custom_run\n return command_subclass",
"def setup(command):\n return NotImplemented",
"def test_install(self):\n self.__assert_empty_builder()\n self.__builder.install()\n self.assertEqual('path -install ', str(self.__builder))",
"def prereposetup_hook(conduit):\n global downloader\n opts, args = conduit.getCmdLine()\n if args[0] == \"install\":\n\n yum_vars = dict(awsdomain=\"amazonaws.com\", awsregion=\"default\", releasever=\"2017.12\")\n yum_vars.update(conduit.getConf().yumvar)\n\n catalog_url = os.environ.get(\"CATALOGURL\", \"http://amazonlinux.{awsregion}.{awsdomain}/{releasever}/extras-catalog.json\").format(**yum_vars)\n downloader = threading.Thread(target=download, args=(catalog_url,))\n downloader.start()",
"def test_get_installs(self):\n pass",
"def test_setup_git_hooks(self):\n repo = 'git@github.com:user/repository'\n self._add_path(os.path.join('repository', 'git_hooks'))\n os.path.islink.return_value = False\n unbox.main([repo])\n self.assertTrue(call('.git/hooks') in shutil.rmtree.call_args_list)\n self.assertTrue(call('../git_hooks', '.git/hooks') in\n os.symlink.call_args_list)",
"def test_install_module(self):\n pass",
"def test_get_install_item(self):\n pass",
"def test_repo_get_git_hook(self):\n pass",
"def install_runner(hook_name, repo=None, noop=None, yes=None):\n\n repo = local_repo(repo)\n if hook_name not in hook_specs and not noop:\n raise RuntimeError('not a supported git hook: %r' % hook_name)\n\n\n hook_file = join(repo.git_dir, 'hooks', hook_name)\n\n runner_file = RUNNER\n\n if exists(hook_file):\n if filecmp.cmp(runner_file, hook_file):\n make_executable(hook_file)\n return\n\n msg = ' '.join([\n 'A script is already installed as the',\n colors.cyan(hook_name),\n 'hook.\\n',\n colors.bold('Do you want to remove it?'),\n ])\n if yes or (noop and click.confirm(msg)):\n noop or os.unlink(hook_file)\n\n noop or do_install(runner_file, hook_file)",
"def testPackageNotInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_NONE_INSTALL)\n\n self.assertFalse(\n setup_common.PackageInstalled(\"fake_package\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test for install-hook subcommand with a specific target option specified
|
def test_install_hook_target(self, _, install_hook):
# Specified target
result = self.cli.invoke(cli.cli, ["--target", self.SAMPLES_DIR, "install-hook"])
expected_path = os.path.join(u"/hür", u"dur", hooks.COMMIT_MSG_HOOK_DST_PATH)
expected = "Successfully installed gitlint commit-msg hook in %s\n" % expected_path
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output, expected)
expected_config = config.LintConfig()
expected_config.target = self.SAMPLES_DIR
install_hook.assert_called_once_with(expected_config)
|
[
"def test_install_hook_negative(self, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n install_hook.assert_called_once_with(expected_config)",
"def test_install_hook(self, _, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n expected_path = os.path.join(u\"/hür\", u\"dur\", hooks.COMMIT_MSG_HOOK_DST_PATH)\n expected = u\"Successfully installed gitlint commit-msg hook in {0}\\n\".format(expected_path)\n self.assertEqual(result.output, expected)\n self.assertEqual(result.exit_code, 0)\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n install_hook.assert_called_once_with(expected_config)",
"def toolHasOptions():\n pass",
"def test_hook_with_addoption(self, pytester: Pytester) -> None:\n pytester.makepyfile(\n newhooks=\"\"\"\n import pytest\n @pytest.hookspec(firstresult=True)\n def pytest_default_value():\n pass\n \"\"\"\n )\n pytester.makepyfile(\n myplugin=\"\"\"\n import newhooks\n def pytest_addhooks(pluginmanager):\n pluginmanager.add_hookspecs(newhooks)\n def pytest_addoption(parser, pluginmanager):\n default_value = pluginmanager.hook.pytest_default_value()\n parser.addoption(\"--config\", help=\"Config, defaults to %(default)s\", default=default_value)\n \"\"\"\n )\n pytester.makeconftest(\n \"\"\"\n pytest_plugins=(\"myplugin\",)\n def pytest_default_value():\n return \"default_value\"\n \"\"\"\n )\n res = pytester.runpytest(\"--help\")\n res.stdout.fnmatch_lines([\"*--config=CONFIG*default_value*\"])",
"def test_uninstall_hook_negative(self, uninstall_hook):\n result = self.cli.invoke(cli.cli, [\"uninstall-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n uninstall_hook.assert_called_once_with(expected_config)",
"def myhook(self, args):\n print(\"myhook spec\")",
"def test_install_pinned_custom_app_support_package_url_with_args(\n create_command, myapp, tmp_path, support_path\n):\n # Pin the support revision\n myapp.support_revision = \"42\"\n\n # Provide an app-specific override of the package URL\n myapp.support_package = \"https://example.com/custom/support.zip?cool=Yes\"\n\n # Write a temporary support zip file\n support_file = tmp_path / \"out.zip\"\n with zipfile.ZipFile(support_file, \"w\") as support_zip:\n support_zip.writestr(\"internal/file.txt\", data=\"hello world\")\n\n # Modify download_url to return the temp zipfile\n create_command.download_url = mock.MagicMock(return_value=support_file)\n\n # Install the support package\n create_command.install_app_support_package(myapp)\n\n # Confirm the right URL was used\n create_command.download_url.assert_called_with(\n download_path=create_command.dot_briefcase_path / \"support\",\n url=\"https://example.com/custom/support.zip?cool=Yes&revision=42\",\n )\n\n # Confirm that the full path to the support file\n # has been unpacked.\n assert (support_path / \"internal\" / \"file.txt\").exists()",
"def _option_exists(self, option):\n for call in self.calls.values():\n for kwarg in call:\n if option == kwarg:\n return True\n return False",
"def completing_subcommand_option_util(self, option, words):\n # Example: Return True for: gh view 1 --pag\n if len(words) > 3:\n if option in words:\n return True\n return False",
"def command_available(cmd):\n from distutils import spawn\n\n return spawn.find_executable(cmd) is not None",
"def setup(command):\n return NotImplemented",
"def prereposetup_hook(conduit):\n global downloader\n opts, args = conduit.getCmdLine()\n if args[0] == \"install\":\n\n yum_vars = dict(awsdomain=\"amazonaws.com\", awsregion=\"default\", releasever=\"2017.12\")\n yum_vars.update(conduit.getConf().yumvar)\n\n catalog_url = os.environ.get(\"CATALOGURL\", \"http://amazonlinux.{awsregion}.{awsdomain}/{releasever}/extras-catalog.json\").format(**yum_vars)\n downloader = threading.Thread(target=download, args=(catalog_url,))\n downloader.start()",
"def maybe_add_command(opt: dict, script: str) -> dict:\n if script == sys.argv[0]:\n opt[\";command\"] = \" \".join([sys.executable] + sys.argv) # will be sorted to the beginning below\n return opt",
"def check_multilevel_install():\n global options, single_image\n \n # User wants to override multi level install\n if options[\"skip_multi_level\"] == True:\n single_image = True\n return\n\n # User wants to override multi level install\n if options[\"skip_multi_level\"] == True:\n single_image = True\n return\n\n # User wants to override the midway image\n if options[\"midway_system_image\"] != \"\":\n set_next_upgrade_from_user()\n else:\n if re.match(\"nxos.\", options[\"target_system_image\"]) \\\n or re.match(\"n9000\", options[\"target_system_image\"]):\n poap_log(\"Single image is set\")\n single_image = True\n else:\n poap_log(\"Single image is not set\")\n single_image = False\n set_next_upgrade_from_upgrade_path()",
"def is_available_command(event, command):\n event_handler = get_event_handler_for_type(event, event_type=command)\n is_available = event_handler is not None\n return is_available",
"def test_import_cmd(self):\n # Test hook without extra import options\n hook = SqoopHook()\n\n # The subprocess requires an array but we build the cmd by joining on a space\n cmd = \" \".join(\n hook._import_cmd(\n self._config_import[\"target_dir\"],\n append=self._config_import[\"append\"],\n file_type=self._config_import[\"file_type\"],\n split_by=self._config_import[\"split_by\"],\n direct=self._config_import[\"direct\"],\n driver=self._config_import[\"driver\"],\n )\n )\n\n if self._config_import[\"append\"]:\n assert \"--append\" in cmd\n\n if self._config_import[\"direct\"]:\n assert \"--direct\" in cmd\n\n assert f\"--target-dir {self._config_import['target_dir']}\" in cmd\n\n assert f\"--driver {self._config_import['driver']}\" in cmd\n assert f\"--split-by {self._config_import['split_by']}\" in cmd\n # these are from extra options, but not passed to this cmd import command\n assert \"--show\" not in cmd\n assert 'hcatalog-storage-stanza \"stored as orcfile\"' not in cmd\n\n # Test hook with extra import options\n hook = SqoopHook(**self._config_import_extra_options)\n\n cmd = \" \".join(\n hook._import_cmd(\n target_dir=None,\n append=self._config_import[\"append\"],\n file_type=self._config_import[\"file_type\"],\n split_by=self._config_import[\"split_by\"],\n direct=self._config_import[\"direct\"],\n driver=self._config_import[\"driver\"],\n )\n )\n\n assert \"--target-dir\" not in cmd\n # these checks are from the extra import options\n assert \"--show\" in cmd\n assert 'hcatalog-storage-stanza \"stored as orcfile\"' in cmd\n assert \"--fetch-size\" in cmd",
"def test_install_custom_app_support_package_url(\n create_command, myapp, tmp_path, support_path\n):\n # Provide an app-specific override of the package URL\n myapp.support_package = \"https://example.com/custom/support.zip\"\n\n # Write a temporary support zip file\n support_file = tmp_path / \"out.zip\"\n with zipfile.ZipFile(support_file, \"w\") as support_zip:\n support_zip.writestr(\"internal/file.txt\", data=\"hello world\")\n\n # Modify download_url to return the temp zipfile\n create_command.download_url = mock.MagicMock(return_value=support_file)\n\n # Install the support package\n create_command.install_app_support_package(myapp)\n\n # Confirm the right URL was used\n create_command.download_url.assert_called_with(\n download_path=create_command.dot_briefcase_path / \"support\",\n url=\"https://example.com/custom/support.zip\",\n )\n\n # Confirm that the full path to the support file\n # has been unpacked.\n assert (support_path / \"internal\" / \"file.txt\").exists()",
"def test_autotools_args_from_conditional_variant(config, mock_packages):\n s = Spec(\"autotools-conditional-variants-test\").concretized()\n assert \"example\" not in s.variants\n assert len(s.package._activate_or_not(\"example\", \"enable\", \"disable\")) == 0",
"def install_runner(hook_name, repo=None, noop=None, yes=None):\n\n repo = local_repo(repo)\n if hook_name not in hook_specs and not noop:\n raise RuntimeError('not a supported git hook: %r' % hook_name)\n\n\n hook_file = join(repo.git_dir, 'hooks', hook_name)\n\n runner_file = RUNNER\n\n if exists(hook_file):\n if filecmp.cmp(runner_file, hook_file):\n make_executable(hook_file)\n return\n\n msg = ' '.join([\n 'A script is already installed as the',\n colors.cyan(hook_name),\n 'hook.\\n',\n colors.bold('Do you want to remove it?'),\n ])\n if yes or (noop and click.confirm(msg)):\n noop or os.unlink(hook_file)\n\n noop or do_install(runner_file, hook_file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Negative test for install-hook subcommand
|
def test_install_hook_negative(self, install_hook):
result = self.cli.invoke(cli.cli, ["install-hook"])
self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)
self.assertEqual(result.output, u"tëst\n")
expected_config = config.LintConfig()
expected_config.target = os.path.realpath(os.getcwd())
install_hook.assert_called_once_with(expected_config)
|
[
"def test_uninstall_hook_negative(self, uninstall_hook):\n result = self.cli.invoke(cli.cli, [\"uninstall-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n uninstall_hook.assert_called_once_with(expected_config)",
"def test_post_installs(self):\n pass",
"def test_install_hook(self, _, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n expected_path = os.path.join(u\"/hür\", u\"dur\", hooks.COMMIT_MSG_HOOK_DST_PATH)\n expected = u\"Successfully installed gitlint commit-msg hook in {0}\\n\".format(expected_path)\n self.assertEqual(result.output, expected)\n self.assertEqual(result.exit_code, 0)\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n install_hook.assert_called_once_with(expected_config)",
"def testPackageNotInstalled(self):\n self.Patch(\n setup_common,\n \"CheckCmdOutput\",\n return_value=self.PKG_INFO_NONE_INSTALL)\n\n self.assertFalse(\n setup_common.PackageInstalled(\"fake_package\"))",
"def on_uninstall(self):\n pass",
"def on_install(self):\n pass",
"def test_lfs_proper_install_detection(orchestra: OrchestraShim, monkeypatch):\n monkeypatch.setenv(\"HOME\", \"/tmp\")\n lfs._lfs_install_checked = False\n orchestra(\"install\", \"-b\", \"component_C\", should_fail=True)",
"def test_install_hook_target(self, _, install_hook):\n # Specified target\n result = self.cli.invoke(cli.cli, [\"--target\", self.SAMPLES_DIR, \"install-hook\"])\n expected_path = os.path.join(u\"/hür\", u\"dur\", hooks.COMMIT_MSG_HOOK_DST_PATH)\n expected = \"Successfully installed gitlint commit-msg hook in %s\\n\" % expected_path\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.output, expected)\n\n expected_config = config.LintConfig()\n expected_config.target = self.SAMPLES_DIR\n install_hook.assert_called_once_with(expected_config)",
"def test_setup_git_hooks(self):\n repo = 'git@github.com:user/repository'\n self._add_path(os.path.join('repository', 'git_hooks'))\n os.path.islink.return_value = False\n unbox.main([repo])\n self.assertTrue(call('.git/hooks') in shutil.rmtree.call_args_list)\n self.assertTrue(call('../git_hooks', '.git/hooks') in\n os.symlink.call_args_list)",
"def test_hello_hook_nopie(self):\n binary_path = os.path.abspath('tests/binary/test_hello/test64_nopie') \n script_path = os.path.abspath('tests/binary/test_hello/test64_nopie_hook.py') \n res, out_path = patch_run(binary_path, script_path)\n try:\n self.assertEqual(res, 'world\\n')\n except Exception as e:\n raise Exception(e)\n finally:\n os.remove(out_path)",
"def setup(command):\n return NotImplemented",
"def test_invalid_commands(self):\n self.assertEqual(\n False,\n self.command_runner.MaybeCheckForAndOfferSoftwareUpdate('update', 0))",
"def fail_on_npm_install():\n return 1",
"def myhook(self, args):\n print(\"myhook spec\")",
"def install_runner(hook_name, repo=None, noop=None, yes=None):\n\n repo = local_repo(repo)\n if hook_name not in hook_specs and not noop:\n raise RuntimeError('not a supported git hook: %r' % hook_name)\n\n\n hook_file = join(repo.git_dir, 'hooks', hook_name)\n\n runner_file = RUNNER\n\n if exists(hook_file):\n if filecmp.cmp(runner_file, hook_file):\n make_executable(hook_file)\n return\n\n msg = ' '.join([\n 'A script is already installed as the',\n colors.cyan(hook_name),\n 'hook.\\n',\n colors.bold('Do you want to remove it?'),\n ])\n if yes or (noop and click.confirm(msg)):\n noop or os.unlink(hook_file)\n\n noop or do_install(runner_file, hook_file)",
"def uninstall(runner, hook_type='pre-commit'):\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0",
"def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )",
"def test_install_source_packages_notarget_success(self):\n \n self.assertTrue(install_source_packages(\"/tmp/install_source_packages_no_target\", [\"wvdial\"], None, False, self.log))",
"def test_product_uninstalled(self):\n self.assertFalse(self.installer.isProductInstalled(\n 'plonetheme.spot'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Negative test for uninstall-hook subcommand
|
def test_uninstall_hook_negative(self, uninstall_hook):
result = self.cli.invoke(cli.cli, ["uninstall-hook"])
self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)
self.assertEqual(result.output, u"tëst\n")
expected_config = config.LintConfig()
expected_config.target = os.path.realpath(os.getcwd())
uninstall_hook.assert_called_once_with(expected_config)
|
[
"def on_uninstall(self):\n pass",
"def uninstall(runner, hook_type='pre-commit'):\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0",
"def test_install_hook_negative(self, install_hook):\n result = self.cli.invoke(cli.cli, [\"install-hook\"])\n self.assertEqual(result.exit_code, self.GIT_CONTEXT_ERROR_CODE)\n self.assertEqual(result.output, u\"tëst\\n\")\n expected_config = config.LintConfig()\n expected_config.target = os.path.realpath(os.getcwd())\n install_hook.assert_called_once_with(expected_config)",
"def test_uninstall(self):\n self.installer.uninstallProducts(['rapido.plone'])\n self.assertFalse(self.installer.isProductInstalled('rapido.plone'))",
"def uninstallpackage():\n uninstall_package_command = ['pip', 'uninstall', '-y', 'hey-helper']\n _run_command(uninstall_package_command)",
"def test_uninstall(self):\n self.installer.uninstallProducts(['collective.sassy'])\n self.assertFalse(self.installer.isProductInstalled('collective.sassy'))",
"def handle_uninstall_extras(self):\n uninstall_sect = self._ext_conf.get(EXTCFG_SECTION.UNINSTALL, {})\n\n if not isinstance(uninstall_sect, dict):\n raise extm_exc.InstExtrasManagerConfigError(\n f'Invalid configuration container structure:'\n f' {EXTCFG_SECTION.UNINSTALL}: {uninstall_sect}'\n )\n\n exec_ext_cmd_opt = uninstall_sect.get(EXTCFG_OPTION.EXEC_EXT_CMD, [])\n\n if not isinstance(exec_ext_cmd_opt, list):\n raise extm_exc.InstExtrasManagerConfigError(\n f'Invalid configuration container structure:'\n f' {EXTCFG_OPTION.EXEC_EXT_CMD}: {exec_ext_cmd_opt}'\n )\n\n # Handle the 'execute external commands' option\n self._exec_external_commands(*exec_ext_cmd_opt)",
"def test_product_uninstalled(self):\n self.assertFalse(\n self.installer.is_product_installed(\"collective.behavior.banner\")\n )",
"def test_product_uninstalled(self):\n self.assertFalse(self.installer.isProductInstalled(\n 'plonetheme.spot'))",
"def __startPluginUninstall(self):\n self.__startProc(\"eric6_pluginuninstall.py\")",
"def test_plugin_remove():\n with patch.object(mysql, \"plugin_status\", MagicMock(return_value=\"ACTIVE\")):\n _test_call(\n mysql.plugin_remove,\n \"UNINSTALL PLUGIN auth_socket\",\n \"auth_socket\",\n )",
"def uninstall_command(args):\n # Get the path to manage.py\n manage_path = get_manage_path(args)\n item_name = args.app_or_extension\n process = [\"python\", manage_path, \"tethys_app_uninstall\", item_name]\n if args.is_extension:\n process.append(\"-e\")\n if args.is_forced:\n process.append(\"-f\")\n try:\n subprocess.call(process)\n except KeyboardInterrupt:\n pass",
"def unhook(self):\n raise NotImplementedError",
"def uninstallAndExit():\n\tpharosUninstaller.uninstall()\n\tsys.exit(1)",
"def uninstall_on_parameter():\n if len(sys.argv) != 2 or sys.argv[1] != \"uninstall\":\n return\n\n # Application data\n shutil.rmtree(Path(DESTINATION).expanduser())\n\n # Icons\n for size in ICON_SIZES:\n os.remove(Path(ICONS_HICOLOR_FOLDER).expanduser().joinpath(\n \"{size}x{size}/apps/fuzzlecheck.png\".format(size=size)))\n\n # Desktop entry\n os.remove(desktop_file_path())\n\n print(\"Fuzzlecheck was removed.\")\n sys.exit(0)",
"def uninstall(ctx, **kwargs):\n input = LayerUninstall(session=None, verbose=ctx.obj[\"VERBOSE\"], **kwargs)\n\n input = input._replace(\n session=boto3.Session(\n profile_name=input.aws_profile, region_name=input.aws_region\n )\n )\n\n if input.aws_permissions_check:\n permissions.ensure_layer_uninstall_permissions(input)\n\n functions = get_aliased_functions(input)\n\n with ThreadPoolExecutor() as executor:\n futures = [\n executor.submit(layers.uninstall, input, function) for function in functions\n ]\n uninstall_success = all(future.result() for future in as_completed(futures))\n\n if uninstall_success:\n done(\"Uninstall Complete\")\n else:\n failure(\"Uninstall Incomplete. See messages above for details.\", exit=True)",
"def remove_install():\n if exists('/usr/local/bin/k3s-agent-uninstall.sh'):\n run('sudo /usr/local/bin/k3s-agent-uninstall.sh')\n\n if exists('/usr/local/bin/k3s-uninstall.sh'):\n run('sudo /usr/local/bin/k3s-uninstall.sh')\n\n if exists('/usr/local/lib/k3s'): \n run('sudo rm -r /usr/local/lib/k3s')\n\n if exists('/usr/local/lib/k3s'): \n run('sudo rm -r /usr/local/bin/k3s')",
"def test_uninstall_path():\n mock_retcode = MagicMock(return_value=0)\n path = \"C:\\\\KB123456.msu\"\n with patch.dict(win_wusa.__salt__, {\"cmd.retcode\": mock_retcode}), patch(\n \"os.path.exists\", MagicMock(return_value=True)\n ):\n assert win_wusa.uninstall(path) is True\n mock_retcode.assert_called_once_with(\n [\"wusa.exe\", \"/uninstall\", \"/quiet\", path, \"/norestart\"],\n ignore_retcode=True,\n )",
"def package_uninstall(package_name, args=[]):\n\n returncode, _, _ = exec_command(\n ['dcos', 'package', 'uninstall', package_name, '--yes'] + args)\n assert returncode == 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Examine the current room based on the light level and list contents.
|
def look_around(self):
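        # In a lit room, report the description, any items, and the first enemy; in darkness, report that nothing is visible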
if self.current_room.light is True:
print(self.current_room.description + '\n')
if len(self.current_room.items) == 1:
print(f'You can see a {self.current_room.items[0]}.\n')
elif len(self.current_room.items) > 1:
print('You can see some items:\n')
for item in self.current_room.items:
print(f"{item.name}: {item.description}")
print("\n")
if len(self.current_room.enemies) > 0:
print(
f"Danger ahead! You can see a {self.current_room.enemies[0].name}.\n")
else:
print("It's pitch black! You can't see a thing!\n")
|
[
"def explore_room(room):\n time_check()\n items = [i[\"name\"] for i in object_relations[room[\"name\"]]]\n print(\"You explore the room. This is \" + room[\"name\"] + \". You find \" + \", \".join(items))",
"def roomInfo():\n # global LOC, ROOMS_VISITED, DEAD_GUARD_HAS_UNIFORM\n if LOC not in ROOMS_VISITED:\n ROOMS_VISITED.append(LOC)\n print(chr(27) + \"[2J\" + chr(27) + \"[;H\") # Clears the console\n printMap()\n print(\"\\n\\n\")\n print(SPIKES_UP)\n printw(rooms[LOC][\"name\"])\n printw(\"=\" * len(rooms[LOC][\"name\"]))\n if DEAD_GUARD_HAS_UNIFORM == True:\n if LOC == \"guard room\":\n printw(\"You enter in a room with two warriors playing cards and drinking beer. \"\n \"When they see you they immediately grip you and then kill you.\")\n gameOver()\n elif LOC == \"freedom\":\n printw(\"After winning the TicTacToe game, the chamber opened... You are free!\")\n\n printw(rooms[LOC][\"info\"])",
"def get_details(self):\n print(\"The \" + self.get_name())\n print(\"----------------------\")\n print(self.get_description())\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)",
"def generate_level(self):\n room_coords = self.recursive_divide(0, 0, GAME_HEIGHT, GAME_WIDTH)\n debug(room_coords)\n\n self.rooms = [Room(y, x, h, w) for y, x, h, w in room_coords]\n self.entrance, self.exit = self.generate_stairs()\n\n for object_ in self.architecture:\n self.add_to_grid(object_)\n\n for room in self.rooms:\n threshold = 0.2 + 4 * (room.w * room.h) / (GAME_WIDTH * GAME_HEIGHT)\n while random.random() < threshold:\n tile_y = random.randrange(room.y, room.y + room.h)\n tile_x = random.randrange(room.x, room.x + room.w)\n monster = self.generate_random_monster(tile_y, tile_x)\n self.creatures += [monster]\n threshold = max(0.1, threshold - 0.1)",
"def test_get_rooms(self):\n self.board.get_rooms",
"def print_room(self,room_name):\n\t\t##single out the particular room from self.all_rooms\n\t\tself.room_to_list_its_occupants=[room_object for room_object in self.all_rooms if room_object.room_name==room_name]\n\t\tsize=len(self.room_to_list_its_occupants[0].list_of_occupants)\n\t\tnames_of_occupants=[]\n\t\tfor index in range(0,size):\n\t\t\t#print(self.temp_room[0].list_of_occupants[index].firstname,self.temp_room[0].list_of_occupants[index].secondname)\n\t\t\tnames_of_occupants.extend(['{} {}'.format(self.room_to_list_its_occupants[0].list_of_occupants[index].firstname,self.room_to_list_its_occupants[0].list_of_occupants[index].secondname)])\n\n\t\tprint(names_of_occupants)\n\t\treturn names_of_occupants",
"def test_3_Room0(self):\n l_xml = self.m_xml.room\n # print(PrettyFormatAny.form(self.m_xml.room, 'Room'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_ROOM_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_ROOM_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_ROOM_ACTIVE_0)\n self.assertEqual(l_xml.find('UUID').text, TESTING_ROOM_UUID_0)\n self.assertEqual(l_xml.find('Comment').text, TESTING_ROOM_COMMENT_0)\n self.assertEqual(l_xml.find('Corner').text, TESTING_ROOM_CORNER_0)\n self.assertEqual(l_xml.find('Floor').text, TESTING_ROOM_FLOOR_0)\n self.assertEqual(l_xml.find('LastUpdate').text, str(TESTING_ROOM_LAST_UPDATE_0))\n self.assertEqual(l_xml.find('Size').text, TESTING_ROOM_SIZE_0)\n self.assertEqual(l_xml.find('RoomType').text, TESTING_ROOM_TYPE_0)",
"def getrooms(self):\n for room in self.rooms:\n if room[0] != self.spawnHunter or self.spawnWumpus:\n whatItem = randrange(0, 3) # A 1 in 3 chance to get one of the items\n if randrange(0, 101) <= self.chance:\n if whatItem == 0:\n room[1] = \"bat\"\n elif whatItem == 1:\n room[1] = \"gold\"\n else:\n room[1] = \"pit\"\n return self.rooms",
"def print_room(self,room_name):\n room_members_list =[]\n #Check if room_name provided is string\n if not isinstance(room_name,str):\n print (\"Room name must be a string\")\n #Check if room provided is already created under offices or livingspaces\n if room_name.lower() in self.office: #Check if room is an office\n #Print to the screen room has no members if empty\n if len(self.office[room_name].members) == 0:\n return ('Room has no members no members')\n else:\n #Create string to be the screen the Room and it's members\n for name in self.office[room_name].members:\n s = self.persons[name].person_id\\\n + ' ' + self.persons[name].last_name \\\n + ' ' +self.persons[name].first_name + ''\n room_members_list.append(s)\n #Return Dictionary with room name as key and list of members as values\n return{room_name:room_members_list}\n #Check if room is a living space\n elif room_name.lower() in self.living_space:\n #Print to the screen room has no members if empty\n if len(self.living_space[room_name].members) == 0:\n return('Room has no members no members')\n else:\n #Print to the screen the Room and it's members\n for name in self.living_space[room_name].members:\n s = self.persons[name].person_id\\\n + ' ' + self.persons[name].last_name \\\n + ' ' +self.persons[name].first_name + ''\n #Add members to\n room_members_list.append(s)\n #Return Dictionary with room name as key and list of members as values\n return {room_name:room_members_list}\n else:\n #Inform user that they want print does not exist\n return('Your room doesn\\'t exist')",
"def test_process_room_only_runs(self):\n # Get all the rooms that the game should recognize.\n data_dir = os.path.abspath('data')\n rooms_full_path = os.path.join(data_dir, ROOMS_FILENAME)\n with open(rooms_full_path, \"r\") as rooms_file:\n rooms_dict_str = rooms_file.read()\n rooms_dict = json.loads(rooms_dict_str)\n # Add the cardinal directions to the rooms dict\n rooms_dict[\"north\"] = \"north\"\n rooms_dict[\"east\"] = \"east\"\n rooms_dict[\"south\"] = \"south\"\n rooms_dict[\"west\"] = \"west\"\n for room in rooms_dict:\n print \"TESTING COMMAND: \" + room\n processed_command = parser.parse_command(room)\n output_type = processed_command[\"type\"]\n title = None\n action = None\n top_level = [\"item\", \"room\", \"feature\"]\n for word in top_level:\n if word in processed_command['command']:\n title = processed_command['command'][word]\n if \"action\" in processed_command['command']:\n action = processed_command['command']['action']\n res = self.game.process_parsed_command(output_type, title, action)\n if res:\n self.game.post_process(res)",
"def print_room_items(room):\n\n if room[\"items\"]:\n print(\"There is \" + list_of_items(room[\"items\"]) + \" here.\\n\")",
"def _get_all_rooms(klass, floor):\n unidentified_rooms = floor.get(\"unidentified_rooms\", [])\n unidentified_rooms = (\n (None, room) for room in unidentified_rooms )\n rooms = floor.get(\"rooms\", {})\n room_items = (\n (rid, room) for rid, room in rooms.items() if \"polygon\" in room\n )\n return chain(room_items, unidentified_rooms)",
"def print_room(self):\n\n print(self.name + \"\\n\")\n print(\"-\"*80)\n print(\"\\n\")\n for person in self.current_occupants:\n print(person + \", \")\n\n return \"Room {} printed\".format(self.name)",
"def parse_rooms(data):\n room_names = data.find_all('h2')\n rooms = []\n for r_name in room_names[2:8]: \n room_bundles = parse_room_bundles(data, r_name.text)\n rooms.append(CommunityCenterRoom(r_name.text, room_bundles))\n return rooms",
"def extract_house_rooms(self, intent_message):\n house_rooms = []\n if intent_message.slots.house_room:\n for room in intent_message.slots.house_room.all():\n type(room.value)\n house_rooms.append(room.value)\n return house_rooms",
"def display_curr_room(self):\n r, c = self.__adventurer_pos\n return str(self.__maze[r][c])",
"def rooms(x, y, width, height):\n for i in range(width):\n for j in range(height):\n room_pos = (x + i, y + j)\n if room_pos in maze:\n yield room_pos",
"def play_room(room):\n game_state[\"current_room\"] = room\n if(game_state[\"current_room\"] == game_state[\"target_room\"]):\n print(\"Congrats! You escaped the room!\")\n else:\n print(\"\")\n print(\"You are now in \" + room[\"name\"])\n print(\"\")\n intended_action = input(\"What would you like to do? Type 'explore' or 'examine'?\").strip().lower()\n if intended_action == \"explore\":\n explore_room(room)\n play_room(room)\n elif intended_action == \"examine\":\n examine_item(input(\"What would you like to examine?\").strip().lower())\n else:\n print(\"Not sure what you mean. Type 'explore' or 'examine'.\")\n play_room(room)\n linebreak()",
"def get_items(self):\n return [i for i in self.game.items if i.room == self]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
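A note on the record above: `look_around` only touches a handful of attributes on its room, item and enemy objects. The following is a minimal sketch, not taken from the original game, of data classes that would satisfy those accesses; every class and field name here is an assumption inferred from the method body.

from dataclasses import dataclass, field
from typing import List


@dataclass
class Item:
    name: str
    description: str

    def __str__(self):
        # look_around prints a lone item object directly, so __str__ yields its name.
        return self.name


@dataclass
class Enemy:
    name: str


@dataclass
class Room:
    description: str
    light: bool = True
    items: List[Item] = field(default_factory=list)
    enemies: List[Enemy] = field(default_factory=list)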
Generate a SHA signature using the current time, the database secret, and either the record object or the res_model and res_id parameters. Return the SHA signature and the time of generation in a tuple.
|
def object_shasign(record=False, res_model='', res_id=None, **kw):
secret = request.env['ir.config_parameter'].sudo().get_param('database.secret')
shasign = False
timestamp = int(time())
if record:
shasign = sha1('%s%s%s%s' % (record._model, record.id, secret, timestamp)).hexdigest()
elif res_model and res_id:
shasign = sha1('%s%s%s%s' % (res_model, res_id, secret, timestamp)).hexdigest()
return (shasign, timestamp)
|
[
"def get_sign(self) -> Tuple[str, int]:\n md5 = hashlib.md5()\n timestamp = int(datetime.datetime.now().timestamp())\n md5.update(\n (self.auth[\"user_key\"] + str(timestamp) + self.auth[\"user_secret\"]).encode(\n \"utf-8\"\n )\n )\n sign = md5.hexdigest()\n return sign, timestamp",
"def token_sign() -> str:\n date_now = str(now().date())\n\n sha = sha256()\n sha.update(bytes(date_now, encoding=\"utf-8\"))\n\n return sha.hexdigest()",
"def get_signature(cls, song_id, salt=None):\r\n\r\n if salt is None:\r\n salt = str(int(time.time() * 1000))\r\n\r\n mac = hmac.new(cls._key, song_id, sha1)\r\n mac.update(salt)\r\n sig = base64.urlsafe_b64encode(mac.digest())[:-1]\r\n\r\n return sig, salt",
"def generate_security_hash(self, content_type, object_pk, timestamp):\n info = (content_type, object_pk, timestamp)\n key_salt = \"flag.forms.SecurityForm\"\n value = \"-\".join(info)\n return salted_hmac(key_salt, value).hexdigest()",
"def _make_hash(self, sid, secret):\n return hmac.new(secret, sid, sha).hexdigest()[:8]",
"def _ww_sig(arguments, timestamp):\n digest = hashlib.sha1\n secret_key = os.getenv('WW_SECRET_KEY')\n raw_args = arguments + '\\n' + timestamp\n\n hmac_hash = hmac.new(\n secret_key.encode(), raw_args.encode(), digest).digest()\n return base64.b64encode(hmac_hash).rstrip().decode()",
"def create_signature(self, audio_ts):\n pass",
"def _generate_signature(password, string):\n signature = None\n try:\n hmac_sha256 = hmac.new(key=password.encode(),\n msg=string.encode(),\n digestmod=sha256)\n signature = hmac_sha256.hexdigest()\n except Exception as e:\n sentry.captureException()\n current_app.logger.error(\"Failed to generate NYC ID.Web Services \"\n \"authentication signature: \", e)\n return signature",
"def bytes(self):\n\n if not self._signature:\n self._signature = self.r.to_bytes(32, 'big') + self.s.to_bytes(32, 'big')\n return self._signature",
"def generate_signature(self, url, params=None, payload=None):\n sig = url\n if params:\n sig += self._flatten_params(params)\n if payload:\n sig += self._flatten_params(payload)\n sig += self.app_secret\n\n if isinstance(sig, unicode):\n sig = sig.encode(\"utf8\")\n\n return hashlib.sha256(sig).hexdigest()",
"def _create_hash(self, result):\r\n text = result[\"created_at\"].encode('utf-8') + result[\"user\"][\"screen_name\"].encode('utf-8') + result[\"text\"].encode('utf-8')\r\n return hashlib.sha224(text).hexdigest()",
"def sign( self, hash, random_k ):\n\n G = self.public_key.generator\n n = G.order()\n k = random_k % n\n p1 = k * G\n r = p1.x()\n if r == 0: raise RuntimeError(\"amazingly unlucky random number r\")\n s = ( numbertheory.inverse_mod( k, n ) * \\\n ( hash + ( self.secret_multiplier * r ) % n ) ) % n\n if s == 0: raise RuntimeError(\"amazingly unlucky random number s\")\n return Signature( r, s )",
"def create_hmac_signature(self):\n return self.data.decode(self.encoding) + \"&Signature=\" + self.signature",
"def get_sign(self, secret, parameters_list):\r\n parameters_str = ''.join(parameters_list)\r\n str = secret + parameters_str + secret\r\n\r\n sha1 = hashlib.sha1()\r\n sha1.update(str)\r\n mysign = sha1.hexdigest()\r\n sign = mysign.upper()\r\n return sign",
"def _generate_signature(self, method, url, parameters):\n enc_parameters = {}\n for key in parameters:\n enc_key = self._percent_enc(key)\n enc_val = self._percent_enc(parameters[key])\n enc_parameters[enc_key] = enc_val\n p_str = \"\"\n for key in sorted(enc_parameters):\n p_str += key + '=' + enc_parameters[key] + '&'\n p_str = p_str[0:-1]\n base_str = method.upper() + '&' + self._percent_enc(url) + '&'\n base_str += self._percent_enc(p_str)\n s_key = self._percent_enc(str(os.environ['TWITTER_CONSUMER_SECRET']))\n s_key += '&'\n s_key += self._percent_enc(str(os.environ['TWITTER_TOKEN_SECRET']))\n base_str = base_str.encode()\n s_key = s_key.encode()\n t_hash = hmac.new(s_key, base_str, digestmod=hashlib.sha1)\n digest = t_hash.digest()\n sig = base64.b64encode(digest)\n return sig.decode()",
"def time_signature(self, nn, dd, cc, bb):\n pass",
"def sign(self, method, content, content_type, date, request_path):\n \n if content is None:\n content_md5=\"\"\n else:\n content_md5 = compute_md5_hex(to_hashable(content))\n\n print \"content md5 \"+content_md5\n\n return self.sign_with_content_md5(method, content_md5, content_type, date, request_path)",
"def __sign(self, data):\n dataName = self.secret\n keys = list(data.keys())\n keys.sort()\n for a in keys: dataName += (a + data[a])\n #print dataName\n hash = hashlib.md5()\n hash.update(dataName.encode('utf-8'))\n return hash.hexdigest()",
"def _sign_web3_transaction(tx: Dict[str, any], v: int, r: int, s: int) -> (bytes, HexBytes):\n unsigned_transaction = serializable_unsigned_transaction_from_dict(tx)\n rlp_encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))\n\n # To get the address signing, just do ecrecover_to_pub(unsigned_transaction.hash(), v, r, s)\n return rlp_encoded_transaction, unsigned_transaction.hash()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
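The `object_shasign` document above derives its signature from the model name, record id, database secret and a Unix timestamp. A consumer that later receives the `(shasign, timestamp)` tuple can verify it by recomputing the same SHA-1 digest; the sketch below is illustrative only, written for Python 3, and the `verify_object_shasign` name and `max_age` freshness check are assumptions rather than part of the original code.

from hashlib import sha1
from hmac import compare_digest
from time import time


def verify_object_shasign(shasign, timestamp, secret, res_model, res_id, max_age=3600):
    # Recompute the digest exactly as object_shasign does for the
    # res_model/res_id branch, then compare in constant time and
    # reject signatures older than max_age seconds.
    expected = sha1(('%s%s%s%s' % (res_model, res_id, secret, timestamp)).encode()).hexdigest()
    fresh = 0 <= int(time()) - int(timestamp) <= max_age
    return fresh and compare_digest(expected, shasign)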
Build an Elasticsearch document from the person instance.
|
def get_es_document_for_person(cls, person, index=None, action="index"):
index = index or cls.index_name
# Get published titles
titles = {
t.language: t.title
for t in Title.objects.filter(page=person.extended_object, published=True)
}
# Prepare portrait images
portrait_images = {}
for portrait in Picture.objects.filter(
cmsplugin_ptr__placeholder__page=person.extended_object,
cmsplugin_ptr__placeholder__slot="portrait",
):
language = portrait.cmsplugin_ptr.language
with translation.override(language):
portrait_images[language] = get_picture_info(portrait, "portrait")
# Get bio texts
bio = defaultdict(list)
for simple_text in SimpleText.objects.filter(
cmsplugin_ptr__placeholder__page=person.extended_object,
cmsplugin_ptr__placeholder__slot="bio",
):
bio[simple_text.cmsplugin_ptr.language].append(simple_text.body)
return {
"_id": str(person.extended_object_id),
"_index": index,
"_op_type": action,
"absolute_url": {
lang: person.extended_object.get_absolute_url(lang)
for lang, _ in settings.LANGUAGES
},
"bio": {language: " ".join(st) for language, st in bio.items()},
"complete": {
language: slice_string_for_completion(title)
for language, title in titles.items()
},
"portrait": portrait_images,
"title": titles,
"title_raw": titles,
}
|
[
"def from_elasticsearch(cls, document):\n return cls(**document['_source'])",
"def to_document(self):\n try:\n return search.Document(\n doc_id=str(self.key.urlsafe()),\n fields=self._get_document_fields())\n\n except (TypeError, ValueError) as e:\n raise DocumentCreationError(e)",
"def format_es_document_for_autocomplete(es_document, language=None):\n return {\n \"id\": es_document[\"_id\"],\n \"kind\": \"persons\",\n \"title\": get_best_field_language(es_document[\"_source\"][\"title\"], language),\n }",
"def elastic_mapping_builder(obj):\n super(Citations, Citations).elastic_mapping_builder(obj)\n obj['journal_id'] = obj['journal_volume'] = \\\n obj['journal_issue'] = {'type': 'integer'}\n obj['abstract_text'] = obj['xml_text'] = \\\n obj['page_range'] = obj['release_authorization_id'] = \\\n {'type': 'text'}\n obj['article_title'] = obj['encoding'] = \\\n obj['doi_reference'] = {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}",
"def format_es_object_for_api(es_person, best_language):\n return {\n \"id\": es_person[\"_id\"],\n \"portrait\": get_best_field_language(\n es_person[\"_source\"][\"portrait\"], best_language\n ),\n \"title\": get_best_field_language(\n es_person[\"_source\"][\"title\"], best_language\n ),\n }",
"def _elasticsearch_builder(self):\n if self._api_key:\n es = Elasticsearch(hosts=[self._server], connection_class=RequestsHttpConnection,\n verify_certs=self._insecure, proxies=self._proxy, api_key=self._api_key)\n else:\n es = Elasticsearch(hosts=[self._server], connection_class=RequestsHttpConnection, http_auth=self._http_auth,\n verify_certs=self._insecure, proxies=self._proxy)\n # this should be passed as api_key via Elasticsearch init, but this code ensures it'll be set correctly\n if self._api_key and hasattr(es, 'transport'):\n es.transport.get_connection().session.headers['authorization'] = self._get_api_key_header_val(self._api_key)\n return es",
"def document(self, name, rank=None):\n return Document(name, index=self, rank=rank)",
"def create_document_(self, init_dict = None):\n if init_dict is None:\n initV = {}\n else:\n initV = init_dict\n\n return self.document_class(self, initV)",
"def _instance_document(self):\n return self.__instance_document",
"def __init__(self, esidxer=None):\n self.target_esidxer = esidxer",
"def record_builder(record, error_counter, metadata):\n\n #create the object based on the input record\n try:\n person = entities.Person.parts_unknown(record['name'])\n except KeyError:\n # If we don't have name in the record\n person = entities.Entity('UNKNOWN')\n error_counter.add('No name on person')\n\n return person",
"def test_index_document(self):\n post = {\n \"title\": \"My Document\",\n \"body\": \"Hello world.\"\n }\n\n # Documents in elasticsearch have an INDEX and a\n # TYPE. You can also specify an ID here if you\n # want to generate your own.\n self.client.index(\n index=\"blog\", # Think \"database\"\n doc_type=\"post\",\n body=post\n )",
"def search(cls):\n\n return Search(\n index=es_indices(cls.INDEX_PREFIX),\n doc_type={cls._doc_type.name: cls.from_es},\n ).sort(cls.SORT).using(es_conn())",
"def person_search():\n\n # Filter to just Volunteers\n s3.filter = FS(\"human_resource.type\") == 2\n\n # Only allow use in the search_ac method\n s3.prep = lambda r: r.method == \"search_ac\"\n\n return crud_controller(\"pr\", \"person\")",
"def search_person(body): # noqa: E501\n if connexion.request.is_json:\n body = PersonQuery.from_dict(connexion.request.get_json()) # noqa: E501\n return dict(results=data_access.search_persons(body))\n return dict(results=[])",
"def __init__(self):\r\n self.elastic = Elasticsearch([{'host': HOST,'post': PORT}])\r\n\r\n self.header = np.array(['date', 'time', 's-ip', 'cs-method', 'cs-uri-stem', 'cs-uri-query','s-port', 'cs-username', 'c-ip', 'cs(User-Agent)', 'sc-status', 'sc-substatus', 'sc-win32-status', 'sc-bytes', 'cs-bytes', 'time-taken'])",
"def _init_elasticsearch(self):\n\n self._elasticsearch = Elasticsearch()",
"def index_body(doc_type, mapping=None, setting=None):\n body = {}\n if setting is not None:\n body['settings'] = setting\n if mapping is not None:\n d = {}\n d[doc_type] = mapping\n body['mappings'] = d\n\n return body",
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_index\": self.es_index,\n \"_type\": self.es_doc,\n \"_id\": entry[\"id\"]\n }\n }\n return meta_dict, entry"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
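`get_es_document_for_person` above returns one action dictionary per person in the shape the Elasticsearch bulk helpers expect: the underscore-prefixed keys (`_id`, `_index`, `_op_type`) are treated as action metadata and the remaining keys become the document source. Below is a minimal sketch of pushing such documents with `elasticsearch-py`, assuming a reachable cluster; the `index_persons` name and host URL are assumptions.

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk


def index_persons(indexer, persons, host="http://localhost:9200"):
    # Stream one action dict per person into the bulk API; bulk() splits the
    # meta keys out of each dict and indexes the rest as the document body.
    client = Elasticsearch(host)
    return bulk(client, (indexer.get_es_document_for_person(person) for person in persons))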
Format a person stored in ES into a consistent and easy-to-consume record for API consumers.
|
def format_es_object_for_api(es_person, best_language):
return {
"id": es_person["_id"],
"portrait": get_best_field_language(
es_person["_source"]["portrait"], best_language
),
"title": get_best_field_language(
es_person["_source"]["title"], best_language
),
}
|
[
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_index\": self.es_index,\n \"_type\": self.es_doc,\n \"_id\": entry[\"id\"]\n }\n }\n return meta_dict, entry",
"def format_es_document_for_autocomplete(es_document, language=None):\n return {\n \"id\": es_document[\"_id\"],\n \"kind\": \"persons\",\n \"title\": get_best_field_language(es_document[\"_source\"][\"title\"], language),\n }",
"def convert_to_person_line_delimited(person):\n person_fields = ['RECTYPE', 'YEAR', 'DATANUM', 'SERIAL', 'PERNUM', 'PERWT',\n 'SLWT', 'SLREC', 'RESPONDT', 'FAMUNIT', 'FAMSIZE', 'SUBFAM',\n 'SFTYPE', 'SFRELATE', 'MOMLOC', 'STEPMOM', 'MOMRULE_HIST',\n 'POPLOC', 'STEPPOP', 'POPRULE_HIST', 'SPLOC', 'SPRULE_HIST',\n 'NCHILD', 'NCHLT5', 'NSIBS', 'ELDCH', 'YNGCH', 'RELATE',\n 'RELATED', 'SEX', 'AGE', 'AGEMONTH', 'MARST', 'MARRNO',\n 'AGEMARR', 'CHBORN', 'RACE', 'RACED', 'HISPAN', 'HISPAND',\n 'BPL', 'BPLD', 'MBPL', 'MBPLD', 'FBPL', 'FBPLD', 'NATIVITY',\n 'CITIZEN', 'MTONGUE', 'MTONGUED', 'SPANNAME', 'HISPRULE',\n 'SCHOOL', 'HIGRADE', 'HIGRADED', 'EDUC', 'EDUCD', 'EMPSTAT',\n 'EMPSTATD', 'LABFORCE', 'OCC', 'OCC1950', 'IND', 'IND1950',\n 'CLASSWKR', 'CLASSWKRD', 'WKSWORK1', 'WKSWORK2', 'HRSWORK1',\n 'HRSWORK2', 'DURUNEMP', 'UOCC', 'UOCC95', 'UIND', 'UCLASSWK',\n 'INCWAGE', 'INCNONWG', 'OCCSCORE', 'SEI', 'PRESGL', 'ERSCOR50',\n 'EDSCOR50', 'NPBOSS50', 'MIGRATE5', 'MIGRATE5D', 'MIGPLAC5',\n 'MIGMET5', 'MIGTYPE5', 'MIGCITY5', 'MIGSEA5', 'SAMEPLAC',\n 'SAMESEA5', 'MIGCOUNTY', 'VETSTAT', 'VETSTATD', 'VET1940',\n 'VETWWI', 'VETPER', 'VETCHILD', 'HISTID', 'SURSIM', 'SSENROLL']\n\n line_list = []\n for field in person_fields:\n line_list.append(person[field])\n\n # append a new line at the end\n # line_list.append(\"\\n\")\n\n line = '|'.join(line_list)\n line = line + \"\\n\"\n return line",
"def record_builder(record, error_counter, metadata):\n\n #create the object based on the input record\n try:\n person = entities.Person.parts_unknown(record['name'])\n except KeyError:\n # If we don't have name in the record\n person = entities.Entity('UNKNOWN')\n error_counter.add('No name on person')\n\n return person",
"def annotate_record(self, record, variant_result):\n record.INFO['variant_id'] = variant_result.variant_id\n record.INFO['gene'] = \",\".join(variant_result.genes)\n record.INFO['gnomad_exomes_AF'] = variant_result.gnomad_exomes_af\n record.INFO['gnomad_genomes_AF'] = variant_result.gnomad_genomes_af\n record.ALT = variant_result.alt\n record.POS = variant_result.pos\n record.ID = \";\".join(variant_result.rs_ids) or \".\"\n return record",
"def get_es_document_for_person(cls, person, index=None, action=\"index\"):\n index = index or cls.index_name\n\n # Get published titles\n titles = {\n t.language: t.title\n for t in Title.objects.filter(page=person.extended_object, published=True)\n }\n\n # Prepare portrait images\n portrait_images = {}\n for portrait in Picture.objects.filter(\n cmsplugin_ptr__placeholder__page=person.extended_object,\n cmsplugin_ptr__placeholder__slot=\"portrait\",\n ):\n language = portrait.cmsplugin_ptr.language\n with translation.override(language):\n portrait_images[language] = get_picture_info(portrait, \"portrait\")\n\n # Get bio texts\n bio = defaultdict(list)\n for simple_text in SimpleText.objects.filter(\n cmsplugin_ptr__placeholder__page=person.extended_object,\n cmsplugin_ptr__placeholder__slot=\"bio\",\n ):\n bio[simple_text.cmsplugin_ptr.language].append(simple_text.body)\n\n return {\n \"_id\": str(person.extended_object_id),\n \"_index\": index,\n \"_op_type\": action,\n \"absolute_url\": {\n lang: person.extended_object.get_absolute_url(lang)\n for lang, _ in settings.LANGUAGES\n },\n \"bio\": {language: \" \".join(st) for language, st in bio.items()},\n \"complete\": {\n language: slice_string_for_completion(title)\n for language, title in titles.items()\n },\n \"portrait\": portrait_images,\n \"title\": titles,\n \"title_raw\": titles,\n }",
"def get_person_from_legacy_format(profile_record):\n\n if not is_profile_in_legacy_format(profile_record):\n raise ValueError(\"Not a legacy profile\")\n\n profile = profile_record\n\n try:\n profile = json.loads(json.dumps(profile))\n except ValueError:\n pass\n\n images = []\n accounts = []\n profile_data = {\n \"@type\": \"Person\"\n }\n\n if profile.has_key(\"name\") and type(profile[\"name\"]) == dict \\\n and profile[\"name\"].has_key(\"formatted\"):\n profile_data[\"name\"] = profile[\"name\"][\"formatted\"]\n\n if profile.has_key(\"bio\"):\n profile_data[\"description\"] = profile[\"bio\"]\n\n if profile.has_key(\"location\") and type(profile[\"location\"]) == dict \\\n and profile[\"location\"].has_key(\"formatted\"):\n profile_data[\"address\"] = {\n \"@type\": \"PostalAddress\",\n \"addressLocality\": profile[\"location\"][\"formatted\"]\n }\n\n if profile.has_key(\"avatar\") and type(profile[\"avatar\"]) == dict and \\\n profile[\"avatar\"].has_key(\"url\"):\n images.append({\n \"@type\": \"ImageObject\",\n \"name\": \"avatar\",\n \"contentUrl\": profile[\"avatar\"][\"url\"]\n })\n\n if profile.has_key(\"cover\") and type(profile[\"cover\"]) == dict and \\\n profile[\"cover\"].has_key(\"url\"):\n images.append({\n \"@type\": \"ImageObject\",\n \"name\": \"cover\",\n \"contentUrl\": profile[\"cover\"][\"url\"]\n })\n\n if len(images) > 0:\n profile_data[\"image\"] = images\n\n if profile.has_key(\"website\") and type(profile[\"website\"]) in [str, unicode]:\n profile_data[\"website\"] = [{\n \"@type\": \"WebSite\",\n \"url\": profile[\"website\"]\n }]\n\n for service_name in [\"twitter\", \"facebook\", \"github\"]:\n if profile.has_key(service_name):\n accounts.append(\n format_account(service_name, profile[service_name])\n )\n\n if profile.has_key(\"bitcoin\") and type(profile[\"bitcoin\"]) == dict and \\\n profile[\"bitcoin\"].has_key(\"address\"):\n accounts.append({\n \"@type\": \"Account\",\n \"role\": \"payment\",\n \"service\": \"bitcoin\",\n \"identifier\": profile[\"bitcoin\"][\"address\"]\n })\n\n if profile.has_key(\"auth\"):\n if len(profile[\"auth\"]) > 0 and type(profile[\"auth\"]) == dict:\n if profile[\"auth\"][0].has_key(\"publicKeychain\"):\n accounts.append({\n \"@type\": \"Account\",\n \"role\": \"key\",\n \"service\": \"bip32\",\n \"identifier\": profile[\"auth\"][0][\"publicKeychain\"]\n })\n\n if profile.has_key(\"pgp\") and type(profile[\"pgp\"]) == dict \\\n and profile[\"pgp\"].has_key(\"url\") \\\n and profile[\"pgp\"].has_key(\"fingerprint\"):\n accounts.append({\n \"@type\": \"Account\",\n \"role\": \"key\",\n \"service\": \"pgp\",\n \"identifier\": profile[\"pgp\"][\"fingerprint\"],\n \"contentUrl\": profile[\"pgp\"][\"url\"]\n })\n\n profile_data[\"account\"] = accounts \n\n return profile_data",
"def format_data(account):\n account_name = account[\"name\"]\n account_description = account[\"description\"]\n account_country = account[\"country\"]\n return f\"{account_name}, a {account_description}, from {account_country}\"",
"def formatted_people(people):\n attribute_lists = [OUTPUT_FIELDS] + [\n [p.last_name, p.first_name, p.gender, p.favorite_color,\n p.birth_date.strftime('%-m/%-d/%Y')]\n for p in people]\n sizes = [2 + max(len(a[i]) for a in attribute_lists)\n for i in range(5)]\n return [_formatted_fields(l, sizes) for l in attribute_lists]",
"def create_person(row_dict):\n\n person_dict = {}\n person_dict[\"sex\"] = Sex.objects.get(name=\"Male\")\n\n if \"sex\" in row_dict and row_dict[\"sex\"].strip() == \"F\":\n person_dict[\"sex\"] = Sex.objects.get(name=\"Female\")\n\n # praenomen special case\n # we initially clean the string, and then add the object to the dictionary\n praenomen_str = row_dict.get(\"praenomen\")\n if praenomen_str:\n if \".\" not in praenomen_str:\n praenomen_str = praenomen_str + \".\"\n\n praenomen_dic = clean_field(\"praenomen\", praenomen_str)\n\n try:\n praenomen = Praenomen.objects.get(abbrev=praenomen_dic[\"praenomen\"])\n except Praenomen.DoesNotExist:\n praenomen = Praenomen.objects.get(abbrev=\"-.\")\n\n praenomen_dic[\"praenomen\"] = praenomen\n\n # add praenomen info to person object\n person_dict.update(praenomen_dic)\n\n # cleans the remining person name fields\n for field in [\"nomen\", \"cognomen\", \"other_names\"]:\n field_string = row_dict.get(field)\n\n if field_string:\n d_obj = clean_field(field, field_string)\n # updates the person dictionary\n person_dict.update(d_obj)\n\n # remaining person fields, where names were not standard\n person_dict[\"re_number\"] = row_dict.get(\"re\")\n\n # logger.info(person_dict)\n person, created = Person.objects.get_or_create(**person_dict)\n\n if created:\n LOGGER.info(\"Created person with id={}\".format(person.id))\n\n return person.id, created",
"def _write_record(self, record):\n # convert biopython-style 1/-1 strand to MAF-style +/- strand\n if record.annotations.get(\"strand\") == 1:\n strand = \"+\"\n elif record.annotations.get(\"strand\") == -1:\n strand = \"-\"\n else:\n # TODO: issue warning?\n strand = \"+\"\n\n fields = [\n \"s\",\n # In the MAF file format, spaces are not allowed in the id\n \"%-40s\" % record.id.replace(\" \", \"_\"),\n \"%15s\" % record.annotations.get(\"start\", 0),\n \"%5s\"\n % record.annotations.get(\"size\", len(str(record.seq).replace(\"-\", \"\"))),\n strand,\n \"%15s\" % record.annotations.get(\"srcSize\", 0),\n str(record.seq),\n ]\n self.handle.write(\"%s\\n\" % \" \".join(fields))",
"def parse_person(self, s, nac):\n org_name = self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac",
"def __formatData(self, data: dict) -> Union[dict, str]:\n\n authorData = {} # valid data accumulator\n if data['AuthorGroup']:\n for i in data['AuthorGroup']:\n affiliationID = i[0]\n affiliationName = i[2]\n authorID = i[7]\n authorName = i[8]\n author = {\"Name\" : authorName, \"AuthorID\" : authorID, \"AffiliationID\" : affiliationID, \"AffiliationName\" : affiliationName}\n authorData[authorID] = author\n \n # Clean up redundant fields\n del data['AuthorGroup']\n del data['Authors']\n del data['Affiliations']\n data['AuthorData'] = authorData\n return data\n return \"\"",
"def entity_tostring(self, entity):\n\n metadata = \", \".join(['\"%s\": \"%s\"' % (key, value) for\n key, value in entity.metadata.items()])\n\n mentions = \", \".join(['\"%s\"' % mention for mention in entity.mentions])\n\n return ('{name: \"%s\",'\n ' type: \"%s\",'\n ' metadata: {%s},'\n ' salience: %s,'\n ' mentions: [%s]}') % (\n entity.name,\n entity.type,\n metadata,\n entity.salience,\n mentions)",
"def preprocess(self):\n self.data['full_name'] = self.data[\"given_name\"] + \" \" + self.data[\"surname\"]\n self.data['find_unique'] = self.data['date_of_birth'] + \" \" + self.data['sex'] + \" \" + self.data['full_name']",
"def to_as1(obj, type=None):\n if not obj:\n return {}\n\n type = obj.get('$type') or type\n if not type:\n raise ValueError('Bluesky object missing $type field')\n\n # TODO: once we're on Python 3.10, switch this to a match statement!\n if type in ('app.bsky.actor.defs#profileView',\n 'app.bsky.actor.defs#profileViewBasic'):\n images = [{'url': obj.get('avatar')}]\n banner = obj.get('banner')\n if banner:\n images.append({'url': obj.get('banner'), 'objectType': 'featured'})\n\n handle = obj.get('handle')\n did = obj.get('did')\n\n ret = {\n 'objectType': 'person',\n 'id': did,\n 'url': (Bluesky.user_url(handle) if handle\n else did_web_to_url(did) if did and did.startswith('did:web:')\n else None),\n 'displayName': obj.get('displayName'),\n 'summary': obj.get('description'),\n 'image': images,\n }\n\n elif type == 'app.bsky.feed.post':\n text = obj.get('text', '')\n\n # convert facets to tags\n tags = []\n for facet in obj.get('facets', []):\n tag = {}\n\n for feat in facet.get('features', []):\n if feat.get('$type') == 'app.bsky.richtext.facet#link':\n tag.update({\n 'objectType': 'article',\n 'url': feat.get('uri'),\n })\n elif feat.get('$type') == 'app.bsky.richtext.facet#mention':\n tag.update({\n 'objectType': 'mention',\n 'url': Bluesky.user_url(feat.get('did')),\n })\n\n index = facet.get('index', {})\n # convert indices from UTF-8 encoded bytes to Unicode chars (code points)\n # https://github.com/snarfed/atproto/blob/5b0c2d7dd533711c17202cd61c0e101ef3a81971/lexicons/app/bsky/richtext/facet.json#L34\n byte_start = index.get('byteStart')\n if byte_start is not None:\n tag['startIndex'] = len(text.encode()[:byte_start].decode())\n byte_end = index.get('byteEnd')\n if byte_end is not None:\n tag['displayName'] = text.encode()[byte_start:byte_end].decode()\n tag['length'] = len(tag['displayName'])\n\n tags.append(tag)\n\n in_reply_to = obj.get('reply', {}).get('parent', {}).get('uri')\n\n ret = {\n 'objectType': 'comment' if in_reply_to else 'note',\n 'content': text,\n 'inReplyTo': [{\n 'id': in_reply_to,\n 'url': at_uri_to_web_url(in_reply_to),\n }],\n 'published': obj.get('createdAt', ''),\n 'tags': tags,\n }\n\n elif type in ('app.bsky.feed.defs#postView', 'app.bsky.embed.record#viewRecord'):\n ret = to_as1(obj.get('record') or obj.get('value'))\n author = obj.get('author') or {}\n uri = obj.get('uri')\n ret.update({\n 'id': uri,\n 'url': (at_uri_to_web_url(uri, handle=author.get('handle'))\n if uri.startswith('at://') else None),\n 'author': to_as1(author, type='app.bsky.actor.defs#profileViewBasic'),\n })\n\n # convert embeds to attachments\n for embed in util.get_list(obj, 'embeds') + util.get_list(obj, 'embed'):\n embed_type = embed.get('$type')\n\n if embed_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(embed))\n\n elif embed_type in ('app.bsky.embed.external#view',\n 'app.bsky.embed.record#view'):\n ret.setdefault('attachments', []).append(to_as1(embed))\n\n elif embed_type == 'app.bsky.embed.recordWithMedia#view':\n ret.setdefault('attachments', []).append(to_as1(\n embed.get('record', {}).get('record')))\n media = embed.get('media')\n media_type = media.get('$type')\n if media_type == 'app.bsky.embed.external#view':\n ret.setdefault('attachments', []).append(to_as1(media))\n elif media_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(media))\n else:\n assert False, f'Unknown embed media type: {media_type}'\n\n elif type == 'app.bsky.embed.images#view':\n ret = [{\n 'url': img.get('fullsize'),\n 
'displayName': img.get('alt'),\n } for img in obj.get('images', [])]\n\n elif type == 'app.bsky.embed.external#view':\n ret = to_as1(obj.get('external'), type='app.bsky.embed.external#viewExternal')\n\n elif type == 'app.bsky.embed.external#viewExternal':\n ret = {\n 'objectType': 'link',\n 'url': obj.get('uri'),\n 'displayName': obj.get('title'),\n 'summary': obj.get('description'),\n 'image': obj.get('thumb'),\n }\n\n elif type == 'app.bsky.embed.record#view':\n record = obj.get('record')\n return to_as1(record) if record else None\n\n elif type == 'app.bsky.embed.record#viewNotFound':\n return None\n\n elif type in ('app.bsky.embed.record#viewNotFound',\n 'app.bsky.embed.record#viewBlocked'):\n return None\n\n elif type == 'app.bsky.feed.defs#feedViewPost':\n ret = to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n reason = obj.get('reason')\n if reason and reason.get('$type') == 'app.bsky.feed.defs#reasonRepost':\n ret = {\n 'objectType': 'activity',\n 'verb': 'share',\n 'object': ret,\n 'actor': to_as1(reason.get('by'), type='app.bsky.actor.defs#profileViewBasic'),\n }\n\n elif type == 'app.bsky.graph.follow':\n ret = {\n 'objectType': 'activity',\n 'verb': 'follow',\n 'actor': {\n 'url': obj.get('subject'),\n },\n }\n\n elif type == 'app.bsky.feed.defs#threadViewPost':\n return to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n\n elif type == 'app.bsky.feed.defs#generatorView':\n uri = obj.get('uri')\n ret = {\n 'objectType': 'service',\n 'id': uri,\n 'url': at_uri_to_web_url(uri),\n 'displayName': f'Feed: {obj.get(\"displayName\")}',\n 'summary': obj.get('description'),\n 'image': obj.get('avatar'),\n 'author': to_as1(obj.get('creator'), type='app.bsky.actor.defs#profileView'),\n }\n\n else:\n raise ValueError(f'Bluesky object has unknown $type: {type}')\n\n return util.trim_nulls(ret)",
"def add_author_full_name(value):\n if \"full_name\" not in value:\n value['full_name'] = '{0}, {1}'.format(\n value['surname'],\n collapse_initials(value['given_names']),\n ).title()\n return value",
"def format(self, record: LogRecord) -> str:\n return json.dumps(self.prepare(record), cls=self.encoder)",
"def print_record(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
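Both ES formatters in the records above and below lean on a `get_best_field_language` helper that is referenced but never shown. The sketch below illustrates what such a helper might do, assuming the multilingual field is a dict keyed by language code; the exact fallback order is an assumption.

def get_best_field_language(multilingual_field, best_language):
    # Prefer the requested language, then its bare language code
    # ("fr" for "fr-ca"), then any remaining translation.
    if best_language in multilingual_field:
        return multilingual_field[best_language]
    base_language = best_language.split("-")[0] if best_language else None
    if base_language in multilingual_field:
        return multilingual_field[base_language]
    return next(iter(multilingual_field.values()), None)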
Format a document stored in ES into an easy-to-consume record for autocomplete consumers. This method differs from the regular one because objects retrieved from search queries and from autocomplete (complete) queries can be formatted differently, and consumers of autocomplete do not need full objects.
|
def format_es_document_for_autocomplete(es_document, language=None):
return {
"id": es_document["_id"],
"kind": "persons",
"title": get_best_field_language(es_document["_source"]["title"], language),
}
|
[
"def elasticsearch_format(self, entry):\n date_obj = self.parse_date(entry[\"reg_date\"])\n entry[\"reg_date\"] = datetime.strftime(date_obj, \"%Y-%m-%dT%H:%M:%S.000Z\")\n # all bulk data need meta data describing the data\n meta_dict = {\n \"index\": {\n \"_index\": self.es_index,\n \"_type\": self.es_doc,\n \"_id\": entry[\"id\"]\n }\n }\n return meta_dict, entry",
"def elastic_mapping_builder(obj):\n super(Citations, Citations).elastic_mapping_builder(obj)\n obj['journal_id'] = obj['journal_volume'] = \\\n obj['journal_issue'] = {'type': 'integer'}\n obj['abstract_text'] = obj['xml_text'] = \\\n obj['page_range'] = obj['release_authorization_id'] = \\\n {'type': 'text'}\n obj['article_title'] = obj['encoding'] = \\\n obj['doi_reference'] = {'type': 'text', 'fields': {'keyword': {'type': 'keyword', 'ignore_above': 256}}}",
"def to_document(self):\n try:\n return search.Document(\n doc_id=str(self.key.urlsafe()),\n fields=self._get_document_fields())\n\n except (TypeError, ValueError) as e:\n raise DocumentCreationError(e)",
"def format_es_object_for_api(es_person, best_language):\n return {\n \"id\": es_person[\"_id\"],\n \"portrait\": get_best_field_language(\n es_person[\"_source\"][\"portrait\"], best_language\n ),\n \"title\": get_best_field_language(\n es_person[\"_source\"][\"title\"], best_language\n ),\n }",
"def from_elasticsearch(cls, document):\n return cls(**document['_source'])",
"def elastic_output_as_table(raw_elastic_json_obj):\n\n # fill in the table and the raw contents objects\n table = []\n for raw_row in raw_elastic_json_obj[\"hits\"][\"hits\"]:\n raw_row_flattened = flatten_json(raw_row[\"_source\"])\n table.append(raw_row_flattened)\n\n return table",
"def testFormatDoc(self):\n meta = self.session.create_metabolome()\n\n self.util.stringTypeTest(self, meta, \"format_doc\")\n\n self.util.stringPropertyTest(self, meta, \"format_doc\")",
"def test_bulk_get_query_document_serialization(self):\n\n # Construct a json representation of a BulkGetQueryDocument model\n bulk_get_query_document_model_json = {}\n bulk_get_query_document_model_json['atts_since'] = ['1-99b02e08da151943c2dcb40090160bb8']\n bulk_get_query_document_model_json['id'] = 'testString'\n bulk_get_query_document_model_json['rev'] = 'testString'\n\n # Construct a model instance of BulkGetQueryDocument by calling from_dict on the json representation\n bulk_get_query_document_model = BulkGetQueryDocument.from_dict(bulk_get_query_document_model_json)\n assert bulk_get_query_document_model != False\n\n # Construct a model instance of BulkGetQueryDocument by calling from_dict on the json representation\n bulk_get_query_document_model_dict = BulkGetQueryDocument.from_dict(bulk_get_query_document_model_json).__dict__\n bulk_get_query_document_model2 = BulkGetQueryDocument(**bulk_get_query_document_model_dict)\n\n # Verify the model instances are equivalent\n assert bulk_get_query_document_model == bulk_get_query_document_model2\n\n # Convert model instance back to dict and verify no loss of data\n bulk_get_query_document_model_json2 = bulk_get_query_document_model.to_dict()\n assert bulk_get_query_document_model_json2 == bulk_get_query_document_model_json",
"def generate_records(document):\n q = db.session.query(DocumentRecord)\n q = q.filter(DocumentRecord.document_id == document.id)\n for record in q.yield_per(1000):\n texts = [record.text]\n if record.data is not None:\n texts.extend(record.data.values())\n\n yield {\n '_id': record.id,\n '_type': TYPE_RECORD,\n '_index': six.text_type(es_index),\n '_source': {\n 'document_id': document.id,\n 'collection_id': document.collection_id,\n 'index': record.index,\n 'sheet': record.sheet,\n 'text': index_form(texts)\n }\n }",
"def to_json(self):\n def _convert_to_python(doc, struct):\n for key in struct:\n if isinstance(struct[key], dict):\n if doc: # we don't need to process an empty doc\n if key in doc: # we don't care about missing fields\n _convert_to_python(doc[key], struct[key])\n elif type(struct[key]) is list:\n if struct[key]:\n if isinstance(struct[key][0], R):\n l_objs = []\n for obj in doc[key]:\n obj['_collection'] = self.collection.name\n obj['_database'] = self.db.name\n l_objs.append(obj)\n doc[key] = l_objs\n elif isinstance(struct[key][0], dict):\n if doc[key]:\n for obj in doc[key]:\n _convert_to_python(obj, struct[key][0])\n else:\n if isinstance(struct[key], R) and doc[key] is not None:\n doc[key]['_collection'] = self.collection.name\n doc[key]['_database'] = self.db.name\n try:\n from json import dumps\n except ImportError:\n from anyjson import serialize as dumps\n except ImportError:\n raise ImportError(\"can't import anyjson. Please install it before continuing.\")\n obj = self.to_json_type()\n _convert_to_python(obj, self.structure)\n return str(dumps(obj))",
"def transform_doc(self, document):\n title, abstract = self._cleaned_document_words(document)\n features = {\n 'title':\n self._text_features(title, self.max_title_len),\n 'abstract':\n self._text_features(abstract, self.max_abstract_len),\n 'authors':\n [\n self.author_to_index[author] for author in document.authors\n if author in self.author_to_index\n ],\n 'venue':\n [self.venue_to_index.get(document.venue, 0)],\n 'keyphrases':\n [\n self.keyphrase_to_index[keyphrase]\n for keyphrase in document.key_phrases\n if keyphrase in self.keyphrase_to_index\n ]\n }\n\n return features",
"def build_document_queryset(\n query_string, index,\n use_stemming=False,\n use_startswith=False,\n match_stopwords=False,\n match_all=False,\n):\n\n assert(index.id)\n\n tokenization = _tokenize_query_string(query_string, match_stopwords=match_stopwords)\n if not tokenization:\n return DocumentRecord.objects.none()\n\n if not match_all:\n # If match_all is false, we split the branches into a branch per token\n split_branches = []\n for branch in tokenization:\n for token in branch:\n split_branches.append([token])\n tokenization = split_branches\n\n # We now need to gather document IDs, for each branch we need to\n # look for matching tokens in a single query, then post-process them\n # to only fetch documents that match all of them.\n doc_scores = {}\n for branch in tokenization:\n tokens = set([x[-1] for x in branch])\n\n filters = _build_filters(index, branch, use_startswith, use_stemming)\n\n keys = TokenFieldIndex.objects.filter(\n filters\n ).values_list(\"pk\", flat=True)[:_PER_TOKEN_HARD_QUERY_LIMIT]\n\n doc_results = {}\n\n for pk in keys:\n doc_id = TokenFieldIndex.document_id_from_pk(pk)\n token = pk.split(\"|\")[1]\n doc_results.setdefault(doc_id, set()).add(token)\n\n for doc_id, found_tokens in doc_results.items():\n if _compare_tokens(tokens, found_tokens, match_all, use_startswith):\n doc_scores[doc_id] = doc_scores.get(doc_id, 0) + _calculate_score(\n tokens, found_tokens\n )\n\n document_ids = [\n x[0] for x in sorted(doc_scores.items(), key=lambda x: -x[1])\n ]\n results = DocumentRecord.objects.filter(pk__in=document_ids)\n return results, document_ids",
"def _formatQuery(self, query_dict):\n pass",
"def annotate_record(self, record, variant_result):\n record.INFO['variant_id'] = variant_result.variant_id\n record.INFO['gene'] = \",\".join(variant_result.genes)\n record.INFO['gnomad_exomes_AF'] = variant_result.gnomad_exomes_af\n record.INFO['gnomad_genomes_AF'] = variant_result.gnomad_genomes_af\n record.ALT = variant_result.alt\n record.POS = variant_result.pos\n record.ID = \";\".join(variant_result.rs_ids) or \".\"\n return record",
"def _transform(self, document):\n transformed = {\n \"@context\": \"http://schema.org\",\n \"@type\": \"JobPosting\",\n }\n basic_mappings = {\n 'title': 'PositionTitle',\n 'qualifications': 'QualificationSummary',\n 'url': 'PositionURI',\n }\n for target_key, source_key in basic_mappings.items():\n transformed[target_key] = document.get(source_key)\n\n # many of the fields we want are in UserArea->Details\n # sadly most of these never seem to show up in real data,\n # but they are mentioned in the API docs so they are worth checking for\n user_details = document.get('UserArea', {}).get('Details', {})\n transformed['description'] = user_details.get('JobSummary', None)\n transformed['educationRequirements'] = \\\n user_details.get('Education', None)\n transformed['responsibilities'] = user_details.get('MajorDuties', None)\n transformed['experienceRequirements'] = \\\n user_details.get('Requirements', None)\n transformed['jobBenefits'] = user_details.get('Benefits', None)\n\n # employment type, salary, and location are stored in lists;\n # pick the first one\n position_schedules = document.get('PositionSchedule', [])\n if len(position_schedules) > 0:\n transformed['employmentType'] = \\\n position_schedules[0].get('Name', None)\n\n remuneration = document.get('PositionRemuneration', [])\n if len(remuneration) > 0:\n transformed['baseSalary'] = {\n '@type': 'MonetaryAmount',\n 'minValue': float(remuneration[0].get('MinimumRange', None)),\n 'maxValue': float(remuneration[0].get('MaximumRange', None))\n }\n\n locations = document.get('PositionLocation', [])\n if len(locations) > 0:\n transformed['jobLocation'] = {\n '@type': 'Place',\n 'address': {\n '@type': 'PostalAddress',\n 'addressLocality': locations[0].get('CityName', ''),\n 'addressRegion': locations[0].get('CountrySubDivisionCode', ''),\n 'addressCountry': locations[0].get('CountryCode', ''),\n }\n }\n\n # both organization and the department within the org. are defined\n transformed['hiringOrganization'] = {\n '@type': 'Organization',\n 'name': document.get('OrganizationName')\n }\n department_name = document.get('DepartmentName', None)\n if department_name:\n transformed['hiringOrganization']['department'] = {\n '@type': 'Organization',\n 'name': department_name\n }\n\n if not document['PositionStartDate']:\n transformed['datePosted'] = None\n else:\n start = datetime.strptime(\n document['PositionStartDate'],\n self.DATE_FORMAT\n )\n transformed['datePosted'] = start.date().isoformat()\n if not document['PositionEndDate']:\n transformed['validThrough'] = None\n else:\n end = datetime.strptime(\n document['PositionEndDate'],\n self.DATE_FORMAT\n )\n transformed['validThrough'] = end.isoformat()\n\n return transformed",
"def get_docs(self, query):\n data = {}\n tot_docs = Doc.objects().count()\n for word in query:\n ind = Index.objects(key=word).first()\n if not ind:\n continue\n data[word] = {\n \"idf\": math.log(\n tot_docs / len(ind.documents), 10\n ), # calculate idf of the query word\n \"docs\": ind.documents, # Documents which contain word\n }\n return data",
"def export_doc_qa_feedback():\n relevant_feedback_query = {\"query\": {\"bool\": {\"must\": [{\"term\": {\"label\": \"relevant\"}}]}}}\n result = scan(elasticsearch_client, index=DB_INDEX_FEEDBACK, query=relevant_feedback_query)\n\n per_document_feedback = defaultdict(list)\n for feedback in result:\n document_id = feedback[\"_source\"][\"document_id\"]\n per_document_feedback[document_id].append(\n {\n \"question\": feedback[\"_source\"][\"question\"],\n \"id\": feedback[\"_id\"],\n \"answers\": [\n {\"text\": feedback[\"_source\"][\"answer\"], \"answer_start\": feedback[\"_source\"][\"offset_start_in_doc\"]}\n ],\n }\n )\n\n export_data = []\n for document_id, feedback in per_document_feedback.items():\n document = document_store.get_document_by_id(document_id)\n context = document.text\n export_data.append({\"paragraphs\": [{\"qas\": feedback}], \"context\": context})\n\n export = {\"data\": export_data}\n\n return export",
"def update_by_query(self, collection, query, document):\n try:\n self.set_dynamic_mapping(collection)\n document_id = document.get_id()\n document_body = document.to_dict()\n if \"_id\" in document_body.keys():\n del document_body['_id']\n self.client.index(\n self.index, \n collection, \n document_body,\n id=self.query_to_id(query)\n )\n except Exception as e:\n print(e)\n pass",
"def results_to_formatted_dicts(query_results):\n\n # Initialize array and get data/'hits'\n data_formatted = []\n# pprint(query_results)\n data = [doc for doc in query_results[\"hits\"][\"hits\"]]\n\n entries_1 = (\"type\", \"instance\", \"@version\", \"index\", \"geoip\")\n entries_2 = (\n \"highlight\",\n \"fields\",\n \"location\",\n \"_score\",\n \"_index\",\n \"_source\",\n \"_type\",\n \"sort\",\n )\n\n for doc in data:\n source_dictionary = doc[\"_source\"]\n # Check that geoip information is valid and uncorrupted\n if \"geoip\" in source_dictionary:\n # Check if geoip key is present but empty\n if not source_dictionary[\"geoip\"]:\n for key in entries_1:\n if key in source_dictionary:\n del source_dictionary[key]\n # Collect epoch timestamp and update dictionary with geoip items\n epoch_timestamp = doc[\"fields\"][\"@timestamp\"][0]\n doc.update(source_dictionary)\n\n # Delete second level keys for final flattened dictionary\n for key in entries_2:\n if key in doc:\n del doc[key]\n # Add epoch timestamp and collect ip error tag\n doc[\"epoch_timestamp\"] = epoch_timestamp\n doc[\"tags\"] = doc[\"tags\"][0]\n data_formatted.append(doc)\n\n else:\n # Delete duplicate country code and rename country_code2 -> country_code - if country code exists\n try:\n del source_dictionary[\"geoip\"][\"country_code3\"]\n source_dictionary[\"geoip\"][\"country_code\"] = source_dictionary[\n \"geoip\"\n ].pop(\"country_code2\")\n except:\n source_dictionary[\"geoip\"][\"country_code\"] = \"N/A\"\n # Collect all items in geoip dictionary\n geoip_items = source_dictionary[\"geoip\"]\n\n # Delete first level keys entries\n for key in entries_1:\n if key in source_dictionary:\n del source_dictionary[key]\n\n # Collect epoch timestamp and update dictionary with geoip items\n epoch_timestamp = doc[\"fields\"][\"@timestamp\"][0]\n doc.update(geoip_items)\n doc.update(source_dictionary)\n\n # Delete second level keys for final flattened dictionary\n for key in entries_2:\n if key in doc:\n del doc[key]\n # Add epoch timestamp and append dictionary\n doc[\"epoch_timestamp\"] = epoch_timestamp\n data_formatted.append(doc)\n else:\n continue\n\n return data_formatted"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns an initialised ci.CompassInterface object. Note that `Depends` adds the OAuth2 integration with OpenAPI.
|
async def ci_user(request: requests.Request, token: str = Depends(oauth2_scheme)) -> ci.CompassInterface:
return await get_current_user(request, token)
|
[
"def overlayCompassRose(self, compassRose=None):\r\n SlTrace.lg(\"overlayCompassRose\", \"compass_rose\")\r\n canvas = self.get_canvas()\r\n if canvas is None:\r\n return\r\n sc = self.sc\r\n gmi = self.get_gmi()\r\n if compassRose is not None:\r\n self.compass_rose = CompassRose(compassRose).live_obj()\r\n if self.compass_rose is None:\r\n return\r\n \r\n cro = self.compass_rose\r\n if cro.tags:\r\n for tag in cro.tags:\r\n canvas.delete(tag)\r\n cro.tags = []\r\n x_fract = cro.x_fract\r\n y_fract = cro.y_fract\r\n lenFraction = cro.len_fract * 1\r\n canvas_width = self.get_canvas_width()\r\n canvas_height = self.get_canvas_height()\r\n canvas_x = int(canvas_width * x_fract)\r\n canvas_y = int(canvas_height * y_fract)\r\n arrow_len = int(sqrt(canvas_width**2 + canvas_height**2) * lenFraction)\r\n arrow_len_m = gmi.pixelToMeter(arrow_len)\r\n cent_color = \"red\"\r\n cr_circle = self.create_circle((canvas_x,canvas_y), radius=5, fill=cent_color)\r\n cro.tags.append(cr_circle)\r\n north_deg = GeoDraw.NORTH_DEG # Default map north\r\n arrow_color = \"green\"\r\n arrow_width = 3\r\n x_image, y_image = sc.canvas_to_image(canvas_x, canvas_y)\r\n apt_image = gmi.addToPoint(leng=arrow_len_m, xY=(x_image,y_image),\r\n deg=north_deg, unit=\"m\")\r\n apt_image_x, apt_image_y = apt_image\r\n apt_canvas_x, apt_canvas_y = sc.image_to_canvas(apt_image_x,\r\n apt_image_y)\r\n tag = canvas.create_line(canvas_x,canvas_y, apt_canvas_x, apt_canvas_y,\r\n arrow=\"last\",\r\n arrowshape=(3*arrow_width,4*arrow_width,\r\n 2*arrow_width),\r\n fill=arrow_color, width=arrow_width)\r\n cro.tags.append(tag)\r\n # North Label\r\n label_size = 16\r\n north_label_font = (\"Helvetica\", label_size)\r\n text_off_x = label_size\r\n text_off_y = text_off_x\r\n apt_text_x = apt_canvas_x + text_off_x\r\n apt_text_y = apt_canvas_y - text_off_y\r\n apt_text_pos = (apt_text_x, apt_text_y)\r\n tag = canvas.create_text(apt_text_pos, text = \"North\",\r\n font=north_label_font, fill=arrow_color)\r\n cro.tags.append(tag)",
"def download_and_install_compass():\n os_type = sys.platform\n pkg_format = get_pkg_format()\n\n # Sometimes sys.platform gives us 'linux2' and we only want 'linux'\n if os_type.startswith('linux'):\n os_type = 'linux'\n if pkg_format == 'apt':\n os_type += '_deb'\n elif pkg_format == 'yum' or pkg_format == 'dnf':\n os_type += '_rpm'\n elif os_type == 'darwin':\n os_type = 'osx'\n\n if os_type.startswith('linux') and os.getuid() != 0:\n print 'You must run this script as root.'\n sys.exit(1)\n\n if os_type.startswith('linux') and not is_supported_distro():\n print 'You are using an unsupported Linux distribution.\\n' \\\n 'Please visit: https://compass.mongodb.com/community-supported-platforms' \\\n ' to view available community supported packages.'\n sys.exit(1)\n\n if platform.machine() != 'x86_64':\n print 'Sorry, MongoDB Compass is only supported on 64 bit platforms.' \\\n ' If you believe you\\'re seeing this message in error please open a' \\\n ' ticket on the SERVER project at https://jira.mongodb.org/'\n\n link = 'https://compass.mongodb.com/api/v2/download/latest/compass/stable/' + os_type\n pkg = download_pkg(link, pkg_format=pkg_format)\n\n print 'Installing the package...'\n if os_type == 'osx':\n install_mac(pkg)\n elif os_type.startswith('linux'):\n install_linux(pkg_format, pkg)\n else:\n print 'Unrecognized os_type: %s' % os_type\n\n print 'Cleaning up...'\n os.remove(pkg)\n print 'Done!'",
"def classFactory(iface): # pylint: disable=invalid-name\n #\n from .avaframeConnector import AvaFrameConnectorPlugin\n return AvaFrameConnectorPlugin()",
"def __init__(\n self,\n hass: core.HomeAssistant,\n config_entry: config_entries.ConfigEntry,\n implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,\n ) -> None:\n self._oauth_session = config_entry_oauth2_flow.OAuth2Session(\n hass, config_entry, implementation\n )\n\n assert isinstance(implementation, HomePlusControlOAuth2Implementation)\n\n # Create the API authenticated client - external library\n super().__init__(\n subscription_key=implementation.subscription_key,\n oauth_client=aiohttp_client.async_get_clientsession(hass),\n update_intervals=DEFAULT_UPDATE_INTERVALS,\n )",
"def _create_client(self):\r\n self.association_refresh_time = {}\r\n auth_plugin = k_loading.load_auth_from_conf_options(\r\n cfg.CONF, 'placement')\r\n client = k_loading.load_session_from_conf_options(\r\n cfg.CONF, 'placement', auth=auth_plugin)\r\n client.additional_headers = {'accept': 'application/json'}\r\n return client",
"def classFactory(iface): # pylint: disable=invalid-name\n\n # Instala a dependênia do plugin (módulo xmltodict)\n installDependency()\n\n # import os\n # pip_file = os.path.dirname(__file__) + '/get-pip.py'\n #\n # if os.path.exists(pip_file) and os.path.isfile(pip_file):\n # # cli = 'cd C:\\Program Files\\QGIS 3.4.8\\\\apps\\Python37 && python ' + pip_file + ' && python -m pip install xmltodict'\n # cli = 'cd C:\\Program Files\\QGIS 3.16\\\\apps\\Python37 && python -m pip install xmltodict'\n # cli = cli.replace('/', '\\\\')\n # os.system(cli)\n #\n # import sys\n # if 'xmltodict' in sys.modules:\n # os.remove(pip_file)\n\n from .siat_consultation import SiatConsultation\n return SiatConsultation(iface)",
"def connect_gis(self):\n with open(r'../keys.yaml') as file:\n \tkeys = yaml.load(file, Loader=yaml.FullLoader)\n \n try:\n gis = GIS(url=\"http://lahub.maps.arcgis.com/home/organization.html\",\n username=keys['arcgis_username'],password=keys['arcgis_password'])\n \n print(\"Successful Connection to AGOL API: \",type(gis.content))\n \n except tokenize.TokenError:\n pass\n \n except:\n print('Other Error')\n \n return gis",
"def __init__(self, client_id, client_secret, access_token=None,\n refresh_token=None, expires_at=None, refresh_cb=None,\n redirect_uri=None, **kwargs):\n self.client = FitBarkOauth2Client(\n client_id,\n client_secret,\n access_token=access_token,\n refresh_token=refresh_token,\n expires_at=expires_at,\n refresh_cb=refresh_cb,\n redirect_uri=redirect_uri,\n **kwargs\n )",
"def build(self, name):\r\n if name == AuthMethod.oauth1_signature_service(): return OAuth1SignatureService(self.__configuration)\r\n elif name == AuthMethod.oauth2_password_service(): return OAuth2PasswordService(self.__configuration)\r\n elif name == AuthMethod.oauth2_assertion_service(): return OAuth2AssertionService(self.__configuration)\r\n else: raise NotImplementedError(name)",
"def ReadCompassRaw():\n global bus\n global magxoffset, magyoffset, magzoffset\n\n # Set single measurement mode\n register = 0x0A # CTRL\n data = 1 # Single measurement mode\n try:\n bus.write_byte_data(addressMPU9150mag, register, data)\n except:\n Print('Failed sending CTRL!')\n time.sleep(0.01)\n\n # Wait for dataready\n register = 0x02 # Status 1\n try:\n\tstatus = 0\n\twhile (status & 1) == 0:\n\t status = bus.read_byte_data(addressMPU9150mag, register)\n except:\n Print('Failed reading ST1!')\n\n # Read the data from the compass chip\n try:\n [xl, xh, yl, yh, zl, zh] = bus.read_i2c_block_data(addressMPU9150mag, 3, 6)\n except:\n Print('Failed reading registers!')\n status = 0\n xh = 0\n xl = 0\n yh = 0\n yl = 0\n zh = 0\n zl = 0\n \n # Convert from unsigned to correctly signed values\n bytes = struct.pack('BBBBBB', xl, xh, yl, yh, zl, zh)\n x, y, z = struct.unpack('hhh', bytes)\n\n return x - magxoffset, y - magyoffset, z - magzoffset",
"def classFactory(iface): # pylint: disable=invalid-name\n #\n from .qgis_acoustics import QGISAcoustics\n return QGISAcoustics(iface)",
"def autonomousInit(self) -> None:\n ...",
"def initialize_apis(rest_host: str, candlepin_host: str) -> API:\n return API(\n candlepin=Candlepin(candlepin_host),\n user=UserV1(rest_host),\n regnum=RegnumV5(rest_host),\n activation=ActivationV2(rest_host),\n terms=TermsV1(rest_host),\n )",
"def init_client():\n with open(\".api-key\") as api_file:\n api_key = api_file.read()\n gmaps = GmapsClient(api_key)\n return gmaps",
"def init_config_flow(hass):\n config_flow.register_flow_implementation(\n hass,\n DOMAIN,\n client_id=\"id\",\n client_secret=\"secret\",\n api_key=\"123\",\n redirect_uri=\"http://example.com\",\n sensors=None,\n )\n flow = config_flow.LogiCircleFlowHandler()\n flow._get_authorization_url = Mock(return_value=\"http://example.com\")\n flow.hass = hass\n return flow",
"def _calibconstants(self, **kwa):\n if self._calibc_ is None:\n logger.debug('AreaDetector._calibconstants - make CalibConstants')\n cc = self._calibconst # defined in DetectorImpl\n if is_none(cc, 'self._calibconst is None'): return None\n self._calibc_ = CalibConstants(cc, **kwa)\n return self._calibc_",
"def _init_client(self):\n access_token = self._read_token()\n\n if isinstance(access_token, tuple):\n sess = dbsession.DropboxSession(self.app_key, self.app_secret)\n sess.set_token(access_key, access_secret)\n self.api_client = dbclient.DropboxClient(sess)\n log.debug(\"Loaded OAuth 1 access token.\")\n elif access_token:\n self.api_client = dbclient.DropboxClient(access_token)\n log.debug(\"Loaded OAuth 2 access token\")\n else:\n self.api_client = None",
"def _setupOcclusion(self):\n technique = self.settings.occlusionTechnique\n self.debug(\"Creating occlusion handle for\", technique)\n\n if technique == \"None\":\n self.occlusion = AmbientOcclusionTechniqueNone()\n elif technique == \"SAO\":\n self.occlusion = AmbientOcclusionTechniqueSAO()\n else:\n self.error(\"Unkown occlusion technique:\", technique)\n self.occlusion = AmbientOcclusionTechniqueNone()",
"def autonomousInit(self) -> None:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Loads a text file
|
def load_text_file(self):
with open(self.file_name, "r") as filino:
data = filino.readlines()
return data
|
[
"def _load_text_from_file(self, file_path):\n try:\n return open(file_path, 'r').read()\n except IOError:\n print \"Warning: File name not found.\"\n print \"No text loaded.\"\n return str()",
"def load_file(self, file_path):\n ...",
"def open_file(self, text_file_name):\r\n\t\tself.text_file_name = text_file_name\r\n\t\tfile_obj = open(self.text_file_name)\r\n\t\tcontent = file_obj.read()\r\n\t\tfile_obj.close()\r\n\t\treturn content",
"def from_txt_file(cls, path, **kwargs):\n with open(path, 'rb') as f:\n return cls(lines=f, **kwargs)",
"def loadtxt(filename, **kwargs):\n return np.loadtxt(str(filename), **kwargs)",
"def open_and_read_file(file_path):\n\n text_file = open(file_path)\n text = text_file.read()\n\n return text",
"def import_from_txt(file):\n\n with open(file, \"r\") as f:\n doc = f.read() # Read all contents, no line breaks\n\n return doc",
"def _load_tex_file(self):\n with open(self.input_file.path, 'r', encoding='utf8') as file:\n return [line.strip() for line in file]",
"def load_text (file_name, directory_name):\n with open(directory_name+file_name, encoding=ENCODING) as f:\n string = f.read()\n string = normalize(string)\n return string",
"def loadfile(self):\n assembler = AssemblyFileReader('test.s')\n assembler.read_into_list()\n file_data = ''\n for e in assembler._file: # Create a large formatted string to be disp\n file_data += e\n self.Code_View_lbl.delete(1.0, END) # Clear text\n self.Code_View_lbl.insert(END, file_data) # Insert the file text\n self.Code_View_lbl.tag_configure(\"current_line\", background=\"#e9e9e9\")\n self.Code_View_lbl.tag_remove(\"current_line\", 1.0, \"end\")\n self.Code_View_lbl.tag_add(\"current_line\", 1.0, 2.0)",
"def load_from_local_file(self, path):\n # open file for writing and dump in text\n with open(path, \"r\") as text_file:\n self._top_sites_text = text_file.read()",
"def load_file(module_path: str, filename: str) -> str:\n directory, _ = os.path.splitext(module_path)\n filename = os.path.join(directory, filename)\n with open(filename) as f:\n text = f.read()\n return text",
"def load_input() -> str:\n with open('input.txt') as f:\n return f.read()",
"def add_text_file(self, filename):\n text_file = open(filename, 'r')\n text = text_file.read()\n self.addText(text)\n text_file.close()",
"def load_pickled (self, file_name, directory='.//Pickled_Texts/Default/'):\n with open(directory + file_name +'.pkl', 'rb') as f:\n text_list = pickle.load(f)\n self.text_list = text_list",
"def _load_file(self):\n self.insertion_offset = 0\n self.populate_rand()\n self.file_name = self.randlist.popleft()\n with open(self.file_name) as f:\n self.file = list((f.readlines()))\n for x,i in enumerate(self.file):\n self.file[x] = i.rstrip('\\n')",
"def open_file():\r\n filepath = askopenfilename(\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\r\n )\r\n if not filepath:\r\n return\r\n txt_edit.delete(1.0, tk.END)\r\n with open(filepath, \"r\") as input_file:\r\n text = input_file.read()\r\n txt_edit.insert(tk.END, text)\r\n window3.title(f\"Text Editor Application - {filepath}\")",
"def __read_text(self) -> None:\n # Reading all txt from a dict\n if os.path.isdir(self.path):\n for txt in [x for x in os.listdir(self.path) if x.endswith((\".txt\", \".TXT\", \".Txt\"))]:\n file = open(os.path.join(self.path, txt), \"r\")\n text = file.read()\n if self.data == \"\":\n self.data += text\n else:\n self.data += \" \" + text\n file.close()\n\n # Reading all txt from a file\n elif self.path.endswith((\".txt\", \".TXT\", \".Txt\")):\n with open(self.path, \"r\") as file:\n self.data = file.read()",
"def read_file(file_path):\n f = open(file_path, 'r')\n txt = f.read()\n f.close()\n return txt"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the tokens from `.by_resource_server`, ensure that exactly one token was returned, and return that token. If the token_data includes a "refresh_token" field, update self.refresh_token to that value.
|
def _extract_token_data(self, res: OAuthTokenResponse) -> dict[str, t.Any]:
token_data_list = list(res.by_resource_server.values())
if len(token_data_list) != 1:
raise ValueError(
"Attempting refresh for refresh token authorizer "
"didn't return exactly one token. Possible service error."
)
token_data = next(iter(token_data_list))
# handle refresh_token being present
# mandated by OAuth2: https://tools.ietf.org/html/rfc6749#section-6
if "refresh_token" in token_data:
self.refresh_token = token_data["refresh_token"]
return token_data
|
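The check above depends only on the shape of the `by_resource_server` mapping, so it can be exercised without a live OAuth response. Below is a minimal sketch in which a plain dict stands in for `res.by_resource_server` and a hypothetical `DummyAuthorizer` class holds the `refresh_token` attribute; neither name comes from the dataset row.
import typing as t

class DummyAuthorizer:
    # hypothetical stand-in: only the behaviour exercised by the extraction logic is reproduced
    def __init__(self) -> None:
        self.refresh_token = "old-token"

    def extract(self, by_resource_server: dict) -> dict:
        token_data_list = list(by_resource_server.values())
        if len(token_data_list) != 1:
            raise ValueError("expected exactly one token")
        token_data = token_data_list[0]
        # mirror the refresh_token update from the row above
        if "refresh_token" in token_data:
            self.refresh_token = token_data["refresh_token"]
        return token_data

auth = DummyAuthorizer()
data = auth.extract({"transfer.api": {"access_token": "abc", "refresh_token": "new-token"}})
print(data["access_token"], auth.refresh_token)  # abc new-token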
[
"def refresh_tokens(self) -> Dict[str, Union[str, int]]:\n LOGGER.info(\"Refreshing tokens ...\")\n token = self._oauth.refresh_token(f\"{self.host}{ENDPOINT_TOKEN}\")\n\n if self.token_updater is not None:\n self.token_updater(token)\n\n return token",
"def refresh_token(self):\n if not self._refresh_token:\n self.get_oauth_tokens()\n\n return self._refresh_token",
"def refresh_token(self, token_info):\r\n if 'refresh_token' not in token_info:\r\n return self.get_new_token()\r\n refresh_request = {'refresh_token': token_info['refresh_token'],\r\n 'client_id': self.user_id,\r\n 'client_secret': self.key,\r\n 'grant_type': 'refresh_token'}\r\n\r\n new_token = self._token_request(refresh_request)\r\n if 'refresh_token' not in new_token:\r\n new_token['refresh_token'] = token_info['refresh_token']\r\n return new_token",
"def _save_token_data_from_response(self, token_data: dict[str, Any]) -> None:\n self._token_last_refreshed = datetime.utcnow()\n self.access_token = token_data[\"access_token\"]\n if refresh_token := token_data.get(\"refresh_token\"):\n self.refresh_token = refresh_token",
"def get_refresh_token_data(self):\n return {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token\n }",
"def _refresh(self):\n # Request and set a new API token.\n new_token = self.authenticate(self._username, self._password)\n self._token = new_token\n logger.info('New API token received: \"{}\".'.format(new_token))\n return self._token",
"def get_refresh(self):\n\t\tauth_info = self.__get_refresh__()\n\t\tself.token_info['access_token'] = auth_info['access_token']\n\t\tself.token_info['token_type'] = auth_info['token_type']\n\t\tself.token_info['base_uri'] = auth_info['resource_server_base_uri']\n\t\tself.token_info['expire_time'] = datetime.now() + \\\n\t\t\ttimedelta(seconds=auth_info['expires_in'])\n\n\t\tself.refresh_info['refresh_token'] = auth_info['refresh_token']\n\t\tself.refresh_info['refresh_uri'] = auth_info['refresh_token_server_uri']\n\t\tself.refresh_info['refresh_time'] = self.token_info['expire_time'] - \\\n\t\t\ttimedelta(seconds=300)\n\n\t\tself.is_authenticated = True\n\t\treturn auth_info",
"def get_token(self):\n if self.oauth['credentials'].access_token_expired:\n self.oauth['credentials']._refresh(httplib2.Http().request)\n self.oauth['token'] = None # need a new token after refreshing\n if self.oauth['token'] is None:\n self.oauth['token'] = gdata.gauth.OAuth2Token(\n self.oauth['client_id'],\n self.oauth['client_secret'],\n self.oauth['scope'],\n self.oauth['user_agent'],\n access_token = self.oauth['credentials'].access_token,\n refresh_token = self.oauth['credentials'].refresh_token)\n return self.oauth['token']",
"def refresh_token():\n\n enc_token = jwt_helper.get_token_from_cookie(cookies=request.cookies, key='refToken')\n __, jwt_content = jwt_helper.decode(token=enc_token, token_type='refresh')\n\n # check_jti()\n subject = jwt_content['sub']\n refresh_token, access_token = jwt_helper.gen_tokens(subject)\n resp = jwt_helper.make_token_response(access_token, refresh_token)\n return resp",
"def refresh_token(self) -> None:\n payload: Dict[str, str] = {\"apiKey\": API_KEY}\n headers: Dict[str, str] = {\"Content-Type\": \"application/json\"}\n\n r: requests.Response = requests.post(AUTH_URL, json=payload, headers=headers)\n\n if r.status_code != 200:\n raise Unauthorized(msg=\"Bad response from server\")\n\n response = r.json()\n token: str = response.get(\"token\") or \"\"\n auth: bool = response.get(\"auth\", False)\n\n if not auth or not token:\n raise Unauthorized(msg=\"Bad response from server\")\n\n self.token = token\n self.auth = auth",
"def access_token(self):\n if not self._access_token:\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n payload = urllib.urlencode({\n 'grant_type': 'refresh_token',\n 'client_id': OAUTH_CLIENT_ID,\n 'refresh_token': self.refresh_token\n })\n\n request = urllib2.Request(OAUTH_URL, headers=headers, data=payload)\n request.get_method = lambda: 'POST'\n\n try:\n response = urllib2.urlopen(request)\n data = json.load(response)\n self._access_token = data['access_token']\n except urllib2.HTTPError:\n # the refresh token has expired or become invalid\n self._refresh_token = None\n self.get_oauth_tokens()\n\n return self._access_token",
"async def get_refresh_token(\n cls, session: AsyncSession, token: uuid.UUID\n ) -> Optional[RefreshToken]:",
"def refresh_tokens(self) -> str:\n run_coroutine_threadsafe(\n self.session.async_ensure_token_valid(), self.hass.loop\n ).result()\n\n return self.session.token[\"access_token\"] # type: ignore[no-any-return]",
"def refresh_token(user, refresh: bool = True) -> Token:\n Token.objects.filter(user=user).delete()\n token, _ = Token.objects.get_or_create(user=user)\n return token.key",
"async def refresh_token(self, client):\n try:\n # Refresh API tokens\n _LOGGER.debug(f'Refreshing tokens for client \"{client}\"')\n if client in ['skoda', 'smartlink', 'connect']:\n body = {\n 'grant_type': 'refresh_token',\n 'brand': BRAND,\n 'refresh_token': self._session_tokens[client]['refresh_token']\n }\n url = 'https://tokenrefreshservice.apps.emea.vwapps.io/refreshTokens'\n else:\n body = {\n 'grant_type': 'refresh_token',\n 'scope': 'sc2:fal',\n 'token': self._session_tokens[client]['refresh_token']\n }\n url = 'https://mbboauth-1d.prd.ece.vwg-connect.com/mbbcoauth/mobile/oauth2/v1/token'\n\n try:\n response = await self._session.post(\n url=url,\n headers=TOKEN_HEADERS.get(client),\n data = body,\n )\n except:\n raise\n\n if response.status == 200:\n tokens = await response.json()\n # Verify access_token\n if 'access_token' in tokens:\n if not await self.verify_token(tokens['access_token']):\n _LOGGER.warning('Tokens could not be verified!')\n for token in tokens:\n self._session_tokens[client][token] = tokens[token]\n return True\n elif response.status == 400:\n error = await response.json()\n if error.get('error', {}) == 'invalid_grant':\n _LOGGER.debug(f'VW-Group API token refresh failed: {error.get(\"error_description\", {})}')\n if client == 'vwg':\n return await self._getAPITokens()\n else:\n resp = await response.json()\n _LOGGER.warning(f'Something went wrong when refreshing tokens for \"{client}\".')\n _LOGGER.debug(f'Headers: {TOKEN_HEADERS.get(\"vwg\")}')\n _LOGGER.debug(f'Request Body: {body}')\n _LOGGER.warning(f'Something went wrong when refreshing VW-Group API tokens.')\n except Exception as error:\n _LOGGER.warning(f'Could not refresh tokens: {error}')\n return False",
"def get_refresh_token(self, user):\n refresh_token = None\n payment_authorization = PaymentAuthorization.objects.filter(user=user)\n if payment_authorization:\n refresh_token = payment_authorization.refresh_token\n return refresh_token",
"def generate_refresh_token(self):\n params = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'refresh_token': self.refresh_token,\n 'grant_type': 'refresh_token',\n }\n request_url = self.command_to_url('o/oauth2/token')\n # bandit security check for Issue: [B310:blacklist]\n # More Info: https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b310-urllib-urlopen # noqa\n if not request_url.startswith('https:'):\n raise RuntimeError(\n \"[-] request_url does not start \"\n f\"with 'https:' - {request_url}\")\n response = urllib.request.urlopen( # nosec\n request_url,\n urllib.parse.urlencode(params).encode('UTF-8')\n ).read().decode('UTF-8')\n return json.loads(response)",
"def get_refresh_token_authorizer(self):\n\n # Get client from globus sdk to act on\n client = globus_sdk.NativeAppAuthClient(self.CLIENT_ID)\n client.oauth2_start_flow(refresh_tokens=True)\n\n # Get authorizer that handles the refreshing of token\n return globus_sdk.RefreshTokenAuthorizer(self.TRANSFER_RT, client)",
"def get_access_token(self):\n # will need to implement method for refreshing refresh token (90 day expiration)\n\n aws_access_key = Variable.get(\"aws_access_key_id\")\n aws_secret_key = Variable.get(\"aws_secret_access_key\")\n s3_client = boto3.client(\n 's3',\n aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_key\n )\n\n bytes_buffer = io.BytesIO()\n s3_client.download_fileobj(Bucket=\"on-da-dip\", Key=\"tokeninfo.txt\", Fileobj=bytes_buffer)\n byte_value = bytes_buffer.getvalue()\n refresh_token = byte_value.decode()\n\n endpoint = self.url + \"oauth2/token\"\n grant_type = \"refresh_token\"\n access_type = \"offline\"\n\n data = {\n \"grant_type\": grant_type,\n \"access_type\": access_type,\n \"refresh_token\": refresh_token,\n \"client_id\": self.client_id\n }\n\n result = requests.post(url=endpoint, data=data)\n\n if result.status_code == 200:\n result_body = result.json()\n self.access_token = result_body[\"access_token\"]\n\n cwd = os.getcwd()\n dir = os.path.dirname(cwd)\n refresh_token_file = open(dir + \"/creds/tokeninfo.txt\", \"wt\")\n # need to update token file with latest refresh token\n refresh_token_file.write(result_body[\"refresh_token\"])\n refresh_token_file.close()\n\n s3_client.upload_file(Filename=dir + \"/creds/tokeninfo.txt\", Bucket=\"on-da-dip\", Key=\"tokeninfo.txt\")\n\n elif result.status_code == 401:\n print(\"Invalid credentials.\")\n elif result.status_code == 403:\n print(\"User doesn't have access to this account and/or permissions.\")\n elif result.status_code == 400:\n print(\"Validation unsuccessful. Check that client id and refresh tokens are correct.\")\n elif result.status_code == 500:\n print(\"Server error, try again later.\")\n else:\n print(\"Unknown error.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prepare a line for recognition; this inverts it, transposes it, and pads it.
|
def prepare_line(line,pad=16):
line = line * 1.0/np.amax(line)
line = np.amax(line)-line
line = line.T
if pad>0:
w = line.shape[1]
line = np.vstack([np.zeros((pad,w)),line,np.zeros((pad,w))])
return line
|
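A quick way to see the normalize/invert/transpose/pad behaviour is to run the function on a tiny synthetic line image. The sketch below repeats the definition from the row above so it runs on its own; the 8x40 test image is an arbitrary stand-in for a real text line.
import numpy as np

def prepare_line(line, pad=16):
    line = line * 1.0 / np.amax(line)   # normalize to [0, 1]
    line = np.amax(line) - line         # invert: dark ink becomes high values
    line = line.T                       # transpose so time runs along axis 0
    if pad > 0:
        w = line.shape[1]
        line = np.vstack([np.zeros((pad, w)), line, np.zeros((pad, w))])
    return line

img = np.full((8, 40), 255.0)           # fake 8x40 line image, white background
img[3:5, 10:30] = 0.0                   # a dark stroke
out = prepare_line(img, pad=16)
print(img.shape, "->", out.shape)       # (8, 40) -> (72, 8): 40 columns plus 2*16 padding rows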
[
"def _fill_line(line):\n # Length must be 164: TID, FGCID, IFX, IFY, 4 * 10 * (PFiPX, PFjPY, occupancy, speed)\n while len(line) < 164:\n line.append('')\n line.append('')\n line.append('')\n line.append('')\n return line",
"def _prepare_line(self, line):\r\n return line.rstrip('\\r\\n').strip()",
"def _draw_horizontal_aid_line(self, pos, with_outer=True):\n aid_line = super(SelectionAidLine, self)._draw_horizontal_aid_line(pos, False)\n aid_line.sensitive = True\n return aid_line",
"def setup_line(line, indices, salt):\n if len(indices) <= 0:\n return line\n length_of_line = len(line)\n new_line = CONST.EMPTY_STRING\n start_index = 0\n for index in indices:\n new_line += line[start_index : index + salt] + CONST.NEW_LINE\n start_index = index + salt\n new_line += line[start_index:]\n return new_line.strip(CONST.NEW_LINE)",
"def prepare_line_data(trimmed_lines, norm_intensity_matrix, step_size):\n _, n_points, n_lines = trimmed_lines.shape\n n_grid, _, n_angles = norm_intensity_matrix.shape\n angle_step = np.int(180 / n_angles)\n line_data = np.zeros(\n (7, n_points, n_lines)) # 7 rows are x, y, angle, is_active, intensity, linewidth, q. Feel free to add more.\n line_data[:4, :, :] = trimmed_lines\n\n # Set up subgrid\n a1_shape = np.array((n_grid, n_grid))\n a1_spacing = np.array((step_size, step_size))\n grid_density = np.array((1, 1)) # No need for extra precision when propogating lines\n grid_to_subgrid, subgrid_to_grid, cart_to_grid, grid_to_cart, cart_to_subgrid, subgrid_to_cart = define_subgrid(\n a1_shape, a1_spacing, grid_density)\n\n for i in range(n_lines):\n # Identify active region of line\n active_mask = np.where(line_data[3, :, i] == 1)\n active_length = len(active_mask[0])\n\n # Get line in grid coordinates\n x_values = line_data[0, :, i][active_mask].astype(int)\n y_values = line_data[1, :, i][active_mask].astype(int)\n row, col, x, y = cart_to_grid(x_values, y_values)\n row, col = row.astype(int), col.astype(int)\n a = (line_data[2, :, i] / angle_step)[active_mask].astype(int)\n\n # Look up intensity\n # square_intensity_matrix = intensity_matrix.reshape(n_grid, n_grid, n_angles)\n intensity_values = norm_intensity_matrix[(row, col, a)]\n line_data[4, :active_length, i] = intensity_values\n\n # Linewidth is not present for now - feel free to add\n # q is not present for now - feel free to add\n\n return line_data",
"def draw_horizontal_line(screen, width, x1, x2, y):\r\n # height = len(screen) * 8 / width\r\n start_byte, start_bit = (y*width + x1) / 8, x1 % 8\r\n end_byte, end_bit = (y*width + x2) / 8, x2 % 8\r\n start_mask = 0xFF >> start_bit\r\n end_mask = ~(0xFF >> end_bit + 1)\r\n # Fill in start and end bytes.\r\n if start_byte == end_byte:\r\n screen[start_byte] |= start_mask & end_mask\r\n else:\r\n screen[start_byte] |= start_mask\r\n screen[end_byte] |= end_mask\r\n # Fill in full middle bytes.\r\n for i in xrange(start_byte + 1, end_byte):\r\n screen[i] = 0xFF\r\n return screen",
"def striped_line(klass, lane, surface, stripes_count, longitudinal, side):\n starts = longitudinal + np.arange(stripes_count) * klass._stripe_spacing\n ends = longitudinal + np.arange(stripes_count) * klass._stripe_spacing + klass._stripe_length\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)",
"def add_line(line,pauli_pos,pauli):\n\n unhidden = see_if_unhidden(pauli)\n p = (1-self.rho[pauli])/2 # prob of 1 output\n # in the following, white lines goes from a to b, and black from b to c\n if unhidden:\n if line=='X':\n \n a = ( self.box[pauli_pos][0]-length/2, self.box[pauli_pos][1]-width/2 )\n c = ( self.box[pauli_pos][0]+length/2, self.box[pauli_pos][1]-width/2 )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, length*(1-p), width, angle=0, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, length*p, width, angle=0, color=(1.0,1.0,1.0)) )\n \n elif line=='Z':\n \n a = ( self.box[pauli_pos][0]-width/2, self.box[pauli_pos][1]-length/2 )\n c = ( self.box[pauli_pos][0]-width/2, self.box[pauli_pos][1]+length/2 )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, width, length*(1-p), angle=0, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, width, length*p, angle=0, color=(1.0,1.0,1.0)) )\n \n else:\n \n \n a = ( self.box[pauli_pos][0]-length/(2*np.sqrt(2)), self.box[pauli_pos][1]-length/(2*np.sqrt(2)) )\n c = ( self.box[pauli_pos][0]+length/(2*np.sqrt(2)), self.box[pauli_pos][1]+length/(2*np.sqrt(2)) )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, width, length*(1-p), angle=-45, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, width, length*p, angle=-45, color=(1.0,1.0,1.0)) )\n \n return p",
"def _get_horizontal_line(self):\n line = [self.SEPARATOR] * self._width\n return ''.join(line)",
"def SetLine ( self, line, buf ):\n\t\tif line > 3 | line < 0: return False # Check bounds\n\t\tif len(buf) < 20: buf.ljust (20) # Pad to 20\n\t\telif len(buf) > 20: buf = buf[0:19] # Drop the remainder\n\t\toutput = \"\\000%c%s\" % ( chr(line), buf ) # Parse the command\n\t\tself.SendCommand ( 31, output )",
"def Line(self, prePos):\n \n if self.Draw:\n pygame.draw.line(self.Parent, self.Colour, prePos, self.GetPos(), self.Width)",
"def create_horizontal_line():\n d = Drawing(100, 1)\n d.add(Line(0, 0, 1000, 0))\n return d",
"def shorten_line(line: Line, intersections: list[Matchstick], gw: GameWindow) -> Line:\n # Get the smallest and largest x coordinates of the intersected sticks\n smallest_stick_x = get_min_x(intersections)\n largest_stick_x = get_max_x(intersections)\n\n # All the sticks are on the same row, so they all have the same y coordinates\n y_low = intersections[0].v_pos - gw.stick_length / 2\n y_high = intersections[0].v_pos + gw.stick_length / 2\n\n # Adjust the x and y coordinates\n new_line = chop_y(line, y_low, y_high)\n new_line = chop_x(new_line, smallest_stick_x - gw.h_spacing/3, largest_stick_x + gw.h_spacing/3)\n\n return new_line",
"def striped_line(cls, lane, surface, stripes_count, s0, side):\r\n starts = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING\r\n ends = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_LENGTH\r\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\r\n cls.draw_stripes(lane, surface, starts, ends, lats)",
"def continuous_line(klass, lane, surface, stripes_count, longitudinal, side):\n starts = [longitudinal + 0 * klass._stripe_spacing]\n ends = [longitudinal + stripes_count * klass._stripe_spacing + klass._stripe_length]\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n klass.draw_stripes(lane, surface, starts, ends, lats)",
"def _draw_vertical_aid_line(self, pos, with_outer=True):\n aid_line = super(SelectionAidLine, self)._draw_vertical_aid_line(pos, False)\n aid_line.sensitive = True\n return aid_line",
"def prepare(self,line):\n if len(line) > 0 and line[0]==\"`\":\n return self.prepare(line[1:])\n return str(line)",
"def extend_line(line,shape=[640,480],plot=False):\n start=line[0]\n end=line[1]\n dxs,dys=shape[0]-start[0],shape[1]-start[1] #offsets from origin\n deltax=np.float(end[0])-np.float(start[0])\n deltay=np.float(end[1])-np.float(start[1])\n if deltax == 0.0:\n slope = 90.\n else:\n slope = deltay/deltax #*-1 ?\n #make a line with this slope, passing through start and end, that extends over the whole frame. Get endpoints...\n #if dxs >= shape[0]/2 and dys <=shape[1]/2: #look closer to bottom right corner...assume all slopes are +-45 degrees\n xvec=np.arange(0,shape[0],1)\n #x2=np.arange(int(xvec),shape[0],1)\n y2=slope*(xvec - np.float(start[0])) +np.float(start[1])\n #else:\n # x2=np.arange(0,int(np.float(start[0])+np.float(dxs)/np.sqrt(2.)+3),1)\n # y2=slope*(x2 - np.float(start[0])) +np.float(start[1])\n\n #now get endpoints for parts of the line that are within the frame - need to re-do limit on y!\n if y2[0] < y2[-1]:\n xi=np.where(y2 >= 0.)[0][0]\n try:\n xf=np.where(y2 >=shape[1]-1)[0][0]\n except IndexError:\n xf = np.where(y2==y2[-1])[0][0]\n else:\n xf=np.where(y2 >= 0.)[0][-1]\n try:\n xi=np.where(y2 >=shape[1]-1)[0][-1]\n except IndexError:\n xi = np.where(y2==y2[0])[0][0]\n\n extended_line=(int(xi),int(y2[xi])),(int(xf),int(y2[xf]))\n #slopeE=float(int(y2[xf])-int(y2[xi]))/float(int(xf)-int(xi))\n #print slope,slopeE\n if plot:\n s1=extended_line[0]\n e1=extended_line[1]\n fig,ax=plt.subplots()\n ax.plot((start[0],end[0]),(start[1],end[1]))\n ax.plot((s1[0],e1[0]),(s1[1],e1[1]),'r--')\n fig.show()\n\n return extended_line#,xvec,y2",
"def draw_line(screen, width, x1, x2, y):\n if x1 > x2:\n x2, x1 = x1, x2\n\n x1_bit = (y * width * 8) + x1\n x2_bit = (y * width * 8) + x2\n\n x1_byte = int(x1_bit/8)\n x2_byte = int(x2_bit/8)\n\n for i in range(8):\n mapped_bit = (x1_byte * 8) + i\n if x1_bit <= mapped_bit <= x2_bit:\n screen[x1_byte] = set_bit(screen[x1_byte], i)\n\n if x1_byte == x2_byte:\n return screen\n\n for i in range(x1_byte + 1, x2_byte):\n screen[i] |= 0b11111111\n\n for i in range(8):\n mapped_bit = (x2_byte * 8) + i\n if mapped_bit <= x2_bit:\n screen[x2_byte] = set_bit(screen[x2_byte], i)\n\n return screen"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute a vector consisting of the Euclidean norm of the rows of the 2D array.
|
def rownorm(a):
return np.sum(np.array(a)**2,axis=1)**.5
|
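The one-liner above is equivalent to `np.linalg.norm` with `axis=1`; a small self-contained check of that equivalence, repeating the definition from the row above:
import numpy as np

def rownorm(a):
    return np.sum(np.array(a)**2, axis=1)**.5

a = np.array([[3.0, 4.0], [1.0, 1.0]])
print(rownorm(a))                        # [5.         1.41421356]
print(np.linalg.norm(a, axis=1))         # same values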
[
"def norm(self) -> float:\n return np.sqrt(self.inner_product(self).real)",
"def euclidean_norm(self) -> float:\n return self._euclidean_norm",
"def l2_norm(x):\n return np.sqrt(np.dot(x.T, x))",
"def norm(self) -> ScalarFunction:\n a = sympy.Integer(0)\n for i in self._vec:\n a += i._f ** 2\n return ScalarFunction(sympy.sqrt(a))",
"def l2_norm(x):\n return np.linalg.norm(x)",
"def get_euclidean_distance(\n n_dimensional_numpy_array_0,\n n_dimensional_numpy_array_1):\n return np.linalg.norm(\n n_dimensional_numpy_array_0 -\n n_dimensional_numpy_array_1)",
"def numpy_l2norm2(x):\n if x.dtype is not np.float64:\n x = x.astype(np.float64)\n x = x.reshape(-1)\n return np.inner(x, x)",
"def _euclidean_distance(xi, X):\n distances = np.linalg.norm(xi - X, axis=1)\n return distances",
"def grad2Dnorm(self,arr):\n\n d_x = self.deriv(arr,axis=0)\n d_y = self.deriv(arr,axis=1)\n \n return np.sqrt(d_x**2+d_y**2)",
"def norm2(v):\n # return (v.T @ v) ** (0.5)\n return math.sqrt(sum(x*x for x in v))",
"def norm_array(q):\n assert(len(q) == 4)\n return np.sqrt(np.dot(q, q))",
"def norm(self, d):\n return np.sqrt(self.dx*self.dy*\n np.sum((d[self.ilo:self.ihi+1,self.jlo:self.jhi+1]**2).flat))",
"def euclidean_multidim(*simulated, observed):\n pts_sim = np.column_stack(simulated)\n pts_obs = np.column_stack(observed)\n d_multidim = np.sum((pts_sim - pts_obs)**2., axis=1)\n d_squared = np.sum(d_multidim, axis=1)\n d = np.sqrt(d_squared)\n\n return d",
"def _norm(x: torch.Tensor) ->torch.Tensor:\n return torch.abs(x[..., 0]) ** 2 + torch.abs(x[..., 1]) ** 2",
"def norm(vector):\r\n\treturn math.sqrt(default_scalar_prod(vector, vector))",
"def normalizeRows(x):\n\n ### YOUR CODE HERE\n # we first compute each row norm\n per_row_norm = np.sqrt(np.sum(np.square(x), axis=1)).reshape(-1,1)\n\n # now we divide each value of each row by the row's norm\n x = x / per_row_norm\n ### END YOUR CODE\n\n return x",
"def normalizeRows(x):\r\n ### START CODE HERE ### (≈ 2 lines of code)\r\n # Compute x_norm as the norm 2 of x. Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)\r\n x_norm = np.linalg.norm(x, ord = 2, axis = 1, keepdims = True)\r\n # Divide x by its norm.\r\n x = x/x_norm\r\n ### END CODE HERE ###\r\n return x",
"def Vector2Norm(v):\n norm = 0\n for element in v:\n norm = norm + element*element\n norm = math.sqrt(norm)\n return norm",
"def euclidean_distance_matrix(x):\n r = np.sum(x*x, 1)\n r = r.reshape(-1, 1)\n distance_mat = r - 2*np.dot(x, x.T) + r.T\n return distance_mat"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sum the outer products of the `us` and `vs`. Values are clipped into the range `[lo,hi]`. This is mainly used for computing weight updates in logistic regression layers.
|
def sumouter(us,vs,lo=-1.0,hi=1.0,out=None):
    result = out if out is not None else np.zeros((len(us[0]),len(vs[0])))
for u,v in zip(us,vs):
result += np.outer(np.clip(u,lo,hi),v)
return result
|
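Because each term is `np.outer(clip(u), v)`, the accumulated sum equals a single clipped matrix product, `clip(U).T @ V`. A small self-contained check of that equivalence on toy data (the shapes 5x3 and 5x4 are arbitrary):
import numpy as np

def sumouter(us, vs, lo=-1.0, hi=1.0, out=None):
    result = out if out is not None else np.zeros((len(us[0]), len(vs[0])))
    for u, v in zip(us, vs):
        result += np.outer(np.clip(u, lo, hi), v)
    return result

rng = np.random.default_rng(0)
us = 2.0 * rng.standard_normal((5, 3))   # 5 steps, 3 "inputs", some values outside [-1, 1]
vs = rng.standard_normal((5, 4))         # 5 steps, 4 "errors"
acc = sumouter(us, vs)
ref = np.clip(us, -1.0, 1.0).T @ vs      # same result as one matrix product
print(np.allclose(acc, ref))             # True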
[
"def outerprod(u,v):\n\tW = torch.einsum('...i,...j->...ij',u,v)\n\treturn W",
"def __mul__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___mul__(self, *args)",
"def uAvProductErrorProp(u, v, S):\n u = np.matrix(u).reshape(1,3)\n v = np.matrix(v).reshape(1,3)\n rows = S.shape[0]\n cols = S.shape[1]\n SUM = 0\n for i in range(rows):\n for j in range(cols):\n SUM += (u[0,i]*v[0,j]*S[i,j])**2\n return np.sqrt(SUM)",
"def dot_product(u, v):\n return sum([u * v for (u, v) in zip(u, v)])",
"def comprehensive_weight(U,V,D,N):\n u_sum = endpoint_sum(U, V, D, N)\n v_sum = endpoint_sum(V, U, D, N)\n return u_sum + v_sum",
"def uvw(self, *args, **kwargs):\n return _measures.measures_uvw(self, *args, **kwargs)",
"def bottom_stress(u, v):\n \n nx = len(u[0,:,0])\n ny = len(u[0,0,:])\n nz = 2\n Bx = numpy.zeros(((nz,nx,ny)))\n By = numpy.zeros(((nz,nx,ny)))\n k = 0.01\n Bx[0,:,:]= -k*u[0,:,:]*numpy.sqrt((u[0,:,:]**2)+(v[0,:,:]**2))\n By[0,:,:]= -k*v[0,:,:]*numpy.sqrt((u[0,:,:]**2)+(v[0,:,:]**2))\n return Bx, By",
"def mvmt(u, w, v):\n return torch.einsum(\"...ij,...j,...kj->...ik\", u, w, v)",
"def wind_stress(uw, vw):\n \n nx = len(uw[:,0])\n ny = len(uw[0,:])\n nz = 2 \n Fx = numpy.zeros(((nz,nx,ny)))\n Fy = numpy.zeros(((nz,nx,ny)))\n k = 0.001\n Fx[1,:,:]= k*uw[:,:]*numpy.sqrt((uw[:,:]**2)+(vw[:,:]**2))\n Fy[1,:,:]= k*vw[:,:]*numpy.sqrt((uw[:,:]**2)+(vw[:,:]**2))\n return Fx, Fy",
"def post_multiply(self, *args):\n return _vnl_vectorPython.vnl_vectorUS_post_multiply(self, *args)",
"def test_bureswasserstein_inner_product(self):\n base_point = gs.array([[1., 0., 0.],\n [0., 1.5, .5],\n [0., .5, 1.5]])\n tangent_vec_a = gs.array([[2., 1., 1.],\n [1., .5, .5],\n [1., .5, .5]])\n tangent_vec_b = gs.array([[1., 2., 4.],\n [2., 3., 8.],\n [4., 8., 5.]])\n metric = SPDMetricBuresWasserstein(3)\n result = metric.inner_product(tangent_vec_a, tangent_vec_b, base_point)\n expected = gs.array(4.)\n\n self.assertAllClose(result, expected)",
"def calc_uwvw(Couw, Covw, k0=1e-4):\n return uw, vw",
"def __mul__(self, *args):\n return _vnl_vectorPython.vnl_vectorUC___mul__(self, *args)",
"def _compute_volume_of_S_minus(self, u, v):\n # First compute the volume of [u, v]\n uv_vol = np.prod(v - u)\n # Now find all the active cells that dominate u and compute their\n L_plus_vol = self._compute_volume_of_L_plus(u)\n return uv_vol - L_plus_vol",
"def _compute_volume_of_L_plus(self, u):\n L_plus_vol = 0.\n for ln, un, _ in self.active_cells_dominated_by_lplus(u):\n L_plus_vol += np.prod(un - ln)\n return L_plus_vol",
"def inner(*args, ndim=1):\n axes = tuple(range(-ndim,0))\n return misc.sum_product(*args, axes_to_sum=axes)",
"def pre_multiply(self, *args):\n return _vnl_vectorPython.vnl_vectorUS_pre_multiply(self, *args)",
"def uxv_cart(u, v):\n w_x = (u[1]*v[2] - v[1]*u[2])\n w_y = (-u[0]*v[2] + u[2]*v[0])\n w_z = (u[0]*v[1] - u[1]*v[0])\n return np.array([w_x, w_y, w_z])",
"def inner_product(self, vec1, vec2):",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorUS_update(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all weights as a single vector. This is mainly a convenience function for plotting.
|
def allweights(self):
aw = list(self.weights())
weights,derivs,names = list(zip(*aw))
weights = [w.ravel() for w in weights]
derivs = [d.ravel() for d in derivs]
return np.concatenate(weights),np.concatenate(derivs)
|
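The flattening itself is just `ravel` plus `concatenate` over whatever `weights()` yields. A standalone illustration with two toy (weight, derivative, name) triples, not the class method itself:
import numpy as np

# toy triples mimicking what weights() yields
params = [
    (np.ones((2, 3)), np.zeros((2, 3)), "W1"),
    (np.ones(4),      np.zeros(4),      "b1"),
]
weights, derivs, names = zip(*params)
flat_w = np.concatenate([w.ravel() for w in weights])
flat_d = np.concatenate([d.ravel() for d in derivs])
print(flat_w.shape, flat_d.shape, names)   # (10,) (10,) ('W1', 'b1')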
[
"def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]",
"def weights ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )",
"def get_weights(self):\r\n return self.weights # returning the weight matrix\r",
"def getWeight(self):\n return np.concatenate([self.weight.ravel()] * 4)",
"def weights(self) -> np.ndarray:\n self._check_fitted()\n return np.asarray(self._fit_result.x)",
"def showWeights(self):\n print 'W1: ' + str(self.params[0].get_value().shape)\n print self.params[0].get_value()\n print 'b1: ' + str(self.params[1].get_value().shape)\n print self.params[1].get_value()\n print 'W2: ' + str(self.params[2].get_value().shape)\n print self.params[2].get_value()\n print 'b2: ' + str(self.params[3].get_value().shape)\n print self.params[3].get_value()",
"def weight(self):\n vec = np.array([[reqt.weight for reqt in self.requirements]])\n return vec.T # Return as column vector",
"def get_weights(self, key):\n return np.array([entry.data[\"weights\"][key] for entry in self._entries])",
"def get_all_weights(self):\n\n # add weights for each layer if layer is a Dense layer and return the list\n return [l.weights for l in self.layers if isinstance(l, Dense)]",
"def get_weights(self):\n\n weights = []\n for layer in self.NN:\n for node in layer:\n for weight in node.weights:\n weights.append(weight)\n return weights",
"def get_recurrent_weights(self):\n return npify(self.w_rec.weight)",
"def getweights(self, axons: List[opentensor_pb2.Axon]) -> List[float]:\n result = []\n for ax in axons:\n if ax.identity not in self._weights:\n result.append(0.0)\n else:\n result.append(self._weights[ax.identity])\n return result",
"def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)",
"def sample_weights(self):\n return self.to_dataframe()[\"sample_weight\"].values",
"def get_weights(w = 1.7, length = 20):\n\n return [w**i for i in range(length, 0, -1)]",
"def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ",
"def model_weights_as_vector(model):\r\n weights_vector = []\r\n\r\n for layer in model.layers: # model.get_weights():\r\n if layer.trainable:\r\n layer_weights = layer.get_weights()\r\n for l_weights in layer_weights:\r\n vector = numpy.reshape(l_weights, newshape=(l_weights.size))\r\n weights_vector.extend(vector)\r\n\r\n return numpy.array(weights_vector)",
"def extract_weights_and_values(self):\r\n self.weights = [None] * (self.num_items + 1)\r\n self.values = [None] * (self.num_items + 1)\r\n for item in self.items:\r\n self.weights[item['index'] + 1] = item['weight']\r\n self.values[item['index'] + 1] = item['value']",
"def get_weight_matrix(self):\n return self.W"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the weights using the deltas computed in the last forward/backward pass. Subclasses need not implement this; they should implement the `weights` method instead.
|
def update(self):
if not hasattr(self,"verbose"):
self.verbose = 0
if not hasattr(self,"deltas") or self.deltas is None:
self.deltas = [np.zeros(dw.shape) for w,dw,n in self.weights()]
for ds,(w,dw,n) in zip(self.deltas,self.weights()):
ds.ravel()[:] = self.momentum * ds.ravel()[:] + self.learning_rate * dw.ravel()[:]
w.ravel()[:] += ds.ravel()[:]
if self.verbose:
LOG.info("{} {} {}".format(n, (np.amin(w), np.amax(w)), (np.amin(dw), np.amax(dw))))
|
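Stripped of the class bookkeeping, the rule above is plain momentum: `delta = momentum*delta + learning_rate*dw`, then `w += delta`. A standalone numpy illustration of that rule on a single toy parameter vector (the buffer here plays the role of `self.deltas`, it is not the class attribute):
import numpy as np

learning_rate, momentum = 0.1, 0.9
w = np.zeros(3)                 # toy parameter vector
delta = np.zeros_like(w)        # persistent momentum buffer

for step in range(3):
    dw = np.array([1.0, -2.0, 0.5])          # pretend this came from the backward pass
    delta = momentum * delta + learning_rate * dw
    w += delta
    print(step, w)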
[
"def update_weights(self):\n for layer in xrange(len(self.weights)):\n self.update_weights_layer(layer)",
"def _update_weights(self, _batch_weight_gradients):\n for _weight_gradient in _batch_weight_gradients:\n _weight_gradient = list(reversed(_weight_gradient))\n for _layer in reversed(range(len(self._layers))):\n self._layers[_layer].update_weights(-self._learning_rate*_weight_gradient[_layer])",
"def update_weights_layer(self, layer):\n self.weights[layer] += self.delta_w[layer]\n self.__prev_update[layer][:] = self.delta_w[layer][:]",
"def updateWeights(self, initialInputs):\n self.firstLayer.updateWeight(initialInputs)",
"def update_weights(self) :\n for layer in self.layers :\n try:\n layer.update_weights()\n except Exception as e :\n pass",
"def update_weights(self):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for k, optimizer in self.optimizers.items():\n self.models[k].before_update()\n optimizer.step()\n self.models[k].inc_step()",
"def update_weights(self) -> None:\n for neuron in self.__neurons__:\n neuron.update_weight(self.__inputs__)",
"def update_weights(self, gradients, learning_rate):\n for weights, gradient in zip(self.weights, gradients):\n assert gradient.shape == weights.shape\n weights += gradient * learning_rate;",
"def update_wights(self, weights: np.ndarray, data_input: np.ndarray, layer_outputs: np.ndarray, weight_error_matrix: np.ndarray, alpha=.1):\n for layer_index in range(len(weights)-1, -1, -1):\n layer_weights = weights[layer_index]\n\n # get the previous layer INPUTS\n if layer_index == 0:\n layer_output = np.array(data_input)\n else:\n # get the previous layer INPUTS\n layer_output = layer_outputs[layer_index - 1]\n\n layer_error = weight_error_matrix[layer_index]\n\n # Add BIAS to input values, in last position, as done in estimating\n layer_output = np.append(layer_output, [1])\n\n # weight_delta = layer_output * layer_error\n layer_output = layer_output.reshape(1, len(layer_output))\n layer_error = layer_error.reshape(1, len(layer_error))\n weight_delta = layer_output * layer_error.transpose()\n\n weight_delta = alpha * weight_delta\n\n weights[layer_index] = layer_weights + weight_delta\n\n return weights",
"def adjustWeight(self, deltaWeight):\n self.lastWeightDelta = deltaWeight\n #before = self.weight\n self.weight = self.weight + (deltaWeight * self.inputNeuron.getOutput())\n #print 'adjusting %s with delta %s: %s => %s' % (self, deltaWeight, before, self.weight)",
"def update_weights(self, network, l_rate, batch_size):\n for layer in range(len(network)):\n layer_data = network[layer]\n for neuron_id in range(len(layer_data['layer'])):\n for weight_id in range(len(layer_data['layer'][neuron_id]['weights'])):\n # update weight\n layer_data['layer'][neuron_id]['weights'][weight_id] += l_rate * (layer_data['layer'][neuron_id]['delta_weights'][weight_id])/batch_size\n # initialise delta_weight\n layer_data['layer'][neuron_id]['delta_weights'][weight_id] = 0",
"def update_node_weights(self, inputs):\n\n #Iterates through each node in each layer\n for i in range(len(self.NN)):\n for node in self.NN[i]:\n #Iterates through each value in the inputs and assigns weights\n for j in range(len(inputs)):\n #Multiplies the weight gradient by the learning rate and input value\n weight_update = self.learning_rate * node.delta_weight * inputs[j]\n #Adjusts the weight with momentum\n node.weights[j] += weight_update + node.momentum[j]\n #Adjusts the momentum value\n node.momentum[j] = weight_update\n #Updates the bias node\n node.weights[-1] += self.learning_rate * node.delta_weight\n #Sets the new inputs to the output vector of current layer\n inputs = [node.output for node in self.NN[i]]",
"def update_weights(self, energies, weights):\n for e in energies: # this is basically a reduce call, but there's no real reason not to keep it like this\n Vref = self._compute_vref(e, weights)\n self.reference_potentials.append(Vref) # a constant time operation\n new_wts = np.exp(-1.0 * (e - Vref) * self.time_step)\n weights *= new_wts\n return weights",
"def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)",
"def updateWeights(self, inActs, alpha, delta):\n totalModification = 0\n inActs = [1.0] + inActs\n for i, weight in enumerate(self.weights):\n change = alpha * delta * inActs[i]\n old = self.weights[i]\n self.weights[i] += change\n totalModification += abs(change)\n return totalModification",
"def update_weight(self, learn_rate):\n pass",
"def backward_pass(self, w, delta):\n batch_size = float(delta.shape[0])\n self.delta = np.multiply(np.dot(w, delta.T).T, self.act.act_der(self.z))\n self.db = np.sum(self.delta, axis = 0, keepdims = True) / batch_size\n self.dw = np.dot(self.x.T, self.delta) / batch_size\n return self.delta",
"def update(self):\n self.weight_mom[self.index] = self.sub_weight_mom\n self.weight[self.index] = self.sub_weight",
"def update_weights(self, gradients, rewards):\n\n for i in range(len(gradients)):\n self.theta += self.ALPHA * gradients[i] * sum([r * (self.GAMMA ** t) for t, r in enumerate(rewards[i:])])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the internal state array for the last forward propagation. This is mostly used for visualizations.
|
def states(self):
return np.array(self.state[:self.last_n])
|
[
"def get_state(self) -> np.ndarray:\n return np.copy(self.state)",
"def get_state(self) -> numpy.ndarray:\n env_data = [\n bool(self.gym_env.lander.awake),\n bool(self.gym_env.game_over),\n copy.copy(self.gym_env.prev_shaping),\n copy.copy(self.gym_env.prev_reward),\n bool(self.gym_env.legs[0].ground_contact),\n bool(self.gym_env.legs[1].ground_contact),\n ]\n state = get_env_state(self.gym_env) + env_data\n return numpy.array((state, None), dtype=object)",
"def get_internal_states(self):\n joint_pos = []\n joint_vel = []\n for joint_id in range(len(self.joints)):\n joint_name = self.joints[joint_id]\n joint_state = self._agent.get_joint_state(joint_name)\n joint_pos.append(joint_state.get_positions())\n joint_vel.append(joint_state.get_velocities())\n joint_pos = np.array(joint_pos).flatten()\n joint_vel = np.array(joint_vel).flatten()\n # pos of continous joint could be huge, wrap the range with sin and cos.\n joint_pos_sin = np.sin(joint_pos)\n joint_pos_cos = np.cos(joint_pos)\n internal_states = np.concatenate(\n (joint_pos_sin, joint_pos_cos, joint_vel), axis=0)\n return internal_states",
"def get_current_state(self):\n active = self.state[self.state[:,2] == 1]\n return {'remain_time': np.transpose(self.state[:,0:1])[0], 'remain_energy': np.transpose(self.state[:, 1:2])[0]}",
"def initial_state(self):\r\n return [None for _ in range(self.n_layers)]",
"def get_normalized_state(self) -> np.array:\n return np.array(self.env.state)",
"def getstate(self):\n\t\timport numpy as np\n\t\t# initialize state 2D array\n\t\tstate=np.zeros(self.shape)\n\t\t# cycle on cells\n\t\tfor (id, cell) in self.cells.iteritems():\n\t\t\tstate[id[0], id[1]]=cell.state\n\t\t# output\n\t\treturn state",
"def __get_state__(self):\n\t\t## unroll all the parameters\n\t\tgates = self._gates\n\t\t\n\t\tThetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']] \n\t\tparams = [weight for gate in gates for weight in gate.__get_state__()['params']]\n\n\t\tprint \"Total number of parameters: %d \" % len(params) \n\n\t\treturn dict(Thetas=Thetas,params=params)",
"def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n bvis_list.append(bvis.get_value(borrow=True))\n \n return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list, self.corruption_levels, self.layer_types, self.use_loss, self.dropout_rates, self.opt_method)",
"def get_state_sequence(self, x):\n\t\tT = len(x)\n\t\tdelta = np.zeros((T, self.hidden_states))\n\t\tpsi = np.zeros((T, self.hidden_states))\n\t\tdelta[0] = np.log(self.initial_state_distribution) + np.log(self.output_distribution[:, x[0]])\n\t\tfor t in range(1, T):\n\t\t\tfor j in range(self.hidden_states):\n\t\t\t\tdelta[t, j] = np.max(delta[t - 1] + np.log(self.state_transition_matrix[:, j])) + np.log(self.output_distribution[j, x[t]])\n\t\t\t\tpsi[t, j] = np.argmax(delta[t - 1] + np.log(self.state_transition_matrix[:, j]))\n\n\t\t# backtrack\n\t\tstates = np.zeros(T, dtype=np.int32)\n\t\tstates[T - 1] = np.argmax(delta[T - 1])\n\t\tfor t in range(T - 2, -1, -1):\n\t\t\tstates[t] = psi[t + 1, states[t + 1]]\n\t\treturn states",
"def previous_decision_matrix(self):\n return np.array(\n [member_clf.predict(self.previous_X) for member_clf in self.ensemble_]\n )",
"def getCurrentModelState(self):\n return list(value(v, exception=False) for v in self.data.all_variables)",
"def get_states(self):\n states = np.zeros(\n self.current_key,\n dtype=[\n (\"time\", np.float64),\n (\"pose\", np.float32, 3),\n (\"dr_pose3\", np.float32, 6),\n (\"cov\", np.float32, 9),\n ],\n )\n\n # Update all\n values = self.isam.calculateEstimate()\n for key in range(self.current_key):\n pose = values.atPose2(X(key))\n cov = self.isam.marginalCovariance(X(key))\n self.keyframes[key].update(pose, cov)\n\n t0 = self.keyframes[0].time\n for key in range(self.current_key):\n keyframe = self.keyframes[key]\n states[key][\"time\"] = (keyframe.time - t0).to_sec()\n states[key][\"pose\"] = g2n(keyframe.pose)\n states[key][\"dr_pose3\"] = g2n(keyframe.dr_pose3)\n states[key][\"cov\"] = keyframe.transf_cov.ravel()\n return states",
"def get_env_state(self) -> np.ndarray:\n return self.env.env.state",
"def state_updates(self):\n state_updates = []\n for layer in self.layers:\n if layer.stateful:\n state_updates += layer.updates\n return state_updates",
"def get_absorbing_state(self) -> np.ndarray:\n obs = np.zeros(self.observation_space.shape)\n obs[-1] = 1\n return obs",
"def state_copy(self):\n list1 = []\n list2 = []\n for x in self.state[0]:\n list1.append(x)\n for x in self.state[1]:\n list2.append(x)\n ret = []\n ret.append(list1)\n ret.append(list2)\n return ret",
"def final_state(self) -> List[int]:\n return self.__generate_final_state(initial_state=self.initial_state)",
"def get_active_state(self):\n # append id of evse to state matrix\n idx = np.transpose([np.arange(self.n_EVs)])\n idxState = np.append(self.state, idx, axis = 1)\n active = idxState[self.state[:,2] == 1]\n return {'remain_time': np.transpose(active[:,0:1])[0], 'remain_energy': np.transpose(active[:, 1:2])[0], 'index': np.transpose(active[:, 3:4])[0]}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Allocate space for the internal state variables. `n` is the maximum sequence length that can be processed.
|
def allocate(self,n):
ni,ns,na = self.dims
vars = "cix ci gix gi gox go gfx gf"
vars += " state output gierr gferr goerr cierr stateerr outerr"
for v in vars.split():
setattr(self,v,np.nan*np.ones((n,ns)))
self.source = np.nan*np.ones((n,na))
self.sourceerr = np.nan*np.ones((n,na))
|
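The allocation trick is preallocating NaN-filled `(n, size)` buffers and attaching them by name with `setattr`, so any stale or unwritten entries surface as NaN rather than silently reused values. A small standalone sketch of the same pattern; the `Buffers` class and its variable names are arbitrary, not the LSTM's:
import numpy as np

class Buffers:
    def allocate(self, n, size):
        # one NaN-filled (n, size) array per named state variable
        for name in "state output error".split():
            setattr(self, name, np.nan * np.ones((n, size)))

b = Buffers()
b.allocate(n=5, size=3)
print(b.state.shape, np.isnan(b.output).all())   # (5, 3) True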
[
"def alloc(n):\n if Coefficients._nvx:\n nvx = Coefficients._nvx\n else:\n nvx = n\n Coefficients._aP = np.zeros(nvx)\n Coefficients._aE = np.zeros(nvx)\n Coefficients._aW = np.zeros(nvx)\n Coefficients._Su = np.zeros(nvx)\n Coefficients._Sp = np.zeros(nvx)\n Coefficients._aEE = np.zeros(nvx)\n Coefficients._aWW = np.zeros(nvx)",
"def reserve(self, n):\n assert n > 0, \"Argument `n` must be a positive integer\"\n IdFactory._seqno_pool.extend(self._next_id_fn(n))",
"def __init__(self, n):\n self.iter_status = 0\n self.max_val = n",
"def __init__(self, n):\r\n CSP.__init__(self, range(n), UniversalDict(range(n)),\r\n UniversalDict(range(n)), queen_constraint)\r\n update(self, rows=[0]*n, ups=[0]*(2*n - 1), downs=[0]*(2*n - 1))",
"def __init__(self, n):\n self.sfs = np.zeros(n+1,dtype=int)\n self.n_hap = n\n\n\n self._pi = None\n self._td = None\n self._S = None\n self._FWH = None\n self._ns = None\n self._theta_h = None",
"def add_spill_vars(self, n: int):\n for i in range(n):\n self.declare_variable(\n f\"spill-var-{i}\",\n types.Int.fromsize(8)\n )",
"def allocate_memory():\n create_execution_object_pipelines()\n tidl.allocate_memory(EOPS)",
"def __init__(self, n):\n self.n = n\n self.parent = [x for x in range(n)]",
"def __init__(self,n:int) -> None:\r\n self.vertices = [None]*n\r\n for i in range(n):\r\n self.vertices[i] = Vertex(i)",
"def _create_state_init_parameters(self):\n self.init_ws, self.init_bs, self.init_norms = [], [], []\n # shallow copy of the state shapes:\n state_shapes = list(self.rnn_pre_attention.state_shape)\n if self.rnn_post_attention:\n state_shapes += self.rnn_post_attention.state_shape\n for state_idx, (_, init_num_hidden) in enumerate(state_shapes):\n self.init_ws.append(mx.sym.Variable(\"%senc2decinit_%d_weight\" % (self.prefix, state_idx)))\n self.init_bs.append(mx.sym.Variable(\"%senc2decinit_%d_bias\" % (self.prefix, state_idx)))\n if self.config.layer_normalization:\n self.init_norms.append(layers.LayerNormalization(prefix=\"%senc2decinit_%d_norm\" % (self.prefix,\n state_idx)))",
"def __init__(self, n, init_list = None):\n self._numElts = n\n if init_list is None:\n self._flow = [Eisen() for i in range(n)]\n else:\n assert(len(init_list) == n)\n self._flow = list(init_list)",
"def _init_state_variables(self) -> None:\n for name, type_info in self.STATE_VARIABLE_DEFINITIONS.items():\n self.create_state_var(name, type_info)",
"def _set_arrays_alloc(self):\n\n if not self.v_inplace:\n self.v = np.zeros(self.n)\n\n if not self.e_inplace:\n self.e = np.zeros(self.n)",
"def init_tot_n(n=0):\n global tot_n\n tot_n = n",
"def _initialize_parameters(state_machine, n_features):\n return np.zeros((state_machine.n_states \n + state_machine.n_transitions,\n n_features))",
"def __init__(self, n):\n self.n = n\n self.numrows = 100\n self.data = np.empty((self.numrows, self.n), np.float)\n self.numpoints = 0",
"def _update_nof_storage(self):\n nof = self.nof\n\n # save all values\n \n #\n # THIS WILL bE TRANSPLANTED TO OPEN SYSTEMS\n #\n save_A2 = self._A2\n if self._is_transformed:\n save_A4 = self._A4\n \n \n save_cfunc = self.cfuncs\n save_lambdas = self.lambdas\n save_where = self.where\n save_cofts = self._cofts\n\n # Add one place in the matrix\n self.nof += 1\n\n # reinitiate\n self._initiate_storage()\n\n # refill\n \n #\n # THIS WILL BE TRANSPLANTED TO OPEN SYSTEM\n #\n self._A2[:,:,0:nof+1] = save_A2\n if self._is_transformed:\n self._A4[:,:,:,:,0:nof] = save_A4\n \n\n\n for i in range(nof+1):\n self.cfuncs[i] = save_cfunc[i]\n self.lambdas[i] = save_lambdas[i]\n self.where[i] = save_where[i]\n self._cofts[0:nof+1,:] = save_cofts",
"def __init__(self, n, value):\n self.repeats = n\n self.value = value",
"def set_size(self, n: 'int') -> \"void\":\n return _vnl_diag_matrixPython.vnl_diag_matrixLD_set_size(self, n)",
"def set_size(self, n: 'int') -> \"void\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_set_size(self, n)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform forward propagation of activations and update the internal state for a subsequent call to `backward`. Since this performs sequence classification, `xs` is a 2D array, with rows representing input vectors at each time step. Returns a 2D array whose rows represent output vectors for each input vector.
|
def forward(self,xs):
ni,ns,na = self.dims
assert len(xs[0])==ni
n = len(xs)
self.last_n = n
N = len(self.gi)
if n>N: raise RecognitionError("input too large for LSTM model")
self.reset(n)
forward_py(n,N,ni,ns,na,xs,
self.source,
self.gix,self.gfx,self.gox,self.cix,
self.gi,self.gf,self.go,self.ci,
self.state,self.output,
self.WGI,self.WGF,self.WGO,self.WCI,
self.WIP,self.WFP,self.WOP)
assert not np.isnan(self.output[:n]).any()
return self.output[:n]
|
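The `forward_py` kernel implements the full peephole-LSTM recurrence over the preallocated buffers. The gate arithmetic it performs can be illustrated with a much smaller, self-contained step function; the sketch below is a simplified standard LSTM step without peephole weights, so it shows the idea rather than the kernel itself, and every name in it is illustrative.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, h_prev, c_prev, Wi, Wf, Wo, Wc):
    v = np.concatenate([np.ones(1), h_prev, x])   # bias term, previous output, current input
    gi = sigmoid(Wi @ v)        # input gate
    gf = sigmoid(Wf @ v)        # forget gate
    go = sigmoid(Wo @ v)        # output gate
    ci = np.tanh(Wc @ v)        # candidate cell state
    c = gf * c_prev + gi * ci   # new cell state
    h = go * np.tanh(c)         # new output
    return h, c

ni, ns = 4, 3                                      # input size, state size
rng = np.random.default_rng(0)
Wi, Wf, Wo, Wc = (0.1 * rng.standard_normal((ns, 1 + ns + ni)) for _ in range(4))
h, c = np.zeros(ns), np.zeros(ns)
xs = rng.standard_normal((5, ni))                  # a length-5 input sequence
outputs = []
for x in xs:
    h, c = lstm_step(x, h, c, Wi, Wf, Wo, Wc)
    outputs.append(h)
print(np.array(outputs).shape)                     # (5, 3): one output vector per time step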
[
"def forward_states(X, wx, wRec):\n # Initialise the matrix that holds all states for all input sequences.\n # The initial state s0 is set to 0.\n S = np.zeros((X.shape[0], X.shape[1]+1))\n # Use the recurrence relation defined by update_state to update the \n # states trough time.\n for k in range(0, X.shape[1]):\n # S[k] = S[k-1] * wRec + X[k] * wx\n S[:,k+1] = update_state(X[:,k], S[:,k], wx, wRec)\n return S",
"def forward_propagate(self, inputs):\r\n \r\n \r\n # the input layer activation is just the input itself\r\n activations = inputs\r\n self.activations[0] = activations\r\n \r\n # iterate through the network layers\r\n for i, w in enumerate(self.weights):\r\n # calculate matrix multiplication between previous activation and weight matrix\r\n net_inputs = np.dot(activations, w)\r\n \r\n # calculate the activations\r\n activations = self._sigmoid(net_inputs)\r\n \r\n self.activations[i + 1] = activations\r\n \r\n return activations",
"def _forward_step(self, inputs: list):\n self._model.eval()\n with torch.no_grad():\n return self._model(inputs)",
"def forward(self, x):\n \n #forward pass\n for i in range(self.num_layers):\n #linear layer\n x = self.layer[i].forward(x)\n \n #activation\n x = self.activation[i].forward(x)\n\n return x",
"def forward(self, xs, update_ema: bool = False, **kwargs):\n # Perform a forward pass with the dense layer\n net_output = self._net(xs.view(xs.size(0), -1))\n\n # Add the layer output to the kwargs dict to be passed to the decision nodes in the tree\n # Split (or chunk) the output tensor of shape (batch_size, num_decision_nodes) into individual tensors\n # of shape (batch_size, 1) containing the logits that are relevant to single decision nodes\n kwargs['linear_output'] = net_output.chunk(net_output.size(1), dim=1)\n # Add the mapping of decision nodes to dense layer outputs to the kwargs dict to be passed to the decision nodes\n # in the tree\n kwargs['out_map'] = dict(self._out_map) # Use a copy of self._out_map, as the original should not be modified\n\n # Perform a forward pass through the soft decision tree\n return super(SoftDecisionTree, self).forward(xs, update_ema, **kwargs)",
"def forward_propagation(self, x0, u_array):\n traj_array = [x0]\n\n for t, u in enumerate(u_array):\n traj_array.append(self.plant_dyn(traj_array[-1], u, t, self.aux))\n\n return traj_array",
"def feedforward(self, data):\n activations = data\n for i in range(2, self.L + 1):\n activations = sigmoid((self.weights[i] @ activations) + self.biases[i])\n return activations",
"def propagate(self):\n for sample in self.input_value:\n # perform forward propagation on one sample\n layer_output = sample\n for l in self.layers:\n layer_output = l.activate(layer_output)\n self.forward_propagation_output.append(layer_output) #stores propagation output value of one sample\n return self.forward_propagation_output",
"def forward(self,X,start=None,end=None,mode='test'):\n X=X.astype(self.dtype)\n if start is None: start=0\n if end is None: end=len(self.conv_params)+1\n layer_caches=[]\n prev_a=X\n for i in xrange(start,end+1):\n i1=i+1\n if 0<=i<len(self.conv_params):\n #This is a conv layer\n w,b=self.params['W%d' %i1],self.params['b%d' %i1]\n gamma,beta=self.params['gamma%d' %i1],self.params['beta%d' %i1]\n conv_param=self.conv_params[i]\n bn_param=self.bn_params[i]\n bn_param['mode']=mode\n next_a,cache=conv_bn_relu_forward(prev_a,w,b,gamma,beta,bn_param)\n elif i==len(self.conv_params):\n #This is a the fully-connected hidden layer\n w,b=self.params['W%d' %i1],self.params['b%d' %i1]\n bn_param=self.bn_params[i]\n bn_param['mode']=mode\n next_a,cache=affine_bn_relu_forward(prev_a,w,b,gamma,beta,bn_param)\n elif i==len(self.conv_params)+1:\n #This is the last fully-connected layer that produces scores\n w,b=self.params['W%d' %i1],self.params['b%d' %i1]\n next_a,cache=affine_forward(prev_a,w,b)\n else:\n raise ValueError('Invalid layer index %d' %i)\n \n layer_caches.append(cache)\n prev_a=next_a\n \n out=prev_a\n cache=(start,end,layer_caches)\n return out,cache",
"def forwardPropagate(self, inputs, noChange=False) -> list:\n if not noChange:\n # Update inputs for backward propagation\n self.inputs = inputs\n\n currLayerInputs = inputs\n forwardPropNetwork = deepcopy(self.network) if noChange else self.network\n for l in range(len(forwardPropNetwork)):\n nextLayerInputs = []\n for node in forwardPropNetwork[l]:\n # Initialize output as bias value\n output = node[self.WEIGHTS][-1]\n # Compute dot product between weights and current layer inputs\n for i in range(len(node[self.WEIGHTS]) - 1):\n output += node[self.WEIGHTS][i] * currLayerInputs[i]\n # If iterating on last layer, always use sigmoid activation function\n if l == len(forwardPropNetwork) - 1:\n node[self.OUTPUT] = sigmoid(output)\n else:\n node[self.OUTPUT] = self.activationFunction(output)\n nextLayerInputs.append(node[self.OUTPUT])\n # Prep next layer's inputs\n currLayerInputs = nextLayerInputs\n # Returns the last layer's outputs\n return currLayerInputs",
"def forward(self, inputs: List[float]) -> List[float]:\n self.__inputs__ = inputs\n return [neuron.compute_output(self.__inputs__)\n for neuron in self.__neurons__]",
"def _forward_prediction(self):\n state = self._inverse_embedding(self.state_forward)\n x = concatenate([self.action, state])\n x = self.dense_fw_1(x)\n x = self.dense_fw_2(x)\n x = self.flatten(x)\n\n return x",
"def update_forwards(self):\n\n self.args = tuple(arg.forwarded for arg in self.args)\n other_deps = self.other_deps\n self.other_deps = OrderedSet()\n for op in other_deps:\n self.add_other_dep(op)\n self.initializers = [op.forwarded for op in self.initializers]",
"def backpropagation(activations_list, weights_list, labels):\n assert len(activations_list) == len(weights_list)\n N = len(activations_list)\n deltas_list = [None] * N\n deltas_list[-1] = activations_list[-1] - labels\n for n in range(N)[:-1]:\n m = -(n+1)\n comp1 = np.matmul(deltas_list[m], weights_list[m])\n comp2 = activations_list[m-1] * (1 - activations_list[m-1])\n deltas_list[m-1] = comp1 * comp2\n return(deltas_list)",
"def update(self, input_values, forward_only=True):\n output = fct.normalize_function(input_values)\n #output = input_values\n if not forward_only:\n derivatives = [] # collection of the derivatives of the act functions\n outputs = [output] # passed through act. func.\n\n for i, weight_layer in enumerate(self.weights):\n # Loop over the network layers and calculate the output\n signal = np.dot(output, weight_layer)\n # if i >0:\n # for j in range(signal.shape[1]):\n # signal[:,j] = np.sum[signal[:,:(j+1)]]\n\n output = self.layers[i][1](signal)\n\n if not forward_only:\n outputs.append(output)\n derivatives.append(\n self.layers[i][1](signal, derivative=True).T) # the derivative used for weight update\n\n if not forward_only:\n return outputs, derivatives\n\n return output",
"def forward_propagation(X, synapses):\n layers = [X]\n for i in xrange(len(synapses)):\n layers.append(sigmoid(np.dot(layers[i], synapses[i])))\n return layers",
"def forward_activations(self, x0):\n\n x1 = self.model.conv1_1(x0)\n x2 = self.model.relu1_1(x1)\n x3 = self.model.conv1_2(x2)\n x4 = self.model.relu1_2(x3)\n x5 = self.model.pool1(x4)\n x6 = self.model.conv2_1(x5)\n x7 = self.model.relu2_1(x6)\n x8 = self.model.conv2_2(x7)\n x9 = self.model.relu2_2(x8)\n x10 = self.model.pool2(x9)\n x11 = self.model.conv3_1(x10)\n x12 = self.model.relu3_1(x11)\n x13 = self.model.conv3_2(x12)\n x14 = self.model.relu3_2(x13)\n x15 = self.model.conv3_3(x14)\n x16 = self.model.relu3_3(x15)\n x17 = self.model.pool3(x16)\n x18 = self.model.conv4_1(x17)\n x19 = self.model.relu4_1(x18)\n x20 = self.model.conv4_2(x19)\n x21 = self.model.relu4_2(x20)\n x22 = self.model.conv4_3(x21)\n x23 = self.model.relu4_3(x22)\n x24 = self.model.pool4(x23)\n x25 = self.model.conv5_1(x24)\n x26 = self.model.relu5_1(x25)\n x27 = self.model.conv5_2(x26)\n x28 = self.model.relu5_2(x27)\n x29 = self.model.conv5_3(x28)\n x30 = self.model.relu5_3(x29)\n x31_preflatten = self.model.pool5(x30)\n x31 = x31_preflatten.view(x31_preflatten.size(0), -1)\n x32 = self.model.fc6(x31)\n x33 = self.model.relu6(x32)\n x34 = self.model.dropout6(x33)\n x35 = self.model.fc7(x34)\n x36 = self.model.relu7(x35)\n # x37 = self.model.dropout7(x36)\n # x38 = self.model.fc8(x37)\n\n return x36",
"def forward(self, ops):\n\n if self.train_state:\n self.forward_ops.append(ops)",
"def forward(self, observation):\n # Do not modify this function\n forward_matrix = numpy.zeros((len(self.states), len(observation)))\n\n for oi, obs in enumerate(observation):\n for si, state in enumerate(self.states):\n if oi==0:\n forward_matrix[si, oi] = self.transitions['#'][state] * self.emissions[state][obs]\n else:\n for pi, prevstate in enumerate(self.states):\n forward_matrix[si, oi] += forward_matrix[pi, oi-1] * self.transitions[prevstate][state]\n\n forward_matrix[si, oi] *= self.emissions[state][obs] # factor out common emission prob\n\n return forward_matrix"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
An MLP implementation by stacking two `Logreg` networks on top of each other.
|
def MLP1(Ni,Ns,No):
lr1 = Logreg(Ni,Ns)
lr2 = Logreg(Ns,No)
stacked = Stacked([lr1,lr2])
return stacked
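# Note (illustrative, not part of the original code): `Logreg` is assumed to be
# the surrounding library's logistic layer (a linear map followed by a sigmoid),
# so Stacked([Logreg(Ni,Ns), Logreg(Ns,No)]) amounts to a two-layer MLP with Ns
# sigmoid hidden units and No sigmoid output units.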
|
[
"def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[:,-1]\n self.model = sm.Logit(self.y, self.X).fit(disp=False)",
"def stack_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n logreg = LogisticRegression()\n rfc = RandomForestClassifier(n_estimators=200, max_depth=20)\n\n print(\"Stacking using Random Forest:\")\n print(\"-----------------------------\")\n stacking_accuracy_logreg, train_stack_logreg_pred, val_stack_logreg_pred, test_stack_logreg_pred = \\\n stack_models(train_probas, val_probas, y_train, y_val, logreg, test_probas, y_test)\n print(\"train, validation and test accuracy scores:\", stacking_accuracy_logreg)\n\n print(\"Stacking using Logistic Regression:\")\n print(\"-----------------------------------\")\n stacking_accuracy_rfc, train_stack_rfc_pred, val_stack_rfc_pred, test_stack_rfc_pred = \\\n stack_models(train_probas, val_probas, y_train, y_val, rfc, test_probas, y_test)\n print(\"train, validation and test accuracy scores:\", stacking_accuracy_rfc)\n\n np.save(os.path.join('models', 'train_stack_logreg_pred.npy'), train_stack_logreg_pred)\n np.save(os.path.join('models', 'val_stack_logreg_pred.npy'), val_stack_logreg_pred)\n np.save(os.path.join('models', 'test_stack_logreg_pred.npy'), test_stack_logreg_pred)\n\n np.save(os.path.join('models', 'train_stack_rfc_pred.npy'), train_stack_logreg_pred)\n np.save(os.path.join('models', 'val_stack_rfc_pred.npy'), val_stack_logreg_pred)\n np.save(os.path.join('models', 'test_stack_rfc_pred.npy'), test_stack_logreg_pred)",
"def forward_prop(params):\n # Neural network architecture\n n_inputs = 4\n n_hidden = 20\n n_classes = 3\n\n # Roll-back the weights and biases\n W1 = params[0:80].reshape((n_inputs, n_hidden))\n b1 = params[80:100].reshape((n_hidden,))\n W2 = params[100:160].reshape((n_hidden, n_classes))\n b2 = params[160:163].reshape((n_classes,))\n\n # Perform forward propagation\n z1 = X.dot(W1) + b1 # Pre-activation in Layer 1\n a1 = np.tanh(z1) # Activation in Layer 1\n z2 = a1.dot(W2) + b2 # Pre-activation in Layer 2\n logits = z2 # Logits for Layer 2\n\n # Compute for the softmax of the logits\n exp_scores = np.exp(logits)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n # Compute for the negative log likelihood\n N = 150 # Number of samples\n corect_logprobs = -np.log(probs[range(N), y])\n loss = np.sum(corect_logprobs) / N\n\n return loss",
"def reconstruct_loglayer(self, n_outs = 10):\n # We now need to add a logistic layer on top of the MLP\n self.logLayer = LogisticRegression(\n input=self.dA_layers[-1].output,\n n_in=self.dA_layers[-1].n_hidden, n_out=n_outs)\n\n self.params.extend(self.logLayer.params)\n # construct a function that implements one step of finetunining\n\n # compute the cost for second phase of training,\n # defined as the negative log likelihood\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\n # compute the gradients with respect to the model parameters\n # symbolic variable that points to the number of errors made on the\n # minibatch given by self.x and self.y\n self.errors = self.logLayer.errors(self.y)",
"def forward_backward_prop(X, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs + Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n # Note: compute cost based on `sum` not `mean`.\n z1 = np.dot(X, W1) + b1\n l1 = sigmoid(z1)\n\n z2 = np.dot(l1, W2) + b2\n l2 = softmax(z2)\n\n cost = -np.sum(labels * np.log(l2))\n\n ### YOUR CODE HERE: backward propagation\n softmax_grad = crossentropy_softmax_grad(labels, l2)\n gradW2 = np.dot(l1.T, softmax_grad)\n gradb2 = np.reshape(np.sum(softmax_grad, axis=0), b2.shape)\n\n assert gradW2.shape == W2.shape\n assert gradb2.shape == b2.shape\n\n sig_grad = sigmoid_grad(l1)\n dy_dz1 = sig_grad * np.dot(softmax_grad, W2.T)\n gradW1 = np.dot(X.T, dy_dz1)\n gradb1 = np.reshape(np.sum(dy_dz1, axis=0), b1.shape)\n assert gradW1.shape == W1.shape\n assert gradb1.shape == b1.shape\n ### END YOUR CODE\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),\n gradW2.flatten(), gradb2.flatten()))\n\n return cost, grad.flatten()",
"def logistic_regression_loss_naive(W, X, y, reg):\n # Set the loss to a random number\n loss = 0\n # Initialize the gradient to zero\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n dim = X.shape[1]\n num_train = X.shape[0]\n f_mat = np.zeros_like(W)\n h_mat = np.zeros_like(W)\n loss1 = np.zeros_like(W)\n grad = 0\n y_ = np.zeros([y.shape[0],W.shape[1]])\n for i in range(y.shape[0]):\n y_[i,y[i]] = 1 \n \n for i in range(num_train):\n sample_x = X[i,:]\n for cate in range(W.shape[1]):\n grad = 0\n f_x = 0\n for index in range(dim):\n f_x += W[index,cate]*sample_x[index]\n \n f_mat[i,cate] = f_x\n h_x = sigmoid(f_x)\n loss += y_[i,cate]*np.log(h_x) + (1 - y_[i,cate]) * np.log(1 - h_x)\n grad += (h_x - y_[i,cate]) * sample_x\n h_mat[i,cate] = h_x - y_[i,cate]\n dW[:,cate] = grad.T\n \n loss = (-1 / num_train )* loss + 0.5 * reg * np.sum(W * W)\n dW = 1/ num_train * dW + reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW",
"def train_log_linear_with_w2v():\n data_manager = DataManager(W2V_AVERAGE, batch_size=BATCH_SIZE, embedding_dim=300)\n model = LogLinear(data_manager.get_input_shape()[0])\n train_model(model, data_manager, N_EPOCHS, LEARNING_RATE, WEIGHT_DECAY)",
"def build_network(unit_multiplier = 1, num_lstm_stacks = 5):\n print(\"got to training!\")\n model = Sequential()\n model.add(Conv1D(filters= BASE_NUM_FILTERS*unit_multiplier, kernel_size = KERNEL_SIZE, activation='relu', input_shape=(PAD_LENGTH, WORD_DIM)))\n model.add(MaxPool1D())\n for i in range(1,num_lstm_stacks): \n model.add(LSTM(units=BASE_NUM_UNITS*unit_multiplier, return_sequences = True, recurrent_dropout = .20, dropout = .20))\n model.add(BatchNormalization())\n model.add(LSTM(units=BASE_NUM_UNITS*unit_multiplier, return_sequences = False, recurrent_dropout = .20, dropout = .20))\n model.add(BatchNormalization())\n model.add(Dense(units=1, activation=\"sigmoid\"))\n return model",
"def top2gating(logits: torch.Tensor, capacity_factor: float) ->Tuple[Tensor, Tensor, Tensor, Tensor]:\n gates = F.softmax(logits, dim=1)\n num_tokens = gates.shape[0]\n num_experts = gates.shape[1]\n capacity = math.ceil(2 * num_tokens / num_experts * capacity_factor)\n indices1_s = torch.argmax(gates, dim=1)\n mask1 = F.one_hot(indices1_s, num_classes=num_experts)\n logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)\n logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float('-inf'))\n indices2_s = torch.argmax(logits_except1, dim=1)\n mask2 = F.one_hot(indices2_s, num_classes=num_experts)\n locations1 = torch.cumsum(mask1, dim=0) - 1\n locations2 = torch.cumsum(mask2, dim=0) - 1\n locations2 += torch.sum(mask1, dim=0, keepdim=True)\n exp_counts = torch.sum(mask1, dim=0).detach()\n me = torch.mean(gates, dim=0)\n ce = torch.mean(mask1.float(), dim=0)\n l_aux = torch.mean(me * ce) * num_experts * num_experts\n mask1 *= torch.lt(locations1, capacity)\n mask2 *= torch.lt(locations2, capacity)\n locations1_s = torch.sum(locations1 * mask1, dim=1)\n locations2_s = torch.sum(locations2 * mask2, dim=1)\n mask1_float = mask1.float()\n mask2_float = mask2.float()\n gates1_s = torch.einsum('se,se->s', gates, mask1_float)\n gates2_s = torch.einsum('se,se->s', gates, mask2_float)\n denom_s = gates1_s + gates2_s\n denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)\n gates1_s /= denom_s\n gates2_s /= denom_s\n gates1 = torch.einsum('s,se->se', gates1_s, mask1_float)\n gates2 = torch.einsum('s,se->se', gates2_s, mask2_float)\n locations1_sc = F.one_hot(locations1_s, num_classes=capacity).float()\n locations2_sc = F.one_hot(locations2_s, num_classes=capacity).float()\n combine1_sec = torch.einsum('se,sc->sec', gates1, locations1_sc)\n combine2_sec = torch.einsum('se,sc->sec', gates2, locations2_sc)\n combine_weights = combine1_sec + combine2_sec\n dispatch_mask = combine_weights.bool()\n return l_aux, combine_weights, dispatch_mask, exp_counts",
"def two_scale_forward(self, inputs):\n assert 'images' in inputs\n x_1x = inputs['images']\n\n x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)\n lo_outs = self._fwd(x_lo)\n pred_05x = lo_outs['cls_out']\n p_lo = pred_05x\n aux_lo = lo_outs['aux_out']\n logit_attn = lo_outs['logit_attn']\n attn_05x = logit_attn\n\n hi_outs = self._fwd(x_1x)\n pred_10x = hi_outs['cls_out']\n p_1x = pred_10x\n aux_1x = hi_outs['aux_out']\n\n p_lo = logit_attn * p_lo\n aux_lo = logit_attn * aux_lo\n p_lo = scale_as(p_lo, p_1x)\n aux_lo = scale_as(aux_lo, p_1x)\n\n logit_attn = scale_as(logit_attn, p_1x)\n\n # combine lo and hi predictions with attention\n joint_pred = p_lo + (1 - logit_attn) * p_1x\n joint_aux = aux_lo + (1 - logit_attn) * aux_1x\n\n if self.training:\n gts = inputs['gts']\n do_rmi = cfg.LOSS.OCR_AUX_RMI\n aux_loss = self.criterion(joint_aux, gts, do_rmi=do_rmi)\n\n # Optionally turn off RMI loss for first epoch to try to work\n # around cholesky errors of singular matrix\n do_rmi_main = True # cfg.EPOCH > 0\n main_loss = self.criterion(joint_pred, gts, do_rmi=do_rmi_main)\n loss = cfg.LOSS.OCR_ALPHA * aux_loss + main_loss\n\n # Optionally, apply supervision to the multi-scale predictions\n # directly. Turn off RMI to keep things lightweight\n if cfg.LOSS.SUPERVISED_MSCALE_WT:\n scaled_pred_05x = scale_as(pred_05x, p_1x)\n loss_lo = self.criterion(scaled_pred_05x, gts, do_rmi=False)\n loss_hi = self.criterion(pred_10x, gts, do_rmi=False)\n loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_lo\n loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_hi\n return loss\n else:\n output_dict = {\n 'pred': joint_pred,\n 'pred_05x': pred_05x,\n 'pred_10x': pred_10x,\n 'attn_05x': attn_05x,\n }\n return output_dict",
"def forward(self, model, X, targets):\n # X: Bx3xHxW | Bx4xHxW | Bx6xHxW\n # logits: Bx6xHxW\n # targets: Bx1xHxW\n # output: Bx1xHxW\n logits = model(X)\n loss = self.loss_func(logits, targets.expand_as(logits)) # Single loss value (averaged)\n # logits[:,0:5,:,:].mean(dim=1, keepdims=True)\n # logits.mean(dim=1, keepdims=True)\n score = self.metric_func(logits[:,5:6,:,:], targets) # Single metric score (averaged)\n\n return logits, loss, score",
"def forward_mlp(output_dim,\n hidden_sizes,\n hidden_nonlinearity,\n output_nonlinearity,\n input_var,\n mlp_params,\n ):\n x = input_var\n idx = 0\n bias_added = False\n sizes = tuple(hidden_sizes) + (output_dim,)\n\n if output_nonlinearity is None:\n output_nonlinearity = tf.identity\n\n for name, param in mlp_params.items():\n assert str(idx) in name or (idx == len(\n hidden_sizes) and \"output\" in name)\n\n if \"kernel\" in name:\n assert param.shape == (x.shape[-1], sizes[idx])\n x = tf.matmul(x, param)\n elif \"bias\" in name:\n assert param.shape == (sizes[idx],)\n x = tf.add(x, param)\n bias_added = True\n else:\n raise NameError\n\n if bias_added:\n if \"hidden\" in name:\n x = hidden_nonlinearity(x)\n elif \"output\" in name:\n x = output_nonlinearity(x)\n else:\n raise NameError\n idx += 1\n bias_added = False\n output_var = x\n return input_var, output_var # Todo why return input_var?",
"def gradient_log_linear(weights):\n\n pooling_pooled, pooling_reg_const = log_linear_pooling(P, weights)\n log_pooling = np.log(pooling_pooled)\n res = np.zeros(nviews)\n for i in np.arange(nviews):\n res[i] = np.sum(weights[i] * log_pooling / P[i])\n return res",
"def forward(log_emlik, log_startprob, log_transmat):",
"def add_loggers(\n name_a: str,\n model_a: torch.nn.Module,\n name_b: str,\n model_b: torch.nn.Module,\n) -> Tuple[torch.nn.Module, torch.nn.Module]:\n _turn_on_loggers(name_a, model_a)\n _turn_on_loggers(name_b, model_b)\n return model_a, model_b",
"def stack_models(df_prepared, df_target, model_1, model_2, model_3, model_4):\n\n # Bring together the best estimators of all the three ML models and the deep neural network model\n estimators = [model_1, model_2, model_3, model_4]\n\n # Creating training set for the Stacker/Blender\n stack_predictions = np.empty((df_prepared.shape[0], len(estimators)), dtype=np.float32)\n for index, estimator in enumerate(estimators):\n stack_predictions[:, index] = np.reshape(estimator.predict(df_prepared), (df_prepared.shape[0],))\n\n # Initializing the Stacker/Blender (Random Forest Regressor)\n rf_blender = RandomForestRegressor(n_estimators=20, random_state=123)\n\n # Evaluate the Blender on stacking set using cross-validation (# cross validation sets =3)\n val_scores = cross_val_score(rf_blender, stack_predictions, df_target, scoring='neg_mean_squared_error', n_jobs=-1)\n\n return rf_blender, np.mean(np.sqrt(np.array(val_scores)*-1))",
"def mlp_mnist():\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets('/tmp/data', one_hot=True)\n training_data = np.array([image.flatten() for image in mnist.train.images])\n training_label = mnist.train.labels\n valid_data = np.array([image.flatten() for image in mnist.validation.images])\n valid_label = mnist.validation.labels\n input_dim = training_data.shape[1]\n label_size = training_label.shape[1]\n\n model = Sequential()\n model.add(Input(input_shape=(input_dim, )))\n model.add(Dense(300, activator='selu'))\n model.add(Dropout(0.2))\n model.add(Softmax(label_size))\n model.compile('CCE', optimizer=SGD())\n model.fit(training_data, training_label, validation_data=(valid_data, valid_label))",
"def print_logregress(ds, logys, yname=\"y\"):\n m, b, r, p, se = linregress(ds, logys)\n print(\"\\nlog({_coconut_format_0}) = {_coconut_format_1} d + {_coconut_format_2}\\t(r**2 = {_coconut_format_3})\".format(_coconut_format_0=(yname), _coconut_format_1=(m), _coconut_format_2=(b), _coconut_format_3=(r**2)))\n print(\"{_coconut_format_0} = {_coconut_format_1} * 2**({_coconut_format_2} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(exp(b)), _coconut_format_2=(m / log(2))))\n poly = PolynomialFeatures(degree=2, include_bias=False)\n X = ((poly.fit_transform)((list)((map)(lambda x: [x,], ds))))\n clf = linear_model.LinearRegression()\n clf.fit(X, logys)\n# a d**2 + b d + c\n b, a = clf.coef_\n c = clf.intercept_\n print(\"log({_coconut_format_0}) = {_coconut_format_1} d**2 + {_coconut_format_2} d + {_coconut_format_3}\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(b), _coconut_format_3=(c)))\n# (d - 1)(a d - c)\n# a d**2 - a d - c d + c\n# a d**2 - (a + c) d + c\n print(\"{_coconut_format_0} = exp((d - 1)({_coconut_format_1} d - {_coconut_format_2}) + {_coconut_format_3} d)\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(c), _coconut_format_3=(b + a + c)))\n print(\"{_coconut_format_0} = 2**((d - 1)({_coconut_format_1} d - {_coconut_format_2}) + {_coconut_format_3} d)\".format(_coconut_format_0=(yname), _coconut_format_1=(a / log(2)), _coconut_format_2=(c / log(2)), _coconut_format_3=((a + b + c) / log(2))))\n print(\"{_coconut_format_0} = exp({_coconut_format_1} ((d - 1)(d - {_coconut_format_2}) + {_coconut_format_3} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(c / a), _coconut_format_3=(1 + (b + c) / a)))\n print(\"{_coconut_format_0} = 2**({_coconut_format_1} ((d - 1)(d - {_coconut_format_2}) + {_coconut_format_3} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(a / log(2)), _coconut_format_2=(c / a), _coconut_format_3=(1 + (b + c) / a)))",
"def add_model(self, inputs):\n ### YOUR CODE HERE\n N = self.config.batch_size\n Dh = self.config.hidden_size\n d = self.config.embed_size\n steps = self.config.num_steps\n\n # Trust default Xavier initializer tf.uniform_unit_scaling_initializer\n with tf.variable_scope(\"RNN_LM\") as scope:\n rnn_outputs = []\n h = self.initial_state_placeholder\n # Formula: h(t) = sigmoid(h(t-1)*H + e(t)*I + b_1)\n for idx, input in enumerate(inputs):\n if idx > 0:\n scope.reuse_variables()\n \n H = tf.get_variable(\"HiddenXFormMatrix\", [Dh, Dh])\n I = tf.get_variable(\"InpWordRepMatrix\", [d, Dh])\n b_1= tf.get_variable(\"HiddenBiasVector\", [Dh])\n\n e = tf.nn.dropout(input, self.dropout_placeholder)\n a = tf.matmul(h,H) + tf.matmul(e,I) + b_1\n h = tf.sigmoid(a)\n output = tf.nn.dropout(h, self.dropout_placeholder) # Drop Op\n rnn_outputs.append(output)\n self.final_state = h\n ### END YOUR CODE\n return rnn_outputs",
"def forward_prop(params, predict = False):\n # Neural network architecture\n n_inputs = inputs\n n_hidden = hidden\n n_classes = classes\n\n start = 0\n W1_i = n_inputs*n_hidden\n b1_i = W1_i + n_hidden\n W2_i = b1_i + n_hidden*n_classes\n b2_i = W2_i + n_classes\n\n # Roll-back the weights and biases\n #If predict is true this is done with the optimized weights\n W1 = params[start:W1_i].reshape((n_inputs,n_hidden))\n b1 = params[W1_i:b1_i].reshape((n_hidden,))\n W2 = params[b1_i:W2_i].reshape((n_hidden,n_classes))\n b2 = params[W2_i:b2_i].reshape((n_classes,))\n\n # Perform forward propagation\n # Pre-activation in Layer 1 - what X we use depends on if it's the training or test data\n if predict == False:\n z1 = X_train.dot(W1) + b1\n else:\n z1 = X.dot(W1) + b1 \n \n a1 = np.tanh(z1) # Activation in Layer 1\n z2 = a1.dot(W2) + b2 # Pre-activation in Layer 2\n logits = z2 # Logits for Layer 2\n # Compute for the softmax of the logits\n exp_scores = np.exp(logits)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n\n if predict == False:\n # Compute for the negative log likelihood\n N = num # Number of samples\n corect_logprobs = -np.log(probs[range(N), y_train])\n loss = np.sum(corect_logprobs) / N\n return loss\n else:\n #If we are doing a prediction then return the predicted values\n y_pred = np.argmax(logits, axis=1)\n return y_pred"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
An LSTM layer with a `Logreg` layer for the output.
|
def LSTM1(Ni,Ns,No):
lstm = LSTM(Ni,Ns)
if No==1:
logreg = Logreg(Ns,No)
else:
logreg = Softmax(Ns,No)
stacked = Stacked([lstm,logreg])
return stacked
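# Note (illustrative, not part of the original code): a single output unit
# (No == 1) gets a sigmoid (`Logreg`) output layer, while No > 1 gets a
# `Softmax` output layer so the outputs form a distribution over the No classes;
# either way the output layer is stacked on top of the LSTM's Ns hidden units.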
|
[
"def lstm_layer(return_sequences=True):\n return LSTM(240, dropout= 0.1, recurrent_dropout= 0.1,\n return_sequences=return_sequences)",
"def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[:,-1]\n self.model = sm.Logit(self.y, self.X).fit(disp=False)",
"def LstmLayer(*args, **kwargs):\n if FLAGS.cudnn_lstm and tf.compat.v1.test.is_gpu_available():\n return tf.compat.v1.keras.layers.CuDNNLSTM(*args, **kwargs)\n else:\n return tf.compat.v1.keras.layers.LSTM(*args, **kwargs, implementation=1)",
"def build_lstm_graph(self):\n tf.reset_default_graph()\n lstm_graph = tf.Graph()\n\n with lstm_graph.as_default():\n self.xx = tf.placeholder('float32', [None, 1, self.n_features], name='features')\n self.yy = tf.placeholder('float32', name='labels')\n self.bins = tf.constant(self.bins, name='bins')\n with tf.name_scope(\"output_layer\"):\n weight = tf.Variable(tf.random_normal([self._lstm_size, self.n_labels]), name='weights')\n biases = tf.Variable(tf.random_normal([self.n_labels]), name='biases')\n x = tf.transpose(self.xx, [1, 0, 2])\n x = tf.reshape(x, [-1, self.n_features])\n x = tf.split(x, 1)\n\n lstm_cell = rnn_cell.LSTMCell(self._lstm_size, name='basic_lstm_cell')\n outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n\n logits = tf.add(tf.matmul(outputs[-1], weight), biases, name='rnn_model')\n\n tf.summary.histogram(\"last_lstm_output\", outputs[-1])\n tf.summary.histogram(\"weights\", weight)\n tf.summary.histogram(\"biases\", biases)\n\n with tf.name_scope(\"train\"):\n correct = tf.equal(tf.argmax(logits, 1), tf.argmax(self.yy, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.yy),\n name='loss'\n )\n tf.train.AdamOptimizer().minimize(loss, name=\"loss_mse_adam_minimize\")\n tf.summary.scalar(\"loss\", loss)\n tf.summary.scalar(\"accuracy\", accuracy)\n\n # Operators to use after restoring the model\n for op in [logits, loss]:\n tf.add_to_collection('ops_to_restore', op)\n\n return lstm_graph",
"def train_log_linear_with_w2v():\n data_manager = DataManager(W2V_AVERAGE, batch_size=BATCH_SIZE, embedding_dim=300)\n model = LogLinear(data_manager.get_input_shape()[0])\n train_model(model, data_manager, N_EPOCHS, LEARNING_RATE, WEIGHT_DECAY)",
"def train_log_linear_with_one_hot():\n data_manager = DataManager(ONEHOT_AVERAGE, batch_size=BATCH_SIZE)\n model = LogLinear(data_manager.get_input_shape()[0])\n train_model(model, data_manager, N_EPOCHS, LEARNING_RATE, WEIGHT_DECAY)",
"def forward(self, history_tensor, prev_hidden_state): \n e = self.input_lookup(history_tensor)\n x = e.view(e.shape[0], e.shape[1], e.shape[2])\n\n h, _ = self.lstm(x, self.init_hidden())\n\n o = self.output(h[-1])\n #y = self.softmax(o)\n y = F.log_softmax(o, dim=1)\n y = y.squeeze()[-1]\n\n return y",
"def log_loss(input, label, epsilon=1e-4):\n helper = LayerHelper('log_loss', **locals())\n loss = helper.create_tmp_variable(dtype=input.dtype)\n helper.append_op(\n type='log_loss',\n inputs={'Predicted': [input],\n 'Labels': [label]},\n outputs={'Loss': [loss]},\n attrs={'epsilon': epsilon})\n return loss",
"def lstm_layer(\n input_size: int,\n hidden_size: int,\n num_layers: int = 1,\n batch_first: bool = True,\n forget_bias: float = 1.0,\n kernel_init: Initialization = Initialization.XavierGlorotUniform,\n bias_init: Initialization = Initialization.Zero,\n) -> torch.nn.Module:\n lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, batch_first=batch_first)\n # Add forget_bias to forget gate bias\n for name, param in lstm.named_parameters():\n # Each weight and bias is a concatenation of 4 matrices\n if \"weight\" in name:\n for idx in range(4):\n block_size = param.shape[0] // 4\n _init_methods[kernel_init](\n param.data[idx * block_size : (idx + 1) * block_size]\n )\n if \"bias\" in name:\n for idx in range(4):\n block_size = param.shape[0] // 4\n _init_methods[bias_init](\n param.data[idx * block_size : (idx + 1) * block_size]\n )\n if idx == 1:\n param.data[idx * block_size : (idx + 1) * block_size].add_(\n forget_bias\n )\n return lstm",
"def LSTM_Network(_X, config):\r\n # (NOTE: This step could be greatly optimised by shaping the dataset once\r\n # input shape: (batch_size, n_steps, n_input)\r\n _X = tf.transpose(_X, [1, 0, 2]) # permute n_steps and batch_size\r\n # Reshape to prepare input to hidden activation\r\n _X = tf.reshape(_X, [-1, config.n_inputs])\r\n # new shape: (n_steps*batch_size, n_input)\r\n\r\n # Linear activation\r\n _X = tf.nn.relu(tf.matmul(_X, config.W['hidden']) + config.biases['hidden'])\r\n # Split data because rnn cell needs a list of inputs for the RNN inner loop\r\n _X = tf.split(_X, config.n_steps, 0)\r\n # new shape: n_steps * (batch_size, n_hidden)\r\n\r\n # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow\r\n lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)\r\n lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)\r\n lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2]*config.n_layers, state_is_tuple=True)\r\n # Get LSTM cell output\r\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)\r\n\r\n # Get last time step's output feature for a \"many to one\" style classifier,\r\n # as in the image describing RNNs at the top of this page\r\n lstm_last_output = outputs[-1]\r\n\r\n # Linear activation\r\n return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']",
"def LSTM(inputs, dim, seq_len, name):\n with tf.name_scope(name):\n cell = tf.contrib.rnn.LSTMCell(num_units=dim)\n hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs,\n sequence_length=seq_len, dtype=tf.float32, scope=name)\n\n return hidden_states, cell_states",
"def four_layer_lstm(self, num_neurons_layer_1, num_neurons_layer_2,\n num_neurons_layer_3, num_neurons_layer_4, num_epochs,\n dropout, X_train, Y_train,\n X_val, Y_val):\n model = Sequential()\n\n # layer: 1\n model.add(LSTM(num_neurons_layer_1, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n\n model.add(Dropout(dropout))\n # layer: 2\n model.add(LSTM(num_neurons_layer_2, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n # layer: 3\n model.add(LSTM(num_neurons_layer_3, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n # layer: 4\n model.add(LSTM(num_neurons_layer_4, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics = ['accuracy'])\n print(model.summary())\n size = self.size(model)\n hist=model.fit(X_train, Y_train, epochs=num_epochs,\n batch_size=1, shuffle=False, validation_data = (X_val, Y_val),\n verbose = 0)\n return model, size",
"def _lstm_model(self) -> Sequential:\n\n model = Sequential()\n model.add(\n layers.LSTM(units=225,\n activation='relu',\n batch_input_shape=(1, self.time_steps, 310),\n return_sequences=True,\n stateful=True))\n model.add(\n layers.LSTM(units=200,\n batch_input_shape=(1, self.time_steps, 310),\n kernel_initializer='glorot_normal',\n activation='relu',\n stateful=True,\n return_sequences=False))\n model.add(\n layers.Dense(units=150,\n kernel_initializer='glorot_normal',\n activation='relu'))\n model.add(layers.Dropout(0.3))\n model.add(\n layers.Dense(units=50,\n kernel_initializer='glorot_normal',\n activation='relu'))\n model.add(layers.Dropout(0.3))\n model.add(layers.Dense(units=1))\n\n early_stop = EarlyStopping(monitor='val_loss', verbose=1)\n model_checkpoint = ModelCheckpoint('best_lstm_model.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True)\n logdir = f'LSTM_logs/scalars/{datetime.now().strftime(\"%Y%m%d-%H%M%S\")}'\n tensorboard_callback = TensorBoard(log_dir=logdir)\n self.callbacks = [early_stop, model_checkpoint, tensorboard_callback]\n\n if self.gpu >= 2:\n try:\n model = multi_gpu_model(model, gpus=self.gpu, cpu_relocation=True)\n LOGGER.info(f\"Training model with {self.gpu} gpus\")\n except Exception as e:\n LOGGER.info(f\"Failed to train model with GPUS due to {e}, reverting to CPU\")\n raise e\n\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=[metrics.mae, correlation_coefficient_loss])\n\n return model",
"def logL(self, X, Xerr):\n if self.V is None or self.mu is None or self.weights is None:\n raise Exception(\"Model parameters not set.\")\n \n return self.GMM.logL(X,Xerr)",
"def get_logit_op(self,\r\n decoder_inp,\r\n context,\r\n Env,\r\n decoder_state,\r\n *args,\r\n **kwargs):\r\n\r\n# decoder_inp = tf.reshape(decoder_inp,[-1,1,self.hidden_dim])\r\n _ , decoder_state = tf.nn.dynamic_rnn(self.cell,\r\n decoder_inp,\r\n initial_state=decoder_state,\r\n scope=self._scope+'Decoder/LSTM/rnn')\r\n hy = decoder_state[-1].h\r\n\r\n # glimpses\r\n for i in range(self.n_glimpses):\r\n # ref: [batch_size x max_time x hidden_dim], logit : [batch_size x max_time]\r\n ref, logit = self.glimpses[i](hy,context,Env)\r\n if self.mask_glimpses:\r\n logit -= self.BIGNUMBER* Env.mask\r\n prob = tf.nn.softmax(logit)\r\n \r\n # hy : [batch_size x 1 x max_time ] * [batch_size x max_time x hidden_dim] -> \r\n #[batch_size x hidden_dim ]\r\n hy = tf.squeeze(tf.matmul( tf.expand_dims(prob,1),ref) ,1)\r\n\r\n # attention\r\n _, logit = self.pointer(hy,context,Env)\r\n if self.mask_pointer:\r\n logit -= self.BIGNUMBER* Env.mask\r\n \r\n return logit , decoder_state",
"def reconstruct_loglayer(self, n_outs = 10):\n # We now need to add a logistic layer on top of the MLP\n self.logLayer = LogisticRegression(\n input=self.dA_layers[-1].output,\n n_in=self.dA_layers[-1].n_hidden, n_out=n_outs)\n\n self.params.extend(self.logLayer.params)\n # construct a function that implements one step of finetunining\n\n # compute the cost for second phase of training,\n # defined as the negative log likelihood\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\n # compute the gradients with respect to the model parameters\n # symbolic variable that points to the number of errors made on the\n # minibatch given by self.x and self.y\n self.errors = self.logLayer.errors(self.y)",
"def nll_on_features(self, h, batch, reduction=\"mean\"):\n batch = batch.to(h.device)\n # The targets are\n y = batch.outputs\n # Retrieve attention mask\n loss_mask = batch.inputs[1].float().view(y.size(0), y.size(1), 1)\n log_probs = F.log_softmax(h, dim=-1) * loss_mask\n nll_loss = F.nll_loss(\n log_probs.view(y.numel(), -1).contiguous(),\n y.contiguous().view(-1),\n reduction=reduction,\n )\n if reduction == \"none\":\n nll_loss = nll_loss.view(y.size(0), y.size(1))\n return nll_loss",
"def three_layer_lstm(self, num_neurons_layer_1, num_neurons_layer_2,\n num_neurons_layer_3, num_epochs, dropout,\n X_train, Y_train, X_val, Y_val):\n model = Sequential()\n # layer: 1\n model.add(LSTM(num_neurons_layer_1, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n # layer: 2\n model.add(LSTM(num_neurons_layer_2, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n # layer: 3\n model.add(LSTM(num_neurons_layer_3, activation='tanh', return_sequences=True,\n batch_input_shape=(1, X_train.shape[1], X_train.shape[2]),\n stateful=True))\n model.add(Dropout(dropout))\n model.add(Flatten())\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics = ['accuracy'])\n print(model.summary())\n size = self.size(model)\n hist=model.fit(X_train, Y_train, epochs=num_epochs,\n batch_size=1, shuffle=False, validation_data = (X_val, Y_val),\n verbose = 0)\n return model, size",
"def _lstm_cell():\n cell = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=options.hidden_units)\n if is_training:\n cell = tf.compat.v1.nn.rnn_cell.DropoutWrapper(\n cell,\n input_keep_prob=options.input_keep_prob,\n output_keep_prob=options.output_keep_prob,\n state_keep_prob=options.state_keep_prob)\n return cell",
"def forecast_model(self, x, num_layers, timesteps):\r\n \r\n # LSTM Model\r\n model = Sequential()\r\n model.add(LSTM(units = num_layers, input_shape = (timesteps, x.shape[-1]),\r\n activation = self.model_activation))\r\n model.add(Dense(1, activation = None))\r\n model.compile(loss = 'mean_squared_error', optimizer = 'adam')\r\n \r\n return model"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of target classes `cs` and the total number of classes `nc`, compute a target array that has a `1` at each time step in the column corresponding to that step's target class.
|
def make_target(cs,nc):
result = np.zeros((2*len(cs)+1,nc))
try:
for i,j in enumerate(cs):
result[2*i,0] = 1.0
result[2*i+1,j] = 1.0
result[-1,0] = 1.0
    except IndexError:
        LOG.critical('Cannot index target class. Did you load a model that was trained on fewer characters than needed for this operation?')
sys.exit(1)
return result
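# Illustrative sketch (not part of the original function), assuming numpy is
# imported as np as in the code above: the produced target interleaves "blank"
# steps (class 0) with the requested classes, e.g.
#
#   t = make_target([3, 7], 10)   # shape (2*2+1, 10) == (5, 10)
#   # row 0: 1.0 in column 0 (blank)
#   # row 1: 1.0 in column 3 (first target class)
#   # row 2: 1.0 in column 0 (blank)
#   # row 3: 1.0 in column 7 (second target class)
#   # row 4: 1.0 in column 0 (trailing blank)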
|
[
"def fast_cm(tru, pred, num_classes):\r\n bin = tru * num_classes + pred\r\n h = np.bincount(bin, minlength=num_classes*num_classes)\r\n return h.reshape((num_classes, num_classes))",
"def competitive_learning(x, classes, c = 1):\n a = -5\n b = 5\n w = (b - a)*np.random.random_sample((x.shape[1], classes)) + a\n for point in x:\n net = np.matmul(point, w)\n max_ind = np.argmax(net)\n w[:, max_ind] = w[:, max_ind] + c*point\n return w",
"def getLocalCC(ft,labels,classInds,numFeatureMaps):\n \n cc = np.zeros([len(classInds),numFeatureMaps])\n for i in range(len(classInds)):\n inds = np.argwhere(labels==classInds[i])[:,0]\n if inds.size:\n ft_c = ft[inds,...]\n ftMean = np.mean(ft_c,axis=0)\n cc[i,:] = ftMean\n return cc",
"def confmat(Y,T):\n\n n, c = Y.shape\n n2, c2 = T.shape\n\n if n != n2 or c != c2:\n raise Exception('Outputs and targets are different sizes')\n\n if c > 1:\n # Find the winning class assuming 1-of-N encoding\n Yclass = np.argmax(Y, axis=1)+1\n TL=np.dot(np.arange(1,c+1),T.T)\n\n else:\n # Assume two classes with 0-1 encoding\n c = 2\n class2 = np.nonzero(T > 0.5)[0]\n TL = np.ones(n)\n TL[class2] = 2\n class2 = np.nonzero(Y > 0.5)[0]\n Yclass = np.ones(n)\n Yclass[class2] = 2\n # Compute \n pdb.set_trace()\n correct = (Yclass==TL)\n total = correct.sum()\n rate = np.array([total*100/n, total])\n\n C = np.zeros((c,c))\n for i in range(c):\n for j in range(c):\n C[i,j] = np.sum(np.multiply((Yclass==j+1),(TL==i+1)))\n return C,rate",
"def getNumpyClassQuantities(values, quantities, classBreaks):\r\n\r\n import numpy\r\n\r\n class_results = []\r\n for i in range(0, len(classBreaks)):\r\n class_range = classBreaks[i]\r\n in_class = numpy.logical_and(values >= float(class_range[0]), values < float(class_range[1]))\r\n class_results.append({\r\n 'class': class_range,\r\n 'intersectionCount': int(in_class.sum()),\r\n 'intersectedQuantity': (in_class * quantities).sum()\r\n })\r\n return class_results",
"def create_label_distribution(n_timestamps, n_classes):\n shuffled_indexes = np.arange(n_timestamps)\n np.random.shuffle(shuffled_indexes)\n labels = np.zeros(n_timestamps, dtype=int)\n for c in range(n_classes):\n labels[np.where(shuffled_indexes < c * n_timestamps // n_classes)] += 1\n return labels",
"def check_y(y, n_classes=5): \n ymax = np.argmax(y, axis=-1)\n classtotals = {k: 0 for k in np.arange(n_classes)}\n for i in np.arange(ymax.shape[0]):\n y_ = ymax[i,...] \n for c in np.arange(n_classes):\n classtotals[c] += (y_ == c).sum()\n return classtotals",
"def calcium_train(t, onsets):\n numberofbins = len(t)\n ap_trace = np.zeros(numberofbins)\n calcium_trace = np.zeros(numberofbins)\n apno = len(onsets)\n for ons in onsets:\n calcium_trace = calcium_trace + calcium_event(t, ons*(t[1]-t[0]))\n ap_trace[ons] = 1.\n return calcium_trace, ap_trace",
"def extractCases(df,sample_count):\n\n if(sample_count==0):\n return np.zeros(6)\n class1 = len(df[df['CGM']>180].index)/sample_count\n class2 = len(df[df['CGM']>250].index)/sample_count\n class3 = len(df[(df['CGM']>=70) & (df['CGM']<=180)].index)/sample_count\n class4 = len(df[(df['CGM']>=70) & (df['CGM']<=150)].index)/sample_count\n class5 = len(df[df['CGM']<70].index)/sample_count\n class6 = len(df[df['CGM']<54].index)/sample_count\n res = np.array([class1,class2,class3,class4,class5,class6])\n return res",
"def class_candidates(size):\n s = st.ToricLattice(size)\n a = np.zeros(16, dtype='int64')\n for i in range(16):\n a[i] = class_candidate(s, i)\n return a",
"def getStepLengths(cs):\n return _getStepAndCycleLengths(cs)[0]",
"def bout_time_curve_300(boutlist):\n timecourse = []\n for i in range(0, 301):\n if i < math.ceil(boutlist[-1]) + 1:\n counter = 0\n for time in boutlist:\n if time <= i:\n counter += 1\n else:\n counter = np.nan\n\n timecourse.append(counter)\n return timecourse",
"def multiclass_anytime2(data_files,train_range,test_range,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_midi\"):\n # read features and targets\n features = []\n targets = []\n feature = []\n target = []\n for file in data_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n \n # check how many pitch classes we have\n all_keys = []\n for el in targets[0]:\n all_keys += el.tolist()\n classes = list(set(all_keys))\n classes.sort()\n print \"classes:\", classes\n print \"nr classes:\",len(classes)\n\n # make (binary) target data\n cl_targets = []\n for piece in targets[0]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n cl_targets.append(target)\n \n # make train and test data\n trainin = []\n testin = []\n trainout = []\n testout = []\n nr_ex = len(train_range)\n for n in range(nr_ex):\n trainin.append( features[0][n][ train_range[n][0]:train_range[n][1] ] )\n trainout.append( cl_targets[n][ train_range[n][0]:train_range[n][1] ] )\n testin.append( features[0][n][ test_range[n][0]:test_range[n][1] ] )\n testout.append( cl_targets[n][ test_range[n][0]:test_range[n][1] ] )\n \n return trainin, trainout, testin, testout",
"def getBurnSteps(cs):\n stepLengths = getStepLengths(cs)\n return [len(steps) for steps in stepLengths]",
"def makeTargetsArray(pfsConfig):\n allCobraIds = np.arange(2394, dtype='int32') + 1\n fiberId = pfsConfig.fiberId\n cobraId = FiberIds().fiberIdToCobraId(fiberId)\n # targets vector has an entry for each cobra and sorted by cobraId.\n targets = np.empty((2394, 2), dtype=pfsConfig.pfiNominal.dtype)\n targets[:] = np.NaN\n # cobraMask is boolean array(shape=cobraId.shape)\n cobraMask = np.isin(cobraId, allCobraIds)\n # only existing cobraId.\n cobraId = cobraId[cobraMask]\n # assigning target vector directly.\n targets[cobraId - 1] = pfsConfig.pfiNominal[cobraMask]\n isNan = np.logical_or(np.isnan(targets[:, 0]), np.isnan(targets[:, 1]))\n\n return targets[:, 0] + targets[:, 1] * 1j, isNan",
"def class_performances(self):\n count = np.array(\n [len(self._global_results[i]) for i in range(self._num_labels)])\n result_sum = np.array(\n [sum(self._global_results[i]) for i in range(self._num_labels)])\n return result_sum / count.clip(min=1)",
"def to6classes(labels):\n res =np.zeros_like(labels, dtype=int)\n res[labels==1] = 1 #Ground\n res[labels==2] = 2 #buildings\n res[labels==3] = 3 #poles\n res[labels==4] = 3 #poles\n # res[labels==5] = 0 #trashcan\n # res[labels==6] = 0 #barriers\n res[labels==7] = 4 #Ground\n res[labels==8] = 5 #Ground\n res[labels==9] = 6 #Ground\n return res",
"def getCycleLengths(cs):\n return _getStepAndCycleLengths(cs)[1]",
"def segmap_classes_to_class_vec(segmap_classes, class_presence_threshold=1):\n class_vec = np.zeros(n_classes)\n for i in range(n_classes):\n if np.count_nonzero(segmap_classes[:, :, i]) >= class_presence_threshold:\n class_vec[i] = 1\n \n return class_vec"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform alignment between the `outputs` of a neural network classifier and some targets. The targets themselves are a time sequence of vectors, usually a unary representation of each target class (but possibly sequences of arbitrary posterior probability distributions represented as vectors).
|
def ctc_align_targets(outputs,targets,threshold=100.0,verbose=0,debug=0,lo=1e-5):
outputs = np.maximum(lo,outputs)
outputs = outputs * 1.0/np.sum(outputs,axis=1)[:,np.newaxis]
# first, we compute the match between the outputs and the targets
# and put the result in the log domain
match = np.dot(outputs,targets.T)
lmatch = np.log(match)
if debug:
import matplotlib.pyplot as plt
plt.figure("ctcalign"); plt.clf();
plt.subplot(411); plt.imshow(outputs.T,interpolation='nearest',cmap=plt.cm.hot)
plt.subplot(412); plt.imshow(lmatch.T,interpolation='nearest',cmap=plt.cm.hot)
assert not np.isnan(lmatch).any()
# Now, we compute a forward-backward algorithm over the matches between
# the input and the output states.
both = forwardbackward(lmatch)
# We need posterior probabilities for the states, so we need to normalize
# the output. Instead of keeping track of the normalization
# factors, we just normalize the posterior distribution directly.
epath = np.exp(both-np.amax(both))
l = np.sum(epath,axis=0)[np.newaxis,:]
epath /= np.where(l==0.0,1e-9,l)
# The previous computation gives us an alignment between input time
# and output sequence position as posteriors over states.
# However, we actually want the posterior probability distribution over
# output classes at each time step. This dot product gives
# us that result. We renormalize again afterwards.
aligned = np.maximum(lo,np.dot(epath,targets))
l = np.sum(aligned,axis=1)[:,np.newaxis]
aligned /= np.where(l==0.0,1e-9,l)
if debug:
plt.subplot(413); plt.imshow(epath.T,cmap=plt.cm.hot,interpolation='nearest')
plt.subplot(414); plt.imshow(aligned.T,cmap=plt.cm.hot,interpolation='nearest')
plt.ginput(1,0.01);
return aligned
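# Illustrative sketch (not part of the original code, and not the library's own
# `forwardbackward`): the helper used above is not shown here. Under the usual
# assumption that the alignment starts in the first target state, ends in the
# last one, and may either stay or advance by one state per time step, a minimal
# log-domain forward-backward over the (T x S) match matrix could look like the
# hypothetical stand-in below (numpy assumed to be imported as np, as above).
def simple_forwardbackward(lmatch, neg=-1e30):
    T, S = lmatch.shape
    # forward scores: alpha[t, s] includes the emissions up to and including time t
    alpha = np.full((T, S), neg)
    alpha[0, 0] = lmatch[0, 0]
    for t in range(1, T):
        stay = alpha[t - 1]                                # remain in the same state
        move = np.concatenate(([neg], alpha[t - 1, :-1]))  # advance by one state
        alpha[t] = lmatch[t] + np.logaddexp(stay, move)
    # backward scores: beta[t, s] covers the emissions strictly after time t
    beta = np.full((T, S), neg)
    beta[T - 1, S - 1] = 0.0
    for t in range(T - 2, -1, -1):
        stay = lmatch[t + 1] + beta[t + 1]
        move = np.concatenate((lmatch[t + 1, 1:] + beta[t + 1, 1:], [neg]))
        beta[t] = np.logaddexp(stay, move)
    # alpha + beta is, up to a constant, the log posterior of being in state s at
    # time t, which is what ctc_align_targets normalizes and maps back to classes.
    return alpha + beta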
|
[
"def process_outputs(self, outputs, image_size):\n boxes = []\n box_class = []\n box_confidences = []\n i = 0\n for output in outputs:\n boxes.append(output[:, :, :, 0:4])\n box_class.append(self.sigmoid(output[:, :, :, 5:]))\n box_confidences.append(self.sigmoid(output[:, :, :, 4:5]))\n\n for box in boxes:\n H_box = box.shape[0]\n W_box = box.shape[1]\n anchor_box = box.shape[2]\n\n the_box = np.zeros((H_box, W_box, anchor_box))\n\n ind_x = np.arange(W_box)\n ind_y = np.arange(H_box)\n ind_x = ind_x.reshape(1, W_box, 1)\n ind_y = ind_y.reshape(H_box, 1, 1)\n\n box_x = the_box + ind_x\n box_y = the_box + ind_y\n\n tx = box[..., 0]\n ty = box[..., 1]\n tw = box[..., 2]\n th = box[..., 3]\n\n sig_tx = self.sigmoid(tx)\n sig_ty = self.sigmoid(ty)\n\n bx = sig_tx + box_x\n by = sig_ty + box_y\n bx = bx / W_box\n by = by / H_box\n\n pw = self.anchors[i, :, 0]\n ph = self.anchors[i, :, 1]\n\n bw = pw * np.exp(tw)\n bh = ph * np.exp(th)\n\n inp_w = self.model.input.shape[1].value\n inp_h = self.model.input.shape[2].value\n\n bw = bw / inp_w\n bh = bh / inp_h\n\n x1 = bx - bw / 2\n y1 = by - bh / 2\n x2 = x1 + bw\n y2 = y1 + bh\n\n box[..., 0] = x1 * image_size[1]\n box[..., 1] = y1 * image_size[0]\n box[..., 2] = x2 * image_size[1]\n box[..., 3] = y2 * image_size[0]\n i = i + 1\n\n return (boxes, box_confidences, box_class)",
"def forward(self, inputs, alignments, input_lengths, output_lengths):\n embedded_inputs = self.embedding(inputs)\n encoded_inputs = self.encoder(embedded_inputs, input_lengths)\n aligned_features = self.alignment_module(encoded_inputs, alignments)\n pre_outputs = self.decoder(aligned_features, output_lengths)\n postnet_outputs = pre_outputs + self.postnet(pre_outputs)\n output_mask = get_mask_from_lengths(\n output_lengths, expand_multiple=postnet_outputs.shape[1]\n ).transpose(2, 1)\n postnet_outputs = postnet_outputs * output_mask\n return pre_outputs, postnet_outputs",
"def build_targets(pred_boxes, pred_cls, target, anchors, ignore_thres):\n ByteTensor = torch.cuda.ByteTensor if pred_boxes.is_cuda else torch.ByteTensor\n FloatTensor = torch.cuda.FloatTensor if pred_boxes.is_cuda else torch.FloatTensor\n\n nB = pred_boxes.size(0) # batch_size (num_samples)\n nA = pred_boxes.size(1) # num_anchors\n nC = pred_cls.size(-1) # num_classes\n nG = pred_boxes.size(2) # grid_size\n\n # Output tensors\n # shape(batch_size, num_anchors, grid_size, grid_size)\n obj_mask = ByteTensor(nB, nA, nG, nG).fill_(0)\n noobj_mask = ByteTensor(nB, nA, nG, nG).fill_(1) # fill with 1\n class_mask = FloatTensor(nB, nA, nG, nG).fill_(0)\n iou_scores = FloatTensor(nB, nA, nG, nG).fill_(0)\n tx = FloatTensor(nB, nA, nG, nG).fill_(0)\n ty = FloatTensor(nB, nA, nG, nG).fill_(0)\n tw = FloatTensor(nB, nA, nG, nG).fill_(0)\n th = FloatTensor(nB, nA, nG, nG).fill_(0)\n # shape(batch_size, num_anchors, grid_size, grid_size, num_classes)\n tcls = FloatTensor(nB, nA, nG, nG, nC).fill_(0)\n\n ##=== scale the target bboxes (relative to feature map) ===\n target_boxes = target[:, 2:6] * nG\n gxy = target_boxes[:, :2]\n gwh = target_boxes[:, 2:]\n\n ##=== Get anchors with best iou ===\n ious = torch.stack([bbox_wh_iou(anchor, gwh) for anchor in anchors])\n best_ious, best_n = ious.max(0)\n\n ##=== Compute target values from target bbox ===\n b, target_labels = target[:, :2].long().t()\n gx, gy = gxy.t()\n gw, gh = gwh.t()\n\n # get the top-left corner coordinates of the grid cell\n # where the object(target bbox center) appears\n gi, gj = gxy.long().t() \n\n # Set masks\n obj_mask[b, best_n, gj, gi] = 1\n noobj_mask[b, best_n, gj, gi] = 0\n\n # Set noobj mask to zero where iou exceeds ignore threshold\n for i, anchor_ious in enumerate(ious.t()):\n noobj_mask[b[i], anchor_ious > ignore_thres, gj[i], gi[i]] = 0\n\n # Center offset\n # (gx.floor(), gy.floor()) is the top-left corner of the grid cell\n # where the object(target bbox center) appears\n # b_x = sigmod(t_x) + c_x ==> target_sigmod(t_x) = b_x - c_x\n # b_y = sigmod(t_y) + c_y ==> target_sigmod(t_y) = b_y - c_y\n tx[b, best_n, gj, gi] = gx - gx.floor() \n ty[b, best_n, gj, gi] = gy - gy.floor()\n # Width and height\n # b_w = anchor_w * exp(t_w) ==> target_(t_w) = log(b_w / anchor_w)\n # b_h = anchor_h * exp(t_h) ==> target_(t_h) = log(b_h / anchor_h)\n tw[b, best_n, gj, gi] = torch.log(gw / anchors[best_n][:, 0] + 1e-16)\n th[b, best_n, gj, gi] = torch.log(gh / anchors[best_n][:, 1] + 1e-16)\n\n ##=== One-hot encoding of label ===\n tcls[b, best_n, gj, gi, target_labels] = 1\n\n ##=== Compute label correctness and iou at best anchor ===\n class_mask[b, best_n, gj, gi] = (pred_cls[b, best_n, gj, gi].argmax(-1) == target_labels).float()\n iou_scores[b, best_n, gj, gi] = bbox_iou(pred_boxes[b, best_n, gj, gi], target_boxes, x1y1x2y2=False)\n\n tconf = obj_mask.float()\n return iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf",
"def _multilabel_classification_postprocessing(self, X, outputs, y_true, transformed_y_true, metadata):\n\n y_pred = torch.softmax(outputs, dim=-1)\n binarized_y_pred = (y_pred > 0.1).float()\n top_indices = torch.argmax(y_pred, dim=-1)\n preds = []\n for y, binary_y, ind in zip(y_pred, binarized_y_pred, top_indices):\n if torch.sum(binary_y) == 0:\n pred = [0 for _ in range(len(y))]\n pred[ind] = 1\n pred = torch.tensor(pred, dtype=torch.float, device=y_pred.device)\n preds.append(pred)\n else:\n preds.append(binary_y)\n y_pred = torch.stack(preds)\n\n return y_pred, transformed_y_true, metadata",
"def align(self, sequences):\n seqs = [copy.deepcopy(s) for s in sequences]\n c = seqs[0]\n aligned = [c]\n klass = c.__class__\n with tqdm(total=len(seqs)-1) as pbar:\n for s in seqs[1:]:\n score, traceback = c.global_align_multiple_solutions(s, self.sm, self.g)\n c, s = next(c.recover_global_align_multiple_solutions(s, traceback))\n aligned = self.update_aligned_with_gaps(aligned, c)\n aligned.append(klass(s)) # add temp alignments to the list of processed\n c = self.consensus(aligned + [s], klass)\n pbar.update()\n return c, aligned",
"def _align(cycles, embs, num_steps, num_cycles, cycle_length,\n similarity_type, temperature):\n logits_list = []\n labels_list = []\n for i in range(num_cycles):\n logits, labels = _align_single_cycle(cycles[i],\n embs,\n cycle_length,\n num_steps,\n similarity_type,\n temperature)\n logits_list.append(logits)\n labels_list.append(labels)\n\n logits = tf.stack(logits_list)\n labels = tf.stack(labels_list)\n\n return logits, labels",
"def accuracy(outputs, targets) -> float:\n\n preds = outputs.reshape(-1, outputs.shape[2]).argmax(dim=1)\n targets = targets.reshape(-1) \n\n return (torch.sum(preds == targets).float() / len(targets)).item()",
"def loss_labels(self, outputs, targets, indices, num_boxes, current_epoch, owod_targets, owod_indices, log=True):\n assert 'pred_logits' in outputs\n temp_src_logits = outputs['pred_logits'].clone()\n temp_src_logits[:, :, self.invalid_cls_logits] = -100000000000.0\n src_logits = temp_src_logits\n if self.unmatched_boxes:\n idx = self._get_src_permutation_idx(owod_indices)\n target_classes_o = torch.cat([t['labels'][J] for t, (_, J) in zip(owod_targets, owod_indices)])\n else:\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t['labels'][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]\n losses = {'loss_ce': loss_ce}\n if log:\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses",
"def predict_output(features, weights):\n predictions = np.dot(features, weights)\n return predictions",
"def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n #print(src_logits.shape)\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n #print(target_classes.shape)\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes,self.empty_weight)\n losses = {'loss_ce': loss_ce}\n\n if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses",
"def compute_targets(self, image_group, annotations_group):\n \"\"\"\n Compute target outputs for the network using images and their annotations.\n \"\"\"\n\n batches_targets = anchors.anchor_targets_bbox(\n self.anchors,\n image_group,\n annotations_group,\n num_classes=self.num_classes(),\n )\n return list(batches_targets)",
"def postprocess(\n self,\n preds: List[Tuple[torch.FloatTensor, torch.FloatTensor]],\n targets: List[Dict[str, torch.LongTensor]],\n num_preds: int = 0,\n ) -> Tuple[\n List[Tuple[torch.FloatTensor, torch.FloatTensor]],\n List[Dict[str, torch.LongTensor]],\n int,\n ]:\n # Fix indexing\n for target in targets:\n target[\"scope\"] += num_preds\n target[\"positives\"] += num_preds\n target[\"negatives\"] += num_preds\n\n # Move to cpu\n preds = [(cost.cpu(), alignment.cpu()) for (cost, alignment) in preds]\n targets = [\n {key: value.cpu() for key, value in target.items()} for target in targets\n ]\n\n # Compute new num_preds\n num_preds += len(preds)\n\n return preds, targets, num_preds",
"def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> List[int]:\n preds = np.argmax(predictions, axis=2)\n batch_size, seq_len = preds.shape\n\n preds_list = [[] for _ in range(batch_size)]\n\n for i in range(batch_size):\n for j in range(seq_len):\n # If this label is not masked over, lookup the corresponding tag and append it to outputs.\n if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:\n preds_list[i].append(label_map[preds[i][j]])\n\n return preds_list #, out_label_list",
"def postprocess_model_outputs(self, predictions, expected):\n expected[\"y\"] = expected[\"y\"].numpy()\n expected[\"display_ids\"] = expected[\"display_ids\"].numpy()\n\n return predictions.numpy(), expected",
"def _create_output_array(self, sequences):\n lengths = [len(x) for x in sequences]\n labels = np.zeros(\n (self.batch_size, self._max_output_length, len(\n self._output_tokens)),\n dtype=np.float32)\n end_marker_index = self._output_dict[SeqToSeq.sequence_end]\n for i, sequence in enumerate(sequences):\n for j, token in enumerate(sequence):\n labels[i, j, self._output_dict[token]] = 1\n for j in range(lengths[i], self._max_output_length):\n labels[i, j, end_marker_index] = 1\n return labels",
"def _align_pad_prediction(self, predictions, bos, pad):\n dtype, device = predictions[0][0].dtype, predictions[0][0].device\n flatten_tgt = [\n best.tolist() for bests in predictions for best in bests\n ]\n paded_tgt = torch.tensor(\n list(zip_longest(*flatten_tgt, fillvalue=pad)),\n dtype=dtype,\n device=device,\n ).T\n bos_tensor = torch.full(\n [paded_tgt.size(0), 1], bos, dtype=dtype, device=device\n )\n full_tgt = torch.cat((bos_tensor, paded_tgt), dim=-1)\n batched_nbest_predict = full_tgt.view(\n len(predictions), -1, full_tgt.size(-1)\n ) # (batch, n_best, tgt_l)\n return batched_nbest_predict",
"def _alignments(self) -> None:\n if not os.path.exists(self.align_path):\n logger.info(\"Training aligner\")\n train_opts = []\n if self.batch_size:\n train_opts.append(f\"--batch_size={self.batch_size}\")\n if self.delta:\n train_opts.append(f\"--delta={self.delta}\")\n if self.fst_default_cache_gc:\n train_opts.append(f\"--fst_default_cache_gc={self.fst_default_cache_gc}\")\n if self.fst_default_cache_gc_limit:\n train_opts.append(\n f\"--fst_default_cache_gc_limit={self.fst_default_cache_gc_limit}\"\n )\n if self.alpha:\n train_opts.append(f\"--alpha={self.alpha}\")\n if self.num_iterations:\n train_opts.append(f\"--max_iters={self.num_iterations}\")\n # Constructs the actual command vectors (plus an index for logging\n # purposes).\n random.seed(config.SEED)\n starts = [\n (\n RandomStart(\n idx,\n seed,\n self.input_far_path,\n self.output_far_path,\n self.cg_path,\n self.working_directory,\n train_opts,\n )\n )\n for (idx, seed) in enumerate(\n random.sample(range(1, RAND_MAX), self.random_starts), 1\n )\n ]\n stopped = threading.Event()\n num_commands = len(starts)\n job_queue = Queue()\n fst_likelihoods = {}\n # Actually runs starts.\n logger.info(\"Calculating alignments...\")\n begin = time.time()\n with tqdm(total=num_commands * self.num_iterations, disable=config.QUIET) as pbar:\n for start in starts:\n job_queue.put(start)\n error_dict = {}\n return_queue = Queue()\n procs = []\n for i in range(config.NUM_JOBS):\n log_path = self.working_log_directory.joinpath(f\"baumwelch.{i}.log\")\n p = RandomStartWorker(\n i,\n job_queue,\n return_queue,\n log_path,\n stopped,\n )\n procs.append(p)\n p.start()\n\n while True:\n try:\n result = return_queue.get(timeout=1)\n if isinstance(result, Exception):\n\n error_dict[getattr(result, \"job_name\", 0)] = result\n continue\n if stopped.is_set():\n continue\n except queue.Empty:\n for proc in procs:\n if not proc.finished.is_set():\n break\n else:\n break\n continue\n if isinstance(result, int):\n pbar.update(result)\n else:\n fst_likelihoods[result[0]] = result[1]\n for p in procs:\n p.join()\n if error_dict:\n raise PyniniAlignmentError(error_dict)\n (best_fst, best_likelihood) = min(fst_likelihoods.items(), key=operator.itemgetter(1))\n logger.info(f\"Best likelihood: {best_likelihood}\")\n logger.debug(\n f\"Ran {self.random_starts} random starts in {time.time() - begin:.3f} seconds\"\n )\n # Moves best likelihood solution to the requested location.\n shutil.move(best_fst, self.align_path)\n cmd = [thirdparty_binary(\"baumwelchdecode\")]\n if self.fst_default_cache_gc:\n cmd.append(f\"--fst_default_cache_gc={self.fst_default_cache_gc}\")\n if self.fst_default_cache_gc_limit:\n cmd.append(f\"--fst_default_cache_gc_limit={self.fst_default_cache_gc_limit}\")\n cmd.append(self.input_far_path)\n cmd.append(self.output_far_path)\n cmd.append(self.align_path)\n cmd.append(self.afst_path)\n cmd = [str(x) for x in cmd]\n logger.debug(f\"Subprocess call: {cmd}\")\n subprocess.check_call(cmd, env=os.environ)\n logger.info(\"Completed computing alignments!\")",
"def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,\n classes, image_id, image_scale, num_classes,\n max_boxes_to_draw, nms_configs):\n anchor_boxes = anchor_boxes[indices, :]\n scores = sigmoid(cls_outputs)\n # apply bounding box regression to anchors\n boxes = decode_box_outputs_np(\n box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))\n # run class-wise nms\n return per_class_nms(boxes, scores, classes, image_id, image_scale,\n num_classes, max_boxes_to_draw, nms_configs)",
"def write_concatenated_alignment(id_pairing, alignment_1, alignment_2,\n target_sequence_1, target_sequence_2):\n\n def _unfilter(string):\n \"\"\"\n Uppercases all of the letters in string,\n converts all \".\" to \"-\"\n \"\"\"\n string = np.char.upper(string)\n string[string==\".\"] = \"-\"\n return string\n\n def _prepare_header(id1, id2):\n # id1_id2\n header_format = \"{}_{}\"\n concatenated_header = header_format.format(id1, id2)\n\n return concatenated_header\n\n sequences_to_write = [] # list of (header,seq1,seq2) tuples\n\n # load the monomer alignments\n with open(alignment_1) as f1, open(alignment_2) as f2:\n ali_1 = Alignment.from_file(f1)\n ali_2 = Alignment.from_file(f2)\n\n ali_1 = ali_1.apply(func=_unfilter,columns=np.array(range(ali_1.matrix.shape[1])))\n ali_2 = ali_2.apply(func=_unfilter,columns=np.array(range(ali_2.matrix.shape[1])))\n\n target_index_1 = ali_1.id_to_index[target_sequence_1]\n target_index_2 = ali_2.id_to_index[target_sequence_2]\n\n # prepare the target sequence\n target_sequences = (\n ali_1.matrix[target_index_1, :],\n ali_2.matrix[target_index_2, :]\n )\n\n # Target header must end with /1-range for correct focus mode\n length = len(target_sequences[0]) + len(target_sequences[1])\n\n target_header = \"{}_{}/1-{}\".format(\n parse_header(target_sequence_1)[0],\n parse_header(target_sequence_2)[0],\n length\n )\n\n # store target sequence for writing\n sequences_to_write.append(\n (target_header, target_sequences[0], target_sequences[1])\n )\n\n # the target sequence is the first in the output file\n target_seq_idx = 0\n\n # create other headers and sequences\n for id1, id2 in zip(id_pairing.id_1, id_pairing.id_2):\n\n # prepare the concatenated header\n concatenated_header = _prepare_header(id1, id2)\n\n # get indices of the sequences\n index_1 = ali_1.id_to_index[id1]\n index_2 = ali_2.id_to_index[id2]\n\n # save the information\n sequences_to_write.append(\n (\n concatenated_header,\n ali_1.matrix[index_1, :],\n ali_2.matrix[index_2, :]\n )\n )\n\n # concatenate strings\n sequences_full = OrderedDict([\n (header, np.concatenate([seq1, seq2])) for header, seq1, seq2 in sequences_to_write\n ])\n\n sequences_monomer_1 = OrderedDict([\n (header, seq1) for header, seq1, seq2 in sequences_to_write\n ])\n\n sequences_monomer_2 = OrderedDict([\n (header, seq2) for header, seq1, seq2 in sequences_to_write\n ])\n\n full_ali = Alignment.from_dict(sequences_full)\n monomer_ali_1 = Alignment.from_dict(sequences_monomer_1)\n monomer_ali_2 = Alignment.from_dict(sequences_monomer_2)\n\n return target_header, target_seq_idx, full_ali, monomer_ali_1, monomer_ali_2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
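The metadata above declares a single training objective: each record is consumed as (query, document, negatives) triplets, with the query as the anchor, the document as the positive, and each negative as a hard negative. Below is a minimal sketch (not part of the dataset) of how one record could be expanded into such triplets; the field names come from the metadata, while the helper name and the example record are illustrative assumptions.

from typing import Dict, List, Tuple

def record_to_triplets(record: Dict) -> List[Tuple[str, str, str]]:
    """Expand one record into (anchor, positive, negative) triplets,
    following the "triplet": [["query", "document", "negatives"]] layout."""
    anchor = record["query"]        # natural-language description
    positive = record["document"]   # the matching code snippet
    return [(anchor, positive, neg) for neg in record["negatives"]]

# Hypothetical usage with a shortened record:
example = {
    "query": "Create a codec containing ASCII characters plus the default character set from ocrolib.",
    "document": "def ocropus_codec(): ...",
    "negatives": ["def test_default(self): ...", "def get_data_encoding(): ..."],
}
triplets = record_to_triplets(example)  # one (anchor, positive, negative) per negative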
|
Create a codec containing ASCII characters plus the default character set from ocrolib.
|
def ocropus_codec():
    import ocrolib
    # Start from the ASCII label set, then append every character from
    # ocrolib's default character set that is not already covered by it.
    base = [c for c in ascii_labels]
    base_set = set(base)
    extra = [c for c in ocrolib.chars.default if c not in base_set]
    return Codec().init(base + extra)
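The document above builds its label set by taking the ASCII labels first and appending only those characters from ocrolib's default set that are not already present. The same order-preserving "base plus deduplicated extras" idiom can be sketched without the ocrolib/Codec dependencies; build_charset below is a hypothetical standalone illustration, not part of ocrolib.

import string

def build_charset(base_chars, extra_chars):
    # Keep every base character, then append extras that are not already
    # covered, preserving the order of both inputs (mirrors base + extra above).
    seen = set(base_chars)
    extras = [c for c in extra_chars if c not in seen]
    return list(base_chars) + extras

charset = build_charset(string.ascii_letters + string.digits, "äöü0aZ")
# '0', 'a', 'Z' are already in the base set and are skipped; 'ä', 'ö', 'ü' are appended.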
|
[
"def test_default(self):\n self.assertEqual(Codec.default(), Latin1Codec())",
"def get_data_encoding():",
"def getdefaultencoding():\n\tpass",
"def setdefaultencoding(name):\n\tpass",
"def register_codec():\n def inner_register(encoding):\n if encoding != 'cly':\n return None\n return (_encode, _decode, _CodecStreamReader, _CodecStreamWriter)\n return codecs.register(inner_register)",
"def SetDefaultEncoding(*args, **kwargs):\n pass",
"def writeASCII(*args, **kwargs):\n \n pass",
"def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')",
"def codecs(self):\n return Codecs(configuration=self.script.configuration)",
"def __init__(self, encodings, fallback_encoding=None, eol_fix=None):\n\n self.decoders = []\n\n for encoding in encodings:\n self.add_encoding(encoding)\n\n self.set_fallback_encoding(fallback_encoding)\n self.eol_fix = eol_fix",
"def encoding(self, outgoing=False, incoming=False):\n # It possible to negotiate UTF-8 input with ascii output using\n # command ``toggle outbinary`` on the bsd client.\n assert outgoing or incoming\n return (self.env.get('CHARSET', self._default_encoding)\n if (outgoing and not incoming and self.outbinary or\n not outgoing and incoming and self.inbinary or\n outgoing and incoming and self.outbinary and self.inbinary)\n else 'ascii')",
"def __init__(self, parent, id=wx.ID_ANY, msg=u'', title=u'',\n elist=list(), default=u'',\n style=wx.CAPTION, pos=wx.DefaultPosition,\n size=wx.DefaultSize,\n name=EncodingDialogNameStr):\n if not len(elist):\n elist = GetAllEncodings()\n\n default = encodings.normalize_encoding(default)\n if default and default.lower() in elist:\n sel = default.lower()\n else:\n sel = locale.getpreferredencoding(False)\n\n super(EncodingDialog, self).__init__(parent, id, msg, title,\n elist, sel, pos, size, style)",
"def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char in 'àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ':\n self.trans[char] = 'a'\n for char in 'ȁǟ':\n self.trans[char] = 'ä'\n self.trans['ǻ'] = 'å'\n self.trans['ä'] = 'ae'\n self.trans['å'] = 'aa'\n for char in 'ḂḄḆƁƂ':\n self.trans[char] = 'B'\n for char in 'ḃḅḇƀɓƃ':\n self.trans[char] = 'b'\n for char in 'ĆĈĊÇČƇ':\n self.trans[char] = 'C'\n for char in 'ćĉċçčƈȼ':\n self.trans[char] = 'c'\n self.trans['Ḉ'] = 'Ç'\n self.trans['ḉ'] = 'ç'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ĎḊḌḎḐḒĐƉƊƋ':\n self.trans[char] = 'D'\n for char in 'ďḋḍḏḑḓđɖɗƌ':\n self.trans[char] = 'd'\n for char in 'ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ':\n self.trans[char] = 'E'\n for char in 'ỀẾỄỆỂ':\n self.trans[char] = 'Ê'\n for char in 'èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ':\n self.trans[char] = 'e'\n for char in 'ềếễệể':\n self.trans[char] = 'ê'\n for char in 'ḞƑ':\n self.trans[char] = 'F'\n for char in 'ḟƒ':\n self.trans[char] = 'f'\n for char in 'ǴḠĞĠĢǦǤƓ':\n self.trans[char] = 'G'\n for char in 'ǵḡğġģǧǥɠ':\n self.trans[char] = 'g'\n self.trans['Ĝ'] = 'Gx'\n self.trans['ĝ'] = 'gx'\n for char in 'ḢḤḦȞḨḪH̱ĦǶ':\n self.trans[char] = 'H'\n for char in 'ḣḥḧȟḩḫ̱ẖħƕ':\n self.trans[char] = 'h'\n for char in 'IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ':\n self.trans[char] = 'I'\n for char in 'ıìȉíîĩḭïḯīĭȋįǐiịỉɨ':\n self.trans[char] = 'i'\n for char in 'ĴJ':\n self.trans[char] = 'J'\n for char in 'ɟĵ̌ǰ':\n self.trans[char] = 'j'\n for char in 'ḰǨĶḲḴƘ':\n self.trans[char] = 'K'\n for char in 'ḱǩķḳḵƙ':\n self.trans[char] = 'k'\n for char in 'ĹĻĽḶḸḺḼȽŁ':\n self.trans[char] = 'L'\n for char in 'ĺļľḷḹḻḽƚłɫ':\n self.trans[char] = 'l'\n for char in 'ḾṀṂ':\n self.trans[char] = 'M'\n for char in 'ḿṁṃɱ':\n self.trans[char] = 'm'\n for char in 'ǸŃÑŅŇṄṆṈṊŊƝɲȠ':\n self.trans[char] = 'N'\n for char in 'ǹńñņňṅṇṉṋŋɲƞ':\n self.trans[char] = 'n'\n for char in 'ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ':\n self.trans[char] = 'O'\n for char in 'òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ':\n self.trans[char] = 'o'\n for char in 'ȌŐȪ':\n self.trans[char] = 'Ö'\n for char in 'ȍőȫ':\n self.trans[char] = 'ö'\n for char in 'ỒỐỖỘỔȎ':\n self.trans[char] = 'Ô'\n for char in 'ồốỗộổȏ':\n self.trans[char] = 'ô'\n for char in 'ṔṖƤ':\n self.trans[char] = 'P'\n for char in 'ṕṗƥ':\n self.trans[char] = 'p'\n self.trans['ᵽ'] = 'q'\n for char in 'ȐŔŖŘȒṘṚṜṞ':\n self.trans[char] = 'R'\n for char in 'ȑŕŗřȓṙṛṝṟɽ':\n self.trans[char] = 'r'\n for char in 'ŚṤŞȘŠṦṠṢṨ':\n self.trans[char] = 'S'\n for char in 'śṥşșšṧṡṣṩȿ':\n self.trans[char] = 's'\n self.trans['Ŝ'] = 'Sx'\n self.trans['ŝ'] = 'sx'\n for char in 'ŢȚŤṪṬṮṰŦƬƮ':\n self.trans[char] = 'T'\n for char in 'ţțťṫṭṯṱŧȾƭʈ':\n self.trans[char] = 't'\n for char in 'ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ':\n self.trans[char] = 'U'\n for char in 'ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ':\n self.trans[char] = 'u'\n for char in 'ȔŰǛǗǕǙ':\n self.trans[char] = 'Ü'\n for char in 'ȕűǜǘǖǚ':\n self.trans[char] = 'ü'\n self.trans['Û'] = 'Ux'\n self.trans['û'] = 'ux'\n self.trans['Ȗ'] = 'Û'\n self.trans['ȗ'] = 'û'\n self.trans['Ừ'] = 'Ù'\n self.trans['ừ'] = 'ù'\n self.trans['Ứ'] = 'Ú'\n self.trans['ứ'] = 'ú'\n for char in 'ṼṾ':\n self.trans[char] = 'V'\n for char in 'ṽṿ':\n self.trans[char] = 'v'\n for char in 'ẀẂŴẄẆẈ':\n self.trans[char] = 'W'\n for char in 'ẁẃŵẅẇẉ':\n self.trans[char] = 'w'\n for char in 'ẊẌ':\n self.trans[char] = 'X'\n for char in 'ẋẍ':\n self.trans[char] = 'x'\n for char in 
'ỲÝŶŸỸȲẎỴỶƳ':\n self.trans[char] = 'Y'\n for char in 'ỳýŷÿỹȳẏỵỷƴ':\n self.trans[char] = 'y'\n for char in 'ŹẐŻẒŽẔƵȤ':\n self.trans[char] = 'Z'\n for char in 'źẑżẓžẕƶȥ':\n self.trans[char] = 'z'\n self.trans['ɀ'] = 'zv'\n\n # Latin: extended Latin alphabet\n self.trans['ɑ'] = 'a'\n for char in 'ÆǼǢ':\n self.trans[char] = 'AE'\n for char in 'æǽǣ':\n self.trans[char] = 'ae'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ƎƏƐ':\n self.trans[char] = 'E'\n for char in 'ǝəɛ':\n self.trans[char] = 'e'\n for char in 'ƔƢ':\n self.trans[char] = 'G'\n for char in 'ᵷɣƣᵹ':\n self.trans[char] = 'g'\n self.trans['Ƅ'] = 'H'\n self.trans['ƅ'] = 'h'\n self.trans['Ƕ'] = 'Wh'\n self.trans['ƕ'] = 'wh'\n self.trans['Ɩ'] = 'I'\n self.trans['ɩ'] = 'i'\n self.trans['Ŋ'] = 'Ng'\n self.trans['ŋ'] = 'ng'\n self.trans['Œ'] = 'OE'\n self.trans['œ'] = 'oe'\n self.trans['Ɔ'] = 'O'\n self.trans['ɔ'] = 'o'\n self.trans['Ȣ'] = 'Ou'\n self.trans['ȣ'] = 'ou'\n self.trans['Ƽ'] = 'Q'\n for char in 'ĸƽ':\n self.trans[char] = 'q'\n self.trans['ȹ'] = 'qp'\n self.trans[''] = 'r'\n self.trans['ſ'] = 's'\n self.trans['ß'] = 'ss'\n self.trans['Ʃ'] = 'Sh'\n for char in 'ʃᶋ':\n self.trans[char] = 'sh'\n self.trans['Ʉ'] = 'U'\n self.trans['ʉ'] = 'u'\n self.trans['Ʌ'] = 'V'\n self.trans['ʌ'] = 'v'\n for char in 'ƜǷ':\n self.trans[char] = 'W'\n for char in 'ɯƿ':\n self.trans[char] = 'w'\n self.trans['Ȝ'] = 'Y'\n self.trans['ȝ'] = 'y'\n self.trans['IJ'] = 'IJ'\n self.trans['ij'] = 'ij'\n self.trans['Ƨ'] = 'Z'\n for char in 'ʮƨ':\n self.trans[char] = 'z'\n self.trans['Ʒ'] = 'Zh'\n self.trans['ʒ'] = 'zh'\n self.trans['Ǯ'] = 'Dzh'\n self.trans['ǯ'] = 'dzh'\n for char in 'ƸƹʔˀɁɂ':\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in 'Cʗǃ':\n self.trans[char] = '!'\n\n # Punctuation and typography\n for char in '«»“”„¨':\n self.trans[char] = u'\"'\n for char in '‘’′':\n self.trans[char] = u\"'\"\n self.trans['•'] = '*'\n self.trans['@'] = '(at)'\n self.trans['¤'] = '$'\n self.trans['¢'] = 'c'\n self.trans['€'] = 'E'\n self.trans['£'] = 'L'\n self.trans['¥'] = 'yen'\n self.trans['†'] = '+'\n self.trans['‡'] = '++'\n self.trans['°'] = ':'\n self.trans['¡'] = '!'\n self.trans['¿'] = '?'\n self.trans['‰'] = 'o/oo'\n self.trans['‱'] = 'o/ooo'\n for char in '¶§':\n self.trans[char] = '>'\n self.trans['…'] = '...'\n for char in '‒–—―':\n self.trans[char] = '-'\n self.trans['·'] = ' '\n self.trans['¦'] = '|'\n self.trans['⁂'] = '***'\n self.trans['◊'] = '<>'\n self.trans['‽'] = '?!'\n self.trans['؟'] = ';-)'\n self.trans['¹'] = '1'\n self.trans['²'] = '2'\n self.trans['³'] = '3'\n\n # Cyrillic\n self.trans.update({'А': 'A', 'а': 'a', 'Б': 'B', 'б': 'b',\n 'В': 'V', 'в': 'v', 'Г': 'G', 'г': 'g',\n 'Д': 'D', 'д': 'd', 'Е': 'E', 'е': 'e',\n 'Ж': 'Zh', 'ж': 'zh', 'З': 'Z', 'з': 'z',\n 'И': 'I', 'и': 'i', 'Й': 'J', 'й': 'j',\n 'К': 'K', 'к': 'k', 'Л': 'L', 'л': 'l',\n 'М': 'M', 'м': 'm', 'Н': 'N', 'н': 'n',\n 'О': 'O', 'о': 'o', 'П': 'P', 'п': 'p',\n 'Р': 'R', 'р': 'r', 'С': 'S', 'с': 's',\n 'Т': 'T', 'т': 't', 'У': 'U', 'у': 'u',\n 'Ф': 'F', 'ф': 'f', 'х': 'kh', 'Ц': 'C',\n 'ц': 'c', 'Ч': 'Ch', 'ч': 'ch', 'Ш': 'Sh',\n 'ш': 'sh', 'Щ': 'Shch', 'щ': 'shch', 'Ь': \"'\",\n 'ь': \"'\", 'Ъ': '\"', 'ъ': '\"', 'Ю': 'Yu',\n 'ю': 'yu', 'Я': 'Ya', 'я': 'ya', 'Х': 'Kh',\n 'Χ': 'Kh'})\n\n # Additional Cyrillic letters, most occuring in only a few languages\n self.trans.update({\n 'Ы': 'Y', 'ы': 'y', 'Ё': 'Ë', 'ё': 'ë',\n 'Э': 'È', 'Ѐ': 'È', 'э': 'è', 'ѐ': 'è',\n 'І': 'I', 'і': 'i', 'Ї': 'Ji', 'ї': 'ji',\n 'Є': 'Je', 
'є': 'je', 'Ґ': 'G', 'Ҝ': 'G',\n 'ґ': 'g', 'ҝ': 'g', 'Ђ': 'Dj', 'ђ': 'dj',\n 'Љ': 'Lj', 'љ': 'lj',\n 'Њ': 'Nj', 'њ': 'nj', 'Ћ': 'Cj', 'ћ': 'cj',\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n 'Ќ': 'Kj', 'ќ': 'kj', 'Ӣ': 'Ii', 'ӣ': 'ii',\n 'Ҳ': 'H', 'ҳ': 'h',\n 'Ҷ': 'Dz', 'ҷ': 'dz', 'Ө': 'Ô', 'Ӫ': 'Ô',\n 'ө': 'ô', 'ӫ': 'ô', 'Ү': 'Y', 'ү': 'y', 'Һ': 'H',\n 'һ': 'h', 'Ә': 'AE', 'Ӕ': 'AE', 'ә': 'ae',\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n 'ѝ': 'ì', 'Ѝ': 'Ì', 'Ӑ': 'A', 'ă': 'a', 'Ӓ': 'Ä',\n 'Ҽ': 'Ts', 'Ҿ': 'Ts', 'ҽ': 'ts', 'ҿ': 'ts',\n 'Ҙ': 'Dh', 'ҙ': 'dh', 'Ӏ': '', 'ӏ': '', 'Ӆ': 'L',\n 'ӆ': 'l', 'Ӎ': 'M', 'ӎ': 'm', 'Ӧ': 'Ö', 'ӧ': 'ö',\n 'Ҩ': 'u', 'ҩ': 'u', 'Ҧ': 'Ph', 'ҧ': 'ph', 'Ҏ': 'R',\n 'ҏ': 'r', 'Ҫ': 'Th', 'ҫ': 'th', 'Ҭ': 'T', 'ҭ': 't',\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n 'ӹ': 'u', 'Ҵ': 'Tts', 'ҵ': 'tts', 'Ӵ': 'Ch', 'ӵ': 'ch'})\n\n for char in 'ЈӤҊ':\n self.trans[char] = 'J'\n for char in 'јӥҋ':\n self.trans[char] = 'j'\n for char in 'ЏӁӜҶ':\n self.trans[char] = 'Dzh'\n for char in 'џӂӝҷ':\n self.trans[char] = 'dzh'\n for char in 'ЅӞӠӋҸ':\n self.trans[char] = 'Dz'\n for char in 'ѕӟӡӌҹ':\n self.trans[char] = 'dz'\n for char in 'ҒӶҔ':\n self.trans[char] = 'G'\n for char in 'ғӷҕ':\n self.trans[char] = 'g'\n for char in 'ҚҞҠӃ':\n self.trans[char] = 'Q'\n for char in 'қҟҡӄ':\n self.trans[char] = 'q'\n for char in 'ҢҤӉӇ':\n self.trans[char] = 'Ng'\n for char in 'ңҥӊӈ':\n self.trans[char] = 'ng'\n for char in 'ӖѢҌ':\n self.trans[char] = 'E'\n for char in 'ӗѣҍ':\n self.trans[char] = 'e'\n for char in 'ӲӰҮ':\n self.trans[char] = 'Ü'\n for char in 'ӳӱү':\n self.trans[char] = 'ü'\n\n # Archaic Cyrillic letters\n self.trans.update({\n 'Ѹ': 'Ou', 'ѹ': 'ou', 'Ѡ': 'O', 'Ѻ': 'O', 'ѡ': 'o',\n 'ѻ': 'o', 'Ѿ': 'Ot', 'ѿ': 'ot', 'Ѣ': 'E', 'ѣ': 'e',\n 'Ѥ': 'Ei', 'Ѧ': 'Ei', 'ѥ': 'ei', 'ѧ': 'ei', 'Ѫ': 'Ai',\n 'ѫ': 'ai', 'Ѯ': 'X', 'ѯ': 'x', 'Ѱ': 'Ps', 'ѱ': 'ps',\n 'Ѳ': 'Th', 'ѳ': 'th', 'Ѵ': 'Ü', 'Ѷ': 'Ü', 'ѵ': 'ü'})\n\n # Hebrew alphabet\n for char in 'אע':\n self.trans[char] = u\"'\"\n self.trans['ב'] = 'b'\n self.trans['ג'] = 'g'\n self.trans['ד'] = 'd'\n self.trans['ה'] = 'h'\n self.trans['ו'] = 'v'\n self.trans['ז'] = 'z'\n self.trans['ח'] = 'kh'\n self.trans['ט'] = 't'\n self.trans['י'] = 'y'\n for char in 'ךכ':\n self.trans[char] = 'k'\n self.trans['ל'] = 'l'\n for char in 'םמ':\n self.trans[char] = 'm'\n for char in 'ןנ':\n self.trans[char] = 'n'\n self.trans['ס'] = 's'\n for char in 'ףפ':\n self.trans[char] = 'ph'\n for char in 'ץצ':\n self.trans[char] = 'ts'\n self.trans['ק'] = 'q'\n self.trans['ר'] = 'r'\n self.trans['ש'] = 'sh'\n self.trans['ת'] = 'th'\n\n # Arab alphabet\n for char in 'اﺍﺎ':\n self.trans[char] = 'a'\n for char in 'بﺏﺐﺒﺑ':\n self.trans[char] = 'b'\n for char in 'تﺕﺖﺘﺗ':\n self.trans[char] = 't'\n for char in 'ثﺙﺚﺜﺛ':\n self.trans[char] = 'th'\n for char in 'جﺝﺞﺠﺟ':\n self.trans[char] = 'g'\n for char in 'حﺡﺢﺤﺣ':\n self.trans[char] = 'h'\n for char in 'خﺥﺦﺨﺧ':\n self.trans[char] = 'kh'\n for char in 'دﺩﺪ':\n self.trans[char] = 'd'\n for char in 'ذﺫﺬ':\n self.trans[char] = 'dh'\n for char in 'رﺭﺮ':\n self.trans[char] = 'r'\n for char in 'زﺯﺰ':\n self.trans[char] = 'z'\n for char in 'سﺱﺲﺴﺳ':\n self.trans[char] = 's'\n for char in 'شﺵﺶﺸﺷ':\n self.trans[char] = 'sh'\n for char in 'صﺹﺺﺼﺻ':\n self.trans[char] = 's'\n for char in 'ضﺽﺾﻀﺿ':\n self.trans[char] = 'd'\n for char in 'طﻁﻂﻄﻃ':\n self.trans[char] = 't'\n for char in 'ظﻅﻆﻈﻇ':\n self.trans[char] = 'z'\n for char in 'عﻉﻊﻌﻋ':\n self.trans[char] = u\"'\"\n for char in 'غﻍﻎﻐﻏ':\n self.trans[char] 
= 'gh'\n for char in 'فﻑﻒﻔﻓ':\n self.trans[char] = 'f'\n for char in 'قﻕﻖﻘﻗ':\n self.trans[char] = 'q'\n for char in 'كﻙﻚﻜﻛک':\n self.trans[char] = 'k'\n for char in 'لﻝﻞﻠﻟ':\n self.trans[char] = 'l'\n for char in 'مﻡﻢﻤﻣ':\n self.trans[char] = 'm'\n for char in 'نﻥﻦﻨﻧ':\n self.trans[char] = 'n'\n for char in 'هﻩﻪﻬﻫ':\n self.trans[char] = 'h'\n for char in 'وﻭﻮ':\n self.trans[char] = 'w'\n for char in 'یيﻱﻲﻴﻳ':\n self.trans[char] = 'y'\n # Arabic - additional letters, modified letters and ligatures\n self.trans['ﺀ'] = \"'\"\n for char in 'آﺁﺂ':\n self.trans[char] = u\"'a\"\n for char in 'ةﺓﺔ':\n self.trans[char] = 'th'\n for char in 'ىﻯﻰ':\n self.trans[char] = 'á'\n for char in 'یﯼﯽﯿﯾ':\n self.trans[char] = 'y'\n self.trans['؟'] = '?'\n # Arabic - ligatures\n for char in 'ﻻﻼ':\n self.trans[char] = 'la'\n self.trans['ﷲ'] = 'llah'\n for char in 'إأ':\n self.trans[char] = u\"a'\"\n self.trans['ؤ'] = \"w'\"\n self.trans['ئ'] = \"y'\"\n for char in '◌◌':\n self.trans[char] = \"\" # indicates absence of vowels\n # Arabic vowels\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'i'\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'iy'\n # Arab numerals\n for char in '٠۰':\n self.trans[char] = '0'\n for char in '١۱':\n self.trans[char] = '1'\n for char in '٢۲':\n self.trans[char] = '2'\n for char in '٣۳':\n self.trans[char] = '3'\n for char in '٤۴':\n self.trans[char] = '4'\n for char in '٥۵':\n self.trans[char] = '5'\n for char in '٦۶':\n self.trans[char] = '6'\n for char in '٧۷':\n self.trans[char] = '7'\n for char in '٨۸':\n self.trans[char] = '8'\n for char in '٩۹':\n self.trans[char] = '9'\n # Perso-Arabic\n for char in 'پﭙﭙپ':\n self.trans[char] = 'p'\n for char in 'چچچچ':\n self.trans[char] = 'ch'\n for char in 'ژژ':\n self.trans[char] = 'zh'\n for char in 'گﮔﮕﮓ':\n self.trans[char] = 'g'\n\n # Greek\n self.trans.update({\n 'Α': 'A', 'α': 'a', 'Β': 'B', 'β': 'b', 'Γ': 'G',\n 'γ': 'g', 'Δ': 'D', 'δ': 'd', 'Ε': 'E', 'ε': 'e',\n 'Ζ': 'Z', 'ζ': 'z', 'Η': 'I', 'η': 'i', 'θ': 'th',\n 'Θ': 'Th', 'Ι': 'I', 'ι': 'i', 'Κ': 'K', 'κ': 'k',\n 'Λ': 'L', 'λ': 'l', 'Μ': 'M', 'μ': 'm', 'Ν': 'N',\n 'ν': 'n', 'Ξ': 'X', 'ξ': 'x', 'Ο': 'O', 'ο': 'o',\n 'Π': 'P', 'π': 'p', 'Ρ': 'R', 'ρ': 'r', 'Σ': 'S',\n 'σ': 's', 'ς': 's', 'Τ': 'T', 'τ': 't', 'Υ': 'Y',\n 'υ': 'y', 'Φ': 'F', 'φ': 'f', 'Ψ': 'Ps', 'ψ': 'ps',\n 'Ω': 'O', 'ω': 'o', 'ϗ': '&', 'Ϛ': 'St', 'ϛ': 'st',\n 'Ϙ': 'Q', 'Ϟ': 'Q', 'ϙ': 'q', 'ϟ': 'q', 'Ϻ': 'S',\n 'ϻ': 's', 'Ϡ': 'Ss', 'ϡ': 'ss', 'Ϸ': 'Sh', 'ϸ': 'sh',\n '·': ':', 'Ά': 'Á', 'ά': 'á', 'Έ': 'É', 'Ή': 'É',\n 'έ': 'é', 'ή': 'é', 'Ί': 'Í', 'ί': 'í', 'Ϊ': 'Ï',\n 'ϊ': 'ï', 'ΐ': 'ï', 'Ό': 'Ó', 'ό': 'ó', 'Ύ': 'Ý',\n 'ύ': 'ý', 'Ϋ': 'Y', 'ϋ': 'ÿ', 'ΰ': 'ÿ', 'Ώ': 'Ó',\n 'ώ': 'ó'})\n\n # Japanese (katakana and hiragana)\n for char in 'アァあ':\n self.trans[char] = 'a'\n for char in 'イィい':\n self.trans[char] = 'i'\n for char in 'ウう':\n self.trans[char] = 'u'\n for char in 'エェえ':\n self.trans[char] = 'e'\n for char in 'オォお':\n self.trans[char] = 'o'\n for char in 'ャや':\n self.trans[char] = 'ya'\n for char in 'ュゆ':\n self.trans[char] = 'yu'\n for char in 'ョよ':\n self.trans[char] = 'yo'\n for char in 'カか':\n self.trans[char] = 'ka'\n for char in 'キき':\n self.trans[char] = 'ki'\n for char in 'クく':\n self.trans[char] = 'ku'\n for char in 'ケけ':\n self.trans[char] = 'ke'\n for char in 'コこ':\n self.trans[char] = 'ko'\n for char in 'サさ':\n self.trans[char] = 'sa'\n for char in 'シし':\n self.trans[char] = 'shi'\n for char in 'スす':\n 
self.trans[char] = 'su'\n for char in 'セせ':\n self.trans[char] = 'se'\n for char in 'ソそ':\n self.trans[char] = 'so'\n for char in 'タた':\n self.trans[char] = 'ta'\n for char in 'チち':\n self.trans[char] = 'chi'\n for char in 'ツつ':\n self.trans[char] = 'tsu'\n for char in 'テて':\n self.trans[char] = 'te'\n for char in 'トと':\n self.trans[char] = 'to'\n for char in 'ナな':\n self.trans[char] = 'na'\n for char in 'ニに':\n self.trans[char] = 'ni'\n for char in 'ヌぬ':\n self.trans[char] = 'nu'\n for char in 'ネね':\n self.trans[char] = 'ne'\n for char in 'ノの':\n self.trans[char] = 'no'\n for char in 'ハは':\n self.trans[char] = 'ha'\n for char in 'ヒひ':\n self.trans[char] = 'hi'\n for char in 'フふ':\n self.trans[char] = 'fu'\n for char in 'ヘへ':\n self.trans[char] = 'he'\n for char in 'ホほ':\n self.trans[char] = 'ho'\n for char in 'マま':\n self.trans[char] = 'ma'\n for char in 'ミみ':\n self.trans[char] = 'mi'\n for char in 'ムむ':\n self.trans[char] = 'mu'\n for char in 'メめ':\n self.trans[char] = 'me'\n for char in 'モも':\n self.trans[char] = 'mo'\n for char in 'ラら':\n self.trans[char] = 'ra'\n for char in 'リり':\n self.trans[char] = 'ri'\n for char in 'ルる':\n self.trans[char] = 'ru'\n for char in 'レれ':\n self.trans[char] = 're'\n for char in 'ロろ':\n self.trans[char] = 'ro'\n for char in 'ワわ':\n self.trans[char] = 'wa'\n for char in 'ヰゐ':\n self.trans[char] = 'wi'\n for char in 'ヱゑ':\n self.trans[char] = 'we'\n for char in 'ヲを':\n self.trans[char] = 'wo'\n for char in 'ンん':\n self.trans[char] = 'n'\n for char in 'ガが':\n self.trans[char] = 'ga'\n for char in 'ギぎ':\n self.trans[char] = 'gi'\n for char in 'グぐ':\n self.trans[char] = 'gu'\n for char in 'ゲげ':\n self.trans[char] = 'ge'\n for char in 'ゴご':\n self.trans[char] = 'go'\n for char in 'ザざ':\n self.trans[char] = 'za'\n for char in 'ジじ':\n self.trans[char] = 'ji'\n for char in 'ズず':\n self.trans[char] = 'zu'\n for char in 'ゼぜ':\n self.trans[char] = 'ze'\n for char in 'ゾぞ':\n self.trans[char] = 'zo'\n for char in 'ダだ':\n self.trans[char] = 'da'\n for char in 'ヂぢ':\n self.trans[char] = 'dji'\n for char in 'ヅづ':\n self.trans[char] = 'dzu'\n for char in 'デで':\n self.trans[char] = 'de'\n for char in 'ドど':\n self.trans[char] = 'do'\n for char in 'バば':\n self.trans[char] = 'ba'\n for char in 'ビび':\n self.trans[char] = 'bi'\n for char in 'ブぶ':\n self.trans[char] = 'bu'\n for char in 'ベべ':\n self.trans[char] = 'be'\n for char in 'ボぼ':\n self.trans[char] = 'bo'\n for char in 'パぱ':\n self.trans[char] = 'pa'\n for char in 'ピぴ':\n self.trans[char] = 'pi'\n for char in 'プぷ':\n self.trans[char] = 'pu'\n for char in 'ペぺ':\n self.trans[char] = 'pe'\n for char in 'ポぽ':\n self.trans[char] = 'po'\n for char in 'ヴゔ':\n self.trans[char] = 'vu'\n self.trans['ヷ'] = 'va'\n self.trans['ヸ'] = 'vi'\n self.trans['ヹ'] = 've'\n self.trans['ヺ'] = 'vo'\n\n # Japanese and Chinese punctuation and typography\n for char in '・·':\n self.trans[char] = ' '\n for char in '〃『』《》':\n self.trans[char] = u'\"'\n for char in '「」〈〉〘〙〚〛':\n self.trans[char] = u\"'\"\n for char in '(〔':\n self.trans[char] = '('\n for char in ')〕':\n self.trans[char] = ')'\n for char in '[【〖':\n self.trans[char] = '['\n for char in ']】〗':\n self.trans[char] = ']'\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in '•◦':\n self.trans[char] = '_'\n for char in '※*':\n self.trans[char] = '*'\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in ',、':\n self.trans[char] = ','\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in 'ეჱ':\n self.trans[char] = 'e'\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in 'ყ':\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in 'წ':\n self.trans[char] = u\"ts'\"\n for char in 'ჭ':\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in 'पप':\n self.trans[char] = 'p'\n self.trans['अ'] = 'a'\n for char in 'आा':\n self.trans[char] = 'aa'\n self.trans['प'] = 'pa'\n for char in 'इि':\n self.trans[char] = 'i'\n for char in 'ईी':\n self.trans[char] = 'ii'\n for char in 'उु':\n self.trans[char] = 'u'\n for char in 'ऊू':\n self.trans[char] = 'uu'\n for char in 'एे':\n self.trans[char] = 'e'\n for char in 'ऐै':\n self.trans[char] = 'ai'\n for char in 'ओो':\n self.trans[char] = 'o'\n for char in 'औौ':\n self.trans[char] = 'au'\n for char in 'ऋृर':\n self.trans[char] = 'r'\n for char in 'ॠॄ':\n self.trans[char] = 'rr'\n for char in 'ऌॢल':\n self.trans[char] = 'l'\n for char in 'ॡॣ':\n self.trans[char] = 'll'\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in 'टत':\n self.trans[char] = 't'\n for char in 'ठथ':\n self.trans[char] = 'th'\n for char in 'डद':\n self.trans[char] = 'd'\n for char in 'ढध':\n self.trans[char] = 'dh'\n for char in 'णन':\n self.trans[char] = 'n'\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in 'षस':\n self.trans[char] = 's'\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in 'क़':\n self.trans[char] = 'q'\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in 'डढ':\n self.trans[char] = 'r'\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in 'ख्':\n self.trans[char] = 'khn'\n self.trans['त'] = 'tn'\n for char in 'द्':\n self.trans[char] = 'dn'\n self.trans['श'] = 'cn'\n for char in 'ह्':\n self.trans[char] = 'fn'\n for char in 'अँ':\n self.trans[char] = 'm'\n for char in '॒॑':\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n 
self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in 'Տ':\n self.trans[char] = u\"T'\"\n for char in 'տ':\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 'க்':\n self.trans[char] = 'k'\n for char in 'ஙண்ந்ன்':\n self.trans[char] = 'n'\n self.trans['ச'] = 'c'\n for char in 'ஞ்':\n self.trans[char] = 'ñ'\n for char in 'ட்':\n self.trans[char] = 'th'\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in 'ம்':\n self.trans[char] = 'm'\n for char in 'ய்':\n self.trans[char] = 'y'\n for char in 'ர்ழ்ற':\n self.trans[char] = 'r'\n for char in 'ல்ள':\n self.trans[char] = 'l'\n for char in 'வ்':\n self.trans[char] = 'v'\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in 'க்ஷ':\n self.trans[char] = 'x'\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in 'আা':\n self.trans[char] = 'a'\n for char in 'ইিঈী':\n self.trans[char] = 'i'\n for char in 'উুঊূ':\n self.trans[char] = 'u'\n for char in 'ঋৃ':\n self.trans[char] = 'ri'\n for char in 'এেয়':\n self.trans[char] = 'e'\n for char in 'ঐৈ':\n self.trans[char] = 'oi'\n for char in 'ওো':\n self.trans[char] = 'o'\n for 
char in 'ঔৌ':\n self.trans[char] = 'ou'\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in 'টত':\n self.trans[char] = 't'\n for char in 'ঠথ':\n self.trans[char] = 'th'\n for char in 'ডদ':\n self.trans[char] = 'd'\n for char in 'ঢধ':\n self.trans[char] = 'dh'\n for char in 'ণন':\n self.trans[char] = 'n'\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in 'য়':\n self.trans[char] = '-'\n for char in 'ড়':\n self.trans[char] = 'r'\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in 'ขฃคฅฆ':\n self.trans[char] = 'kh'\n self.trans['ง'] = 'ng'\n for char in 'จฉชฌ':\n self.trans[char] = 'ch'\n for char in 'ซศษส':\n self.trans[char] = 's'\n for char in 'ญย':\n self.trans[char] = 'y'\n for char in 'ฎด':\n self.trans[char] = 'd'\n for char in 'ฏต':\n self.trans[char] = 't'\n for char in 'ฐฑฒถทธ':\n self.trans[char] = 'th'\n for char in 'ณน':\n self.trans[char] = 'n'\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in 'ผพภ':\n self.trans[char] = 'ph'\n for char in 'ฝฟ':\n self.trans[char] = 'f'\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in 'ลฬ':\n self.trans[char] = 'l'\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in 'หฮ':\n self.trans[char] = 'h'\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in 'อวโิ':\n self.trans[char] = 'o'\n for char in 'ะัา':\n self.trans[char] = 'a'\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in 'เ็':\n self.trans[char] = 'e'\n self.trans['แ'] = 'ae'\n for char in 'ใไ':\n self.trans[char] = 'ai'\n for char in '่้๊๋็์':\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 
'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans['ಅ'] = 'a'\n for char in 'ಆಾ':\n self.trans[char] = 'aa'\n for char in 'ಇಿ':\n self.trans[char] = 'i'\n for char in 'ಈೀ':\n self.trans[char] = 'ii'\n for char in 'ಉು':\n self.trans[char] = 'u'\n for char in 'ಊೂ':\n self.trans[char] = 'uu'\n for char in 'ಋೂ':\n self.trans[char] = u\"r'\"\n for char in 'ಎೆ':\n self.trans[char] = 'e'\n for char in 'ಏೇ':\n self.trans[char] = 'ee'\n for char in 'ಐೈ':\n self.trans[char] = 'ai'\n for char in 'ಒೊ':\n self.trans[char] = 'o'\n for char in 'ಓೋ':\n self.trans[char] = 'oo'\n for char in 'ಔೌ':\n self.trans[char] = 'au'\n self.trans['ಂ'] = \"m'\"\n self.trans['ಃ'] = \"h'\"\n self.trans['ಕ'] = 'k'\n self.trans['ಖ'] = 'kh'\n self.trans['ಗ'] = 'g'\n self.trans['ಘ'] = 'gh'\n self.trans['ಙ'] = 'ng'\n self.trans['ಚ'] = 'c'\n self.trans['ಛ'] = 'ch'\n self.trans['ಜ'] = 'j'\n self.trans['ಝ'] = 'ny'\n self.trans['ಟ'] = 'tt'\n self.trans['ಠ'] = 'tth'\n self.trans['ಡ'] = 'dd'\n self.trans['ಢ'] = 'ddh'\n self.trans['ಣ'] = 'nn'\n self.trans['ತ'] = 't'\n self.trans['ಥ'] = 'th'\n self.trans['ದ'] = 'd'\n self.trans['ಧ'] = 'dh'\n self.trans['ನ'] = 'n'\n self.trans['ಪ'] = 'p'\n self.trans['ಫ'] = 'ph'\n self.trans['ಬ'] = 'b'\n self.trans['ಭ'] = 'bh'\n self.trans['ಮ'] = 'm'\n self.trans['ಯ'] = 'y'\n self.trans['ರ'] = 'r'\n self.trans['ಲ'] = 'l'\n self.trans['ವ'] = 'v'\n self.trans['ಶ'] = 'sh'\n self.trans['ಷ'] = 'ss'\n self.trans['ಸ'] = 's'\n self.trans['ಹ'] = 'h'\n self.trans['ಳ'] = 'll'\n self.trans['೦'] = '0'\n self.trans['೧'] = '1'\n self.trans['೨'] = '2'\n self.trans['೩'] = '3'\n self.trans['೪'] = '4'\n self.trans['೫'] = '5'\n self.trans['೬'] = '6'\n self.trans['೭'] = '7'\n self.trans['೮'] = '8'\n self.trans['೯'] = '9'\n # Telugu\n self.trans['అ'] = 'a'\n for char in 'ఆా':\n self.trans[char] = 'aa'\n for char in 'ఇి':\n self.trans[char] = 'i'\n for char in 'ఈీ':\n self.trans[char] = 'ii'\n for char in 'ఉు':\n self.trans[char] = 'u'\n for char in 'ఊూ':\n self.trans[char] = 'uu'\n for char in 'ఋృ':\n self.trans[char] = \"r'\"\n for char in 'ౠౄ':\n self.trans[char] = 'r\"'\n self.trans['ఌ'] = \"l'\"\n self.trans['ౡ'] = 'l\"'\n for char in 'ఎె':\n self.trans[char] = 'e'\n for char in 'ఏే':\n self.trans[char] = 'ee'\n for char in 'ఐై':\n self.trans[char] = 'ai'\n for char in 'ఒొ':\n self.trans[char] = 'o'\n for char in 'ఓో':\n self.trans[char] = 'oo'\n for char in 'ఔౌ':\n self.trans[char] = 'au'\n self.trans['ం'] = \"'\"\n self.trans['ః'] = '\"'\n self.trans['క'] = 'k'\n self.trans['ఖ'] = 'kh'\n self.trans['గ'] = 'g'\n self.trans['ఘ'] = 'gh'\n self.trans['ఙ'] = 'ng'\n self.trans['చ'] = 'ts'\n self.trans['ఛ'] = 'tsh'\n self.trans['జ'] = 'j'\n self.trans['ఝ'] = 'jh'\n self.trans['ఞ'] = 'ñ'\n for char in 'టత':\n self.trans[char] = 't'\n for char in 'ఠథ':\n self.trans[char] = 'th'\n for char in 'డద':\n self.trans[char] = 'd'\n for char in 'ఢధ':\n self.trans[char] = 'dh'\n for char in 'ణన':\n self.trans[char] = 'n'\n self.trans['ప'] = 'p'\n self.trans['ఫ'] = 'ph'\n self.trans['బ'] = 'b'\n self.trans['భ'] = 'bh'\n self.trans['మ'] = 'm'\n self.trans['య'] = 'y'\n for char in 'రఱ':\n self.trans[char] = 'r'\n for char in 'లళ':\n self.trans[char] = 'l'\n self.trans['వ'] = 'v'\n self.trans['శ'] = 'sh'\n for char in 'షస':\n 
self.trans[char] = 's'\n self.trans['హ'] = 'h'\n self.trans['్'] = \"\"\n for char in 'ంఁ':\n self.trans[char] = '^'\n self.trans['ః'] = '-'\n self.trans['౦'] = '0'\n self.trans['౧'] = '1'\n self.trans['౨'] = '2'\n self.trans['౩'] = '3'\n self.trans['౪'] = '4'\n self.trans['౫'] = '5'\n self.trans['౬'] = '6'\n self.trans['౭'] = '7'\n self.trans['౮'] = '8'\n self.trans['౯'] = '9'\n self.trans['౹'] = '1/4'\n self.trans['౺'] = '1/2'\n self.trans['౻'] = '3/4'\n self.trans['౼'] = '1/16'\n self.trans['౽'] = '1/8'\n self.trans['౾'] = '3/16'\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans['ກ'] = 'k'\n for char in 'ຂຄ':\n self.trans[char] = 'kh'\n self.trans['ງ'] = 'ng'\n self.trans['ຈ'] = 'ch'\n for char in 'ສຊ':\n self.trans[char] = 's'\n self.trans['ຍ'] = 'ny'\n self.trans['ດ'] = 'd'\n self.trans['ຕ'] = 't'\n for char in 'ຖທ':\n self.trans[char] = 'th'\n self.trans['ນ'] = 'n'\n self.trans['ບ'] = 'b'\n self.trans['ປ'] = 'p'\n for char in 'ຜພ':\n self.trans[char] = 'ph'\n for char in 'ຝຟ':\n self.trans[char] = 'f'\n for char in 'ມໝ':\n self.trans[char] = 'm'\n self.trans['ຢ'] = 'y'\n for char in 'ຣຼ':\n self.trans[char] = 'r'\n for char in 'ລຼ':\n self.trans[char] = 'l'\n self.trans['ວ'] = 'v'\n self.trans['ຮ'] = 'h'\n self.trans['ອ'] = \"'\"\n for char in 'ະັ':\n self.trans[char] = 'a'\n self.trans['ິ'] = 'i'\n self.trans['ຶ'] = 'ue'\n self.trans['ຸ'] = 'u'\n self.trans['ເ'] = 'é'\n self.trans['ແ'] = 'è'\n for char in 'ໂົາໍ':\n self.trans[char] = 'o'\n self.trans['ຽ'] = 'ia'\n self.trans['ເຶ'] = 'uea'\n self.trans['ຍ'] = 'i'\n for char in 'ໄໃ':\n self.trans[char] = 'ai'\n self.trans['ຳ'] = 'am'\n self.trans['າ'] = 'aa'\n self.trans['ີ'] = 'ii'\n self.trans['ື'] = 'yy'\n self.trans['ູ'] = 'uu'\n self.trans['ເ'] = 'e'\n self.trans['ແ'] = 'ei'\n self.trans['໐'] = '0'\n self.trans['໑'] = '1'\n self.trans['໒'] = '2'\n self.trans['໓'] = '3'\n self.trans['໔'] = '4'\n self.trans['໕'] = '5'\n self.trans['໖'] = '6'\n self.trans['໗'] = '7'\n self.trans['໘'] = '8'\n self.trans['໙'] = '9'\n # Chinese -- note: incomplete\n for char in '埃挨哎唉哀皑癌蔼矮艾碍爱隘':\n self.trans[char] = 'ai'\n for char in '鞍氨安俺按暗岸胺案':\n self.trans[char] = 'an'\n for char in '肮昂盎':\n self.trans[char] = 'ang'\n for char in '凹敖熬翱袄傲奥懊澳':\n self.trans[char] = 'ao'\n for char in '芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸':\n self.trans[char] = 'ba'\n for char in '白柏百摆佰败拜稗':\n self.trans[char] = 'bai'\n for char in '斑班搬扳般颁板版扮拌伴瓣半办绊':\n self.trans[char] = 'ban'\n for char in '邦帮梆榜膀绑棒磅蚌镑傍谤':\n self.trans[char] = 'bang'\n for char in '苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆':\n self.trans[char] = 'bao'\n for char in '杯碑悲卑北辈背贝钡倍狈备惫焙被':\n self.trans[char] = 'bei'\n for char in '奔苯本笨':\n self.trans[char] = 'ben'\n for char in '崩绷甭泵蹦迸':\n self.trans[char] = 'beng'\n for char in '逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛':\n self.trans[char] = 'bi'\n for char in '鞭边编贬扁便变卞辨辩辫遍':\n self.trans[char] = 'bian'\n for char in '标彪膘表':\n self.trans[char] = 'biao'\n for char in '鳖憋别瘪':\n self.trans[char] = 'bie'\n for char in '彬斌濒滨宾摈':\n self.trans[char] = 'bin'\n for char in '兵冰柄丙秉饼炳病并':\n self.trans[char] = 'bing'\n for char in '玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳':\n self.trans[char] = 'bo'\n for char in '哺补埠不布步簿部怖':\n self.trans[char] = 'bu'\n for char in '猜裁材才财睬踩采彩菜蔡':\n self.trans[char] = 'cai'\n for char in '餐参蚕残惭惨灿':\n self.trans[char] = 'can'\n for char in '苍舱仓沧藏':\n self.trans[char] = 'cang'\n for char in '操糙槽曹草':\n self.trans[char] = 'cao'\n for char in '厕策侧册测':\n self.trans[char] = 'ce'\n for char in '层蹭':\n self.trans[char] = 'ceng'\n for char in 
'插叉茬茶查碴搽察岔差诧':\n self.trans[char] = 'cha'\n for char in '拆柴豺':\n self.trans[char] = 'chai'\n for char in '搀掺蝉馋谗缠铲产阐颤':\n self.trans[char] = 'chan'\n for char in '昌猖场尝常长偿肠厂敞畅唱倡':\n self.trans[char] = 'chang'\n for char in '超抄钞朝嘲潮巢吵炒':\n self.trans[char] = 'chao'\n for char in '车扯撤掣彻澈':\n self.trans[char] = 'che'\n for char in '郴臣辰尘晨忱沉陈趁衬':\n self.trans[char] = 'chen'\n for char in '撑称城橙成呈乘程惩澄诚承逞骋秤':\n self.trans[char] = 'cheng'\n for char in '吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽':\n self.trans[char] = 'chi'\n for char in '充冲虫崇宠':\n self.trans[char] = 'chong'\n for char in '抽酬畴踌稠愁筹仇绸瞅丑臭':\n self.trans[char] = 'chou'\n for char in '初出橱厨躇锄雏滁除楚储矗搐触处':\n self.trans[char] = 'chu'\n self.trans['揣'] = 'chuai'\n for char in '川穿椽传船喘串':\n self.trans[char] = 'chuan'\n for char in '疮窗幢床闯创':\n self.trans[char] = 'chuang'\n for char in '吹炊捶锤垂':\n self.trans[char] = 'chui'\n for char in '春椿醇唇淳纯蠢':\n self.trans[char] = 'chun'\n for char in '戳绰':\n self.trans[char] = 'chuo'\n for char in '疵茨磁雌辞慈瓷词此刺赐次':\n self.trans[char] = 'ci'\n for char in '聪葱囱匆从丛':\n self.trans[char] = 'cong'\n self.trans['凑'] = 'cou'\n for char in '粗醋簇促':\n self.trans[char] = 'cu'\n for char in '蹿篡窜':\n self.trans[char] = 'cuan'\n for char in '摧崔催脆瘁粹淬翠':\n self.trans[char] = 'cui'\n for char in '村存寸':\n self.trans[char] = 'cun'\n for char in '磋撮搓措挫错':\n self.trans[char] = 'cuo'\n for char in '搭达答瘩打大':\n self.trans[char] = 'da'\n for char in '呆歹傣戴带殆代贷袋待逮怠':\n self.trans[char] = 'dai'\n for char in '耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋':\n self.trans[char] = 'dan'\n for char in '当挡党荡档':\n self.trans[char] = 'dang'\n for char in '刀捣蹈倒岛祷导到稻悼道盗':\n self.trans[char] = 'dao'\n for char in '德得的':\n self.trans[char] = 'de'\n for char in '蹬灯登等瞪凳邓':\n self.trans[char] = 'deng'\n for char in '堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔':\n self.trans[char] = 'di'\n for char in '颠掂滇碘点典靛垫电佃甸店惦奠淀殿':\n self.trans[char] = 'dian'\n for char in '碉叼雕凋刁掉吊钓调':\n self.trans[char] = 'diao'\n for char in '跌爹碟蝶迭谍叠':\n self.trans[char] = 'die'\n for char in '丁盯叮钉顶鼎锭定订':\n self.trans[char] = 'ding'\n self.trans['丢'] = 'diu'\n for char in '东冬董懂动栋侗恫冻洞':\n self.trans[char] = 'dong'\n for char in '兜抖斗陡豆逗痘':\n self.trans[char] = 'dou'\n for char in '都督毒犊独读堵睹赌杜镀肚度渡妒':\n self.trans[char] = 'du'\n for char in '端短锻段断缎':\n self.trans[char] = 'duan'\n for char in '堆兑队对':\n self.trans[char] = 'dui'\n for char in '墩吨蹲敦顿囤钝盾遁':\n self.trans[char] = 'dun'\n for char in '掇哆多夺垛躲朵跺舵剁惰堕':\n self.trans[char] = 'duo'\n for char in '蛾峨鹅俄额讹娥恶厄扼遏鄂饿':\n self.trans[char] = 'e'\n for char in '恩嗯':\n self.trans[char] = 'en'\n for char in '而儿耳尔饵洱二贰':\n self.trans[char] = 'er'\n for char in '发罚筏伐乏阀法珐':\n self.trans[char] = 'fa'\n for char in '藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛':\n self.trans[char] = 'fan'\n for char in '坊芳方肪房防妨仿访纺放':\n self.trans[char] = 'fang'\n for char in '菲非啡飞肥匪诽吠肺废沸费':\n self.trans[char] = 'fei'\n for char in '芬酚吩氛分纷坟焚汾粉奋份忿愤粪':\n self.trans[char] = 'fen'\n for char in '丰封枫蜂峰锋风疯烽逢冯缝讽奉凤':\n self.trans[char] = 'feng'\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in ('夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋'\n '复傅付阜父腹负富讣附妇缚咐'):\n self.trans[char] = 'fu'\n for char in '噶嘎':\n self.trans[char] = 'ga'\n for char in '该改概钙盖溉':\n self.trans[char] = 'gai'\n for char in '干甘杆柑竿肝赶感秆敢赣':\n self.trans[char] = 'gan'\n for char in '冈刚钢缸肛纲岗港杠':\n self.trans[char] = 'gang'\n for char in '篙皋高膏羔糕搞镐稿告':\n self.trans[char] = 'gao'\n for char in '哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各':\n self.trans[char] = 'ge'\n self.trans['给'] = 'gei'\n for char in '根跟':\n self.trans[char] = 'gen'\n for char in '耕更庚羹埂耿梗':\n self.trans[char] = 'geng'\n for char in '工攻功恭龚供躬公宫弓巩汞拱贡共':\n self.trans[char] = 
'gong'\n for char in '钩勾沟苟狗垢构购够':\n self.trans[char] = 'gou'\n for char in '辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇':\n self.trans[char] = 'gu'\n for char in '刮瓜剐寡挂褂':\n self.trans[char] = 'gua'\n for char in '乖拐怪':\n self.trans[char] = 'guai'\n for char in '棺关官冠观管馆罐惯灌贯':\n self.trans[char] = 'guan'\n for char in '光广逛':\n self.trans[char] = 'guang'\n for char in '瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽':\n self.trans[char] = 'gui'\n for char in '辊滚棍':\n self.trans[char] = 'gun'\n for char in '锅郭国果裹过':\n self.trans[char] = 'guo'\n self.trans['哈'] = 'ha'\n for char in '骸孩海氦亥害骇':\n self.trans[char] = 'hai'\n for char in '酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉':\n self.trans[char] = 'han'\n for char in '夯杭航':\n self.trans[char] = 'hang'\n for char in '壕嚎豪毫郝好耗号浩':\n self.trans[char] = 'hao'\n for char in '呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺':\n self.trans[char] = 'he'\n for char in '嘿黑':\n self.trans[char] = 'hei'\n for char in '痕很狠恨':\n self.trans[char] = 'hen'\n for char in '哼亨横衡恒':\n self.trans[char] = 'heng'\n for char in '轰哄烘虹鸿洪宏弘红':\n self.trans[char] = 'hong'\n for char in '喉侯猴吼厚候后':\n self.trans[char] = 'hou'\n for char in '呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户':\n self.trans[char] = 'hu'\n for char in '花哗华猾滑画划化话':\n self.trans[char] = 'hua'\n for char in '槐徊怀淮坏':\n self.trans[char] = 'huai'\n for char in '欢环桓还缓换患唤痪豢焕涣宦幻':\n self.trans[char] = 'huan'\n for char in '荒慌黄磺蝗簧皇凰惶煌晃幌恍谎':\n self.trans[char] = 'huang'\n for char in '灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘':\n self.trans[char] = 'hui'\n for char in '荤昏婚魂浑混':\n self.trans[char] = 'hun'\n for char in '豁活伙火获或惑霍货祸':\n self.trans[char] = 'huo'\n for char in ('击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几'\n '脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪'):\n self.trans[char] = 'ji'\n for char in '嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁':\n self.trans[char] = 'jia'\n for char in ('歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健'\n '舰剑饯渐溅涧建'):\n self.trans[char] = 'jian'\n for char in '僵姜将浆江疆蒋桨奖讲匠酱降':\n self.trans[char] = 'jiang'\n for char in '蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖':\n self.trans[char] = 'jiao'\n for char in '揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届':\n self.trans[char] = 'jie'\n for char in '巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲':\n self.trans[char] = 'jin'\n for char in '荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净':\n self.trans[char] = 'jing'\n for char in '囧炯窘':\n self.trans[char] = 'jiong'\n for char in '揪究纠玖韭久灸九酒厩救旧臼舅咎就疚':\n self.trans[char] = 'jiu'\n for char in '鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧':\n self.trans[char] = 'ju'\n for char in '捐鹃娟倦眷卷绢':\n self.trans[char] = 'juan'\n for char in '撅攫抉掘倔爵觉决诀绝':\n self.trans[char] = 'jue'\n for char in '均菌钧军君峻俊竣浚郡骏':\n self.trans[char] = 'jun'\n for char in '喀咖卡咯':\n self.trans[char] = 'ka'\n for char in '开揩楷凯慨':\n self.trans[char] = 'kai'\n for char in '刊堪勘坎砍看':\n self.trans[char] = 'kan'\n for char in '康慷糠扛抗亢炕':\n self.trans[char] = 'kang'\n for char in '考拷烤靠':\n self.trans[char] = 'kao'\n for char in '坷苛柯棵磕颗科壳咳可渴克刻客课':\n self.trans[char] = 'ke'\n for char in '肯啃垦恳':\n self.trans[char] = 'ken'\n for char in '坑吭':\n self.trans[char] = 'keng'\n for char in '空恐孔控':\n self.trans[char] = 'kong'\n for char in '抠口扣寇':\n self.trans[char] = 'kou'\n for char in '枯哭窟苦酷库裤':\n self.trans[char] = 'ku'\n for char in '夸垮挎跨胯':\n self.trans[char] = 'kua'\n for char in '块筷侩快':\n self.trans[char] = 'kuai'\n for char in '宽款':\n self.trans[char] = 'kuan'\n for char in '匡筐狂框矿眶旷况':\n self.trans[char] = 'kuang'\n for char in '亏盔岿窥葵奎魁傀馈愧溃':\n self.trans[char] = 'kui'\n for char in '坤昆捆困':\n self.trans[char] = 'kun'\n for char in '括扩廓阔':\n self.trans[char] = 'kuo'\n for char in '垃拉喇蜡腊辣啦':\n self.trans[char] = 'la'\n for char in '莱来赖':\n self.trans[char] = 'lai'\n for char in '蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥':\n self.trans[char] = 'lan'\n for char in 
'琅榔狼廊郎朗浪':\n self.trans[char] = 'lang'\n for char in '捞劳牢老佬姥酪烙涝':\n self.trans[char] = 'lao'\n for char in '勒乐':\n self.trans[char] = 'le'\n for char in '雷镭蕾磊累儡垒擂肋类泪':\n self.trans[char] = 'lei'\n for char in '棱楞冷':\n self.trans[char] = 'leng'\n for char in ('厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力'\n '璃哩'):\n self.trans[char] = 'li'\n self.trans['俩'] = 'lia'\n for char in '联莲连镰廉怜涟帘敛脸链恋炼练':\n self.trans[char] = 'lian'\n for char in '粮凉梁粱良两辆量晾亮谅':\n self.trans[char] = 'liang'\n for char in '撩聊僚疗燎寥辽潦了撂镣廖料':\n self.trans[char] = 'liao'\n for char in '列裂烈劣猎':\n self.trans[char] = 'lie'\n for char in '琳林磷霖临邻鳞淋凛赁吝拎':\n self.trans[char] = 'lin'\n for char in '玲菱零龄铃伶羚凌灵陵岭领另令':\n self.trans[char] = 'ling'\n for char in '溜琉榴硫馏留刘瘤流柳六':\n self.trans[char] = 'liu'\n for char in '龙聋咙笼窿隆垄拢陇':\n self.trans[char] = 'long'\n for char in '楼娄搂篓漏陋':\n self.trans[char] = 'lou'\n for char in '芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸':\n self.trans[char] = 'lu'\n for char in '峦挛孪滦卵乱':\n self.trans[char] = 'luan'\n for char in '掠略':\n self.trans[char] = 'lue'\n for char in '抡轮伦仑沦纶论':\n self.trans[char] = 'lun'\n for char in '萝螺罗逻锣箩骡裸落洛骆络漯':\n self.trans[char] = 'luo'\n for char in '驴吕铝侣旅履屡缕虑氯律率滤绿':\n self.trans[char] = 'lv'\n for char in '妈麻玛码蚂马骂嘛吗':\n self.trans[char] = 'ma'\n for char in '埋买麦卖迈脉':\n self.trans[char] = 'mai'\n for char in '瞒馒蛮满蔓曼慢漫谩':\n self.trans[char] = 'man'\n for char in '芒茫盲氓忙莽':\n self.trans[char] = 'mang'\n for char in '猫茅锚毛矛铆卯茂冒帽貌贸':\n self.trans[char] = 'mao'\n self.trans['么'] = 'me'\n for char in '玫枚梅酶霉煤没眉媒镁每美昧寐妹媚':\n self.trans[char] = 'mei'\n for char in '门闷们':\n self.trans[char] = 'men'\n for char in '萌蒙檬盟锰猛梦孟':\n self.trans[char] = 'meng'\n for char in '眯醚靡糜迷谜弥米秘觅泌蜜密幂':\n self.trans[char] = 'mi'\n for char in '棉眠绵冕免勉娩缅面':\n self.trans[char] = 'mian'\n for char in '苗描瞄藐秒渺庙妙':\n self.trans[char] = 'miao'\n for char in '蔑灭':\n self.trans[char] = 'mie'\n for char in '民抿皿敏悯闽':\n self.trans[char] = 'min'\n for char in '明螟鸣铭名命':\n self.trans[char] = 'ming'\n self.trans['谬'] = 'miu'\n for char in '摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌':\n self.trans[char] = 'mo'\n for char in '谋牟某':\n self.trans[char] = 'mou'\n for char in '拇牡亩姆母墓暮幕募慕木目睦牧穆':\n self.trans[char] = 'mu'\n for char in '拿哪呐钠那娜纳':\n self.trans[char] = 'na'\n for char in '氖乃奶耐奈':\n self.trans[char] = 'nai'\n for char in '南男难':\n self.trans[char] = 'nan'\n self.trans['囊'] = 'nang'\n for char in '挠脑恼闹淖':\n self.trans[char] = 'nao'\n self.trans['呢'] = 'ne'\n for char in '馁内':\n self.trans[char] = 'nei'\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in '妮霓倪泥尼拟你匿腻逆溺':\n self.trans[char] = 'ni'\n for char in '蔫拈年碾撵捻念':\n self.trans[char] = 'nian'\n for char in '娘酿':\n self.trans[char] = 'niang'\n for char in '鸟尿':\n self.trans[char] = 'niao'\n for char in '捏聂孽啮镊镍涅':\n self.trans[char] = 'nie'\n self.trans['您'] = 'nin'\n for char in '柠狞凝宁拧泞':\n self.trans[char] = 'ning'\n for char in '牛扭钮纽':\n self.trans[char] = 'niu'\n for char in '脓浓农弄':\n self.trans[char] = 'nong'\n for char in '奴努怒':\n self.trans[char] = 'nu'\n self.trans['暖'] = 'nuan'\n for char in '虐疟':\n self.trans[char] = 'nue'\n for char in '挪懦糯诺':\n self.trans[char] = 'nuo'\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in '欧鸥殴藕呕偶沤':\n self.trans[char] = 'ou'\n for char in '啪趴爬帕怕琶':\n self.trans[char] = 'pa'\n for char in '拍排牌徘湃派':\n self.trans[char] = 'pai'\n for char in '攀潘盘磐盼畔判叛':\n self.trans[char] = 'pan'\n for char in '乓庞旁耪胖':\n self.trans[char] = 'pang'\n for char in '抛咆刨炮袍跑泡':\n self.trans[char] = 'pao'\n for char in '呸胚培裴赔陪配佩沛':\n self.trans[char] = 'pei'\n for char in '喷盆':\n 
self.trans[char] = 'pen'\n for char in '砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰':\n self.trans[char] = 'peng'\n for char in '坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬':\n self.trans[char] = 'pi'\n for char in '篇偏片骗':\n self.trans[char] = 'pian'\n for char in '飘漂瓢票':\n self.trans[char] = 'piao'\n for char in '撇瞥':\n self.trans[char] = 'pie'\n for char in '拼频贫品聘':\n self.trans[char] = 'pin'\n for char in '乒坪苹萍平凭瓶评屏':\n self.trans[char] = 'ping'\n for char in '坡泼颇婆破魄迫粕剖':\n self.trans[char] = 'po'\n for char in '扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮':\n self.trans[char] = 'pu'\n for char in ('期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄'\n '弃汽泣讫'):\n self.trans[char] = 'qi'\n for char in '掐恰洽':\n self.trans[char] = 'qia'\n for char in '牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉':\n self.trans[char] = 'qian'\n for char in '枪呛腔羌墙蔷强抢':\n self.trans[char] = 'qiang'\n for char in '橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍':\n self.trans[char] = 'qiao'\n for char in '切茄且怯窃':\n self.trans[char] = 'qie'\n for char in '钦侵亲秦琴勤芹擒禽寝沁':\n self.trans[char] = 'qin'\n for char in '青轻氢倾卿清擎晴氰情顷请庆':\n self.trans[char] = 'qing'\n for char in '琼穷':\n self.trans[char] = 'qiong'\n for char in '秋丘邱球求囚酋泅':\n self.trans[char] = 'qiu'\n for char in '趋区蛆曲躯屈驱渠取娶龋趣去':\n self.trans[char] = 'qu'\n for char in '圈颧权醛泉全痊拳犬券劝':\n self.trans[char] = 'quan'\n for char in '缺炔瘸却鹊榷确雀':\n self.trans[char] = 'que'\n for char in '裙群':\n self.trans[char] = 'qun'\n for char in '然燃冉染':\n self.trans[char] = 'ran'\n for char in '瓤壤攘嚷让':\n self.trans[char] = 'rang'\n for char in '饶扰绕':\n self.trans[char] = 'rao'\n for char in '惹热':\n self.trans[char] = 're'\n for char in '壬仁人忍韧任认刃妊纫':\n self.trans[char] = 'ren'\n for char in '扔仍':\n self.trans[char] = 'reng'\n self.trans['日'] = 'ri'\n for char in '戎茸蓉荣融熔溶容绒冗':\n self.trans[char] = 'rong'\n for char in '揉柔肉':\n self.trans[char] = 'rou'\n for char in '茹蠕儒孺如辱乳汝入褥':\n self.trans[char] = 'ru'\n for char in '软阮':\n self.trans[char] = 'ruan'\n for char in '蕊瑞锐':\n self.trans[char] = 'rui'\n for char in '闰润':\n self.trans[char] = 'run'\n for char in '若弱':\n self.trans[char] = 'ruo'\n for char in '撒洒萨':\n self.trans[char] = 'sa'\n for char in '腮鳃塞赛':\n self.trans[char] = 'sai'\n for char in '三叁伞散':\n self.trans[char] = 'san'\n for char in '桑嗓丧':\n self.trans[char] = 'sang'\n for char in '搔骚扫嫂':\n self.trans[char] = 'sao'\n for char in '瑟色涩':\n self.trans[char] = 'se'\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in '莎砂杀刹沙纱傻啥煞':\n self.trans[char] = 'sha'\n for char in '筛晒':\n self.trans[char] = 'shai'\n for char in '珊苫杉山删煽衫闪陕擅赡膳善汕扇缮':\n self.trans[char] = 'shan'\n for char in '墒伤商赏晌上尚裳':\n self.trans[char] = 'shang'\n for char in '梢捎稍烧芍勺韶少哨邵绍':\n self.trans[char] = 'shao'\n for char in '奢赊蛇舌舍赦摄射慑涉社设':\n self.trans[char] = 'she'\n for char in '砷申呻伸身深娠绅神沈审婶甚肾慎渗':\n self.trans[char] = 'shen'\n for char in '声生甥牲升绳省盛剩胜圣':\n self.trans[char] = 'sheng'\n for char in ('师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝'\n '势是嗜噬适仕侍释饰氏市恃室视试'):\n self.trans[char] = 'shi'\n for char in '收手首守寿授售受瘦兽':\n self.trans[char] = 'shou'\n for char in (\n '蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕'):\n self.trans[char] = 'shu'\n for char in '刷耍':\n self.trans[char] = 'shua'\n for char in '摔衰甩帅':\n self.trans[char] = 'shuai'\n for char in '栓拴':\n self.trans[char] = 'shuan'\n for char in '霜双爽':\n self.trans[char] = 'shuang'\n for char in '谁水睡税':\n self.trans[char] = 'shui'\n for char in '吮瞬顺舜':\n self.trans[char] = 'shun'\n for char in '说硕朔烁':\n self.trans[char] = 'shuo'\n for char in '斯撕嘶思私司丝死肆寺嗣四伺似饲巳':\n self.trans[char] = 'si'\n for char in '松耸怂颂送宋讼诵':\n self.trans[char] = 'song'\n for char in '搜艘擞':\n self.trans[char] = 'sou'\n for char in 
'嗽苏酥俗素速粟僳塑溯宿诉肃':\n self.trans[char] = 'su'\n for char in '酸蒜算':\n self.trans[char] = 'suan'\n for char in '虽隋随绥髓碎岁穗遂隧祟':\n self.trans[char] = 'sui'\n for char in '孙损笋':\n self.trans[char] = 'sun'\n for char in '蓑梭唆缩琐索锁所':\n self.trans[char] = 'suo'\n for char in '塌他它她塔獭挞蹋踏':\n self.trans[char] = 'ta'\n for char in '胎苔抬台泰酞太态汰':\n self.trans[char] = 'tai'\n for char in '坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭':\n self.trans[char] = 'tan'\n for char in '汤塘搪堂棠膛唐糖倘躺淌趟烫':\n self.trans[char] = 'tang'\n for char in '掏涛滔绦萄桃逃淘陶讨套':\n self.trans[char] = 'tao'\n self.trans['特'] = 'te'\n for char in '藤腾疼誊':\n self.trans[char] = 'teng'\n for char in '梯剔踢锑提题蹄啼体替嚏惕涕剃屉':\n self.trans[char] = 'ti'\n for char in '兲天添填田甜恬舔腆':\n self.trans[char] = 'tian'\n for char in '挑条迢眺跳':\n self.trans[char] = 'tiao'\n for char in '贴铁帖':\n self.trans[char] = 'tie'\n for char in '厅听烃汀廷停亭庭挺艇':\n self.trans[char] = 'ting'\n for char in '通桐酮瞳同铜彤童桶捅筒统痛':\n self.trans[char] = 'tong'\n for char in '偷投头透':\n self.trans[char] = 'tou'\n for char in '凸秃突图徒途涂屠土吐兔':\n self.trans[char] = 'tu'\n for char in '湍团':\n self.trans[char] = 'tuan'\n for char in '推颓腿蜕褪退':\n self.trans[char] = 'tui'\n for char in '吞屯臀':\n self.trans[char] = 'tun'\n for char in '拖托脱鸵陀驮驼椭妥拓唾':\n self.trans[char] = 'tuo'\n for char in '挖哇蛙洼娃瓦袜':\n self.trans[char] = 'wa'\n for char in '歪外':\n self.trans[char] = 'wai'\n for char in '豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞':\n self.trans[char] = 'wan'\n for char in '汪王亡枉网往旺望忘妄':\n self.trans[char] = 'wang'\n for char in '威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫':\n self.trans[char] = 'wei'\n for char in '瘟温蚊文闻纹吻稳紊问':\n self.trans[char] = 'wen'\n for char in '嗡翁瓮':\n self.trans[char] = 'weng'\n for char in '挝蜗涡窝我斡卧握沃':\n self.trans[char] = 'wo'\n for char in '巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误':\n self.trans[char] = 'wu'\n for char in ('昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系'\n '隙戏细'):\n self.trans[char] = 'xi'\n for char in '瞎虾匣霞辖暇峡侠狭下厦夏吓':\n self.trans[char] = 'xia'\n for char in '掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线':\n self.trans[char] = 'xian'\n for char in '相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象':\n self.trans[char] = 'xiang'\n for char in '萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效':\n self.trans[char] = 'xiao'\n for char in '楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑':\n self.trans[char] = 'xie'\n for char in '薪芯锌欣辛新忻心信衅':\n self.trans[char] = 'xin'\n for char in '星腥猩惺兴刑型形邢行醒幸杏性姓':\n self.trans[char] = 'xing'\n for char in '兄凶胸匈汹雄熊':\n self.trans[char] = 'xiong'\n for char in '休修羞朽嗅锈秀袖绣':\n self.trans[char] = 'xiu'\n for char in '墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续':\n self.trans[char] = 'xu'\n for char in '轩喧宣悬旋玄选癣眩绚':\n self.trans[char] = 'xuan'\n for char in '靴薛学穴雪血':\n self.trans[char] = 'xue'\n for char in '勋熏循旬询寻驯巡殉汛训讯逊迅':\n self.trans[char] = 'xun'\n for char in '压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶':\n self.trans[char] = 'ya'\n for char in '焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验':\n self.trans[char] = 'yan'\n for char in '殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾':\n self.trans[char] = 'yang'\n for char in '邀腰妖瑶摇尧遥窑谣姚咬舀药要耀':\n self.trans[char] = 'yao'\n for char in '椰噎耶爷野冶也页掖业叶曳腋夜液':\n self.trans[char] = 'ye'\n for char in ('一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿'\n '役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎'):\n self.trans[char] = 'yi'\n for char in '茵荫因殷音阴姻吟银淫寅饮尹引隐印':\n self.trans[char] = 'yin'\n for char in '英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映':\n self.trans[char] = 'ying'\n self.trans['哟'] = 'yo'\n for char in '拥佣臃痈庸雍踊蛹咏泳涌永恿勇用':\n self.trans[char] = 'yong'\n for char in '幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂':\n self.trans[char] = 'you'\n for char in ('淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻'\n '峪御愈欲狱育誉浴寓裕预豫驭'):\n self.trans[char] = 'yu'\n for char in '鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院':\n self.trans[char] = 'yuan'\n for char in '曰约越跃钥岳粤月悦阅':\n self.trans[char] = 'yue'\n 
for char in '耘云郧匀陨允运蕴酝晕韵孕':\n self.trans[char] = 'yun'\n for char in '匝砸杂':\n self.trans[char] = 'za'\n for char in '栽哉灾宰载再在':\n self.trans[char] = 'zai'\n for char in '咱攒暂赞':\n self.trans[char] = 'zan'\n for char in '赃脏葬':\n self.trans[char] = 'zang'\n for char in '遭糟凿藻枣早澡蚤躁噪造皂灶燥':\n self.trans[char] = 'zao'\n for char in '责择则泽':\n self.trans[char] = 'ze'\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in '增憎曾赠':\n self.trans[char] = 'zeng'\n for char in '扎喳渣札轧铡闸眨栅榨咋乍炸诈':\n self.trans[char] = 'zha'\n for char in '摘斋宅窄债寨':\n self.trans[char] = 'zhai'\n for char in '瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽':\n self.trans[char] = 'zhan'\n for char in '樟章彰漳张掌涨杖丈帐账仗胀瘴障':\n self.trans[char] = 'zhang'\n for char in '招昭找沼赵照罩兆肇召':\n self.trans[char] = 'zhao'\n for char in '遮折哲蛰辙者锗蔗这浙':\n self.trans[char] = 'zhe'\n for char in '珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳':\n self.trans[char] = 'zhen'\n for char in '蒸挣睁征狰争怔整拯正政帧症郑证':\n self.trans[char] = 'zheng'\n for char in ('芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置'\n '帜峙制智秩稚质炙痔滞治窒'):\n self.trans[char] = 'zhi'\n for char in '中盅忠钟衷终种肿重仲众':\n self.trans[char] = 'zhong'\n for char in '舟周州洲诌粥轴肘帚咒皱宙昼骤':\n self.trans[char] = 'zhou'\n for char in '珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻':\n self.trans[char] = 'zhu'\n for char in '抓爪':\n self.trans[char] = 'zhua'\n self.trans['拽'] = 'zhuai'\n for char in '专砖转撰赚篆':\n self.trans[char] = 'zhuan'\n for char in '桩庄装妆撞壮状':\n self.trans[char] = 'zhuang'\n for char in '椎锥追赘坠缀':\n self.trans[char] = 'zhui'\n for char in '谆准':\n self.trans[char] = 'zhun'\n for char in '捉拙卓桌琢茁酌啄着灼浊':\n self.trans[char] = 'zhuo'\n for char in '兹咨资姿滋淄孜紫仔籽滓子自渍字':\n self.trans[char] = 'zi'\n for char in '鬃棕踪宗综总纵':\n self.trans[char] = 'zong'\n for char in '邹走奏揍':\n self.trans[char] = 'zou'\n for char in '租足卒族祖诅阻组':\n self.trans[char] = 'zu'\n for char in '钻纂':\n self.trans[char] = 'zuan'\n for char in '嘴醉最罪':\n self.trans[char] = 'zui'\n for char in '尊遵':\n self.trans[char] = 'zun'\n for char in '昨左佐柞做作坐座':\n self.trans[char] = 'zuo'\n # from:\n # https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans['ଂ'] = 'anusvara'\n self.trans['ઇ'] = 'i'\n self.trans['എ'] = 'e'\n self.trans['ગ'] = 'ga'\n self.trans['ਜ'] = 'ja'\n self.trans['ഞ'] = 'nya'\n self.trans['ଢ'] = 'ddha'\n self.trans['ધ'] = 'dha'\n self.trans['ਬ'] = 'ba'\n self.trans['മ'] = 'ma'\n self.trans['ଲ'] = 'la'\n self.trans['ષ'] = 'ssa'\n self.trans['਼'] = 'nukta'\n self.trans['ാ'] = 'aa'\n self.trans['ୂ'] = 'uu'\n self.trans['ે'] = 'e'\n self.trans['ੌ'] = 'au'\n self.trans['ൎ'] = 'reph'\n self.trans['ੜ'] = 'rra'\n self.trans['՞'] = '?'\n self.trans['ୢ'] = 'l'\n self.trans['૧'] = '1'\n self.trans['੬'] = '6'\n self.trans['൮'] = '8'\n self.trans['୲'] = 'quarter'\n self.trans['ൾ'] = 'll'\n self.trans['ਇ'] = 'i'\n self.trans['ഉ'] = 'u'\n self.trans['ઌ'] = 'l'\n self.trans['ਗ'] = 'ga'\n self.trans['ങ'] = 'nga'\n self.trans['ଝ'] = 'jha'\n self.trans['જ'] = 'ja'\n self.trans['؟'] = '?'\n self.trans['ਧ'] = 'dha'\n self.trans['ഩ'] = 'nnna'\n self.trans['ଭ'] = 'bha'\n self.trans['બ'] = 'ba'\n self.trans['ഹ'] = 'ha'\n self.trans['ଽ'] = 'avagraha'\n self.trans['઼'] = 'nukta'\n self.trans['ੇ'] = 'ee'\n self.trans['୍'] = 'virama'\n self.trans['ૌ'] = 'au'\n self.trans['੧'] = '1'\n self.trans['൩'] = '3'\n self.trans['୭'] = '7'\n self.trans['૬'] = '6'\n self.trans['൹'] = 'mark'\n self.trans['ਖ਼'] = 'khha'\n self.trans['ਂ'] = 'bindi'\n self.trans['ഈ'] = 'ii'\n self.trans['ઍ'] = 'e'\n self.trans['ଌ'] = 'l'\n self.trans['ഘ'] = 'gha'\n self.trans['ઝ'] = 'jha'\n self.trans['ଡ଼'] = 'rra'\n self.trans['ਢ'] = 'ddha'\n 
self.trans['ന'] = 'na'\n self.trans['ભ'] = 'bha'\n self.trans['ବ'] = 'ba'\n self.trans['ਲ'] = 'la'\n self.trans['സ'] = 'sa'\n self.trans['ઽ'] = 'avagraha'\n self.trans['଼'] = 'nukta'\n self.trans['ੂ'] = 'uu'\n self.trans['ൈ'] = 'ai'\n self.trans['્'] = 'virama'\n self.trans['ୌ'] = 'au'\n self.trans['൨'] = '2'\n self.trans['૭'] = '7'\n self.trans['୬'] = '6'\n self.trans['ੲ'] = 'iri'\n self.trans['ഃ'] = 'visarga'\n self.trans['ં'] = 'anusvara'\n self.trans['ଇ'] = 'i'\n self.trans['ഓ'] = 'oo'\n self.trans['ଗ'] = 'ga'\n self.trans['ਝ'] = 'jha'\n self.trans['?'] = '?'\n self.trans['ണ'] = 'nna'\n self.trans['ઢ'] = 'ddha'\n self.trans['ଧ'] = 'dha'\n self.trans['ਭ'] = 'bha'\n self.trans['ള'] = 'lla'\n self.trans['લ'] = 'la'\n self.trans['ଷ'] = 'ssa'\n self.trans['ൃ'] = 'r'\n self.trans['ૂ'] = 'uu'\n self.trans['େ'] = 'e'\n self.trans['੍'] = 'virama'\n self.trans['ୗ'] = 'mark'\n self.trans['ൣ'] = 'll'\n self.trans['ૢ'] = 'l'\n self.trans['୧'] = '1'\n self.trans['੭'] = '7'\n self.trans['൳'] = '1/4'\n self.trans['୷'] = 'sixteenths'\n self.trans['ଆ'] = 'aa'\n self.trans['ઋ'] = 'r'\n self.trans['ഊ'] = 'uu'\n self.trans['ਐ'] = 'ai'\n self.trans['ଖ'] = 'kha'\n self.trans['છ'] = 'cha'\n self.trans['ച'] = 'ca'\n self.trans['ਠ'] = 'ttha'\n self.trans['ଦ'] = 'da'\n self.trans['ફ'] = 'pha'\n self.trans['പ'] = 'pa'\n self.trans['ਰ'] = 'ra'\n self.trans['ଶ'] = 'sha'\n self.trans['ഺ'] = 'ttta'\n self.trans['ੀ'] = 'ii'\n self.trans['ો'] = 'o'\n self.trans['ൊ'] = 'o'\n self.trans['ୖ'] = 'mark'\n self.trans['୦'] = '0'\n self.trans['૫'] = '5'\n self.trans['൪'] = '4'\n self.trans['ੰ'] = 'tippi'\n self.trans['୶'] = 'eighth'\n self.trans['ൺ'] = 'nn'\n self.trans['ଁ'] = 'candrabindu'\n self.trans['അ'] = 'a'\n self.trans['ઐ'] = 'ai'\n self.trans['ക'] = 'ka'\n self.trans['ਸ਼'] = 'sha'\n self.trans['ਛ'] = 'cha'\n self.trans['ଡ'] = 'dda'\n self.trans['ઠ'] = 'ttha'\n self.trans['ഥ'] = 'tha'\n self.trans['ਫ'] = 'pha'\n self.trans['ર'] = 'ra'\n self.trans['വ'] = 'va'\n self.trans['ୁ'] = 'u'\n self.trans['ી'] = 'ii'\n self.trans['ੋ'] = 'oo'\n self.trans['ૐ'] = 'om'\n self.trans['ୡ'] = 'll'\n self.trans['ૠ'] = 'rr'\n self.trans['੫'] = '5'\n self.trans['ୱ'] = 'wa'\n self.trans['૰'] = 'sign'\n self.trans['൵'] = 'quarters'\n self.trans['ਫ਼'] = 'fa'\n self.trans['ઁ'] = 'candrabindu'\n self.trans['ਆ'] = 'aa'\n self.trans['ઑ'] = 'o'\n self.trans['ଐ'] = 'ai'\n self.trans['ഔ'] = 'au'\n self.trans['ਖ'] = 'kha'\n self.trans['ડ'] = 'dda'\n self.trans['ଠ'] = 'ttha'\n self.trans['ത'] = 'ta'\n self.trans['ਦ'] = 'da'\n self.trans['ର'] = 'ra'\n self.trans['ഴ'] = 'llla'\n self.trans['ુ'] = 'u'\n self.trans['ୀ'] = 'ii'\n self.trans['ൄ'] = 'rr'\n self.trans['ૡ'] = 'll'\n self.trans['ୠ'] = 'rr'\n self.trans['੦'] = '0'\n self.trans['૱'] = 'sign'\n self.trans['୰'] = 'isshar'\n self.trans['൴'] = '1/2'\n self.trans['ਁ'] = 'bindi'\n self.trans['આ'] = 'aa'\n self.trans['ଋ'] = 'r'\n self.trans['ഏ'] = 'ee'\n self.trans['ખ'] = 'kha'\n self.trans['ଛ'] = 'cha'\n self.trans['ട'] = 'tta'\n self.trans['ਡ'] = 'dda'\n self.trans['દ'] = 'da'\n self.trans['ଫ'] = 'pha'\n self.trans['യ'] = 'ya'\n self.trans['શ'] = 'sha'\n self.trans['ി'] = 'i'\n self.trans['ੁ'] = 'u'\n self.trans['ୋ'] = 'o'\n self.trans['ੑ'] = 'udaat'\n self.trans['૦'] = '0'\n self.trans['୫'] = '5'\n self.trans['൯'] = '9'\n self.trans['ੱ'] = 'addak'\n self.trans['ൿ'] = 'k'\n self.trans['ആ'] = 'aa'\n self.trans['ଊ'] = 'uu'\n self.trans['એ'] = 'e'\n self.trans['ਔ'] = 'au'\n self.trans['ഖ'] = 'kha'\n self.trans['ଚ'] = 'ca'\n self.trans['ટ'] = 'tta'\n self.trans['ਤ'] = 'ta'\n self.trans['ദ'] = 'da'\n 
self.trans['ପ'] = 'pa'\n self.trans['ય'] = 'ya'\n self.trans['ശ'] = 'sha'\n self.trans['િ'] = 'i'\n self.trans['െ'] = 'e'\n self.trans['൦'] = '0'\n self.trans['୪'] = '4'\n self.trans['૯'] = '9'\n self.trans['ੴ'] = 'onkar'\n self.trans['ଅ'] = 'a'\n self.trans['ਏ'] = 'ee'\n self.trans['କ'] = 'ka'\n self.trans['ઔ'] = 'au'\n self.trans['ਟ'] = 'tta'\n self.trans['ഡ'] = 'dda'\n self.trans['ଥ'] = 'tha'\n self.trans['ત'] = 'ta'\n self.trans['ਯ'] = 'ya'\n self.trans['റ'] = 'rra'\n self.trans['ଵ'] = 'va'\n self.trans['ਿ'] = 'i'\n self.trans['ു'] = 'u'\n self.trans['ૄ'] = 'rr'\n self.trans['ൡ'] = 'll'\n self.trans['੯'] = '9'\n self.trans['൱'] = '100'\n self.trans['୵'] = 'sixteenth'\n self.trans['અ'] = 'a'\n self.trans['ਊ'] = 'uu'\n self.trans['ഐ'] = 'ai'\n self.trans['ક'] = 'ka'\n self.trans['ଔ'] = 'au'\n self.trans['ਚ'] = 'ca'\n self.trans['ഠ'] = 'ttha'\n self.trans['થ'] = 'tha'\n self.trans['ତ'] = 'ta'\n self.trans['ਪ'] = 'pa'\n self.trans['ര'] = 'ra'\n self.trans['વ'] = 'va'\n self.trans['ീ'] = 'ii'\n self.trans['ૅ'] = 'e'\n self.trans['ୄ'] = 'rr'\n self.trans['ൠ'] = 'rr'\n self.trans['ਜ਼'] = 'za'\n self.trans['੪'] = '4'\n self.trans['൰'] = '10'\n self.trans['୴'] = 'quarters'\n self.trans['ਅ'] = 'a'\n self.trans['ഋ'] = 'r'\n self.trans['ઊ'] = 'uu'\n self.trans['ଏ'] = 'e'\n self.trans['ਕ'] = 'ka'\n self.trans['ഛ'] = 'cha'\n self.trans['ચ'] = 'ca'\n self.trans['ଟ'] = 'tta'\n self.trans['ਥ'] = 'tha'\n self.trans['ഫ'] = 'pha'\n self.trans['પ'] = 'pa'\n self.trans['ଯ'] = 'ya'\n self.trans['ਵ'] = 'va'\n self.trans['ି'] = 'i'\n self.trans['ോ'] = 'oo'\n self.trans['ୟ'] = 'yya'\n self.trans['൫'] = '5'\n self.trans['૪'] = '4'\n self.trans['୯'] = '9'\n self.trans['ੵ'] = 'yakash'\n self.trans['ൻ'] = 'n'\n self.trans['ઃ'] = 'visarga'\n self.trans['ം'] = 'anusvara'\n self.trans['ਈ'] = 'ii'\n self.trans['ઓ'] = 'o'\n self.trans['ഒ'] = 'o'\n self.trans['ਘ'] = 'gha'\n self.trans['ଞ'] = 'nya'\n self.trans['ણ'] = 'nna'\n self.trans['ഢ'] = 'ddha'\n self.trans['ਲ਼'] = 'lla'\n self.trans['ਨ'] = 'na'\n self.trans['ମ'] = 'ma'\n self.trans['ળ'] = 'lla'\n self.trans['ല'] = 'la'\n self.trans['ਸ'] = 'sa'\n self.trans['¿'] = '?'\n self.trans['ା'] = 'aa'\n self.trans['ૃ'] = 'r'\n self.trans['ൂ'] = 'uu'\n self.trans['ੈ'] = 'ai'\n self.trans['ૣ'] = 'll'\n self.trans['ൢ'] = 'l'\n self.trans['੨'] = '2'\n self.trans['୮'] = '8'\n self.trans['൲'] = '1000'\n self.trans['ਃ'] = 'visarga'\n self.trans['ଉ'] = 'u'\n self.trans['ઈ'] = 'ii'\n self.trans['ਓ'] = 'oo'\n self.trans['ଙ'] = 'nga'\n self.trans['ઘ'] = 'gha'\n self.trans['ഝ'] = 'jha'\n self.trans['ਣ'] = 'nna'\n self.trans['ન'] = 'na'\n self.trans['ഭ'] = 'bha'\n self.trans['ଜ'] = 'ja'\n self.trans['ହ'] = 'ha'\n self.trans['સ'] = 'sa'\n self.trans['ഽ'] = 'avagraha'\n self.trans['ૈ'] = 'ai'\n self.trans['്'] = 'virama'\n self.trans['୩'] = '3'\n self.trans['૨'] = '2'\n self.trans['൭'] = '7'\n self.trans['ੳ'] = 'ura'\n self.trans['ൽ'] = 'l'\n self.trans['ઉ'] = 'u'\n self.trans['ଈ'] = 'ii'\n self.trans['ഌ'] = 'l'\n self.trans['ઙ'] = 'nga'\n self.trans['ଘ'] = 'gha'\n self.trans['ജ'] = 'ja'\n self.trans['ਞ'] = 'nya'\n self.trans['ନ'] = 'na'\n self.trans['ബ'] = 'ba'\n self.trans['ਮ'] = 'ma'\n self.trans['હ'] = 'ha'\n self.trans['ସ'] = 'sa'\n self.trans['ਾ'] = 'aa'\n self.trans['ૉ'] = 'o'\n self.trans['ୈ'] = 'ai'\n self.trans['ൌ'] = 'au'\n self.trans['૩'] = '3'\n self.trans['୨'] = '2'\n self.trans['൬'] = '6'\n self.trans['੮'] = '8'\n self.trans['ർ'] = 'rr'\n self.trans['ଃ'] = 'visarga'\n self.trans['ഇ'] = 'i'\n self.trans['ਉ'] = 'u'\n self.trans['ଓ'] = 'o'\n self.trans['ഗ'] = 'ga'\n 
self.trans['ਙ'] = 'nga'\n self.trans['ઞ'] = 'nya'\n self.trans['ଣ'] = 'nna'\n self.trans['ധ'] = 'dha'\n self.trans['મ'] = 'ma'\n self.trans['ଳ'] = 'lla'\n self.trans['ഷ'] = 'ssa'\n self.trans['ਹ'] = 'ha'\n self.trans['ਗ਼'] = 'ghha'\n self.trans['ા'] = 'aa'\n self.trans['ୃ'] = 'r'\n self.trans['േ'] = 'ee'\n self.trans['ൗ'] = 'mark'\n self.trans['ଢ଼'] = 'rha'\n self.trans['ୣ'] = 'll'\n self.trans['൧'] = '1'\n self.trans['੩'] = '3'\n self.trans['૮'] = '8'\n self.trans['୳'] = 'half'\n for char in self.trans:\n value = self.trans[char]\n if value == '?':\n continue\n while (value.encode(encoding, 'replace').decode(encoding) == '?'\n and value in self.trans):\n assert value != self.trans[value], \\\n '{!r} == self.trans[{!r}]!'.format(value, value)\n value = self.trans[value]\n self.trans[char] = value",
"def CharSet(self) -> CharSet:",
"def _make_renderer():\n renderer = Renderer(string_encoding='ascii', file_encoding='ascii')\n return renderer",
"def default_character_set(self):\n return \"\"\"--default-character-set=charset_name\"\"\"",
"def encodeString():\n pass",
"def test_encode():\n\n assert ceaser.encode(\"bbb\", 3) == \"eee\"\n\n assert ceaser.encode(\"ccccc\", 2) == \"eeeee\"\n\n assert ceaser.encode(\"blake\", 4) == \"fpeoi\"\n \n assert ceaser.encode(\"\", 4) == \"\"",
"def getfilesystemencoding(*args,**kw):\n return 'utf-8'",
"def to_text(self) -> str:\n return ''.join(SCC_STANDARD_CHARACTERS_MAPPING.get(byte, chr(byte)) for byte in [self.byte_1, self.byte_2] if byte != 0x00)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get internal states of an LSTM network for making nice state plots. This only works on a few types of LSTM.
|
def getstates_for_display(net):
if isinstance(net,LSTM):
return net.state[:net.last_n]
if isinstance(net,Stacked) and isinstance(net.nets[0],LSTM):
return net.nets[0].state[:net.nets[0].last_n]
return None
|
[
"def getStates():",
"def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n bvis_list.append(bvis.get_value(borrow=True))\n \n return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list, self.corruption_levels, self.layer_types, self.use_loss, self.dropout_rates, self.opt_method)",
"def states(self):\n return np.array(self.state[:self.last_n])",
"def allStates():",
"def state(self):\n if not self.sublayers:\n return self._state\n else:\n return tuple(layer.state if s is None else s\n for (layer, s) in zip(self.sublayers, self._state))",
"def getLampStates(self):\n\t\tvplamps = [False]*90\n\t\n\t\tfor i in range(0,64):\n\t\t\tvpNum = (((i/8)+1)*10) + (i%8) + 1\n\t\t\tvplamps[vpNum] = self.game.proc.drivers[i+80].curr_state\n\t\t\t\n\t\treturn vplamps",
"def get_internal_states(self):\n joint_pos = []\n joint_vel = []\n for joint_id in range(len(self.joints)):\n joint_name = self.joints[joint_id]\n joint_state = self._agent.get_joint_state(joint_name)\n joint_pos.append(joint_state.get_positions())\n joint_vel.append(joint_state.get_velocities())\n joint_pos = np.array(joint_pos).flatten()\n joint_vel = np.array(joint_vel).flatten()\n # pos of continous joint could be huge, wrap the range with sin and cos.\n joint_pos_sin = np.sin(joint_pos)\n joint_pos_cos = np.cos(joint_pos)\n internal_states = np.concatenate(\n (joint_pos_sin, joint_pos_cos, joint_vel), axis=0)\n return internal_states",
"def getstate(self):\n\t\timport numpy as np\n\t\t# initialize state 2D array\n\t\tstate=np.zeros(self.shape)\n\t\t# cycle on cells\n\t\tfor (id, cell) in self.cells.iteritems():\n\t\t\tstate[id[0], id[1]]=cell.state\n\t\t# output\n\t\treturn state",
"def build_lstm_graph(self):\n tf.reset_default_graph()\n lstm_graph = tf.Graph()\n\n with lstm_graph.as_default():\n self.xx = tf.placeholder('float32', [None, 1, self.n_features], name='features')\n self.yy = tf.placeholder('float32', name='labels')\n self.bins = tf.constant(self.bins, name='bins')\n with tf.name_scope(\"output_layer\"):\n weight = tf.Variable(tf.random_normal([self._lstm_size, self.n_labels]), name='weights')\n biases = tf.Variable(tf.random_normal([self.n_labels]), name='biases')\n x = tf.transpose(self.xx, [1, 0, 2])\n x = tf.reshape(x, [-1, self.n_features])\n x = tf.split(x, 1)\n\n lstm_cell = rnn_cell.LSTMCell(self._lstm_size, name='basic_lstm_cell')\n outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n\n logits = tf.add(tf.matmul(outputs[-1], weight), biases, name='rnn_model')\n\n tf.summary.histogram(\"last_lstm_output\", outputs[-1])\n tf.summary.histogram(\"weights\", weight)\n tf.summary.histogram(\"biases\", biases)\n\n with tf.name_scope(\"train\"):\n correct = tf.equal(tf.argmax(logits, 1), tf.argmax(self.yy, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.yy),\n name='loss'\n )\n tf.train.AdamOptimizer().minimize(loss, name=\"loss_mse_adam_minimize\")\n tf.summary.scalar(\"loss\", loss)\n tf.summary.scalar(\"accuracy\", accuracy)\n\n # Operators to use after restoring the model\n for op in [logits, loss]:\n tf.add_to_collection('ops_to_restore', op)\n\n return lstm_graph",
"def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))",
"def state_names(model):\n return tuple(n for n, v in model[\"state\"])",
"def states(self) -> OptimizationVariableList:\n self._nlp.states.node_index = self.node_index\n out = self._nlp.states.unscaled\n out.current_cx_to_get = self.cx_index_to_get\n return out",
"def unpack_bidirectional_lstm_state(state, num_directions=2):\n batch_size = state.size(1)\n new_hidden_dim = int(state.size(2) / num_directions)\n return torch.stack(torch.split(state, new_hidden_dim, dim=2), dim=1).view(-1, batch_size, new_hidden_dim)",
"def get_states(self, n=1):\n states = {\n 'electron_positions': self._positions_history[-n:],\n 'time': self._time_history[-n:],\n 'colors': self._colors_history[-n:]\n }\n return states",
"def states(self, as_tuple = False):\n if as_tuple:\n return self.legal_states\n else:\n return list(range(len(self.legal_states)))",
"def _attach_cached_lstm_nodes( input, hparams=None ):\n # LSTM with cached / preserved hidden state\n # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html\n cell = tf.contrib.rnn.LSTMCell( num_units=NUM_SHARDS * hparams.word_embedding_size,\n num_proj=hparams.word_embedding_size,\n num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS,\n forget_bias=1.0, use_peepholes=True )\n\n state_c = tf.get_variable( name=\"state_c\",\n shape=(hparams.batch_size * hparams.sequence_length, 8192),\n initializer=tf.zeros_initializer,\n trainable=False )\n state_h = tf.get_variable( name=\"state_h\",\n shape=(hparams.batch_size * hparams.sequence_length, 1024),\n initializer=tf.zeros_initializer,\n trainable=False )\n\n out_0, state_0 = cell( input, tf.nn.rnn_cell.LSTMStateTuple( state_c, state_h ) )\n\n ass_c = tf.assign( state_c, state_0[0] )\n ass_h = tf.assign( state_h, state_0[1] )\n\n with tf.control_dependencies( [ass_c, ass_h] ):\n out_0 = tf.identity( out_0 )\n\n return out_0, state_0",
"def __get_state__(self):\n\t\t## unroll all the parameters\n\t\tgates = self._gates\n\t\t\n\t\tThetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']] \n\t\tparams = [weight for gate in gates for weight in gate.__get_state__()['params']]\n\n\t\tprint \"Total number of parameters: %d \" % len(params) \n\n\t\treturn dict(Thetas=Thetas,params=params)",
"def _get_state(self, board_list):\n state_list = []\n N = self.board.N\n for i in range(N):\n ind = a2p(i * N, N)\n state_list += self.board.board[ind:ind + N]\n return tuple(state_list)",
"def get_robot_state(self):",
"def reset_states(self):\n self.state_c = (\n torch.zeros(self.num_layers, self.batch_size, self.rnn_hidden,\n device=self.device),\n torch.zeros(self.num_layers, self.batch_size, self.rnn_hidden,\n device=self.device),\n )\n self.state_g = (\n torch.zeros(self.num_layers, self.batch_size, self.rnn_hidden,\n device=self.device),\n torch.zeros(self.num_layers, self.batch_size, self.rnn_hidden,\n device=self.device),\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get or set the SEM status
|
def sem_status(self, voltage=-1, turn_off=False, turn_on=False):
if voltage > -1:
self.comm('SEM ' + str(voltage))
ret_string = self.status('RDE', 4)
else: #NOT IMPLEMENTED
ret_string = self.status('RDE', 4)
sem_voltage = int(ret_string)
if turn_off ^ turn_on: #Only accept self-consistent sem-changes
if turn_off:
self.comm('SEV 0')
if turn_on:
self.comm('SEV 1')
ret_string = self.status('ROP', 2)
sem_on = ret_string == "1"
return sem_voltage, sem_on
|
[
"def advapi32_QueryServiceLockStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"lpLockStatus\", \"cbBufSize\", \"pcbBytesNeeded\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def read_sem_voltage(self):\n sem_voltage = self.status('RDE', 4)\n return sem_voltage",
"def get_status (self):\n return self.__status",
"def do_status(self, cmd, target):\n if cmd.startswith(\"list\"):\n text = re.sub(\"^list\", \"\", cmd).strip()\n which = text.split(\" \", 1)[0]\n if not which or which == 'all':\n # False lets us use more than one column\n statuses = query(queries[\"getstatuses\"], False)\n elif which == 'ok':\n statuses = query(queries[\"getok\"], False)\n elif which == 'down' or which == 'notok' or which == 'problems':\n statuses = query(queries[\"getproblems\"], False)\n out0 = [] # s_ok=0 (False - down)\n out1 = [] # s_ok=1 (True - up)\n for row in statuses: # iterate over the rows\n #('service', 'um, something is wrong', 0)\n if row[2] == True:\n out1.append(\"%s: \\x0303%s\\x0F\" % (row[0], row[1]))\n else:\n out0.append(\"\\x0305%s:\\x0F %s\" % (row[0], row[1]))\n if len(out0) > 0:\n self.msg(\"\\x02Current status:\\x0F\", target)\n # Output all problems on their own line,\n # then all OKs on one line\n for row in out0:\n self.msg(row, target)\n self.msg(\" ¦ \".join(out1), target)\n else:\n # Output everything on one line\n self.msg(\"\\x02Current status:\\x0F %s\"\n % \" ¦ \".join(out1), target)\n elif cmd.startswith(\"set\"):\n text = re.sub(\"^set\", \"\", cmd).strip()\n service = text.split(\" \", 1)[0]\n status = text.split(\" \", 1)[1]\n if not service:\n if not self.quiet:\n self.msg(\"You have to specify a service\", target)\n elif service == 'all clear':\n self.do_status('ok all', target) # Don't duplicate code\n elif not status:\n if not self.quiet:\n self.msg(\"You have to specify a status\", target)\n else:\n if len(query('select s_service from status where '\n 's_service=\"%s\"' % service)) == 0:\n if not self.quiet:\n self.msg(\"%s is not a listed service\"\n % service, target)\n else:\n modquery('update status set s_state=\"%s\",s_ok=false '\n 'where s_service=\"%s\"' % (status, service))\n if not self.quiet:\n self.msg(\"%s now has status '%s'\"\n % (service, status), target)\n elif cmd.startswith(\"ok\"):\n text = re.sub(\"^ok\", \"\", cmd).strip()\n service = text.split(\" \", 1)[0]\n if not service:\n if not self.quiet:\n self.msg(\"You have to specify a service\", target)\n else:\n if service == 'all':\n modquery(queries['setallclear'])\n if not self.quiet:\n self.msg(\"\\x0303All clear!\\x0F\", target)\n elif len(query('select s_service from status where '\n 's_service=\"%s\"' % service)) == 0:\n if not self.quiet:\n self.msg(\"%s is not a known service\"\n % service, target)\n else:\n modquery('update status set s_ok=true,s_status=\"OK\" '\n 'where s_service=\"%s\"' % service)\n if not self.quiet:\n self.msg(\"Recorded %s as OK\" % service, target)\n else:\n raise CommanderError('unparseable command (%s)' % cmd)",
"def status(self):\n statuses = {0: 'MFCS is reset - press \"Play\"',\n 1: 'normal',\n 2: 'overpressure',\n 3: 'need to rearm'}\n c_error = self.dll.mfcs_get_status(self.handle, byref(self.c_status))\n k = ord(self.c_status.value)\n return k, statuses[k]",
"def advapi32_EnumServicesStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"dwServiceType\", \"dwServiceState\", \"lpServices\", \"cbBufSize\", \"pcbBytesNeeded\", \"lpServicesReturned\", \"lpResumeHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def advapi32_EnumServicesStatusEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hSCManager\", \"InfoLevel\", \"dwServiceType\", \"dwServiceState\", \"lpServices\", \"cbBufSize\", \"pcbBytesNeeded\", \"lpServicesReturned\", \"lpResumeHandle\", \"pszGroupName\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_cluster_status(self):\n status = {}\n groups = {}\n for line in self.run(\"/opt/VRTS/bin/hastatus -sum\", filter='^\\w.*'):\n parts = line.split()\n # 'A' lines are the systems. Output fields are: \"A\" System State Frozen\n if parts[0] == 'A':\n status[parts[1]] = {'state': parts[2], 'frozen': parts[3] != '0'}\n # 'B' lines are the group states. Output fields are: \"B\" Group System Probed AutoDisabled State\n elif parts[0] == 'B':\n #status[parts[2]]['groups'].append({'name': parts[1], 'probed': parts[3] == 'Y', 'autodisabled': parts[4] == 'Y', 'state': parts[5]})\n status[parts[2]][parts[1]] = {'probed': parts[3] == 'Y', 'autodisabled': parts[4] == 'Y', 'state': parts[5]}\n groups[parts[1]] = ''\n # update the group list. easier this way\n self.groups = groups.keys()\n return status",
"async def get_microsoft_status(self, ctx):\n data = await self.microsoft_services_status()\n embed = status(data)\n await ctx.send(embed=embed)",
"def set_assessor_status(self, status):\n # Connection to Xnat\n try:\n with get_interface(host=self.host) as xnat:\n assessor = self.assr_handler.select_assessor(xnat)\n if assessor.exists():\n dtype = DEFAULT_DATATYPE\n if self.assr_handler.get_proctype() == 'FS':\n dtype = DEFAULT_FS_DATATYPE\n former_status = assessor.attrs.get('%s/procstatus' % dtype)\n if former_status == JOB_RUNNING:\n assessor.attrs.set('%s/procstatus' % dtype, status)\n msg = ' - job status set to %s'\n self.print_msg(msg % str(status))\n except XnatAuthentificationError as e:\n print('Failed to connect to XNAT. Error: ', e)\n pass",
"def get_status(self):\n status = lowlevel.SM_PATH_STATUS_PARMS()\n status.path = self.path\n\n rc = lowlevel.sm_path_status(status)\n if rc:\n raise AculabSpeechError(rc, 'sm_path_status')\n\n return status.status",
"def get_status_capsule(self):\n return self.status_capsule",
"def __set_status(self, status):\n self.__status = status",
"def get_status(self) -> NodeManagerStatus:",
"def get_state(self):\n\n command_DriveEventStatus = self.getParameterCommand(\"0xa0\") \n DriveEventStatus = self.getValue(command_DriveEventStatus)\n if DriveEventStatus == 'No power':\n argout = PyTango.DevState.OFF\n elif str(int(DriveEventStatus)) == \"0\":\n argout = PyTango.DevState.STANDBY \n elif str(int(DriveEventStatus)) != \"0\":\n self.clearLatchedStatus()\n value = int(self.readLatchedEventStatus())\n if int(value) == int(DriveEventStatus): \n if (value&512)<>0:\n argout = PyTango.DevState.ALARM\n print(\"Positive limit switch is active\") \n elif (value&67108864)<>0:\n argout = PyTango.DevState.STANDBY\n print(\"motor2 rest status\")\n elif (value&1024)<>0:\n argout = PyTango.DevState.ALARM\n print(\"Negative limit switch is active\")\n elif (value&2048)<>0:\n argout = PyTango.DevState.STANDBY\n print(\"Enable input not active\") \n elif (value&65536)<>0:\n argout = PyTango.DevState.STANDBY\n print(\"Enable input not active\") \n elif (value&131072)<>0:\n argout = PyTango.DevState.STANDBY\n print(\"Something unknown stays\")\n elif (value&134217728)<>0:\n argout = PyTango.DevState.MOVING\n print(\"Error\")\n \n else:\n argout = PyTango.DevState.MOVING\n print(\"State is MOVING\") \n else:\n argout = PyTango.DevState.FAULT\n self.set_state(argout)\n return argout",
"def statusSet(self, status, callback):\n self.statusSet(None, status, callback)",
"def advapi32_SetServiceStatus(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hServiceStatus\", \"lpServiceStatus\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def set_status(self, status):\n if status in [\"w\", \"b\", \"d\", \"-\"]:\n self.status = status\n else:\n raise ValueError(\"Status of game can only be \\\"w\\\", \\\"b\\\" or \\\"d\\\", you tried to set status \"+status)",
"def status(self):\n self.lastStatus = ord(self.hardware.transfer(chr(Cmd.NOP), 1)[0])\n return self.lastStatus"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get or set the emission status.
|
def emission_status(self, current=-1, turn_off=False, turn_on=False):
emission_current = -1
if turn_off ^ turn_on:
if turn_off:
self.comm('EMI 0')
if turn_on:
self.comm('EMI 1')
ret_string = self.status('ROP', 3)
filament_on = ret_string == '1'
return emission_current, filament_on
|
[
"def get_status (self):\n return self.__status",
"def status(self) -> Optional['outputs.EntityStatus']:\n return pulumi.get(self, \"status\")",
"def set_power_outage_event_status(self, status, timeout=RESPONSE_DELAY):\n\n command.create_set_command(\n command.PROTOCOL_COMMAND_SET_POWER_OUTAGE_EVENT_STATUS, status, 1\n )\n command.send_command()\n delay_ms(timeout)\n raw = command.receive_command(COMMAND_SIZE_FOR_UINT8)\n\n status = raw[PROTOCOL_HEADER_SIZE]\n return status",
"def handle_getStatus_event():\n global STATUS_VALS\n socketio.emit('updateStatus', str(STATUS_VALS), callback=messageReceived)",
"def set_output(self, status):\n if status: #if True\n return self.command('OUT1')\n else:\n return self.command('OUT0')",
"def equipment_output_status(self):\n if self._data['uiData']['EquipmentOutputStatus'] in (0, None):\n if self.fan_running:\n return \"fan\"\n else:\n return \"off\"\n return EQUIPMENT_OUTPUT_STATUS[self._data['uiData']['EquipmentOutputStatus']]",
"def dataWriterStatus(self):\n status = self.dataStatusPv.get()\n if status:\n printMsg('DataWriter status is: ON')\n else:\n printMsg('DataWriter status is: OFF')\n return status",
"def __set_status(self, status):\n self.__status = status",
"def conservation_status(self):\n return self._conservation_status",
"def get_status(self):\n return self.completed",
"def _set_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'PREFERRED': {}, 'UNKNOWN': {}, 'DUPLICATE': {}, 'TENTATIVE': {}, 'INVALID': {}, 'INACCESSIBLE': {}, 'DEPRECATED': {}, 'OPTIMISTIC': {}},), is_leaf=True, yang_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='enumeration', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"status must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-if-ip:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'PREFERRED': {}, 'UNKNOWN': {}, 'DUPLICATE': {}, 'TENTATIVE': {}, 'INVALID': {}, 'INACCESSIBLE': {}, 'DEPRECATED': {}, 'OPTIMISTIC': {}},), is_leaf=True, yang_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='enumeration', is_config=False)\"\"\",\n })\n\n self.__status = t\n if hasattr(self, '_set'):\n self._set()",
"def xmms2_status(self):\n self.writeCommand('xmms2_status')\n return self",
"def status(self, value):\n if isinstance(value, Status):\n self.__status = value\n else:\n try:\n if 1 <= len(value) <= 2:\n self.__status = Status(*value)\n else:\n raise ValueError(\"Value must be of length 1 or 2\")\n except TypeError:\n raise TypeError(\"Value must be Status or iterable of length 1 or 2, with the first element an integer\")",
"def _push_status(self):\n\n self.data['status'] = self._status\n event_manager.device_changed(self)",
"def status_enum(self) -> \"Extraction.Status\":\n return self.Status(self.status)",
"def _read_status(self):\n self.cs.low() \n self.spi.write(bytes([_READ_STATUS]))\n regVal = self.spi.read(1)\n self.cs.high()\n\n return int.from_bytes(regVal, 'big')",
"def SetStatus(self, status):\n self.status = status\n self.put()",
"def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)",
"def set_Status(self, value):\n super(UpdateTicketInputSet, self)._set_input('Status', value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the voltages on the lens system
|
def read_voltages(self):
print 'Not possible on this QMG model'
|
[
"def read_voltage(self):\n return self.read_raw() * self._scale_factor",
"def get_voltage(self):\n pass",
"def Read_Voltage(self):\n self.voltage = -999.0\n self.current = -999.0\n try:\n if self.ser.isOpen():\n self.ser.flushInput()\n time.sleep(0.1)\n self.ser.write('MEAS:VOLT?\\r\\n')\n time.sleep(0.1)\n self.voltage = float(self.ser.readline().split()[0])\n time.sleep(0.1)\n self.ser.flushInput()\n self.ser.write('MEAS:CURR?\\r\\n')\n time.sleep(0.1)\n self.current = float(self.ser.readline().split()[0])\n return\n else:\n self.ser.close()\n return\n except Exception as e:\n print \"No communication to BK Precision Back-Bias supply. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.ser.close()\n return",
"def get_voltage(self):\n result=self.asker('OD')\n if result[0] not in ('N', 'E'):\n header=0\n else:\n header=1\n if result[0]=='E':\n overload=True\n else:\n overload=False\n mode='V'\n if header==1:\n mode=result[3]\n result=result[4:]\n voltage=float(result)\n pt_idx=result.find('.')\n if result[-4:-2]=='-3': \n #V_range={'-33':2, '-34':3, '+02':4, '+03':5, '+04':6}[result[-4:-2]+str(result.find('.'))]\n if pt_idx==3:\n V_range=2 #10 mV\n else:\n V_range=3 #100 mV\n else:\n if pt_idx==2:\n V_range=4 #1 V \n elif pt_idx==3:\n V_range=5 #10 V\n else:\n V_range=6 #30 V\n return dict(voltage=voltage, header=header, overload=overload, mode=mode, V_range=V_range)",
"def query_voltage(self):\n ret = self.driver.read_status_output_value()\n return ret",
"def measure_v(self):\n self._ser.write('MEAS?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Voltage: {__value}V')\n return __value",
"def test_voltage(self):\n self.logger.debug('Set and get voltage for both channels into unit_test.')\n CH = [1,2]\n for ch in CH:\n # #### set a new voltage\n V = 3.146 * ur('volt')\n self.logger.info('Voltage to set: {} in channel {}'.format(V, ch))\n self.inst.set_analog_value(ch, V)\n Vnew = self.inst.get_analog_value(ch)\n self.logger.info('Voltage read: {}'.format(Vnew))\n assert V == Vnew\n self.logger.info('Voltage assertion passed for channel: {}'.format(ch))\n\n self.logger.info('Voltage set and read unit_test passed.')",
"def measure_v(self):\n self._ser.write('MEAS:VOLT?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Voltage: {__value}V')\n return __value",
"def query_voltage(self):\n ret = self.com.query('VOLT?')\n return ret",
"def read_volt(self, channel):\n return self.read_value(channel, 'volt')",
"def voltage(analog_pin):\r\n return \"%0.2f\" % inVolts(analogRead(analog_pin))",
"def getVoltageBus_V(self):\n self.createCurrentPowerMonitor()\n return self.currentPowerMonitor.voltage()",
"def to_voltage(val):\n return (val / 1024.0) * 3.3",
"def bus_voltages(self):\n return Bridge.var_array_function(self.dss_obj.BUSV, 0, None, '')",
"def setupVoltageMeasurement(self):\n #\n self.reset()\n self.beeperOff()\n self.setSourceFunc(func=\"CURR\")\n self.sendValue(\":SOUR:CURR:MODE FIXED\")\n self.sendValue(\":SENS:FUNC \\\"VOLT\\\"\")\n self.sendValue(\":SOUR:CURR:RANG MIN\")\n self.sendValue(\":SOUR:CURR:LEV 0\")\n self.sendValue(\":SENS:VOLT:PROT 25\")\n self.sendValue(\":SENS:VOLT:RANG 20\")\n self.sendValue(\":FORM:ELEM VOLT\")",
"def get_voltages(cls, v_min, v_max, n, **kwargs):\n device = ArduinoVISADevice(**kwargs)\n for voltage in np.linspace(v_min, v_max, num=n):\n device.set_output_voltage(voltage=voltage)\n time.sleep(0.1)\n # Once again, a generator symplifies the code in pythonlab.views\n yield device.measure_input_voltage(channel=2)\n device.set_output_voltage(voltage=0)",
"def read_sem_voltage(self):\n sem_voltage = self.status('RDE', 4)\n return sem_voltage",
"def bus_seq_voltages(self):\n return Bridge.var_array_function(self.dss_obj.BUSV, 1, None, '')",
"def get_voltage(self):\n with self._data_lock:\n return self.data['voltage'][-1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the selected SEM voltage
|
def read_sem_voltage(self):
sem_voltage = self.status('RDE', 4)
return sem_voltage
|
[
"def read_voltages(self):\n print 'Not possible on this QMG model'",
"def Read_Voltage(self):\n self.voltage = -999.0\n self.current = -999.0\n try:\n if self.ser.isOpen():\n self.ser.flushInput()\n time.sleep(0.1)\n self.ser.write('MEAS:VOLT?\\r\\n')\n time.sleep(0.1)\n self.voltage = float(self.ser.readline().split()[0])\n time.sleep(0.1)\n self.ser.flushInput()\n self.ser.write('MEAS:CURR?\\r\\n')\n time.sleep(0.1)\n self.current = float(self.ser.readline().split()[0])\n return\n else:\n self.ser.close()\n return\n except Exception as e:\n print \"No communication to BK Precision Back-Bias supply. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.ser.close()\n return",
"def read_voltage(self):\n return self.read_raw() * self._scale_factor",
"def measure_v(self):\n self._ser.write('MEAS?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Voltage: {__value}V')\n return __value",
"def get_voltage(self):\n result=self.asker('OD')\n if result[0] not in ('N', 'E'):\n header=0\n else:\n header=1\n if result[0]=='E':\n overload=True\n else:\n overload=False\n mode='V'\n if header==1:\n mode=result[3]\n result=result[4:]\n voltage=float(result)\n pt_idx=result.find('.')\n if result[-4:-2]=='-3': \n #V_range={'-33':2, '-34':3, '+02':4, '+03':5, '+04':6}[result[-4:-2]+str(result.find('.'))]\n if pt_idx==3:\n V_range=2 #10 mV\n else:\n V_range=3 #100 mV\n else:\n if pt_idx==2:\n V_range=4 #1 V \n elif pt_idx==3:\n V_range=5 #10 V\n else:\n V_range=6 #30 V\n return dict(voltage=voltage, header=header, overload=overload, mode=mode, V_range=V_range)",
"def get_voltage(self):\n pass",
"def measure_v(self):\n self._ser.write('MEAS:VOLT?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Voltage: {__value}V')\n return __value",
"def query_voltage(self):\n ret = self.driver.read_status_output_value()\n return ret",
"def get_target_voltage(self):\n _logging.info(\"\")\n res = self._com.xfer([Stlink._Cmd.GET_TARGET_VOLTAGE], rx_length=8)\n an0 = int.from_bytes(res[:4], byteorder='little')\n an1 = int.from_bytes(res[4:8], byteorder='little')\n return round(2 * an1 * 1.2 / an0, 2) if an0 != 0 else None",
"def get_bank_voltage(self):\n response = self._send_command('BV?')\n if response[0] == b'?':\n raise LaserCommandError(Laser.get_error_code_description(response))\n response_str = response.decode('ascii')\n return float(response_str)",
"def set_cv(self, voltage):\n self.load_off()\n time.sleep(0.1)\n self._ser.write(f'VOLT {voltage:.4f}')\n self._ser.write('OUTP ON')\n time.sleep(0.1)\n print(f'{self._name} CV LOAD: {voltage:.4f}V\\n')",
"def get_voltage(self,channel):\n\t\tresponse = self.send_command( 'RU %d\\r' % channel )\n\t\tlinestr = response.decode('utf8')\n\t\tpattern = re.match(r'.*([+-])(\\d*.\\d*)', linestr, re.IGNORECASE)\n\t\t\n\t\tif pattern is not None:\n\t\t\tvoltage = float(pattern.group(2))\n\t\t\t#print(\"The voltage is \")\n\t\t\t#print (voltage)\n\t\t\t#print(pattern.group(2))\n\t\t\tif pattern.group(1) == '-':\n\t\t\t\tvoltage = -voltage\n\t\t\treturn voltage\n\t\telse :\n\t\t\treturn 0.",
"def get_voltage(self):\n with self._data_lock:\n return self.data['voltage'][-1]",
"def get_voltage_preset(self,channel):\n\t\tresponse = self.send_command( 'RUP %d\\r' % channel )\n\t\tlinestr = response.decode('utf8')\n\t\tpattern = re.match(r'.*([+-])(\\d*.\\d*)', linestr, re.IGNORECASE)\n\t\t\n\t\tif pattern is not None:\n\t\t\tvoltage = float(pattern.group(2))\n\t\t\tif pattern.group(1) == '-':\n\t\t\t\tvoltage = -voltage\n\t\t\treturn voltage\n\t\telse :\n\t\t\treturn 0.",
"def query_voltage(self):\n ret = self.com.query('VOLT?')\n return ret",
"def read_volt(self, channel):\n return self.read_value(channel, 'volt')",
"def read_supply_voltage_setpoint(self):\n self._is_hid_tool_not_connected_raise()\n\n try:\n voltage = read_supply_voltage_setpoint(self.housekeeper)\n except Jtagice3ResponseError:\n raise PymcuprogNotSupportedError(\"Connected debugger/board does not have supply voltage capability.\")\n\n return voltage",
"def getTerminalVoltage(self):\n return float(self.instr.query(\"MEAS:VOLT?\"))",
"def get_fet_voltage(self):\n response = self._send_command('FV?')\n if response[0] == b\"?\":\n raise LaserCommandError(Laser.get_error_code_description(response))\n return float(response.decode('ascii')) #TODO: All responce[:-4] does is returns b'', what is the purpose of this... It should be returning the actual data, something like responce [:-1] would remove the \\r and leave just data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the preamp range
|
def read_preamp_range(self):
preamp_index = self.status('RDE', 1)
preamp_range = self.ranges(index=preamp_index)
return preamp_range
|
[
"def test_get_range(self):\n loader = Loader('./tests/example.npz')\n loader.load_file()\n data_range = loader.get_range()\n self.assertEqual(np.float16(2.156), data_range[0])\n self.assertEqual(np.float16(21.94), data_range[1])",
"def read_range(read):\n range_pb = range_pb2.Range()\n utils_cpp.read_range(read, range_pb)\n return range_pb",
"def __get_range(self):\n return self.high - self.low",
"def get_range(self) -> str:\n pass",
"def range_PAPERTURE(self):\n self.open.write('PERIOD:APERTURE? MAX')\n replymax = self.open.read()\n self.open.write('PERIOD:APERTURE? MIN')\n replymin = self.open.read()\n return('Period Aperture range: ' + str(replymin) + ',' + str(replymax))",
"def get_page_range(self):\n return self.root.find('front/article-meta/page-range').text",
"def range_raw(self):\n return self._range_raw",
"def get_range(self):\n if self.get_type() in [int, float]:\n values = [lv[\"value\"] for lv in self.line_value]\n return [min(values), max(values)]",
"def read_range(self):\n self.i2c.mem_read(self.rxb, self.bus_addr, 0)\n values = []\n # skip first 2 bytes, then unpack high and low bytes from buffer data\n # data is pack in big-endian form\n for i in range(2, len(self.rxb), 2):\n range_val = (self.rxb[i] << 8) + self.rxb[i+1]\n if range_val > 0:\n values.append(range_val)\n return values",
"def _retrieve_precursor_range(self, scan_number, ms_level):\n \n # get value directly\n first_precursor_mass_d = ctypes.c_double()\n last_precursor_mass_d = ctypes.c_double()\n valid_l = ctypes.c_long()\n if not self._raw_reader.GetPrecursorRangeForScanNum(scan_number, ms_level, first_precursor_mass_d, last_precursor_mass_d, valid_l):\n if valid_l.value:\n return (float(first_precursor_mass_d.value), float(last_precursor_mass_d.value))\n \n return None",
"def getRange(self, parameter):\n\t\tif parameter == \"MinLength\":\n\t\t\tif self.numberOfPoints:\n\t\t\t\treturn 0, self.numberOfPoints\n\t\t\treturn 0, 1000\n\t\treturn 0, 0",
"def get_ranges(self):\r\n pass",
"def control_range(self):\n return self.__control_range",
"def data_range(self, n=-1):\n if len(self.results['peak_v']) > 0:\n last_v = self.results['peak_v'][n]\n else:\n last_v = self.v_start\n v_index = self.spectrogram._velocity_to_index(last_v)\n start_index = max(0, v_index - self.span)\n end_index = min(v_index + self.span,\n len(self.spectrogram.velocity))\n return (start_index, end_index)",
"def range(self) -> xr.DataArray:\n return self.max_val - self.min_val",
"def read_range(pgen_path, start_idx, end_idx, sample_subset=None, dtype=np.int8):\n reader, num_samples = get_reader(pgen_path, sample_subset=sample_subset)\n num_variants = end_idx - start_idx + 1\n genotypes = np.zeros([num_variants, num_samples], dtype=dtype)\n with reader as r:\n r.read_range(start_idx, end_idx+1, genotypes)\n return genotypes",
"def data_range(data_set):\n return max(data_set) - min(data_set)",
"def prefilterRange(self):\n return self.__settings['prefilterRange']",
"def parse_range(self, quoterange, part='low'):\n rv = Decimal(0.0)\n if quoterange:\n rangematch = re.match(r\"'(?P<low>\\d+\\.?\\d*) - (?P<high>\\d+\\.?\\d*)'\", quoterange)\n if rangematch:\n rv = Decimal(rangematch.group(part))\n return rv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return true if measurement is running
|
def measurement_running(self):
running = self.comm('STW')[6] == '0'
return running
|
[
"def running(self):\n return self.status == \"STARTED\"",
"def is_running(self):\n return self.motors.are_running()",
"def isRunning (self):\n\t\tjobid = self.job.id ()\n\t\tif not jobid:\n\t\t\treturn False\n\t\treturn Popen (['qstat', '-j', jobid], stdout=open(devnull, 'w'), stderr=open(devnull, 'w')).wait() == 0",
"def is_running(self):\n return self._job.state(jobset.NoCache()) == jobset._RUNNING",
"def is_started(self) -> bool:\n return self.__timer is not None",
"def IsRunning(self):\n\t\tmask = self.readRegister(DAY);\n\t\tif((mask & OSCRUN) == OSCRUN): \t\t\t#If oscillator = already running, do nothing.\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def aquisition_running(self):\n return bool(int(self.query(\"ACQ:STATE?\").strip()))",
"def is_running(self) -> bool:\n return self.game_running",
"def is_running(self, family: str) -> bool:\n latest_analysis = self.analyses(family=family).first()\n return latest_analysis and latest_analysis.status in TEMP_STATUSES",
"def check_gps(self):\n return self.running",
"def running(self):\r\n with self._condition:\r\n return self._state == RUNNING",
"def has_run(self, duration):\n if self.time() - self.start_time > duration:\n return True\n return False",
"def isRunning (self):\n\t\tif not self.job.pid:\n\t\t\treturn False\n\t\treturn ps.exists(int(self.job.pid))",
"def isDone(self):\n ## If the process has not been started yet, then return False\n if not self.started:\n return False\n\n return True",
"def is_running(self):\n # return False if the process is not started yet\n if not self._proc:\n return False\n # return False if there is a return code from the main process\n return self._proc.poll() is None",
"def is_running(self):\n if self.isRunning():\n # Startup\n return True\n\n if self.server is None:\n return False\n\n return self.server.serving",
"def is_monitoring(self):\n for key in self.r.scan_iter(\"status:*hera_snap_redis_monitor.py\"):\n state = self.r.get(key)\n return state == \"alive\"\n # no status key => monitor isn't running\n return False",
"def aquisition_continuous(self):\n return self.query(\"ACQ:STOPA?\").strip().startswith(\"RUNST\")",
"def _is_running(self):\n try:\n p = subprocess.Popen([self.vmware.get(\"path\"), \"-T\", \"ws\", \"list\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, error = p.communicate()\n output = output.decode(\"utf-8\")\n except OSError as e:\n print(\"Unable to check running status for %s. Reason: %s\" % (self.vmx_path, e))\n else:\n if output:\n output_lines = output.splitlines()\n print(output_lines)\n if self.vmx_path in output_lines:\n print(\"Found the snapshots name is %s\" % self.vmx_path)\n return True\n else:\n print(\"Doesn't has the correct snapshot setting\")\n return False\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read exactly `remaining` bytes from the socket. Blocks until the required bytes are available and returns the data read as raw bytes.
|
def read_k_bytes(sock, remaining=0):
ret = b"" # Return byte buffer
while remaining > 0:
d = sock.recv(remaining)
ret += d
remaining -= len(d)
return ret
|
[
"def read_socket(self):\n data = b''\n part = self.s.recv(4096)\n data += part\n while len(part) == 4096: # QUESTION: what if it's *exactly* 4096?\n part = self.s.recv(4096)\n data += part\n return data",
"def recv_bytes(self, amount=1024):\n return self.socket.recv(amount)",
"def readexactly(sock, numbytes):\n bytes_received = b\"\"\n count = 0\n while count < numbytes:\n byte = sock.recv(1)\n if byte:\n count += 1\n bytes_received += byte\n else:\n raise asyncio.streams.IncompleteReadError(bytes_received, numbytes-count)\n\n return bytes_received",
"def read(self, nbytes=-1):\n if self.socket is None:\n raise IOError(\"read called on not-connected socket\")\n if nbytes == 0:\n return \"\"\n resp = \"\"\n if nbytes < 0:\n break_on_timeout = True\n nbytes = 1\n else:\n break_on_timeout = False\n ntimeouts = 0\n while len(resp) < nbytes:\n r, _, _ = select.select([self.socket], [], [], self.timeout)\n if len(r) != 0:\n resp += self.socket.recv(1)\n else:\n if break_on_timeout:\n return resp\n else:\n ntimeouts += 1\n if ntimeouts >= self.max_timeouts:\n raise IOError('read timed out too many times [%s >= %s]' % \\\n (ntimeouts, self.max_timeouts))\n time.sleep(self.sleep_on_timeout)\n # if we're waiting for a timeout, keep reading until we get one\n if break_on_timeout:\n nbytes = len(resp) + 1\n\n return resp",
"def tcp_socket_recv_data(tcp_socket: socket.socket, length: int, header: str = '') -> bytes:\n recv_data = bytes()\n\n # Specified header fmt or length is zero means read length from header\n if header or not length:\n try:\n length = struct.unpack(header, tcp_socket.recv(struct.calcsize(header)))[0]\n except (struct.error, IndexError) as e:\n print(f'tcp_socket_recv_data: {e}(header: {header}, len: {length})')\n return bytes()\n\n while len(recv_data) < length:\n try:\n data = tcp_socket.recv(length - len(recv_data))\n except socket.timeout:\n return recv_data\n\n if not data:\n raise BrokenPipeError('peer closed')\n\n recv_data += data\n\n return recv_data",
"def receive_bytes(self, size):\n time_start = datetime.now()\n total_data = \"\"\n last_read = \"\"\n while True:\n last_read = self.request.recv(size)\n total_data += last_read\n size -= len(last_read)\n if size <= 0:\n break\n else:\n time.sleep(0.01)\n time_now = datetime.now()\n time_diff = time_now - time_start\n if time_diff.seconds >= 5:\n raise DataReadTimeoutException()\n return total_data",
"def read(self, count = -1):\n if self.remaining_length <= 0:\n return \"\"\n if count < 0 or count > self.remaining_length:\n count = self.remaining_length\n data = self.stream.read(count)\n self.remaining_length -= len(data)\n return data",
"def _recvbytes(self, bytes_needed, sock_buf = None):\r\n\tif sock_buf is None:\r\n\t\tsock_buf = StringIO()\r\n\tbytes_count = 0\r\n\twhile bytes_count < bytes_needed:\r\n\t\tchunk = self.recv(min(bytes_needed - bytes_count, 32768))\r\n\t\tpart_count = len(chunk)\r\n\r\n\t\tif part_count < 1:\r\n\t\t\treturn None\r\n\r\n\t\tbytes_count += part_count\r\n\t\tsock_buf.write(chunk)\r\n\t\r\n\treturn sock_buf",
"def read(self):\n # This will block waiting for data\n try:\n return self.read_impl()\n except socket.error as exc:\n if self.running:\n self.close()\n self.logger.warning(\"Read failure attempting reconnection. {}: {}\".format(type(exc).__name__, str(exc)))\n self.open()\n return b\"\"",
"def consume_payload_bytes(self, data):\n if not data or (len(data) == 0):\n # We're done and there's nothing to do.\n return None\n\n data_len = len(data)\n if data_len <= self.packet_bytes_remaining:\n # We're consuming all the data provided.\n self.ibuffer += data\n self.packet_bytes_remaining -= data_len\n\n # If we're no longer waiting for payload bytes,\n # we flip back to parsing header bytes and we\n # unpickle the payload contents.\n if self.packet_bytes_remaining < 1:\n self.reading_header = True\n self.deserialize_payload()\n\n # We're done, no more data left.\n return None\n else:\n # We're only consuming a portion of the data since\n # the data contains more than the payload amount.\n self.ibuffer += data[:self.packet_bytes_remaining]\n data = data[self.packet_bytes_remaining:]\n\n # We now move on to reading the header.\n self.reading_header = True\n self.packet_bytes_remaining = 0\n\n # And we can deserialize the payload.\n self.deserialize_payload()\n\n # Return the remaining data.\n return data",
"def recv(self, count):\n if not self._is_connected:\n raise NotConnectedError()\n try:\n data = self._sock.recv(count)\n except _socket.timeout:\n return \"\"\n except _socket.error, (errno, info):\n if errno in timeout_errnos:\n return \"\"\n else:\n raise SocketError(errno, info)\n if not data:\n raise EOFError()\n return data",
"def attempt_read(self): \n assert(len(self._data) < DATA_LENGTH)\n \n bytes_read = []\n while 1:\n try:\n data_read = self.usb_port.read(DATA_LENGTH)\n \n except IOError, e:\n if e.args[0] == errno.EAGAIN:\n print 'EAGAIN'\n break\n raise\n print 'read ', len(data_read), ' bytes.'\n bytes_read.append(data_read)\n if len(data_read) < DATA_LENGTH:\n break\n \n self._data += ''.join(bytes_read)\n \n # Post condition\n assert(len(self._data) <= DATA_LENGTH)\n \n return len(''.join(bytes_read))",
"def read(self, nbytes, timeout=None):\n out = bytes()\n self._lock.acquire()\n try:\n if len(self._buffer) == 0:\n if self._closed:\n return out\n # should we block?\n if timeout == 0.0:\n raise PipeTimeout()\n # loop here in case we get woken up but a different thread has\n # grabbed everything in the buffer.\n while (len(self._buffer) == 0) and not self._closed:\n then = time.time()\n self._cv.wait(timeout)\n if timeout is not None:\n timeout -= time.time() - then\n if timeout <= 0.0:\n raise PipeTimeout()\n\n # something's in the buffer and we have the lock!\n if len(self._buffer) <= nbytes:\n out = self._buffer_tobytes()\n del self._buffer[:]\n if (self._event is not None) and not self._closed:\n self._event.clear()\n else:\n out = self._buffer_tobytes(nbytes)\n del self._buffer[:nbytes]\n finally:\n self._lock.release()\n\n return out",
"def read_n_bytes(resp, sock, n, deadline):\n buf = b''\n while len(buf) < n:\n buf += read_by_deadline(resp, sock, deadline, n - len(buf))\n return buf",
"def decode_remaining_length():\n multiplier = 1\n value = 0\n length_bytes = b''\n while True:\n encoded_byte = yield from read_or_raise(reader, 1)\n length_bytes += encoded_byte\n int_byte = bytes_to_int(encoded_byte)\n value += (int_byte & 0x7f) * multiplier\n if (int_byte & 0x80) == 0:\n break\n else:\n multiplier *= 128\n if multiplier > 128 * 128 * 128:\n raise MQTTException(\"Invalid remaining length bytes:%s\" % bytes_to_hex_str(length_bytes))\n return value",
"def __recvall(self, bytes):\r\n data = \"\"\r\n while len(data) < bytes:\r\n data = data + self.recv(bytes-len(data))\r\n return data",
"def recv_nbytes(sock, n):\n #print n, \"this is \"\n bytes_received = 0\n received = \"\"\n # keep on reading until we get what we expected\n while bytes_received < n:\n ready_to_read,_,_ = select.select([sock],[],[])\n data = sock.recv(1, socket.MSG_PEEK)\n #rint data, \"this is the data\"\n\n if len(data) == 0:\n raise ClientDead\n else:\n assert(ready_to_read != [])\n new_recv = sock.recv(n - bytes_received)\n bytes_received += len(new_recv)\n received += new_recv\n assert(bytes_received == len(received))\n return received",
"def read_n_bytes(s, n):\n bytes_read = 0\n _buffer = []\n while bytes_read < n:\n data = s.recv(n - bytes_read)\n if data == b'':\n break\n\n bytes_read += len(data)\n _buffer.append(data)\n\n result = b''.join(_buffer)\n if len(result) != n:\n log.warning(\"expected {} bytes but read {}\".format(n, len(result)))\n\n return b''.join(_buffer)",
"def doRead(self):\n try:\n data = self.socket.recv(self.bufferSize)\n except socket.error as se:\n if se.args[0] == EWOULDBLOCK:\n return\n else:\n return main.CONNECTION_LOST\n\n return self._dataReceived(data)",
"def socket_recv(sock, n):\n data = b''\n while len(data) < n:\n packet = sock.recv(n - len(data))\n if not packet:\n return None\n data += packet\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Send a frame to a remote socket. We first send the size of the frame in bytes, followed by the actual frame. The frame is serialized using the pickle module (cPickle in Python 2). Arguments
|
def send_frame(sock, frm):
    # Requires module-level `import socket` and `import pickle`
    if frm is None or sock is None or not isinstance(sock, socket.socket):
        return False  # Nothing to send
    frm_raw_bytes = pickle.dumps(frm)
    dsize = len(frm_raw_bytes)
    # 4-byte, big-endian length prefix, then the pickled frame itself
    sock.sendall(dsize.to_bytes(4, byteorder="big"))
    sock.sendall(frm_raw_bytes)
    return True
|
[
"def send_frame(self, frame):\n self.transport.write(frame.pack())",
"def send(self, frame):\n self._logger.debug('frame=\"%s\"', frame)\n content = \"\\n\".join([self._auth, str(frame)])\n return self.interface.sendto(content, self._addr)",
"def sendFrame(self, img):\n try:\n self.prevFrame\n except AttributeError:\n self.initializeStream(img)\n # instanciate temporary bytearray to send later\n Tfile = io.BytesIO()\n # use numpys built in save function to diff with prevframe\n # because we diff it it will compress more\n np.save(Tfile, img-self.prevFrame)\n\n # compress it into even less bytes\n b = self.C.compress(Tfile.getvalue())\n\n # saving prev frame\n self.prevFrame = img\n if self.Write:\n try:\n self.out.write(img) # save frame to a video file server side\n except Exception as e:\n self.close(e)\n\n # send it\n try:\n send_msg(self.conn, b)\n except socket.error as e:\n self.close(e)\n \n #self.log(\"Sent {}KB (frame {})\".format(int(len(b)/1000), self.frameno)) # debugging\n self.frameno += 1",
"def send(self, knxipframe: KNXIPFrame, addr: tuple[str, int] | None = None) -> None:\n knx_logger.debug(\n \"Sending to %s: %s\",\n self.remote_hpai,\n knxipframe,\n )\n if self.transport is None:\n raise CommunicationError(\"Transport not connected\")\n\n self.transport.write(knxipframe.to_knx())",
"def send_packet():",
"def sendFrames(self):\n\n if self.state == FRAMES:\n for frame in self.pending_frames:\n # Encode the frame before sending it.\n if self.codec:\n frame = encoders[self.codec](frame)\n self.transport.write(\"\\x00%s\\xff\" % frame)\n self.pending_frames = []",
"def send(self, packet):\n frame = super(EthernetClient, self).get_frame(packet)",
"def sendFrames(self):\r\n\r\n if self.state != FRAMES:\r\n return\r\n\r\n if self.flavor == HYBI00:\r\n maker = make_hybi00_frame\r\n elif self.flavor in (HYBI07, HYBI10, RFC6455):\r\n if self.do_binary_frames:\r\n maker = make_hybi07_frame_dwim\r\n else:\r\n maker = make_hybi07_frame\r\n else:\r\n raise WSException(\"Unknown flavor %r\" % self.flavor)\r\n\r\n for frame in self.pending_frames:\r\n # Encode the frame before sending it.\r\n if self.codec:\r\n frame = encoders[self.codec](frame)\r\n packet = maker(frame)\r\n self.transport.write(packet)\r\n self.pending_frames = []",
"def send_raw_packet(packet, port):",
"def send(self, knxipframe) -> None:\n knx_logger.debug(\"Sending: %s\", knxipframe)\n if self.transport is None:\n raise XKNXException(\"Transport not connected\")\n\n if self.multicast:\n self.transport.sendto(bytes(knxipframe.to_knx()), self.remote_addr)\n else:\n self.transport.sendto(bytes(knxipframe.to_knx()))",
"def send(self, data: bytes) -> None:\n data = self._convert_to_single_bytes(data, self.config)\n\n self.mc.bcp_processor.send('dmd_frame', rawbytes=data, name=self.name)",
"def send_img(self, file):\n imageDict = {'imageFile': file, 'user': 'test'}\n pickleData = pickle.dumps(imageDict)\n taille = sys.getsizeof(pickleData)\n print(\"Taille : {}\".format(taille))\n self.client_socket.send(str(taille).encode())\n self.client_socket.send(pickleData)\n # self.client_socket.send(str(content).encode())",
"def send(self, message_object, socket):\n socket.sendall(pickle.dumps(message_object, protocol=2))",
"def test_write_frame__fast__buffer_store_resize(self):\n small_msg = Message(body='t')\n small_frame = 2, 1, spec.Basic.Publish, b'x' * 10, small_msg\n self.g(*small_frame)\n self.write.assert_called_once()\n write_arg = self.write.call_args[0][0]\n assert isinstance(write_arg, memoryview)\n assert len(write_arg) < self.connection.frame_max\n self.connection.reset_mock()\n\n # write a larger message to the same frame_writer after increasing frame_max\n large_msg = Message(body='t' * (self.connection.frame_max + 10))\n large_frame = 2, 1, spec.Basic.Publish, b'x' * 10, large_msg\n original_frame_max = self.connection.frame_max\n self.connection.frame_max += 100\n self.g(*large_frame)\n self.write.assert_called_once()\n write_arg = self.write.call_args[0][0]\n assert isinstance(write_arg, memoryview)\n assert len(write_arg) > original_frame_max",
"def protocol_send(self, data, sock):",
"def send(obj, id, host='localhost', port=9999):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((host, port))\n w = sock.makefile('wb')\n r = sock.makefile('rb')\n pickle.dump({'action': 'send', 'object': obj, 'id': id}, w)\n w.close()\n retval = pickle.load(r)\n finally:\n sock.close()\n return retval",
"def send(self, buf):",
"def setFrame(self, frame):\n self.setLen(TM_DU_V1_CDS1_HEADER_BYTE_SIZE)\n self.append(frame)\n self.packetSize = len(self)",
"def send_zipped_pickle(socket, obj, flags=0, protocol=2):\n p = pickle.dumps(obj, protocol)\n# z = zlib.compress(p, 8)\n return socket.send(p, flags=flags)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Receive a frame from the socket. Reads the size of the frame first, followed by the actual data. The frame is then deserialized and returned as an instance of the frame class. Arguments
|
def recv_frame(sock, timeout=None):
    # Requires module-level `import socket` and `import pickle`
    if sock is None or not isinstance(sock, socket.socket):
        raise TypeError("Socket expected!")
    frm = None
    if timeout is not None:
        # Do not wait for more than `timeout` seconds for the length prefix
        sock.settimeout(timeout)
    try:
        # Read the 4-byte, big-endian frame length from the channel first
        frm_len = int.from_bytes(read_k_bytes(sock, 4), "big")
        # Switch back to blocking mode for the payload
        sock.settimeout(None)
        frm = pickle.loads(read_k_bytes(sock, frm_len))
    except socket.timeout:
        frm = None
    finally:
        # Restore blocking mode; return outside the finally block so that
        # unexpected exceptions are not silently swallowed
        sock.settimeout(None)
    return frm
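`recv_frame` depends on a `read_k_bytes` helper that is not shown in this snippet. A plausible minimal implementation (an assumption about the missing helper, not the original code) loops on `recv` until exactly `k` bytes have arrived:

def read_k_bytes(sock, k):
    """Read exactly `k` bytes from `sock`, looping over short reads."""
    buf = b""
    while len(buf) < k:
        chunk = sock.recv(k - len(buf))
        if not chunk:
            raise EOFError("Socket closed before %d bytes were read" % k)
        buf += chunk
    return buf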
|
[
"def receive_next_frame_view(self):\n frame_length = int.from_bytes(self.socket.recv(4), self.endianness) # first we read the length of the frame\n frame_bytes = self.receive_bytes(frame_length)\n\n if self.gzip:\n try:\n return gzip.decompress(frame_bytes), frame_length\n except:\n print(\"Error decompressing gzip frame: is gzip enabled on the server?\")\n else:\n return frame_bytes, frame_length",
"def read_frame(resp, sock, deadline):\n header = read_n_bytes(resp, sock, 8, deadline)\n _, size = struct.unpack('>BxxxL', header)\n return read_n_bytes(resp, sock, size, deadline)",
"def load_frame(self):\n with socket.socket() as sock:\n sock.connect(self.minicap_address)\n header = minicap.read_header(sock)\n frame_size = bitstring.ConstBitStream(bytes=sock.recv(4))\n frame = minicap.read_frame(sock, frame_size.uintle)\n return frame",
"def get_data_frame(self):\n DATA_LENGTH = 756\n # todo: find solution to communication breakdown\n tries = 10\n while tries > 0:\n data = self.data_socket.recv(DATA_LENGTH)\n if len(data) == DATA_LENGTH:\n return data\n tries -= 1\n # todo: raise specific exception\n raise Exception",
"def parseFrames(self):\n\n start = self.buf.find(\"\\x00\")\n\n while start != -1:\n end = self.buf.find(\"\\xff\")\n if end == -1:\n # Incomplete frame, try again later.\n return\n else:\n frame, self.buf = self.buf[start + 1:end], self.buf[end + 1:]\n # Decode the frame, if we have a decoder.\n if self.codec:\n frame = decoders[self.codec](frame)\n # Pass the frame to the underlying protocol.\n ProtocolWrapper.dataReceived(self, frame)\n start = self.buf.find(\"\\x00\")",
"async def recv(self):\n pts, time_base = await self.next_timestamp()\n\n width, height, image = self.buffer_manager.get_current_frame()\n\n if (\n self.frame is None\n or self.frame.planes[0].width != width\n or self.frame.planes[0].height != height\n ):\n if CYTHON_AVAILABLE:\n self.frame = FuryVideoFrame(width, height, 'rgb24')\n self.image = image\n\n if not CYTHON_AVAILABLE:\n # if the buffer it's already flipped\n # self.frame.planes[0].update(self.image)\n self.image = np.frombuffer(self.image, 'uint8')[\n 0 : width * height * 3\n ].reshape((height, width, 3))\n self.image = np.flipud(self.image)\n self.frame = VideoFrame.from_ndarray(self.image)\n else:\n self.frame.update_from_buffer(self.image)\n\n self.frame.pts = pts\n self.frame.time_base = time_base\n\n return self.frame",
"def receive(self, b):\n self.buf = self.buf + b\n byte_frames = []\n\n while 1:\n if not self.reading_header and len(self.buf) >= self.payload_length:\n byte_frames.append(self.buf[0:self.payload_length])\n self.buf = self.buf[self.payload_length:]\n self.reading_header = True\n\n elif self.reading_header and len(self.buf) >= self.header_length:\n self.payload_length = int.from_bytes(\n self.buf[0:self.header_length], 'big')\n self.buf = self.buf[self.header_length:]\n self.reading_header = False\n else:\n break\n return byte_frames",
"async def read(self) -> Message:\n await trio.sleep(0)\n\n # The first record is a msg header without any ancillary data. Each SEQPACKET record\n # must be read with with a single recv/recvmsg call with sufficient buffer size.\n header = await self.socket.recv(HEADER_SIZE)\n if len(header) == 0:\n raise NoDataError('Cannot read header.') # Probably EOF\n if len(header) != HEADER_SIZE:\n raise ReadError(f'Incomplete header read: {header}.')\n\n num = int_from_bytes(header[0:INT32_SIZE])\n flags = int_from_bytes(header[INT32_SIZE:2 * INT32_SIZE])\n data_size = int_from_bytes(header[2 * INT32_SIZE:3 * INT32_SIZE])\n data = bytearray(data_size)\n n_fds = int_from_bytes(header[3 * INT32_SIZE:4 * INT32_SIZE])\n ancillary_size = CMSG_SPACE(INT_SIZE * n_fds) if n_fds else 0\n\n # The second record contains a msg body and file descriptors. Each SEQPACKET record\n # must be read with with a single recv/recvmsg call with sufficient buffer size.\n received, ancillary, _flags, _address = await self.socket.recvmsg_into([data], ancillary_size)\n if received != data_size:\n raise ReadError(f'Incomplete body received: {received}/{data_size} bytes.')\n\n fds: List[Fd] = []\n for level, type_, extra_data in ancillary:\n if level != SOL_SOCKET or type_ != SCM_RIGHTS:\n raise WrongDataError(\n f'Unsupported ancillary data: level={level}, type={type_}, data={extra_data}')\n\n # File descriptors are received as native integer array.\n fds += [Fd(i) for i in array.array('i', extra_data)]\n\n if len(fds) != n_fds:\n raise WrongDataError(f'Wrong number of fds: {n_fds} expected, {len(fds)} received.')\n\n return Message(num, flags, data, fds)",
"def _receive_video_thread(self):\n packet_data = \"\"\n\n while True:\n try:\n res_string, ip = self.socket_video.recvfrom(2048)\n packet_data += res_string\n # end of frame\n if len(res_string) != 1460:\n for frame in self._h264_decode(packet_data):\n self.frame = frame\n packet_data = \"\"\n\n except socket.error as exc:\n print (\"Caught exception socket.error : %s\" % exc)",
"def _recv_pkt(self):\n header = self._sock.recv(struct.calcsize('<3i'))\n (pkt_size, pkt_id, pkt_type) = struct.unpack('<3i', header)\n body = self._sock.recv(pkt_size - 8)\n # Strip the 2 trailing nulls from the body\n body.rstrip('\\x00')\n return RconPacket(pkt_id, pkt_type, body)",
"def recv(self, buff_len=1024):\r\n buffer = self.s.recv(buff_len)\r\n return (buffer)",
"def read_frame_NB(self): # default 0.1 ms timeout\n self.check_serial()\n \n try:\n while True:\n a = self.serial.read()\n a = ord(a)\n if a == 0x7E:\n a = (ord(self.serial.read()) << 8 ) + ord(self.serial.read())\n frame = bytearray(a+1)\n check = 0\n for i in range(a+1):\n frame[i] = ord(self.serial.read())\n check += frame[i]\n if (check & 0xFF) != 0xFF:\n continue # Bad checksum\n if frame[0] != 0x81:\n continue # it's not a 16bits addr RF packet\n src = (frame[1] << 8) + frame[2]\n data = \"\"\n for i in range(5,a):\n data += chr(frame[i])\n return src,data\n\n except TypeError:\n raise TypeError # time out, no available data in receive buffer but time += 0,1 !\n except OSError:\n pass # bug fix on mini pc",
"def receive( self ) -> JSONData:\n\n _LOGGER.debug( \"Starting receive\" )\n msg = b''\n while ( chunk := self.sock.recv( 2048 ) ) != b'':\n msg += chunk\n _LOGGER.debug( \"Received %s\", msg )\n return self.decode( msg )",
"def _recvobj(self):\r\n\tsock_buf = self.recvbytes(4)\r\n\tif sock_buf is None:\r\n\t\treturn None\r\n\r\n\tmessage_length = _bintoint(sock_buf.getvalue())\r\n\tsock_buf = self.recvbytes(message_length - 4, sock_buf)\r\n\tif sock_buf is None:\r\n\t\treturn None\r\n\r\n\tretval = loads(sock_buf.getvalue())\r\n\treturn retval",
"def receive_stream(ws, command, frame_callback):\n binary = command.get('binary', False)\n while True:\n data = ws.receive()\n data = base64.b64decode(data)\n if not binary:\n data = unicode(data, \"utf-8\",\"ignore\")\n if data == command['eof']:\n break\n elif data == command['keepalive']:\n pass\n else:\n try:\n frame_callback(data, binary)\n except:\n log.debug(\"data = {}\".format(repr(data)))\n raise",
"def _decode_frame(self, data):\r\n preamble, seq1, seq2, seq3, seq4, chan1, chan2, chan3, chan4, chan5, chan6, chan7, chan8, mode, event, adc = self.frame_struct.unpack(bytes(data))\r\n seq = seq1 + 256*seq2 + 65536*seq3 + 16777216*seq4\r\n X = numpy.atleast_2d(numpy.array([chan1, chan2, chan3, chan4, chan5, chan6, chan7, chan8])).T\r\n\r\n return Frame(seq=seq, mode=mode, event=event, volt=8*adc/4095.0, X=X)",
"def read_frame(self) -> 'PySharkPacket':\n from pcapkit.toolkit.pyshark import packet2dict, tcp_traceflow\n ext = self._extractor\n\n # fetch PyShark packet\n packet = cast('PySharkPacket', self._extmp.next())\n\n # verbose output\n ext._frnum = int(packet.number)\n ext._vfunc(ext, packet)\n\n # write plist\n frnum = f'Frame {ext._frnum}'\n if not ext._flag_q:\n info = packet2dict(packet)\n if ext._flag_f:\n ofile = ext._ofile(f'{ext._ofnm}/{frnum}.{ext._fext}')\n ofile(info, name=frnum)\n else:\n ext._ofile(info, name=frnum)\n ofile = ext._ofile\n ext._offmt = ofile.kind\n\n # trace flows\n if ext._flag_t:\n if ext._tcp:\n data_tf_tcp = tcp_traceflow(packet)\n if data_tf_tcp is not None:\n ext._trace.tcp(data_tf_tcp)\n\n # record frames\n if ext._flag_d:\n # setattr(packet, 'packet2dict', packet2dict)\n ext._frame.append(packet)\n\n # return frame record\n return packet",
"def receiver(self):\n while True:\n # Read a byte from the serial line (blocking call)\n data = self.serial.read(size=1)\n rx_byte = ord(data[0])\n if SHOW_RAW_FOR_DEBUG:\n print(\"Data RX on wire: \" + '0x{:02x}'.format(rx_byte))\n self.build_received_frame(rx_byte)",
"def fetchFrame(self, getFrame, args=[]):\n return getFrame(*args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method gets all the details of a specific artist using their login token.
|
def get_artist(self, request):
user = request.user
result = ArtistProfile.call(user=user)
if result.failed:
return Response(
errors=dict(errors=result.error.value),
status=status.HTTP_400_BAD_REQUEST
)
return Response(data=result.value, status=status.HTTP_200_OK)
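The view above assumes a service-object pattern: `ArtistProfile.call(...)` returns a result exposing `failed`, `value`, and `error.value`. Those classes are not shown here; the sketch below is purely hypothetical and only illustrates the shape the view relies on.

class ServiceError:
    def __init__(self, value):
        self.value = value  # serializable error payload

class ServiceResult:
    def __init__(self, value=None, error=None):
        self.value = value
        self.error = error

    @property
    def failed(self):
        return self.error is not None

class ArtistProfile:
    @classmethod
    def call(cls, user):
        profile = getattr(user, "artist_profile", None)  # assumed relation name
        if profile is None:
            return ServiceResult(error=ServiceError("Artist profile not found"))
        return ServiceResult(value={"id": profile.id, "name": profile.name})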
|
[
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def get_artists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('artists', search, start,\r\n max_items)",
"def get_artist_from_context(self, context):\n artist_id = id_from_uri(context[\"uri\"])\n result = self.get_api_v1(\"artists/{}\".format(artist_id))\n return Artist(result or {})",
"def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)",
"def _search_for_artist(self, artist):\n token = self._generate_token()\n if token:\n sp = Spotify(client_credentials_manager=token)\n search_results = sp.search(q=artist, type='artist')\n try:\n first_result = search_results['artists']['items'][0]\n return first_result\n except IndexError:\n pass",
"def get_sg_artist(artist_id):\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'id': artist_id}\n\n response = requests.get(SG_URL + 'performers', params=params)\n\n return response.json()",
"def get_artist_via_id(self, request, artist_id):\n result = ArtistDetail.call(artist_id=artist_id)\n\n if result.failed:\n return Response(errors=dict(errors=result.error.value), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(data=result.value, status=status.HTTP_200_OK)",
"def test_artist_get(self):\n response = self.client.open(\n '/NicholasMaisel/MusicCapping/1.0.0/artist',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def get_artist(self, artist_id):\n response = self.__get_data(self.url.artists_url().format(id=str(artist_id)))\n return Artist(artist_id=artist_id, name=response['name'], popularity=response['popularity'],\n genres=response['genres'])",
"def retrieve_artist_from_id(artist_id):\n logging.info('Retrieving %s', artist_id)\n\n url = BASE_URL_MYAPIFILMS + 'imdb?idName=' + artist_id + '&format=JSON&filmography=0&lang=en-us&bornDied=0&starSign=0&uniqueName=0&actorActress=0&actorTrivia=0&actorPhotos=N&actorVideos=N&salary=0&spouses=0&tradeMark=0&personalQuotes=0&token=307cccfe-d20b-4b69-b976-d6a024538864'\n json_page = get(url).encode('utf-8')\n json_data = json.loads(json_page)\n\n artist = Artist(id=json_data[\"idIMDB\"],\n name=json_data[\"name\"],\n photo=clear_url(json_data[\"urlPhoto\"]) if ('urlPhoto' in json_data and json_data['urlPhoto'] != \"\") else None)\n\n return artist.put()",
"def artistInfo(aid):\n\n # checks if user is logged in, if not redirects to welcome page\n if notLoggedIn(): \n return redirect( url_for('index'))\n\n # gets artist info and displays it\n conn = dbi.connect() \n artist = music.getArtistById(conn, aid)\n artistsWork = music.getMusicByArtistId(conn, aid)\n return render_template('artist-info.html',title='Artist Info', \n artist = artist, works = artistsWork)",
"def get_artist_analytics_via_id(self, request, artist_id):\n info = ArtistSongPerMonth.call(artist_id=artist_id)\n\n if info.failed:\n return Response(errors=dict(errors=info.error.value), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(data=info.value, status=status.HTTP_200_OK)",
"def fetchArtistId(name):\n url = \"https://api.spotify.com/v1/search?q=\"+ name +\"&type=artist\" \n req = grequests.get(url)\n result_list = grequests.map([req])\n if not result_list[0].ok:\n print \"Error\"\n info = result_list[0].json()\n ID = info['artists']['items'][0]['id']\n return(ID)",
"def get_artist(cls, artist_name: str, session: Session, spotify_svc: Spotify) -> Artist:\n search = Artist.get_by_name(artist_name, session)\n if search:\n return search\n return cls._create_artist(artist_name, spotify_svc)",
"def get_similar_artists(artist_uri):\n base_url = 'http://developer.echonest.com/api/v4/artist/similar?'\n suffix = \\\n 'api_key='+ keys['EN_API_KEY']+'&'\\\n 'id='+artist_uri\n r = requests.get(base_url + suffix)\n \n if int(r.headers['x-ratelimit-remaining']) < 3: \n print 'approaching ratelimit. remaining: %d'%int(r.headers['x-ratelimit-remaining'])\n time.sleep(30)\n try:\n return json.loads(r.content)['response']['artists']\n except KeyError:\n raise EchoNestAPIException(json.dumps(json.loads(r.content),indent=4))",
"async def get_artist_info(self, song_id: int = 0) -> ArtistInfo:\r\n data = await self.session.test_song(song_id)\r\n return ArtistInfo(**data, client=self)",
"def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist",
"def get_artist_json(artist_name):\n \n try:\n results = sp.search(q='artist:' + artist_name, type='artist')\n except:\n logging.error(\" Bad request searching for \" + artist_name)\n return None\n \n items = results['artists']['items']\n \n return items[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method gets all the details of a specific artist using their ID.
|
def get_artist_via_id(self, request, artist_id):
result = ArtistDetail.call(artist_id=artist_id)
if result.failed:
return Response(errors=dict(errors=result.error.value), status=status.HTTP_400_BAD_REQUEST)
return Response(data=result.value, status=status.HTTP_200_OK)
|
[
"def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)",
"def get_artist(self, artist_id):\n response = self.__get_data(self.url.artists_url().format(id=str(artist_id)))\n return Artist(artist_id=artist_id, name=response['name'], popularity=response['popularity'],\n genres=response['genres'])",
"def retrieve_artist_from_id(artist_id):\n logging.info('Retrieving %s', artist_id)\n\n url = BASE_URL_MYAPIFILMS + 'imdb?idName=' + artist_id + '&format=JSON&filmography=0&lang=en-us&bornDied=0&starSign=0&uniqueName=0&actorActress=0&actorTrivia=0&actorPhotos=N&actorVideos=N&salary=0&spouses=0&tradeMark=0&personalQuotes=0&token=307cccfe-d20b-4b69-b976-d6a024538864'\n json_page = get(url).encode('utf-8')\n json_data = json.loads(json_page)\n\n artist = Artist(id=json_data[\"idIMDB\"],\n name=json_data[\"name\"],\n photo=clear_url(json_data[\"urlPhoto\"]) if ('urlPhoto' in json_data and json_data['urlPhoto'] != \"\") else None)\n\n return artist.put()",
"def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist",
"def get_sg_artist(artist_id):\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'id': artist_id}\n\n response = requests.get(SG_URL + 'performers', params=params)\n\n return response.json()",
"def artist_detail(request, artist_id):\n artist = get_object_or_404(Artist, pk=artist_id)\n all_products = Product.objects.all()\n all_events = Event.objects.all()\n products = all_products.filter(artist__name=artist)\n events = all_events.filter(artist__name=artist)\n context = {\n 'artist': artist,\n 'products': products,\n 'events': events,\n }\n return render(request, 'artists/artist_detail.html', context)",
"async def get_artist_info(self, song_id: int = 0) -> ArtistInfo:\r\n data = await self.session.test_song(song_id)\r\n return ArtistInfo(**data, client=self)",
"def get_artist_related(self, artists_id):\n response = self.__get_data(self.url.artists_related_url().format(id=str(artists_id)))\n list_of_related_artists = []\n for related_artist in response['artists']:\n artist = Artist(artist_id=related_artist['id'], name=related_artist['name'],\n popularity=related_artist['popularity'], genres=related_artist['genres'])\n list_of_related_artists.append(artist)\n return list_of_related_artists",
"def get_artist_analytics_via_id(self, request, artist_id):\n info = ArtistSongPerMonth.call(artist_id=artist_id)\n\n if info.failed:\n return Response(errors=dict(errors=info.error.value), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(data=info.value, status=status.HTTP_200_OK)",
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def artists(self, artists):\n\n tlist = [self._get_id(\"artist\", a) for a in artists]\n return self._get(\"artists/?ids=\" + \",\".join(tlist))",
"def get_artist_albums(self, artist_id): # TODO initialize and return a list of Album objects\n return self.__get_data(self.url.artists_albums_url().format(id=str(artist_id)))",
"def get_artist_from_context(self, context):\n artist_id = id_from_uri(context[\"uri\"])\n result = self.get_api_v1(\"artists/{}\".format(artist_id))\n return Artist(result or {})",
"def get_track_artists(self, track_id):\n response = self.__get_data(self.url.tracks_url().format(id=str(track_id)))\n artists = []\n for album_artists in response['artists']:\n artist = self.get_artist(album_artists['id'])\n artists.append(artist)\n return artists",
"def get_album_artists(self, album_id):\n response = self.__get_data(self.url.albums_url().format(id=str(album_id)))\n artists = []\n for album_artists in response['artists']:\n artist = self.get_artist(album_artists['id'])\n artists.append(artist)\n return artists",
"def fetchAlbumIds(artist_id):\n url_base = \"https://api.spotify.com/v1/artists/\" + artist_id\n url_album = \"/albums?album_type=album\"\n url_market = \"&market=US\"\n url = url_base + url_album + url_market\n req = requests.get(url)\n data = req.json()\n album = data['items'][0]['id']\n return album",
"def get_artist(self, request):\n user = request.user\n result = ArtistProfile.call(user=user)\n\n if result.failed:\n return Response(\n errors=dict(errors=result.error.value),\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response(data=result.value, status=status.HTTP_200_OK)",
"def fetchArtistId(name):\n url = \"https://api.spotify.com/v1/search?q=\"+ name +\"&type=artist\" \n req = grequests.get(url)\n result_list = grequests.map([req])\n if not result_list[0].ok:\n print \"Error\"\n info = result_list[0].json()\n ID = info['artists']['items'][0]['id']\n return(ID)",
"def artistInfo(aid):\n\n # checks if user is logged in, if not redirects to welcome page\n if notLoggedIn(): \n return redirect( url_for('index'))\n\n # gets artist info and displays it\n conn = dbi.connect() \n artist = music.getArtistById(conn, aid)\n artistsWork = music.getMusicByArtistId(conn, aid)\n return render_template('artist-info.html',title='Artist Info', \n artist = artist, works = artistsWork)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method gets the data analytics of a specific artist using their ID.
|
def get_artist_analytics_via_id(self, request, artist_id):
info = ArtistSongPerMonth.call(artist_id=artist_id)
if info.failed:
return Response(errors=dict(errors=info.error.value), status=status.HTTP_400_BAD_REQUEST)
return Response(data=info.value, status=status.HTTP_200_OK)
|
[
"def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist",
"def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)",
"def get_artist_via_id(self, request, artist_id):\n result = ArtistDetail.call(artist_id=artist_id)\n\n if result.failed:\n return Response(errors=dict(errors=result.error.value), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(data=result.value, status=status.HTTP_200_OK)",
"def get_sg_artist(artist_id):\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'id': artist_id}\n\n response = requests.get(SG_URL + 'performers', params=params)\n\n return response.json()",
"def get_artist(self, artist_id):\n response = self.__get_data(self.url.artists_url().format(id=str(artist_id)))\n return Artist(artist_id=artist_id, name=response['name'], popularity=response['popularity'],\n genres=response['genres'])",
"def fetchAlbumIds(artist_id):\n url_base = \"https://api.spotify.com/v1/artists/\" + artist_id\n url_album = \"/albums?album_type=album\"\n url_market = \"&market=US\"\n url = url_base + url_album + url_market\n req = requests.get(url)\n data = req.json()\n album = data['items'][0]['id']\n return album",
"def get_artist_albums(self, artist_id): # TODO initialize and return a list of Album objects\n return self.__get_data(self.url.artists_albums_url().format(id=str(artist_id)))",
"def get_track_data(self, song_id: str):\n song_data = self.sp.track(song_id)\n return (song_id, song_data['name'], song_data['artists'][0]['name'])",
"def retrieve_artist_from_id(artist_id):\n logging.info('Retrieving %s', artist_id)\n\n url = BASE_URL_MYAPIFILMS + 'imdb?idName=' + artist_id + '&format=JSON&filmography=0&lang=en-us&bornDied=0&starSign=0&uniqueName=0&actorActress=0&actorTrivia=0&actorPhotos=N&actorVideos=N&salary=0&spouses=0&tradeMark=0&personalQuotes=0&token=307cccfe-d20b-4b69-b976-d6a024538864'\n json_page = get(url).encode('utf-8')\n json_data = json.loads(json_page)\n\n artist = Artist(id=json_data[\"idIMDB\"],\n name=json_data[\"name\"],\n photo=clear_url(json_data[\"urlPhoto\"]) if ('urlPhoto' in json_data and json_data['urlPhoto'] != \"\") else None)\n\n return artist.put()",
"async def get_artist_info(self, song_id: int = 0) -> ArtistInfo:\r\n data = await self.session.test_song(song_id)\r\n return ArtistInfo(**data, client=self)",
"def fetchArtistId(name):\n url = \"https://api.spotify.com/v1/search?q=\"+ name +\"&type=artist\" \n req = grequests.get(url)\n result_list = grequests.map([req])\n if not result_list[0].ok:\n print \"Error\"\n info = result_list[0].json()\n ID = info['artists']['items'][0]['id']\n return(ID)",
"def get_audio_analysis(self, track_id):\n url = \"https://api.spotify.com/v1/audio-analysis/\" + track_id\n headers = {'Authorization': \"Bearer \" + self.token}\n\n request = self.session.get(url, headers=headers)\n return request",
"def get_track_artists(self, track_id):\n response = self.__get_data(self.url.tracks_url().format(id=str(track_id)))\n artists = []\n for album_artists in response['artists']:\n artist = self.get_artist(album_artists['id'])\n artists.append(artist)\n return artists",
"def get_album_art(track_id):\n track_result = spotify.track(track_id)\n imageurl = track_result['album']['images'][1]['url']\n return imageurl\n\n return songseries",
"def download_data_from_deezer(self): # pragma: no cover\n r_artist = requests.get(\n settings.DEEZER_API_ARTIST_URL.format(self.deezer_id)\n )\n json_data = r_artist.json()\n\n return json_data",
"def get_artist_genres(artist_id):\n # sleep before doing artist\n time.sleep(SLEEP_INTERVAL)\n\n # artist_id = song_data[\"artist_id\"]\n # artist_data = urllib2.urlopen(search_url).read()\n\n artist_params = (\n (\"api_key\", Config.KEY),\n (\"id\", artist_id),\n (\"format\", \"json\"),\n (\"bucket\", \"genre\")\n )\n artist_url = ARTIST_BASE_URL + \"?\" + urllib.urlencode(artist_params)\n artist_result = urllib2.urlopen(artist_url).read()\n artist_json = json.loads(artist_result)\n genres_raw = artist_json[\"response\"][\"artist\"][\"genres\"]\n genres = [x[\"name\"] for x in genres_raw]\n return genres",
"def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)",
"def fetchAlbumIds(artist_id):\n url = \"https://api.spotify.com/v1/artists/\" + artist_id + \"/albums?album_type=album&market=US\"\n req = requests.get(url)\n if req.ok == False:\n return 'Error: bad Spotify API URL or similar error'\n data = req.json()\n albums_list = []\n #print len(data[u'items'])\n for album in data[u'items']:\n album_id = album[u'id']\n albums_list.append(album_id)\n return albums_list",
"def get_artist_related(self, artists_id):\n response = self.__get_data(self.url.artists_related_url().format(id=str(artists_id)))\n list_of_related_artists = []\n for related_artist in response['artists']:\n artist = Artist(artist_id=related_artist['id'], name=related_artist['name'],\n popularity=related_artist['popularity'], genres=related_artist['genres'])\n list_of_related_artists.append(artist)\n return list_of_related_artists"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Base64 encodes the file 'f'
|
def b64(f):
    # Open in binary mode: base64.b64encode() expects bytes, not str
    with open(f, 'rb') as infile:
        return base64.b64encode(infile.read())
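A short round-trip check for the helper above (illustrative only): the encoded value is bytes, so decode to str before placing it in JSON or a data: URI.

import base64

def demo_roundtrip(path):
    encoded = b64(path)                                       # e.g. b'aGVsbG8='
    with open(path, "rb") as infile:
        assert base64.b64decode(encoded) == infile.read()     # lossless round trip
    return encoded.decode("ascii")                            # str form for JSON / data: URIs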
|
[
"def _encode_file_base64_(self, file_path):\n encoded_file = base64.b64encode(open(file_path, 'rb').read())\n return self._base64_to_str(encoded_file)\n # return str(encoded_file)[2:-1]",
"def fio_to_b64s(fio: fileIO) -> str:\n fio.seek(0)\n b64s = base64.b64encode(fio.read()).decode('utf8')\n fio.seek(0)\n return b64s",
"def b64content(self) -> bytes:\n with self.as_file() as file:\n return base64.b64encode(file.read())",
"def base64_read_file(filepath):\n with open(filepath, 'rb') as stream:\n data = stream.read()\n file_64_encode = base64.standard_b64encode(data)\n sys.stdout.write(file_64_encode.decode('utf-8'))\n sys.stdout.flush()",
"def encode_base64(_bytes):\n return binascii.b2a_base64(_bytes, newline=False)",
"def encode_base64(s):\n bencode = base64.b64encode(s.encode(\"utf-8\"))\n return str(bencode, \"utf-8\")",
"def b64s_to_fio(b64s: str) -> fileIO:\n fio = IO.BytesIO(base64.b64decode(b64s.encode('utf8')))\n fio.seek(0)\n return fio",
"def convertToBase64(raw_bytes):\n return raw_bytes.encode('base64')",
"def _get_log_file_data_as_encoded_content():\n with io.BytesIO() as fp:\n with tarfile.open(fileobj=fp, mode='w:gz') as tar:\n for f in OUTPUT_FILES:\n tar.add(f)\n\n fp.seek(0)\n return base64.encode_as_bytes(fp.getvalue())",
"def make_document_base64(document_path):\n with open(document_path, 'rb') as document:\n return base64.b64encode(document.read()).decode('utf-8')",
"def encode_image(image_file):\n encoded = base64.b64encode(open(image_file,'rb').read())\n print('Uploading the word cloud')\n return 'data:image/png;base64,{}'.format(encoded.decode())",
"def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, b'=')",
"def format_base64(binary_base64):\n \n try:\n return binascii.b2a_base64(h).rstrip('=\\n ')\n except binascii.Error:\n raise tuf.FormatError('Invalid base64 encoding')",
"def b2a_base64(data: Any, *, newline: bool=True) -> bytes:\n ...",
"def convert_binary(file):\n with open(file, 'rb') as fil:\n f = fil.read()\n encoded_file = Binary(f, 0)\n\n return encoded_file",
"def encode_image(image):\n image_content = image.read()\n byteenc = base64.b64encode(image_content)\n return (byteenc.decode('ascii'))",
"def encode_image(image):\n return base64.b64encode(image).decode('ascii')",
"def b64_to_image(self,data,savepath):\r\n fl = open(savepath,\"wb\")\r\n fl.write(data.decode('base4'))\r\n fl.close()",
"def part4a(filename, username, password):\n\n username_encrypted = base64.b64encode(bytes(username))\n password_encrypted = base64.b64encode(bytes(password))\n\n with open(filename, \"w\") as f:\n f.write(username_encrypted + \"\\n\")\n f.write(password_encrypted + \"\\n\")",
"def validate_json_and_covert_to_base64(input_file):\n input_str = input_file.read()\n input_str_validated = json.dumps(json.loads(input_str))\n return base64.b64encode(input_str_validated.encode()).decode()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CSRF route. Set the CSRF cookie and return a `JSONResponse` with the token. We need this REST endpoint for CSRF protection because all GraphQL queries use the POST method, so there is no safe request through which the token could be transmitted.
|
async def csrf(request): # pylint: disable=unused-argument
token = get_new_token()
response = JSONResponse({"csrftoken": token})
response.set_cookie(
settings.CSRF_COOKIE_NAME,
token,
httponly=settings.CSRF_COOKIE_HTTPONLY,
secure=settings.CSRF_COOKIE_SECURE,
)
return response
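The token issued above is typically verified on every unsafe request by comparing the cookie against a header echoed back by the client (the double-submit pattern). A minimal Starlette middleware sketch, assuming the client sends the token in an `x-csrftoken` header and that `settings.CSRF_COOKIE_NAME` names the cookie set above; the real project's check may differ.

import hmac

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse

class CSRFCheckMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request, call_next):
        if request.method in ("GET", "HEAD", "OPTIONS"):
            return await call_next(request)  # safe methods skip the check
        cookie = request.cookies.get(settings.CSRF_COOKIE_NAME, "")
        header = request.headers.get("x-csrftoken", "")
        if not cookie or not hmac.compare_digest(cookie, header):
            return JSONResponse({"detail": "CSRF check failed"}, status_code=403)
        return await call_next(request)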
|
[
"def csrf_token():\n token = '123' # must use isdangerous module generate a csrf token\n return {'csrf_token': token}",
"async def add_csrf_token_cookie(request, response):\n token = await generate_token()\n\n # Set secure httponly csrf token\n response.cookies['t'] = token\n response.cookies['t']['httponly'] = True\n response.cookies['t']['secure'] = app.config.get('SECURE_COOKIE')\n\n # Set public csrf token for javascript\n response.cookies['csrf_token'] = token\n response.cookies['csrf_token']['secure'] = app.config.get('SECURE_COOKIE')\n\n # Secure all header response\n secure_headers.sanic(response)",
"def get_csrf_token(self):\n params1 = {\"action\": \"query\", \"meta\": \"tokens\", \"type\": \"csrf\"}\n r1 = self.session.get(self.WIKIBASE_API, params=params1)\n token = r1.json()[\"query\"][\"tokens\"][\"csrftoken\"]\n\n return token",
"def csrf_protect():\n if request.endpoint == 'gconnect' or request.endpoint == 'fbconnect':\n return\n\n if request.method == \"POST\":\n token = session.pop('_csrf_token', None)\n if not token or token != request.form.get('_csrf_token'):\n abort(403)",
"def csrf_protect():\n if request.method == 'POST':\n token = session.pop('_csrf_token', None)\n if not token:\n logger.debug('No CSRF token in session')\n abort(400)\n elif request.json:\n _csrf_token = request.json.get('_csrf_token')\n if token != _csrf_token:\n logger.debug('Invalid CSRF token received')\n logger.debug('{token} expected and received {_csrf_token}'.format(**locals()))\n abort(400)\n elif token != request.form.get('_csrf_token'):\n logger.debug('Invalid CSRF token received in the form')\n logger.debug('Expected {} and received {}'.format(token, request.form.get('_csrf_token')))\n abort(400)\n else:\n logger.debug('CSRF valid.')",
"def _get_csrftoken():\n # logging.getLogger(__name__).error(request.headers)\n # logging.getLogger(__name__).error(request.cookies)\n if 'csrftoken' not in request.cookies:\n abort(403, 'No csrftoken')\n csrftoken = request.cookies.get('csrftoken')\n return csrftoken",
"def get_xsrf_token():\n return bottle.request.get_cookie(\n _XSRF_TOKEN_COOKIE, secret=_get_session_cookie_secret())",
"def set_csrf_cookie(response, csrf_token):\n response.set_cookie(_CSRF_FIELD_NAME, value=csrf_token,\n #secure=True, # It would be nice to set this, but it messes up local testing. Since we only allow HTTPS connections, it's probably okay to leave this False...?\n httponly=True, path='/',\n expires=datetime.datetime.now()+datetime.timedelta(7))",
"def bypass_csrf_protection(f):\n f._bypass_csrf = True\n return f",
"def ensure_xsrf_token():\n xsrf_token = get_xsrf_token()\n if not xsrf_token:\n xsrf_token = misc_util.generate_random_id(16)\n bottle.response.set_cookie(\n _XSRF_TOKEN_COOKIE, xsrf_token, secret=_get_session_cookie_secret())\n return xsrf_token",
"def csrf_token(self):\n r=Loader.capi.cppcms_capi_session_get_csrf_token(self.d).decode()\n self.check()\n return r;",
"def generate_csrf_token():\n if '_csrf_token' not in session:\n token = base64.b64encode(os.urandom(42)).decode()\n logger.debug('Setting CSRF token: {token}'.format(**locals()))\n session['_csrf_token'] = token\n return session['_csrf_token']",
"def _protect_xsrf_hook():\n # No need to protect API calls.\n if bottle.request.path.startswith('/api/'):\n return\n if bottle.request.method not in ('GET', 'HEAD'):\n xsrf_token = bottle.request.forms.get('xsrf_token', 'N/A')\n if xsrf_token != get_xsrf_token():\n bottle.abort(400, 'XSRF token is incorrect or not set.')",
"def add_cookie(self):\n self.handler.response.set_cookie(\n 'XSRF-TOKEN', self.token.generate_token_string())",
"def action(post_method):\n def check_csrf(self,*args,**kwargs):\n if session.get('csrf')==request.form.get('csrf') and request.form.get('csrf'):\n return post_method(self,*args,**kwargs)\n else:\n raise PermissionDeniedException(error=\"Permission denied\")\n return check_csrf",
"def change_token_for_cookie(self, id_token):\n #try:\n expires_in = timedelta(days = 5)\n session_cookie = firebase_admin.auth.create_session_cookie(\n id_token, expires_in=expires_in)\n # maybe can change status success for something else.\n expires = datetime.now() + expires_in\n response = jsonify(status=\"success\", token=id_token)\n #uncomment secure = True and samesite=None when hosting\n response.set_cookie(\n 'session', session_cookie, expires=expires, #secure=True, samesite=None # httponly=True, secure=True\n )\n print(response)\n return response\n\n #except:\n #return abort(401, \"Failed to create a session cookie\")",
"def generate_csrf_token(self, csrf_context):\n text = SECRET_KEY + csrf_context\n token = md5(text.encode('utf-8')).hexdigest()\n return token",
"def get_csrf_token(request):\n cookie_csrf_token = request.cookies.get(_CSRF_FIELD_NAME)\n\n if not cookie_csrf_token:\n # The prepended string is only to make it a little clearer when debugging.\n cookie_csrf_token = 'csrf' + str(random.getrandbits(128))\n\n return cookie_csrf_token",
"def api_jwt_token_page():\n return render_template('/api/token.html')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract the version from the package.
|
def extract_version():
# Regular expression for the version
_version_re = re.compile(r"__version__\s+=\s+(.*)")
with open("pdftools/__init__.py", "r") as f:
content = f.read()
version_match = _version_re.search(content)
version = str(ast.literal_eval(version_match.group(1)))
return version
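A typical consumer of this helper is `setup.py`, which needs the version string without importing the package. A minimal sketch, assuming the function is defined in (or imported into) the setup script:

from setuptools import setup, find_packages

setup(
    name="pdftools",
    version=extract_version(),  # single source of truth: pdftools/__init__.py
    packages=find_packages(),
)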
|
[
"def get_version_from_package() -> str:\n\n path = os.path.join(os.path.dirname(__file__), \"pdchaoskit/__init__.py\")\n path = os.path.normpath(os.path.abspath(path))\n with open(path) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n token, version = line.split(\" = \", 1)\n version = version.replace(\"'\", \"\").strip()\n print(version)\n return version",
"def getPackageVersion(package_info):\n\n # Parse for version_number\n package_version = re.search(version_pattern, package_info).group(0) # extract version_number\n\n return package_version",
"def version(package):\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)",
"def get_version(package):\r\n init_py = open(os.path.join(package, '__init__.py')).read()\r\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)",
"def get_version():\n version_file = repository_root / f\"{package_root}/{package_name}/__init__.py\"\n initfile_lines = version_file.open(\"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n return \"unknown\"",
"def get_version():\n with open(VERSION_FILE) as handle:\n lines = handle.read()\n result = VERSION_REGEX.search(lines)\n if result:\n return result.groupdict()[\"version\"]\n else:\n raise ValueError(\"Unable to determine __version__\")",
"def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver",
"def get_version():\n with open(os.path.join(\n os.path.dirname(__file__), MODULE_NAME, '__init__.py')\n ) as init:\n for line in init.readlines():\n res = re.match(r'^__version__ = [\\'\"](.*)[\\'\"]$', line)\n if res:\n return res.group(1)",
"def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version",
"def get_version() -> str:\n return VERSION",
"def get_latest_verobj(pkg):\n try:\n ver = pkg.versions[0]\n except AttributeError:\n return None\n\n return ver",
"def return_min_version_python_package(package):\r\n\tversion_package = min_python_module_version()\r\n\treturn (version_package[package])",
"def GetPackageVersion(self,package):\n\n print 'Get current package version of ' + str(package)\n\n ## Check that the nd280/v#r#p#/ directory is there \n nd280Dir=self.instDir + '/nd280/' + self.nd280ver \n if not os.path.isdir(nd280Dir):\n raise Error(nd280Dir + ' does not exist, so am unable to get version of ' + package)\n return 1\n\n # find the requirements file that enables this module\n command= \"grep -e \\\"use \"+package+\"[[:space:]]*v\\\" \"+self.instDir+\"/*/*/cmt/requirements\"\n lines,errors = runLCG(command,is_pexpect=False)\n if not lines or errors:\n print \"ERROR: unable to locate requirents for \"+package\n return 1\n\n version = lines[0].strip().split(' ')[-1]\n print 'Found '+package+' version '+version\n\n return version",
"def get_version():\n import pkg_resources # part of setuptools\n return pkg_resources.require(\"mbed-ls\")[0].version",
"def get_package_version(self, name, version=None):\n if not version:\n versions = list(self._get_package_versions_map(name).values())\n return versions and versions[-1]\n else:\n return self._get_package_versions_map(name).get(version)",
"def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version",
"def fetch_package_version(python_process: Process, package_name: str) -> str:\n LOG.info(\"Fetch current version for package %s\", package_name)\n cmd_args = [\"-m\", \"pip\", \"show\", package_name]\n try:\n python_process.run_command(cmd_args)\n except CalledProcessError:\n return \"\"\n\n for line in python_process.stdout_lines:\n split_line = line.split(\":\")\n if split_line[0] == \"Version\":\n return split_line[-1].strip()\n\n return \"\"",
"def get_version_from_tag(self, tag: str) -> str:\n\n field = \"version\"\n regex = self._template2regex(self.package_config.upstream_tag_template)\n p = re.compile(regex)\n match = p.match(tag)\n if match and field in match.groupdict():\n return match.group(field)\n else:\n msg = (\n f'Unable to extract \"{field}\" from {tag} using '\n f\"{self.package_config.upstream_tag_template}\"\n )\n logger.error(msg)\n raise PackitException(msg)",
"def get_version(v=version.__version__):\n ver = v.split(\" \")\n vtag = ver[1]\n vnum = [int(i) for i in ver[0].split('.')]\n return(vnum, vtag)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create an adaptive bitrate streaming template. Up to 100 templates can be created.
|
def CreateAdaptiveDynamicStreamingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateAdaptiveDynamicStreamingTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateAdaptiveDynamicStreamingTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def create_sample_template(self, sample_template_post: dict):\n return self.retrieve_api_results(\n \"/sampleTemplates\", request_type=\"POST\", params=sample_template_post\n )",
"def stream_template(template_name, **context):\n app.update_template_context(context)\n template = app.jinja_env.get_template(template_name)\n stream = template.generate(context)\n return Response(stream_with_context(stream))",
"def CreateInstanceTemplate(task, task_dir):\n backend_params = task.BackendParams()\n instance_count = backend_params.get('instance_count', 0)\n if instance_count <= 0:\n clovis_logger.info('No template required.')\n return True\n bucket = backend_params.get('storage_bucket')\n if not bucket:\n clovis_logger.error('Missing bucket in backend_params.')\n return False\n return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,\n task_dir)",
"def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/test_answers_%(template)s.ini %(dir)s/bobtemplates/simplesconsultoria/%(template)s'\n % options)",
"def _create_template(self, num_instances):\n conf_name = self.properties['launch_configuration_name']\n conf = self.stack.resource_by_refid(conf_name)\n instance_definition = copy.deepcopy(conf.t)\n instance_definition['Type'] = 'OS::Nova::Server'\n # resolve references within the context of this stack.\n fully_parsed = self.stack.resolve_runtime_data(instance_definition)\n\n resources = {}\n for i in range(num_instances):\n resources[\"%s-%d\" % (self.name, i)] = fully_parsed\n return {\"Resources\": resources}",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def new_template():\n form = NewTemplateForm()\n keywords = ProvisioningKeyword.query.all()\n\n if form.validate_on_submit():\n template = ProvisioningTemplate(\n name=form.name.data,\n template=form.template.data,\n options=form.compress_options(),\n enabled=request.form.get(\"enabled\", False)\n )\n db.session.add(template)\n db.session.commit()\n flash('Template {} successfully created'.format(template.name),\n 'form-success')\n return redirect(url_for('provisioning.templates'))\n\n return render_template('provisioning/new_template.html', \n form=form,\n keywords=keywords\n )",
"def CreateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def create_sample(\n self,\n name: str,\n tags: Optional[str] = None,\n description: Optional[str] = None,\n extra_fields: Optional[Sequence] = [],\n sample_template_id=None,\n fields=None,\n storage_temperature_min: StorageTemperature = None,\n storage_temperature_max: StorageTemperature = None,\n expiry_date: datetime.datetime = None,\n subsample_count: int = None,\n total_quantity: Quantity = None,\n attachments=None,\n ) -> dict:\n toPost = SamplePost(\n name,\n tags,\n description,\n extra_fields,\n sample_template_id,\n fields,\n storage_temperature_min,\n storage_temperature_max,\n expiry_date,\n subsample_count,\n total_quantity,\n attachments,\n )\n\n sample = self.retrieve_api_results(\n \"/samples\", request_type=\"POST\", params=toPost.data\n )\n if attachments is not None:\n for file in attachments:\n self.upload_attachment(sample[\"globalId\"], file)\n ## get latest version\n sample = self.get_sample_by_id(sample[\"id\"])\n return sample",
"def create_stack(self, cnxt, stack_name, template, params, files, args):\r\n LOG.info(_('template is %s') % template)\r\n\r\n def _stack_create(stack):\r\n # Create/Adopt a stack, and create the periodic task if successful\r\n if stack.adopt_stack_data:\r\n stack.adopt()\r\n else:\r\n stack.create()\r\n\r\n if (stack.action in (stack.CREATE, stack.ADOPT)\r\n and stack.status == stack.COMPLETE):\r\n # Schedule a periodic watcher task for this stack\r\n self.stack_watch.start_watch_task(stack.id, cnxt)\r\n else:\r\n LOG.warning(_(\"Stack create failed, status %s\") % stack.status)\r\n\r\n tmpl = parser.Template(template, files=files)\r\n self._validate_new_stack(cnxt, stack_name, tmpl)\r\n\r\n common_params = api.extract_args(args)\r\n env = environment.Environment(params)\r\n stack = parser.Stack(cnxt, stack_name, tmpl, env, **common_params)\r\n\r\n self._validate_deferred_auth_context(cnxt, stack)\r\n\r\n stack.validate()\r\n\r\n stack.store()\r\n\r\n self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,\r\n _stack_create, stack)\r\n\r\n return dict(stack.identifier())",
"def _create_template(self, template_element):\n return Template(\n template_element.get(\"id\"),\n parent_id=template_element.get(\"parent-id\"),\n after_inject=template_element.get(\"after-inject\"),\n before_clear=template_element.get(\"before-clear\")\n )",
"def create_template():\n # register map\n rmap = RegisterMap()\n\n rmap.add_registers(Register('DATA', 'Data register', 0x4).add_bitfields([\n BitField(\"FIFO\", \"Write to push value to TX FIFO, read to get data from RX FIFO\",\n width=8, lsb=0, access='rw', hardware='q'),\n BitField(\"FERR\", \"Frame error flag. Read to clear.\", width=1, lsb=16, access='rolh', hardware='i'),\n BitField(\"PERR\", \"Parity error flag. Read to clear.\", width=1, lsb=17, access='rolh', hardware='i'),\n ]))\n\n rmap.add_registers(Register('STAT', 'Status register', 0xC).add_bitfields([\n BitField(\"BUSY\", \"Transciever is busy\", width=1, lsb=2, access='ro', hardware='ie'),\n BitField(\"RXE\", \"RX FIFO is empty\", width=1, lsb=4, access='ro', hardware='i'),\n BitField(\"TXF\", \"TX FIFO is full\", width=1, lsb=8, access='ro', hardware='i'),\n ]))\n\n rmap.add_registers(Register('CTRL', 'Control register', 0x10).add_bitfields([\n BitField(\"BAUD\", \"Baudrate value\", width=2, lsb=0, access='rw', hardware='o').add_enums([\n EnumValue(\"B9600\", 0, \"9600 baud\"),\n EnumValue(\"B38400\", 1, \"38400 baud\"),\n EnumValue(\"B115200\", 2, \"115200 baud\"),\n ]),\n BitField(\"TXEN\", \"Transmitter enable. Can be disabled by hardware on error.\",\n width=1, lsb=4, access='rw', hardware='oie'),\n BitField(\"RXEN\", \"Receiver enable. Can be disabled by hardware on error.\",\n width=1, lsb=5, access='rw', hardware='oie'),\n BitField(\"TXST\", \"Force transmission start\", width=1, lsb=6, access='wosc', hardware='o'),\n ]))\n\n rmap.add_registers(Register('LPMODE', 'Low power mode control', 0x14).add_bitfields([\n BitField(\"DIV\", \"Clock divider in low power mode\", width=8, lsb=0, access='rw', hardware='o'),\n BitField(\"EN\", \"Low power mode enable\", width=1, lsb=31, access='rw', hardware='o'),\n ]))\n\n rmap.add_registers(Register('INTSTAT', 'Interrupt status register', 0x20).add_bitfields([\n BitField(\"TX\", \"Transmitter interrupt flag. Write 1 to clear.\", width=1, lsb=0, access='rw1c', hardware='s'),\n BitField(\"RX\", \"Receiver interrupt. Write 1 to clear.\", width=1, lsb=1, access='rw1c', hardware='s'),\n ]))\n\n rmap.add_registers(Register('ID', 'IP-core ID register', 0x40).add_bitfields([\n BitField(\"UID\", \"Unique ID\", width=32, lsb=0, access='ro', hardware='f', reset=0xcafe0666),\n ]))\n\n return rmap",
"def template(filename):\n template = {\n \"name\": \"model\",\n \"max_sentence_size\": 150,\n \"network\": { # Configuration of the encoder\n 'emb_enc_dim': 256,\n 'enc_n_layers': 10,\n 'enc_kernel_size': 3,\n 'enc_dropout': 0.25\n },\n \"model\": 'linear-conv',\n \"learner\": {\n 'lr_grace_periode': 2,\n 'lr_patience': 2,\n 'lr': 0.0001\n },\n \"label_encoder\": {\n \"normalize\": True,\n \"lower\": True\n },\n \"datasets\": {\n \"test\": \"./test.tsv\",\n \"train\": \"./train.tsv\",\n \"dev\": \"./dev.tsv\",\n \"random\": True\n }\n }\n json.dump(template, filename, indent=4, separators=(',', ': '))",
"def create_template(template_name, created_by, created_on):\n\n template = Template(template_name=template_name,\n created_by=created_by,\n created_on=created_on)\n\n db.session.add(template)\n\n db.session.commit()\n\n return template",
"def _create_task_template(\n self,\n workflow_id: Union[int, float],\n input_schema: Dict,\n output_schema: Dict,\n name: str = None,\n description: str = None,\n ) -> Dict:\n body_json = {\n \"input_schema\": input_schema,\n \"output_schema\": output_schema,\n }\n if name is not None:\n body_json[\"name\"] = name\n if description is not None:\n body_json[\"description\"] = description\n\n ## Assuming that a workflow can get a task\n ## TODO:\n # 1.Fix: I couldn't find the appropriate API controller. I'm assuming that the task contoller has some REST methods missing\n # uri = f'workflow/{workflow_id}/task'\n # return self._request(uri, method='POST', body_params=body_json, required_api_key=False)",
"def instanciate_template(id_template, id, env=\"default\"):\n if id is None:\n id = \"root\"\n config = ConfigParser.RawConfigParser()\n config.read(script_dir + \"/properties/nifi.properties\")\n nifi_connection = NifiConnect()\n nifi_connection.load_properties(env, config)\n nifi_connection.connect()\n results = nifi_connection.instanciate_template(id, id_template)\n print(\"Process group is created : \" + results[\"flow\"][\"processGroups\"][0][\"status\"][\"name\"] + \" - \" +\n results[\"flow\"][\"processGroups\"][0][\"status\"][\"id\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom animated image generating template. Up to 16 templates can be created.
|
def CreateAnimatedGraphicsTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateAnimatedGraphicsTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateAnimatedGraphicsTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
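
A minimal caller-side sketch of invoking this method through the tencentcloud-sdk-python VOD client follows; the credentials, region, and template parameters (frame rate, resolution, format) are illustrative assumptions, not values taken from this document.

from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

# Placeholder credentials and an assumed region.
cred = credential.Credential("SECRET_ID", "SECRET_KEY")
client = vod_client.VodClient(cred, "ap-guangzhou")

req = models.CreateAnimatedGraphicsTemplateRequest()
req.Fps = 2          # assumed: frame rate of the generated animated image
req.Width = 640      # assumed: output width in px
req.Height = 360     # assumed: output height in px
req.Format = "gif"   # assumed: output format
resp = client.CreateAnimatedGraphicsTemplate(req)
print(resp.to_json_string())  # the response carries the ID of the new template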
|
[
"def generate_image(self) -> None:",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def make_tract_template_nibabel(track_images, template_name):\n\t# 1. Binarize images and sum\n\tfor n in range(0,len(track_images)):\n\t\tif n == 0: img_sum = np.float32(nib.load(track_images[0]).get_data()>0)\n\t\telse: img_sum = img_sum+np.float32(nib.load(track_images[n]).get_data()>0)\n\t# 2. Divide by N and write the new template image, getting the affine header from the first image\n\tnib.Nifti1Image(img_sum/len(track_images), nib.load(track_images[0]).get_affine()).to_filename(template_name)",
"def create_output_image(img, instances):\n pass",
"def New(*args, **kargs):\n obj = itkImageVF13.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def CreateImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def New(*args, **kargs):\n obj = itkImageVF12.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageSS4.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageBase4.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def generate_templates(self):\n\n templates = []\n cell_size = self.cell_size\n\n # Slide each size template over the entire shape model and generate templates\n for size in self.sizes:\n w = size[0]\n h = size[1]\n\n # Slide template with dimenions specified by size across the entire shape model\n for y in range(self.shape_model.shape[0] - h):\n for x in range(self.shape_model.shape[1] - w):\n\n mat_temp = np.copy(self.shape_model[y:y + h, x:x + w])\n unique = np.unique(mat_temp)\n\n # Check to make sure template holds some shape model information\n if len(unique) > 1:\n\n # Binary template: set values to 1 and 0 and add template\n if len(unique) == 2:\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = 0\n templates.append((x, y, size, mat_temp))\n\n # Ternary template: set values to -1, 0, 1 -- add template -- repeat with all permutations\n else:\n # Get unique value indices\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n idx3 = mat_temp == unique[2]\n\n mat_temp[idx1] = -1\n mat_temp[idx2] = 0\n mat_temp[idx3] = 1\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = -1\n mat_temp[idx3] = 0\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 0\n mat_temp[idx2] = 1\n mat_temp[idx3] = -1\n templates.append((x, y, size, mat_temp))\n\n self.templates = np.asarray(templates, dtype=object)\n self.remove_duplicates()\n self.shift_templates()\n self.normalize_templates()\n\n print('Created %d templates' % (len(self.templates)))\n return self.templates",
"def New(*args, **kargs):\n obj = itkImageVD23.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def create_image(self, obj):\r\n randomize = random.randint(0,3)\r\n if randomize == 0:\r\n image_id = self.canvas.create_image(25, 50, image=obj)\r\n elif randomize == 1:\r\n image_id = self.canvas.create_image(25, 125, image=obj)\r\n elif randomize == 2:\r\n image_id = self.canvas.create_image(25, 200, image=obj)\r\n else:\r\n image_id = self.canvas.create_image(25, 275, image=obj)\r\n self.animation(image_id)",
"def New(*args, **kargs):\n obj = itkImageBase3.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageSS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageUS4.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageVD43.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageVF23.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageVF13.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageVD22.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom image processing template. A template can include at most 10 operations, for example, crop-scale-crop-blur-scale-crop-scale-crop-blur-scale. You can have up to 16 image processing templates.
|
def CreateImageProcessingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateImageProcessingTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateImageProcessingTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
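
A hedged usage sketch follows, reusing the VodClient set up in the animated-image example above; the operation list is built with from_json_string, and its field names and values are assumptions for illustration only.

import json

req = models.CreateImageProcessingTemplateRequest()
# Assumed operation chain: one scale followed by one center cut (structure is illustrative).
req.from_json_string(json.dumps({
    "Name": "demo-image-processing",
    "Operations": [
        {"Type": "Scale", "Scale": {"Type": "WidthFirst", "Width": 640}},
        {"Type": "CenterCut", "CenterCut": {"Type": "Rectangle", "Width": 640, "Height": 360}}
    ]
}))
resp = client.CreateImageProcessingTemplate(req)
print(resp.to_json_string())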
|
[
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def _create_template(self, num_instances):\n conf_name = self.properties['launch_configuration_name']\n conf = self.stack.resource_by_refid(conf_name)\n instance_definition = copy.deepcopy(conf.t)\n instance_definition['Type'] = 'OS::Nova::Server'\n # resolve references within the context of this stack.\n fully_parsed = self.stack.resolve_runtime_data(instance_definition)\n\n resources = {}\n for i in range(num_instances):\n resources[\"%s-%d\" % (self.name, i)] = fully_parsed\n return {\"Resources\": resources}",
"def CreateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def _create_service_template(self):\n cmd = self._generate_cmd_and_expected_status()\n service_template = copy.deepcopy(self.service_template)\n service_template['container']['command'] = '{} {}'.format(cmd, random.randint(10, 30))\n return service_template",
"def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps",
"def CreateInstanceTemplate(task, task_dir):\n backend_params = task.BackendParams()\n instance_count = backend_params.get('instance_count', 0)\n if instance_count <= 0:\n clovis_logger.info('No template required.')\n return True\n bucket = backend_params.get('storage_bucket')\n if not bucket:\n clovis_logger.error('Missing bucket in backend_params.')\n return False\n return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,\n task_dir)",
"def instanciate_template(id_template, id, env=\"default\"):\n if id is None:\n id = \"root\"\n config = ConfigParser.RawConfigParser()\n config.read(script_dir + \"/properties/nifi.properties\")\n nifi_connection = NifiConnect()\n nifi_connection.load_properties(env, config)\n nifi_connection.connect()\n results = nifi_connection.instanciate_template(id, id_template)\n print(\"Process group is created : \" + results[\"flow\"][\"processGroups\"][0][\"status\"][\"name\"] + \" - \" +\n results[\"flow\"][\"processGroups\"][0][\"status\"][\"id\"])",
"def New(*args, **kargs):\n obj = itkImageBase3.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def addTemplate(\n self, sources, class_id, object_mask\n ) -> Tuple[retval, bounding_box]:\n ...",
"def _create_task_template(\n self,\n workflow_id: Union[int, float],\n input_schema: Dict,\n output_schema: Dict,\n name: str = None,\n description: str = None,\n ) -> Dict:\n body_json = {\n \"input_schema\": input_schema,\n \"output_schema\": output_schema,\n }\n if name is not None:\n body_json[\"name\"] = name\n if description is not None:\n body_json[\"description\"] = description\n\n ## Assuming that a workflow can get a task\n ## TODO:\n # 1.Fix: I couldn't find the appropriate API controller. I'm assuming that the task contoller has some REST methods missing\n # uri = f'workflow/{workflow_id}/task'\n # return self._request(uri, method='POST', body_params=body_json, required_api_key=False)",
"def New(*args, **kargs):\n obj = itkImageSS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def New(*args, **kargs):\n obj = itkImageVF13.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def add_template(self, template, label, units='counts'):\n\n if units == 'flux':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a flux template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template *= self.exposure_map\n\n if units == 'PS':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a PS template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template /= self.exposure_map/np.mean(self.exposure_map)\n self.templates_dict.update({label: template})\n self.templates.append(template)",
"def New(*args, **kargs):\n obj = itkImageBase4.__New_orig__()\n from itk.support import template_class\n template_class.New(obj, *args, **kargs)\n return obj",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def New(*args, **kargs):\n obj = itkImageVF12.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom image sprite generating template. Up to 16 templates can be created.
|
def CreateImageSpriteTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateImageSpriteTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateImageSpriteTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
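
Reusing the client from the first sketch, a hedged example of creating an image sprite template; the sampling and layout parameters are assumptions for illustration.

req = models.CreateImageSpriteTemplateRequest()
req.SampleType = "Time"   # assumed: sample a frame every fixed number of seconds
req.SampleInterval = 10   # assumed: sampling interval in seconds
req.RowCount = 4          # assumed: rows per sprite sheet
req.ColumnCount = 4       # assumed: columns per sprite sheet
resp = client.CreateImageSpriteTemplate(req)
print(resp.to_json_string())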
|
[
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def create_sprite_sheet(name_to_image_path_dict):\n images = {name: Image.open(os.path.join(settings.PAINTINGS_DIR, os.path.basename(file_path.replace(\"\\\\\", \"/\"))))\n for name, file_path in name_to_image_path_dict.items()}\n image_to_location = {}\n\n name = \"-\".join(name_to_image_path_dict.keys())\n output_file = os.path.join(settings.SPRITE_SHEET_DIR, \"%s.%s\" % (name, settings.SPRITE_SHEET_FILETYPE))\n image_exists = os.path.isfile(output_file)\n\n master_height = max([i.size[1] for i in images.values()]) # Make it as high as the highest image\n master_width = sum([i.size[0] for i in images.values()]) # and as wide as all of them together\n\n if not image_exists:\n master = Image.new(\n mode='RGBA',\n size=(master_width, master_height),\n color=(0, 0, 0, 0)) # fully transparent\n\n cur_width = 0\n for count, name in enumerate(images.keys()):\n image = images[name]\n if not image_exists:\n master.paste(image, (cur_width, 0))\n \n image_to_location[name] = (image.size[0], image.size[1], cur_width, 0)\n cur_width += image.size[0]\n\n if not image_exists:\n if \"gif\" == settings.SPRITE_SHEET_FILETYPE:\n master.save(output_file, transparency=0)\n else:\n master.save(output_file)\n\n return output_file, image_to_location",
"def create_sprite(self):\n rgb = (84, 170, 232)\n height = 15\n length = 15\n self.sprite = BaseStationSprite(rgb)",
"def create_sprite_image(images):\n if isinstance(images, list):\n images = np.array(images)\n img_h = images.shape[1]\n img_w = images.shape[2]\n n_plots = int(np.ceil(np.sqrt(images.shape[0]))) \n if len(images.shape) > 3:\n spriteimage = np.ones(\n (img_h * n_plots, img_w * n_plots, images.shape[3]))\n else:\n spriteimage = np.ones((img_h * n_plots, img_w * n_plots))\n four_dims = len(spriteimage.shape) == 4\n for i in range(n_plots):\n for j in range(n_plots):\n this_filter = i * n_plots + j\n if this_filter < images.shape[0]:\n this_img = images[this_filter]\n if four_dims:\n spriteimage[i * img_h:(i + 1) * img_h,\n j * img_w:(j + 1) * img_w, :] = this_img\n else:\n spriteimage[i * img_h:(i + 1) * img_h,\n j * img_w:(j + 1) * img_w] = this_img\n return spriteimage",
"def create_sprite_image(images):\n if isinstance(images, list):\n images = np.array(images)\n img_h = images.shape[1]\n img_w = images.shape[2]\n # sprite图像可以理解成是小图片平成的大正方形矩阵,大正方形矩阵中的每一个元素就是原来的小图片。于是这个正方形的边长就是sqrt(n),其中n为小图片的数量。\n n_plots = int(np.ceil(np.sqrt(images.shape[0])))\n\n # 使用全1来初始化最终的大图片。\n spriteimage = np.ones((img_h*n_plots, img_w*n_plots))\n\n for i in range(n_plots):\n for j in range(n_plots):\n # 计算当前图片的编号\n this_filter = i*n_plots + j\n if this_filter < images.shape[0]:\n # 将当前小图片的内容复制到最终的sprite图像\n this_img = images[this_filter]\n spriteimage[i*img_h:(i + 1)*img_h,\n j*img_w:(j + 1)*img_w] = this_img\n\n return spriteimage",
"def CreateImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def make_tract_template_nibabel(track_images, template_name):\n\t# 1. Binarize images and sum\n\tfor n in range(0,len(track_images)):\n\t\tif n == 0: img_sum = np.float32(nib.load(track_images[0]).get_data()>0)\n\t\telse: img_sum = img_sum+np.float32(nib.load(track_images[n]).get_data()>0)\n\t# 2. Divide by N and write the new template image, getting the affine header from the first image\n\tnib.Nifti1Image(img_sum/len(track_images), nib.load(track_images[0]).get_affine()).to_filename(template_name)",
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def create_sprite(location, image_name, animal_name):\n image = pygame.image.load(image_name)\n image_rect = Rect(location, (24, 24))\n new_sprite = Sprite(image, image_rect, animal_name)\n return new_sprite",
"def _create_template(self, num_instances):\n conf_name = self.properties['launch_configuration_name']\n conf = self.stack.resource_by_refid(conf_name)\n instance_definition = copy.deepcopy(conf.t)\n instance_definition['Type'] = 'OS::Nova::Server'\n # resolve references within the context of this stack.\n fully_parsed = self.stack.resolve_runtime_data(instance_definition)\n\n resources = {}\n for i in range(num_instances):\n resources[\"%s-%d\" % (self.name, i)] = fully_parsed\n return {\"Resources\": resources}",
"def generate_templates(self):\n\n templates = []\n cell_size = self.cell_size\n\n # Slide each size template over the entire shape model and generate templates\n for size in self.sizes:\n w = size[0]\n h = size[1]\n\n # Slide template with dimenions specified by size across the entire shape model\n for y in range(self.shape_model.shape[0] - h):\n for x in range(self.shape_model.shape[1] - w):\n\n mat_temp = np.copy(self.shape_model[y:y + h, x:x + w])\n unique = np.unique(mat_temp)\n\n # Check to make sure template holds some shape model information\n if len(unique) > 1:\n\n # Binary template: set values to 1 and 0 and add template\n if len(unique) == 2:\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = 0\n templates.append((x, y, size, mat_temp))\n\n # Ternary template: set values to -1, 0, 1 -- add template -- repeat with all permutations\n else:\n # Get unique value indices\n idx1 = mat_temp == unique[0]\n idx2 = mat_temp == unique[1]\n idx3 = mat_temp == unique[2]\n\n mat_temp[idx1] = -1\n mat_temp[idx2] = 0\n mat_temp[idx3] = 1\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 1\n mat_temp[idx2] = -1\n mat_temp[idx3] = 0\n templates.append((x, y, size, mat_temp))\n\n mat_temp[idx1] = 0\n mat_temp[idx2] = 1\n mat_temp[idx3] = -1\n templates.append((x, y, size, mat_temp))\n\n self.templates = np.asarray(templates, dtype=object)\n self.remove_duplicates()\n self.shift_templates()\n self.normalize_templates()\n\n print('Created %d templates' % (len(self.templates)))\n return self.templates",
"def add_template(self, template, label, units='counts'):\n\n if units == 'flux':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a flux template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template *= self.exposure_map\n\n if units == 'PS':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a PS template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template /= self.exposure_map/np.mean(self.exposure_map)\n self.templates_dict.update({label: template})\n self.templates.append(template)",
"def generateImage(self, **kwargs):\n\n start_x = kwargs.get('start_x', None)\n start_y = kwargs.get('start_y', None)\n tile_width = kwargs.get('tile_width', 5)\n tile_height = kwargs.get('tile_height', 5)\n\n # Check that we have x and y tile coordinates\n if start_x == None or start_y == None :\n start_x, start_y = self.getXY()\n\n # Determine the size of the image\n width, height = 256 * tile_width, 256 * tile_height\n\n #Create a new image of the size require\n map_img = Image.new('RGB', (width,height))\n sat_img = Image.new('RGB', (width,height))\n\n for x in range(0, tile_width):\n for y in range(0, tile_height) :\n if True:\n if args.label:\n # Store the image with labels\n url = 'https://mt0.google.com/vt/lyrs=y&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt/lyrs=s&?x=' + str(start_x + x) + '&y=' + str(start_y + y) + '&z=' + str( self._zoom)\n if args.debug: print(url)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n sat_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n\n if True:\n if args.label:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom)\n if args.debug: print(url)\n else:\n url = 'https://mt0.google.com/vt?x='+str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(self._zoom) # work needs to be done\n if args.debug: print(url)\n\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n\n im = Image.open(current_tile)\n map_img.paste(im, (x*256, y*256))\n\n os.remove(current_tile)\n\n return map_img, sat_img",
"def CreateInstanceTemplate(task, task_dir):\n backend_params = task.BackendParams()\n instance_count = backend_params.get('instance_count', 0)\n if instance_count <= 0:\n clovis_logger.info('No template required.')\n return True\n bucket = backend_params.get('storage_bucket')\n if not bucket:\n clovis_logger.error('Missing bucket in backend_params.')\n return False\n return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,\n task_dir)",
"def create_output_image(img, instances):\n pass",
"def CreateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def addTemplate(\n self, sources, class_id, object_mask\n ) -> Tuple[retval, bounding_box]:\n ...",
"def generate_image(self) -> None:",
"def _generate_template(object_name):\n object_color, object_type = object_name.split()\n template = np.zeros((UPSAMPLE_SIZE, UPSAMPLE_SIZE))\n half = UPSAMPLE_SIZE // 2\n if object_type == \"triangle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if (j <= half and i >= 2 * (half - j)) or (j > half and i >= 2 *\n (j - half)):\n template[i, j] = 1.\n elif object_type == \"square\":\n template[:, :] = 1.\n elif object_type == \"empty_square\":\n template[:2, :] = 1.\n template[-2:, :] = 1.\n template[:, :2] = 1.\n template[:, -2:] = 1.\n elif object_type == \"plus\":\n template[:, half - 1:half + 2] = 1.\n template[half - 1:half + 2, :] = 1.\n elif object_type == \"inverse_plus\":\n template[:, :] = 1.\n template[:, half - 1:half + 2] = 0.\n template[half - 1:half + 2, :] = 0.\n elif object_type == \"ex\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1:\n template[i, j] = 1.\n elif object_type == \"inverse_ex\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if not (abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1):\n template[i, j] = 1.\n elif object_type == \"circle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if (i - half)**2 + (j - half)**2 <= half**2:\n template[i, j] = 1.\n elif object_type == \"empty_circle\":\n for i in range(UPSAMPLE_SIZE):\n for j in range(UPSAMPLE_SIZE):\n if abs((i - half)**2 + (j - half)**2 - half**2) < 6:\n template[i, j] = 1.\n elif object_type == \"tee\":\n template[:, half - 1:half + 2] = 1.\n template[:3, :] = 1.\n elif object_type == \"upside_down_tee\":\n template[:, half - 1:half + 2] = 1.\n template[-3:, :] = 1.\n elif object_type == \"h\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[half - 1:half + 2, :] = 1.\n elif object_type == \"u\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[-3:, :] = 1.\n elif object_type == \"upside_down_u\":\n template[:, :3] = 1.\n template[:, -3:] = 1.\n template[:3, :] = 1.\n elif object_type == \"vertical_stripes\":\n for j in range(half + UPSAMPLE_SIZE % 2):\n template[:, 2*j] = 1.\n elif object_type == \"horizontal_stripes\":\n for i in range(half + UPSAMPLE_SIZE % 2):\n template[2*i, :] = 1.\n else:\n raise ValueError(\"Unknown object: {}\".format(object_type))\n\n if object_color not in COLORS:\n raise ValueError(\"Unknown color: {}\".format(object_color))\n\n template = np.tensordot(template, COLORS[object_color], axes=0)\n\n return template"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom time point screencapturing template. Up to 16 templates can be created.
|
def CreateSnapshotByTimeOffsetTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateSnapshotByTimeOffsetTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateSnapshotByTimeOffsetTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
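
A short sketch of calling this method with the same client; the template name, output resolution, and image format are assumed values.

req = models.CreateSnapshotByTimeOffsetTemplateRequest()
req.Name = "demo-time-offset-snapshot"  # assumed: template name
req.Width = 1280                        # assumed: output width in px
req.Height = 720                        # assumed: output height in px
req.Format = "jpg"                      # assumed: output image format
resp = client.CreateSnapshotByTimeOffsetTemplate(req)
print(resp.to_json_string())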
|
[
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def create_sample_template(self, sample_template_post: dict):\n return self.retrieve_api_results(\n \"/sampleTemplates\", request_type=\"POST\", params=sample_template_post\n )",
"def _create_service_template(self):\n cmd = self._generate_cmd_and_expected_status()\n service_template = copy.deepcopy(self.service_template)\n service_template['container']['command'] = '{} {}'.format(cmd, random.randint(10, 30))\n return service_template",
"def _create_template(self, num_instances):\n conf_name = self.properties['launch_configuration_name']\n conf = self.stack.resource_by_refid(conf_name)\n instance_definition = copy.deepcopy(conf.t)\n instance_definition['Type'] = 'OS::Nova::Server'\n # resolve references within the context of this stack.\n fully_parsed = self.stack.resolve_runtime_data(instance_definition)\n\n resources = {}\n for i in range(num_instances):\n resources[\"%s-%d\" % (self.name, i)] = fully_parsed\n return {\"Resources\": resources}",
"def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps",
"def _create_template_config(self, config):\n pass",
"def init_templates():\n\n templates = []\n\n # single stroke templates (all fingers doing the same if various fingers) (1 finger)\n templates.append(Template(\"T\", [\n # different PC for having different ways of drawing Template.name (T) for better recognition\n Point_cloud(\"T1\", [Point(30, 7, 1), Point(103, 7, 1),\n Point(66, 7, 2), Point(66, 87, 2)])\n ,\n Point_cloud(\"T2\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 77, 3)])\n ,\n Point_cloud(\"T3\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 50, 3)])\n ], None)\n )\n templates.append(Template(\"V\", [\n Point_cloud(\"V1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V2\", [Point(0, 7, 1), Point(25, 37, 1),\n Point(25, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V3\", [Point(30, 7, 1), Point(40, 25, 1),\n Point(40, 25, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V4\", [Point(30, 16, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 7, 2)])\n ,\n Point_cloud(\"V5\", [Point(30, 7, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 16, 2)])\n ], None)\n )\n templates.append(Template(\"D\", [\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(50, 53, 2),\n Point(50, 53, 3), Point(55, 37, 3),\n Point(55, 37, 4), Point(50, 21, 4),\n Point(50, 21, 5), Point(30, 7, 5)])\n ,\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(60, 53, 2),\n Point(60, 53, 3), Point(65, 37, 3),\n Point(65, 37, 4), Point(60, 21, 4),\n Point(60, 21, 5), Point(30, 7, 5)])\n ,\n ], None)\n )\n templates.append(Template(\"X\", [\n Point_cloud(\"X1\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2)])\n ,\n Point_cloud(\"X1_2\", [Point(30, 7, 1), Point(60, 34, 1),\n Point(60, 7, 2), Point(30, 34, 2)])\n ,\n Point_cloud(\"X2\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 7, 3), Point(60, 7, 3)])\n ,\n Point_cloud(\"X3\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 47, 3), Point(60, 47, 3)])\n ,\n Point_cloud(\"X4\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 7, 3), Point(30, 47, 3)])\n ], None)\n )\n templates.append(Template(\"W\", [\n Point_cloud(\"W1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 20, 2),\n Point(50, 20, 3), Point(60, 37, 3),\n Point(60, 37, 4), Point(70, 7, 4)])\n ,\n Point_cloud(\"W2\", [Point(30, 7, 1), Point(50, 37, 1),\n Point(50, 37, 2), Point(70, 7, 2),\n Point(70, 7, 3), Point(90, 37, 3),\n Point(90, 37, 4), Point(110, 7, 4)])\n ], None)\n )\n\n templates.append(Template(\"L\", [\n Point_cloud(\"L1\", [Point(30, 27, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ,\n Point_cloud(\"L2\", [Point(30, 17, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ], None)\n )\n templates.append(Template(\"Z\", [\n Point_cloud(\"Z1\", [Point(30, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 27, 2),\n Point(30, 27, 3), Point(60, 27, 3)])\n ,\n Point_cloud(\"Z2\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(30, 35, 2),\n Point(30, 35, 3), Point(55, 30, 3)])\n ,\n Point_cloud(\"Z3\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(20, 37, 2),\n Point(20, 37, 3), Point(52, 33, 3)])\n ,\n Point_cloud(\"Z4\", [Point(30, 21, 1), Point(50, 8, 1),\n Point(50, 8, 2), Point(23, 30, 2),\n Point(23, 
30, 3), Point(54, 27, 3)])\n ,\n Point_cloud(\"Z5\", [Point(40, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 25, 2),\n Point(30, 25, 3), Point(70, 27, 3)])\n ,\n Point_cloud(\"Z6\", [Point(20, 7, 1), Point(70, 7, 1),\n Point(70, 7, 2), Point(30, 28, 2),\n Point(30, 28, 3), Point(57, 27, 3)])\n ], None)\n )\n\n return templates",
"def New(*args, **kargs):\n obj = itkPointSetPD33S.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def CreateInstanceTemplate(task, task_dir):\n backend_params = task.BackendParams()\n instance_count = backend_params.get('instance_count', 0)\n if instance_count <= 0:\n clovis_logger.info('No template required.')\n return True\n bucket = backend_params.get('storage_bucket')\n if not bucket:\n clovis_logger.error('Missing bucket in backend_params.')\n return False\n return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,\n task_dir)",
"def create_cloudformation_template(data, sg):\n # build top level json\n top_level_json = top_level_json_former.get_formation_telmplate()\n\n # build webapp json\n result = webapp_former.build_webapp(\"t2.micro\", data['installationinput'], \"wa01\", top_level_json, sg,\n subnet_tasks.return_subnet_id(data['subnets']),\n ami_tasks.return_image_id(data['amis']))\n\n return json.dumps(result)",
"def create_mission_scaffolding(mission_name: str, mission_time: datetime) -> str:\n\n mkdir_ignore_file_exist(os.path.join(root_directory, \"missions\"))\n mission_base_path = os.path.join(\n root_directory,\n \"missions\",\n f\"{mission_time.isoformat()[:-3].replace(':', '')}_{mission_name}\",\n )\n mkdir_ignore_file_exist(mission_base_path)\n\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"images\"))\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"images\", \"EO\"))\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"images\", \"HS\"))\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"images\", \"IR\"))\n\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"tactical\"))\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"tactical\", \"Detection\"))\n mkdir_ignore_file_exist(\n os.path.join(mission_base_path, \"tactical\", \"HeatPerimeter\")\n )\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"tactical\", \"IntenseHeat\"))\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"tactical\", \"IsolatedHeat\"))\n mkdir_ignore_file_exist(\n os.path.join(mission_base_path, \"tactical\", \"ScatteredHeat\")\n )\n\n mkdir_ignore_file_exist(os.path.join(mission_base_path, \"videos\"))\n\n return os.path.normpath(mission_base_path)",
"def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/test_answers_%(template)s.ini %(dir)s/bobtemplates/simplesconsultoria/%(template)s'\n % options)",
"def New(*args, **kargs):\n obj = itkPointSetPD22S.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj",
"def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_simple(self):\n pass",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def create_tpu(\n tpu_name: str,\n accelerator_type: str,\n accelerator_topology: str,\n zone: str,\n project: str,\n version: str,\n startup_script: Optional[List[str]] = None,\n block_until_completion: bool = True,\n network: Optional[str] = \"default\",\n subnetwork: Optional[str] = \"default\",\n preemptible: bool = False,\n reserved: bool = False,\n):\n if preemptible and reserved:\n raise ValueError(\n \"Preemptible and Reserved cannot be set to True simultaneously\"\n )\n\n tpu_node_url = os.path.join(\n _TPU_BASE_URL, \"projects\", project, \"locations\", zone, \"nodes\"\n )\n params = {\"nodeId\": tpu_name}\n accelerator_config = dict(\n topology=accelerator_topology, type=accelerator_type\n )\n if startup_script:\n startup_script = \"#! /bin/bash\\n\" + \"\\n\".join(startup_script)\n metadata = {\"startup-script\": startup_script}\n else:\n metadata = {}\n\n request = {\n \"accelerator_config\": accelerator_config,\n \"runtimeVersion\": version,\n \"networkConfig\": {\n \"enableExternalIps\": True,\n \"network\": network,\n \"subnetwork\": subnetwork,\n },\n \"metadata\": metadata,\n \"schedulingConfig\": {\n \"preemptible\": preemptible,\n \"reserved\": reserved,\n },\n }\n print(\"Creating TPU: \", tpu_name)\n print(\"Request: \", request)\n resp = requests.post(\n tpu_node_url, params=params, json=request, headers=get_headers()\n )\n resp.raise_for_status()\n if block_until_completion:\n create_op_url = os.path.join(_TPU_BASE_URL, resp.json()[\"name\"])\n while not resp.json()[\"done\"]:\n print(\"Create TPU operation still running...\")\n time.sleep(30)\n resp = requests.get(create_op_url, headers=get_headers())\n print(\"Create TPU operation complete.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a VOD subapplication.
|
def CreateSubAppId(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateSubAppId", params, headers=headers)
response = json.loads(body)
model = models.CreateSubAppIdResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
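
A brief usage sketch with the same client; the subapplication name and description are placeholders.

req = models.CreateSubAppIdRequest()
req.Name = "demo-subapp"                        # assumed: subapplication name
req.Description = "Sandbox for template tests"  # assumed: description
resp = client.CreateSubAppId(req)
print(resp.to_json_string())  # the response includes the new SubAppId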
|
[
"def create_app(instanceAddress, appName, description, permission=[\n 'read:account',\n 'write:account',\n 'read:blocks',\n 'write:blocks',\n 'read:drive',\n 'write:drive',\n 'read:favorites',\n 'write:favorites',\n 'read:following',\n 'write:following',\n 'read:messaging',\n 'write:messaging',\n 'read:mutes',\n 'write:mutes',\n 'write:notes',\n 'read:notifications',\n 'write:notifications',\n 'read:reactions',\n 'write:reactions',\n 'write:votes'\n], callbackUrl=None): # pragma: no cover\n res = requests.post(f\"https://{instanceAddress}/api/app/create\", data=json.dumps({'name': appName, 'description': description, 'permission': permission, 'callbackUrl': callbackUrl}), headers={'content-type': 'application/json'})\n\n if res.status_code != 200:\n raise MisskeyAPIException('/app/create', 200, res.status_code, res.text)\n else:\n return json.loads(res.text)",
"def create_application(fv_tenant, application, **args):\n args = args['optional_args'] if 'optional_args' in args.keys() else args\n\n fv_ap = Ap(fv_tenant, application,\n prio=get_value(args, 'prio', DEFAULT_QOS).lower())\n return fv_ap",
"def _create_application(self):\n key = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n\n if not self.interactive:\n name = 'tortuga-{}'.format(key)\n else:\n name = ''\n while not name:\n name = input(self.format('Application name: '))\n name = name.strip()\n\n if not self.interactive:\n url = 'https://univa.com/tortuga/{}'.format(key)\n else:\n url = ''\n while not url_valid(url):\n url = input(self.format('Application URL (a unique URI): '))\n\n password = secrets.token_urlsafe()\n\n print('Creating application...')\n\n try:\n application = self._run_az([\n 'ad', 'app', 'create',\n '--display-name', name,\n '--native-app', 'false',\n '--identifier-uris', url,\n '--key-type', 'Password',\n '--password', password\n ])\n #\n # Attach password to the application object so we can refer to\n # it later.\n #\n application['password'] = password\n self._az_applications.append(application)\n\n except APIError as e:\n print(self.format_error(str(e)))\n return self._create_application()\n\n #\n # Create the Service Principal\n #\n print('Creating service principal...')\n\n self._run_az([\n 'ad', 'sp', 'create',\n '--id', application['appId']\n\n ])\n\n print(self.format('The following application API password was '\n 'generated: {}', password))\n\n return application",
"def create_application_version(self, version_name, version_date, application_id):\n params = {'versionName' : version_name, 'versionDate' : version_date}\n return self._request('POST', 'rest/applications/' + str(application_id) + '/version', params)",
"def create_subvolume(self, client, vol_name, subvol_name, validate=True, **kwargs):\n subvolume_cmd = f\"ceph fs subvolume create {vol_name} {subvol_name}\"\n if kwargs.get(\"size\"):\n subvolume_cmd += f\" --size {kwargs.get('size')}\"\n if kwargs.get(\"group_name\"):\n subvolume_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"pool_layout\"):\n subvolume_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n if kwargs.get(\"uid\"):\n subvolume_cmd += f\" --uid {kwargs.get('uid')}\"\n if kwargs.get(\"gid\"):\n subvolume_cmd += f\" --gid {kwargs.get('gid')}\"\n if kwargs.get(\"mode\"):\n subvolume_cmd += f\" --mode {kwargs.get('mode')}\"\n if kwargs.get(\"namespace-isolated\"):\n subvolume_cmd += \" --namespace-isolated\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=subvolume_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of subvolume : {subvol_name} failed\")\n return cmd_out, cmd_rc",
"def testApplicationCreated(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user)\n profile_utils.seedNDBProfile(\n self.program.key(), user=user, admin_for=[self.org.key])\n\n # TODO(daniel): submit actual responses in POST data\n response = self.post(_getOrgApplicationSubmitUrl(self.org))\n self.assertResponseRedirect(response)\n\n # check that application has been created\n application = survey_model.SurveyResponse.query(ancestor=self.org.key).get()\n self.assertIsNotNone(application)",
"def create_subinterface(\n node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,\n type_subif=None):\n subif_types = type_subif.split()\n\n flags = 0\n if u\"no_tags\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS\n if u\"one_tag\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG\n if u\"two_tags\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS\n if u\"dot1ad\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD\n if u\"exact_match\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH\n if u\"default_sub\" in subif_types:\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT\n if type_subif == u\"default_sub\":\n flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\\\n | SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY\n\n cmd = u\"create_subif\"\n args = dict(\n sw_if_index=InterfaceUtil.get_interface_index(node, interface),\n sub_id=int(sub_id),\n sub_if_flags=flags.value if hasattr(flags, u\"value\")\n else int(flags),\n outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,\n inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0\n )\n err_msg = f\"Failed to create sub-interface on host {node[u'host']}\"\n with PapiSocketExecutor(node) as papi_exec:\n sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)\n\n if_key = Topology.add_new_port(node, u\"subinterface\")\n Topology.update_interface_sw_if_index(node, if_key, sw_if_index)\n ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)\n Topology.update_interface_name(node, if_key, ifc_name)\n\n return f\"{interface}.{sub_id}\", sw_if_index",
"def create_application_version(self, application_name, version_name, application_template, description,\n application_id=None):\n\n response = self.api.create_application_version(application_name=application_name,\n application_id=application_id,\n application_template=application_template,\n version_name=version_name,\n description=description)\n APIHelper().check_for_response_errors(response)\n return response.data['data']['id']",
"def _create_application(\n self,\n name,\n client_type=None,\n grant_type=None,\n capability=None,\n user=None,\n data_access_type=None,\n end_date=None,\n **kwargs\n ):\n client_type = client_type or Application.CLIENT_PUBLIC\n grant_type = grant_type or Application.GRANT_PASSWORD\n # This is the user to whom the application is bound.\n dev_user = user or User.objects.create_user(\"dev\", password=\"123456\")\n application = Application.objects.create(\n name=name,\n user=dev_user,\n client_type=client_type,\n authorization_grant_type=grant_type,\n **kwargs\n )\n\n if data_access_type:\n application.data_access_type = data_access_type\n\n if end_date:\n application.end_date = end_date\n\n if data_access_type or end_date:\n application.save()\n\n # add capability\n if capability:\n application.scope.add(capability)\n return application",
"def build_subscope(rc):\n root_scope, root_scope_vrf_id = defineRoot(rc)\n scopes = GetApplicationScopes(rc)\n root_scope_id = GetAppScopeId(scopes,root_scope)\n sub_scope = input(\"Name of the sub scope under Root Scope \" + root_scope + \" you want to create: \")\n subnet = input(\"Which subnet or IP you want your query is (X.X.X.X/Y): \")\n print(\"Building sub scope: \"+CYELLOW+sub_scope+ \" under Root Scope \" +CYELLOW+root_scope+ CEND)\n \n # Now build the sub scope\n req_payload = {\n \"short_name\": sub_scope,\n \"short_query\": {\n \"type\": \"subnet\",\n \"field\": \"ip\",\n \"value\": subnet\n },\n \"parent_app_scope_id\": root_scope_id\n }\n \n resp = rc.post(\"/app_scopes\", json_body=json.dumps(req_payload))\n parsed_resp = json.loads(resp.content)\n if resp.status_code == 200:\n sub_scope_id = str(parsed_resp[\"id\"])\n print(\"Sub scope: \"+CYELLOW+sub_scope+ \"with scope ID \" +CYELLOW+sub_scope_id +\" has been created\"+ CEND)\n else:\n print(\"Error occured during sub scope creation\")\n print(\"Error code: \"+str(resp.status_code))\n print(\"Content: \")\n print(resp.content)\n sys.exit(3)\n\n return sub_scope, sub_scope_id",
"def active_directory_application_set(tenant: str, app_name: str, app_id: str = '') -> ActiveDirectoryApplication:\n az_ad_domain: str = f'https://{tenant}.onmicrosoft.com'\n az_ad_identifier_url: str = f'{az_ad_domain}/{app_name}'\n app_domain: str = 'https://localhost:5001'\n az_ad_reply_url: str = f'{app_domain}/signin-oidc'\n\n if app_id:\n LOG.info('updating Azure AD application object registration...')\n command = ['az', 'ad', 'app', 'update', f'--id={app_id}']\n else:\n LOG.info('creating Azure AD application object registration...')\n command = ['az', 'ad', 'app', 'create']\n\n # --display-name {{az_app_registration}}\n # --homepage {{app_domain}}\n # --identifier-uris {{az_ad_identifier_urls | join(' ')}}\n # --reply-urls {{az_ad_reply_urls | join(' ')}}\n # --available-to-other-tenants {{app_authentication == 'MultiOrg'}}\n # # --required-resource-accesses {{az_ad_app_permissions | to_json}}\n # # --oauth2-allow-implicit-flow true\n # # TODO: add --app-roles once authentication testing is further\n command.extend([\n f'--display-name={app_name}',\n f'--homepage={app_domain}',\n f'--identifier-uris={az_ad_identifier_url}',\n f'--reply-urls={az_ad_reply_url}',\n '--available-to-other-tenants=true'\n ])\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n ad_app = ActiveDirectoryApplication(process.stdout)\n LOG.debug(f'ad_app: {ad_app}')\n return ad_app",
"def do_env_template_add_app(mc, args):\n with open(args.app_template_file, \"r\") as app_file:\n app_templates = json.load(app_file)\n if not isinstance(app_templates, list):\n app_templates = [app_templates]\n for app_template in app_templates:\n mc.env_templates.create_app(args.id, app_template)\n do_env_template_show(mc, args)",
"def create_applicant():\n uid = uniqid.generate()\n name = bottle.request.json['name']\n applicant = itw.Applicant.create(uid=uid, name=name)\n return json.dumps(applicant.json, indent=\" \")",
"def app_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/app/new', input_params, always_retry=always_retry, **kwargs)",
"def _create_com_app(exporter_one_contact, exporter, office, extra=None):\n if not extra:\n extra = {}\n\n application_type = ExportApplicationType.objects.get(\n type_code=ExportApplicationType.Types.MANUFACTURE\n )\n\n com_data = {\n \"created_by\": exporter_one_contact,\n \"last_updated_by\": exporter_one_contact,\n \"exporter\": exporter,\n \"exporter_office\": office,\n \"process_type\": CertificateOfManufactureApplication.PROCESS_TYPE,\n \"application_type\": application_type,\n \"contact\": exporter_one_contact,\n \"submit_datetime\": dt.datetime(2022, 12, 25, 12, 30, tzinfo=dt.timezone.utc),\n \"status\": \"SUBMITTED\",\n } | extra\n\n app = CertificateOfManufactureApplication.objects.create(**com_data)\n app.countries.add(Country.objects.get(name=\"Germany\"))\n app.countries.add(Country.objects.get(name=\"Albania\"))\n\n _create_pack_documents(app)\n\n return app",
"def startapp_v2(app_code_name, project_dir, *args, **kwargs):\n project_dir = os.path.abspath(project_dir)\n logger.debug(\n \"About to creating app for project dir {0}\".format(\n project_dir))\n app_path = \"{0}/apps/{1}\".format(project_dir, app_code_name)\n print app_path\n try:\n x = subprocess.Popen(\n ['mkdir', app_path]\n )\n print x\n except Exception as e:\n logger.error(e)\n try:\n x = subprocess.Popen(\n [\n project_python_path,\n '{0}/manage.py'.format(project_dir),\n 'startapp',\n app_code_name, app_path,\n '--template=sampleapp'\n ]\n )\n print x\n except Exception as e:\n logger.error(e)",
"def create_subworkflow(ctx, subworkflow, dir, author, force):\n from nf_core.subworkflows import SubworkflowCreate\n\n # Run function\n try:\n subworkflow_create = SubworkflowCreate(dir, subworkflow, author, force)\n subworkflow_create.create()\n except UserWarning as e:\n log.critical(e)\n sys.exit(1)\n except LookupError as e:\n log.error(e)\n sys.exit(1)",
"def addSubDevice(self, subDeviceStr):\n s = subDeviceStr.strip()\n spl = s.split()\n subVendorID = spl[0]\n subDeviceID = spl[1]\n subDeviceName = s.split(\" \")[-1]\n devID = \"%s:%s\" % (subVendorID, subDeviceID)\n self.subdevices[devID] = SubDevice(\n subVendorID, subDeviceID, subDeviceName)",
"def start_app(self):\n subcmd = self.master_config.Global.subcommand\n if subcmd=='create' or subcmd=='list':\n return\n elif subcmd=='start':\n self.start_app_start()\n elif subcmd=='stop':\n self.start_app_stop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom transcoding template. Up to 100 templates can be created.
|
def CreateTranscodeTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateTranscodeTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateTranscodeTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )",
"def create_template(request, storage):\n self = request.node.cls\n\n def finalizer():\n if ll_templates.check_template_existence(self.template_name):\n testflow.teardown(\"Remove template %s\", self.template_name)\n assert ll_templates.remove_template(True, self.template_name), (\n \"Failed to remove template %s\" % self.template_name\n )\n request.addfinalizer(finalizer)\n if not hasattr(self, 'template_name'):\n self.template_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_TEMPLATE\n )\n if not hasattr(self, 'storage_domain'):\n self.storage_domain = ll_sd.getStorageDomainNamesForType(\n config.DATA_CENTER_NAME, storage\n )[0]\n base_vm_for_snapshot = getattr(self, 'vm_name', config.VM_NAME[0])\n testflow.setup(\"Creating template %s\", self.template_name)\n assert ll_templates.createTemplate(\n True, vm=base_vm_for_snapshot, name=self.template_name,\n cluster=config.CLUSTER_NAME, storagedomain=self.storage_domain\n ), (\n \"Failed to create template %s from VM %s\" % (\n self.template_name, base_vm_for_snapshot\n )\n )",
"def _create_template():\n if os.path.exists(DOMAIN_TEMPLATE_FILE):\n return\n\n with open(DOMAIN_TEMPLATE_FILE, 'w') as template:\n template.write(DOMAIN_TEMPLATE)",
"def CreateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def create_sample_template(self, sample_template_post: dict):\n return self.retrieve_api_results(\n \"/sampleTemplates\", request_type=\"POST\", params=sample_template_post\n )",
"def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def create_template(template_name, created_by, created_on):\n\n template = Template(template_name=template_name,\n created_by=created_by,\n created_on=created_on)\n\n db.session.add(template)\n\n db.session.commit()\n\n return template",
"def new_template():\n form = NewTemplateForm()\n keywords = ProvisioningKeyword.query.all()\n\n if form.validate_on_submit():\n template = ProvisioningTemplate(\n name=form.name.data,\n template=form.template.data,\n options=form.compress_options(),\n enabled=request.form.get(\"enabled\", False)\n )\n db.session.add(template)\n db.session.commit()\n flash('Template {} successfully created'.format(template.name),\n 'form-success')\n return redirect(url_for('provisioning.templates'))\n\n return render_template('provisioning/new_template.html', \n form=form,\n keywords=keywords\n )",
"def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/test_answers_%(template)s.ini %(dir)s/bobtemplates/simplesconsultoria/%(template)s'\n % options)",
"def CreateAddressTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateAddressTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateAddressTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def create_service_template(self, name, type_, content, service_id, subject=None, parent_folder_id=None):\n data = {\n \"name\": name,\n \"template_type\": type_,\n \"content\": content,\n \"service\": service_id,\n \"process_type\": \"normal\",\n }\n if subject:\n data.update({\"subject\": subject})\n if parent_folder_id:\n data.update({\"parent_folder_id\": parent_folder_id})\n data = _attach_current_user(data)\n endpoint = f\"/service/{service_id}/template\"\n return self.post(endpoint, data)",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def create_stack(self, cnxt, stack_name, template, params, files, args):\r\n LOG.info(_('template is %s') % template)\r\n\r\n def _stack_create(stack):\r\n # Create/Adopt a stack, and create the periodic task if successful\r\n if stack.adopt_stack_data:\r\n stack.adopt()\r\n else:\r\n stack.create()\r\n\r\n if (stack.action in (stack.CREATE, stack.ADOPT)\r\n and stack.status == stack.COMPLETE):\r\n # Schedule a periodic watcher task for this stack\r\n self.stack_watch.start_watch_task(stack.id, cnxt)\r\n else:\r\n LOG.warning(_(\"Stack create failed, status %s\") % stack.status)\r\n\r\n tmpl = parser.Template(template, files=files)\r\n self._validate_new_stack(cnxt, stack_name, tmpl)\r\n\r\n common_params = api.extract_args(args)\r\n env = environment.Environment(params)\r\n stack = parser.Stack(cnxt, stack_name, tmpl, env, **common_params)\r\n\r\n self._validate_deferred_auth_context(cnxt, stack)\r\n\r\n stack.validate()\r\n\r\n stack.store()\r\n\r\n self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,\r\n _stack_create, stack)\r\n\r\n return dict(stack.identifier())",
"def test_otoroshi_controllers_adminapi_templates_controller_create_from_template_simple(self):\n pass",
"def upload_cfn_template(self, template_body, template_name, format=S3FileFormat.YAML):\n return self.upload_file(\n file_type=S3FileType.TEMPLATES, content=template_body, file_name=template_name, format=format\n )",
"def do_env_template_create(mc, args):\n env_template = mc.env_templates.create(\n {\"name\": args.name, \"is_public\": args.is_public})\n _print_env_template_list([env_template])",
"def create_stack(self):\n command = \"cfn-create-stack \" + self.stack_name + \" -f \" + self.template_file\n if (self.parameters is not None):\n command += \" -p \\\"\" + self.parameters + \"\\\"\"\n run_command(command)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to create a custom watermarking template. Up to 1,000 templates can be created.
|
def CreateWatermarkTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("CreateWatermarkTemplate", params, headers=headers)
response = json.loads(body)
model = models.CreateWatermarkTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def _create_template(self, num_instances, num_replace=0):\r\n instance_definition = self._get_instance_definition()\r\n old_resources = self._get_instance_templates()\r\n templates = template.resource_templates(\r\n old_resources, instance_definition, num_instances, num_replace)\r\n return {\"HeatTemplateFormatVersion\": \"2012-12-12\",\r\n \"Resources\": dict(templates)}",
"def create_sample_template(self, sample_template_post: dict):\n return self.retrieve_api_results(\n \"/sampleTemplates\", request_type=\"POST\", params=sample_template_post\n )",
"def create_service_template(self, payload):\n return self._request('post', path='/templates', data=json.dumps(payload), value_only=True)",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )",
"def CreateTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def CreateInstanceTemplate(task, task_dir):\n backend_params = task.BackendParams()\n instance_count = backend_params.get('instance_count', 0)\n if instance_count <= 0:\n clovis_logger.info('No template required.')\n return True\n bucket = backend_params.get('storage_bucket')\n if not bucket:\n clovis_logger.error('Missing bucket in backend_params.')\n return False\n return instance_helper.CreateTemplate(task.BackendParams()['tag'], bucket,\n task_dir)",
"def new_template():\n form = NewTemplateForm()\n keywords = ProvisioningKeyword.query.all()\n\n if form.validate_on_submit():\n template = ProvisioningTemplate(\n name=form.name.data,\n template=form.template.data,\n options=form.compress_options(),\n enabled=request.form.get(\"enabled\", False)\n )\n db.session.add(template)\n db.session.commit()\n flash('Template {} successfully created'.format(template.name),\n 'form-success')\n return redirect(url_for('provisioning.templates'))\n\n return render_template('provisioning/new_template.html', \n form=form,\n keywords=keywords\n )",
"def _create_template():\n if os.path.exists(DOMAIN_TEMPLATE_FILE):\n return\n\n with open(DOMAIN_TEMPLATE_FILE, 'w') as template:\n template.write(DOMAIN_TEMPLATE)",
"def create_or_update_template(inps_dict):\n\n inps = inps_dict\n\n print('\\n*************** Template Options ****************')\n # write default template\n\n print (\"Custom Template File: \", inps.customTemplateFile)\n\n inps.project_name = get_project_name(inps.customTemplateFile)\n print (\"Project Name: \", inps.project_name)\n\n inps.work_dir = get_work_directory(None, inps.project_name)\n print(\"Work Dir: \", inps.work_dir)\n\n # Creates default Template\n inps = create_default_template(inps)\n\n\n return inps",
"def add_template(self, template, label, units='counts'):\n\n if units == 'flux':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a flux template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template *= self.exposure_map\n\n if units == 'PS':\n assert (len(self.exposure_map) != 0), \\\n \"Must provide exposure map before adding a PS template\"\n assert (len(self.exposure_map) == len(template)), \\\n \"Template must be the same shape as the exposure map\"\n template /= self.exposure_map/np.mean(self.exposure_map)\n self.templates_dict.update({label: template})\n self.templates.append(template)",
"def create_template(request, storage):\n self = request.node.cls\n\n def finalizer():\n if ll_templates.check_template_existence(self.template_name):\n testflow.teardown(\"Remove template %s\", self.template_name)\n assert ll_templates.remove_template(True, self.template_name), (\n \"Failed to remove template %s\" % self.template_name\n )\n request.addfinalizer(finalizer)\n if not hasattr(self, 'template_name'):\n self.template_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_TEMPLATE\n )\n if not hasattr(self, 'storage_domain'):\n self.storage_domain = ll_sd.getStorageDomainNamesForType(\n config.DATA_CENTER_NAME, storage\n )[0]\n base_vm_for_snapshot = getattr(self, 'vm_name', config.VM_NAME[0])\n testflow.setup(\"Creating template %s\", self.template_name)\n assert ll_templates.createTemplate(\n True, vm=base_vm_for_snapshot, name=self.template_name,\n cluster=config.CLUSTER_NAME, storagedomain=self.storage_domain\n ), (\n \"Failed to create template %s from VM %s\" % (\n self.template_name, base_vm_for_snapshot\n )\n )",
"def create_template(template_name, created_by, created_on):\n\n template = Template(template_name=template_name,\n created_by=created_by,\n created_on=created_on)\n\n db.session.add(template)\n\n db.session.commit()\n\n return template",
"def create_template(self):\n options = {\n 'dir': os.path.join(os.path.dirname(__file__)),\n 'template': self.template,\n 'project': self.project,\n }\n return self.env.run(\n '%(dir)s/bin/mrbob -O %(project)s --config '\n '%(dir)s/test_answers_%(template)s.ini %(dir)s/bobtemplates/simplesconsultoria/%(template)s'\n % options)",
"def register_blackbox_template(self, name, lib_template = None, techmap_template = None, parameters = None,\n premap_commands = None):\n if name in self._blackbox_entries:\n raise PRGAInternalError(\"Blackbox template entry '{}' already registered\".format(name))\n return self._blackbox_entries.setdefault(name, YosysTemplateEntry(lib_template, techmap_template, parameters,\n premap_commands))",
"def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack",
"def CreateImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def init_templates():\n\n templates = []\n\n # single stroke templates (all fingers doing the same if various fingers) (1 finger)\n templates.append(Template(\"T\", [\n # different PC for having different ways of drawing Template.name (T) for better recognition\n Point_cloud(\"T1\", [Point(30, 7, 1), Point(103, 7, 1),\n Point(66, 7, 2), Point(66, 87, 2)])\n ,\n Point_cloud(\"T2\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 77, 3)])\n ,\n Point_cloud(\"T3\", [Point(30, 7, 1), Point(123, 7, 1),\n Point(80, 17, 2), Point(30, 7, 2),\n Point(80, 17, 3), Point(80, 50, 3)])\n ], None)\n )\n templates.append(Template(\"V\", [\n Point_cloud(\"V1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V2\", [Point(0, 7, 1), Point(25, 37, 1),\n Point(25, 37, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V3\", [Point(30, 7, 1), Point(40, 25, 1),\n Point(40, 25, 2), Point(50, 7, 2)])\n ,\n Point_cloud(\"V4\", [Point(30, 16, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 7, 2)])\n ,\n Point_cloud(\"V5\", [Point(30, 7, 1), Point(33, 25, 1),\n Point(33, 25, 2), Point(38, 16, 2)])\n ], None)\n )\n templates.append(Template(\"D\", [\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(50, 53, 2),\n Point(50, 53, 3), Point(55, 37, 3),\n Point(55, 37, 4), Point(50, 21, 4),\n Point(50, 21, 5), Point(30, 7, 5)])\n ,\n Point_cloud(\"D1\", [Point(30, 7, 1), Point(30, 67, 1),\n Point(30, 67, 2), Point(60, 53, 2),\n Point(60, 53, 3), Point(65, 37, 3),\n Point(65, 37, 4), Point(60, 21, 4),\n Point(60, 21, 5), Point(30, 7, 5)])\n ,\n ], None)\n )\n templates.append(Template(\"X\", [\n Point_cloud(\"X1\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2)])\n ,\n Point_cloud(\"X1_2\", [Point(30, 7, 1), Point(60, 34, 1),\n Point(60, 7, 2), Point(30, 34, 2)])\n ,\n Point_cloud(\"X2\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 7, 3), Point(60, 7, 3)])\n ,\n Point_cloud(\"X3\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 47, 3), Point(60, 47, 3)])\n ,\n Point_cloud(\"X4\", [Point(30, 7, 1), Point(60, 47, 1),\n Point(60, 7, 2), Point(30, 47, 2),\n Point(30, 7, 3), Point(30, 47, 3)])\n ], None)\n )\n templates.append(Template(\"W\", [\n Point_cloud(\"W1\", [Point(30, 7, 1), Point(40, 37, 1),\n Point(40, 37, 2), Point(50, 20, 2),\n Point(50, 20, 3), Point(60, 37, 3),\n Point(60, 37, 4), Point(70, 7, 4)])\n ,\n Point_cloud(\"W2\", [Point(30, 7, 1), Point(50, 37, 1),\n Point(50, 37, 2), Point(70, 7, 2),\n Point(70, 7, 3), Point(90, 37, 3),\n Point(90, 37, 4), Point(110, 7, 4)])\n ], None)\n )\n\n templates.append(Template(\"L\", [\n Point_cloud(\"L1\", [Point(30, 27, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ,\n Point_cloud(\"L2\", [Point(30, 17, 1), Point(30, 37, 1),\n Point(30, 37, 2), Point(40, 37, 2)])\n ], None)\n )\n templates.append(Template(\"Z\", [\n Point_cloud(\"Z1\", [Point(30, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 27, 2),\n Point(30, 27, 3), Point(60, 27, 3)])\n ,\n Point_cloud(\"Z2\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(30, 35, 2),\n Point(30, 35, 3), Point(55, 30, 3)])\n ,\n Point_cloud(\"Z3\", [Point(30, 7, 1), Point(50, 12, 1),\n Point(50, 12, 2), Point(20, 37, 2),\n Point(20, 37, 3), Point(52, 33, 3)])\n ,\n Point_cloud(\"Z4\", [Point(30, 21, 1), Point(50, 8, 1),\n Point(50, 8, 2), Point(23, 30, 2),\n Point(23, 
30, 3), Point(54, 27, 3)])\n ,\n Point_cloud(\"Z5\", [Point(40, 7, 1), Point(60, 7, 1),\n Point(60, 7, 2), Point(30, 25, 2),\n Point(30, 25, 3), Point(70, 27, 3)])\n ,\n Point_cloud(\"Z6\", [Point(20, 7, 1), Point(70, 7, 1),\n Point(70, 7, 2), Point(30, 28, 2),\n Point(30, 28, 3), Point(57, 27, 3)])\n ], None)\n )\n\n return templates",
"def _create_template_config(self, config):\n pass",
"def create(self, config):\n response = self._session.post(\n path=self.RESOURCE_PATH.format(base_api=self.base_api),\n headers={\n 'Accept': self._accept_header(),\n 'Content-Type': 'application/json',\n },\n data=json.dumps(config),\n )\n\n etag = response.headers['ETag']\n return TemplateConfig(session=self._session, data=response.json(), etag=etag)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom video content recognition template.
|
def DeleteAIRecognitionTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAIRecognitionTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteAIRecognitionTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def destroy_cluster_template(self, cluster_template_id):",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def test_delete_apiextensions_v1beta1_custom_resource_definition(self):\n pass",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def delete(self, configuration_id, **kwargs):\n # type: (string_types, dict) -> BitmovinResponse\n\n return self.api_client.delete(\n '/encoding/configurations/subtitles/imsc/{configuration_id}',\n path_params={'configuration_id': configuration_id},\n type=BitmovinResponse,\n **kwargs\n )",
"def delete(self, request, vnf_id):\n if API.deleteVNFTemplate(vnf_id):\n return HttpResponse(status=200)\n return HttpResponse(status=404)",
"def video_remove(self, video_id=None):\n self.command('video_remove', video_id)",
"def delete_collection_namespaced_template(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete an adaptive bitrate streaming template.
|
def DeleteAdaptiveDynamicStreamingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAdaptiveDynamicStreamingTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteAdaptiveDynamicStreamingTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def destroy_cluster_template(self, cluster_template_id):",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def delete_sla_template(sla_template_uuid):\n\n url = env.sl_templates_api + '/' + sla_template_uuid\n\n resp = requests.delete(url, timeout=env.timeout, headers=env.header)\n LOG.debug(sla_template_uuid)\n LOG.debug(str(resp.text))\n\n env.set_return_header(resp.headers)\n\n if resp.status_code == 200:\n return True, sla_template_uuid\n else:\n return False, json.loads(resp.text)",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def delete(self, configuration_id, **kwargs):\n # type: (string_types, dict) -> BitmovinResponse\n\n return self.api_client.delete(\n '/encoding/configurations/subtitles/imsc/{configuration_id}',\n path_params={'configuration_id': configuration_id},\n type=BitmovinResponse,\n **kwargs\n )",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_policy_template(self, policy_template_id):\n baseURL = self.baseURL + \"policy-templates/{}\".format(policy_template_id)\n\n return self._make_request(\"delete\",baseURL)",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def delete_by_template(self, template):\n t_name = self._table_name \n \n try:\n \n w_clause, args_ = self._template_to_where_clause(template)\n q = \"DELETE FROM \" + t_name + \" \" + w_clause\n nums = self._run_q(q, args=args_, fields=None, fetch=False, cnx=None, commit=True)\n\n except Exception as e:\n print(\"Got exception = \", e)\n raise e\n \n return nums",
"def delete_configuration_stanza(self, instance, stanza=None):\n endpoint = API_AUTHZ_SERVER + \"{}/configuration/stanza/{}/v1\".format(instance, stanza)\n response.success = response.status_code == 204\n\n return response",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def delete(self, configuration_id, **kwargs):\n # type: (string_types, dict) -> BitmovinResponse\n\n return self.api_client.delete(\n '/encoding/configurations/audio/mp3/{configuration_id}',\n path_params={'configuration_id': configuration_id},\n type=BitmovinResponse,\n **kwargs\n )",
"def delete_fileset_template(\n self, id, preserve_snapshots=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/v1/fileset_template/{id}'\n path_format_arguments = {\n 'id': self._serialize.url(\"id\", id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n if preserve_snapshots is not None:\n query_parameters['preserve_snapshots'] = self._serialize.query(\"preserve_snapshots\", preserve_snapshots, 'bool')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if custom_headers:\n header_parameters.update(custom_headers)\n\n # Construct and send request\n request = self._client.delete(url, query_parameters)\n response = self._client.send(request, header_parameters, **operation_config)\n\n if response.status_code not in [204]:\n raise HttpOperationError(self._deserialize, response)\n\n if raw:\n client_raw_response = ClientRawResponse(None, response)\n return client_raw_response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom animated image generating template.
|
def DeleteAnimatedGraphicsTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteAnimatedGraphicsTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteAnimatedGraphicsTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def destroy(self, image):\n return self.image.destroy(image)",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete(ctx):\n delete_script = \"\"\"\n rm -r $OUTPUT_PATH/fhir/IG || true > /dev/null\n gsutil -m rm -r gs://$GOOGLE_BUCKET/fhir/IG \n \"\"\"\n run_cmd(delete_script)",
"def delete(self, *args, **kwargs):\n self.image.delete()\n super(StoredImage, self).delete(*args, **kwargs)",
"def _delete_animation(self, value):\n pass",
"def cleanup_thumbnail(sender, instance, **kargs):\n if instance.file.name.endswith('.png'):\n delete(instance.file)",
"def delete_image(filename):\n # Delete image\n image_path = (Path(__file__).parent / f'../images{filename}').resolve()\n if os.path.exists(image_path):\n os.remove(image_path)",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def delete(self):\n return attachment_service.delete_attachment(get_jwt_identity(), get_uuid(request)), 201",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def destroy_cluster_template(self, cluster_template_id):",
"def delAsset(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete an image processing template.
|
def DeleteImageProcessingTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteImageProcessingTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteImageProcessingTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def destroy_cluster_template(self, cluster_template_id):",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_policy_template(self, policy_template_id):\n baseURL = self.baseURL + \"policy-templates/{}\".format(policy_template_id)\n\n return self._make_request(\"delete\",baseURL)",
"def delete_by_template(self, template):\n t_name = self._table_name \n \n try:\n \n w_clause, args_ = self._template_to_where_clause(template)\n q = \"DELETE FROM \" + t_name + \" \" + w_clause\n nums = self._run_q(q, args=args_, fields=None, fetch=False, cnx=None, commit=True)\n\n except Exception as e:\n print(\"Got exception = \", e)\n raise e\n \n return nums",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def delete_template_group(\n self,\n template_group: str,\n) -> bool:\n template_group.replace(\" \", \"%20\")\n return self._delete(\n \"/template/templateGroups/{}\".format(template_group),\n expected_status=[204],\n return_type=\"bool\",\n )",
"def delete_flavor(self, flavor_id):\n try:\n client = oca.Client(self.user + ':' + self.passwd, self.url)\n listaTemplate = oca.VmTemplatePool(client)\n listaTemplate.info()\n self.logger.info(\"Deleting VIM flavor DELETE {}\".format(self.url))\n for template in listaTemplate:\n if str(template.id) == str(flavor_id):\n template.delete()\n return template.id\n raise vimconn.vimconnNotFoundException(\"Flavor {} not found\".format(flavor_id))\n except Exception as e:\n self.logger.error(\"Delete flavor \" + str(flavor_id) + \" error: \" + str(e))\n raise vimconn.vimconnException(e)",
"def delete_collection_namespaced_template(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n return data",
"def DeleteAddressTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteAddressTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteAddressTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def delete_certificate_template(\n project_id: str,\n location: str,\n certificate_template_id: str,\n) -> None:\n\n caServiceClient = privateca_v1.CertificateAuthorityServiceClient()\n\n # Request to delete a certificate template.\n request = privateca_v1.DeleteCertificateTemplateRequest(\n name=caServiceClient.certificate_template_path(\n project_id,\n location,\n certificate_template_id,\n )\n )\n operation = caServiceClient.delete_certificate_template(request=request)\n result = operation.result()\n\n print(\"Operation result\", result)\n print(\"Deleted certificate template:\", certificate_template_id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete an image sprite generating template.
|
def DeleteImageSpriteTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteImageSpriteTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteImageSpriteTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def delete_sprite(self, delete):\n if delete:\n self.__inactive_sprite = True\n else:\n self.__inactive_sprite = False",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)",
"def destroy_cluster_template(self, cluster_template_id):",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete_image(filename):\n # Delete image\n image_path = (Path(__file__).parent / f'../images{filename}').resolve()\n if os.path.exists(image_path):\n os.remove(image_path)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def delete_image(self, node_image):\r\n\r\n raise NotImplementedError(\r\n 'delete_image not implemented for this driver')",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def destroy(self, image):\n return self.image.destroy(image)",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def gameDeleteHandler(sender, instance, **kwargs):\n instance.gameimage.delete(save=False)",
"def __del__(self):\n\n # Delete sprite (if it has been defined)\n try:\n self._canvas.delete(self._sprite)\n except AttributeError:\n pass\n except tk.TclError:\n pass\n\n # Delete all missile objects\n del self._missiles[:]",
"def test_delete_image(self):\n image = self._create_image()\n\n with self.override_role():\n self.image_client.delete_image(image['id'])\n self.image_client.wait_for_resource_deletion(image['id'])",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom sampled screencapturing template.
|
def DeleteSampleSnapshotTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteSampleSnapshotTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteSampleSnapshotTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def destroy_cluster_template(self, cluster_template_id):",
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def del_samplet(self, sample_id):\n if sample_id not in self._data:\n warn('Sample to delete not found in the dataset - nothing to do.')\n else:\n self._data.pop(sample_id)\n self._targets.pop(sample_id)\n print('{} removed.'.format(sample_id))",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete_by_template(self, template):\n t_name = self._table_name \n \n try:\n \n w_clause, args_ = self._template_to_where_clause(template)\n q = \"DELETE FROM \" + t_name + \" \" + w_clause\n nums = self._run_q(q, args=args_, fields=None, fetch=False, cnx=None, commit=True)\n\n except Exception as e:\n print(\"Got exception = \", e)\n raise e\n \n return nums",
"def delete(ctx):\n delete_script = \"\"\"\n rm -r $OUTPUT_PATH/fhir/IG || true > /dev/null\n gsutil -m rm -r gs://$GOOGLE_BUCKET/fhir/IG \n \"\"\"\n run_cmd(delete_script)",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def test_delete_pgp_template(self):\n pass",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def delete(ctx, resource_path, namespace=DEV_NAMESPACE):\n print(f'Deleting local k8s stack for {resource_path}...')\n ctx.run(f'{KUBERNETES_DELETE_CMD} \"{resource_path}\" -n \"{namespace}\"')",
"def delete_sla_template(sla_template_uuid):\n\n url = env.sl_templates_api + '/' + sla_template_uuid\n\n resp = requests.delete(url, timeout=env.timeout, headers=env.header)\n LOG.debug(sla_template_uuid)\n LOG.debug(str(resp.text))\n\n env.set_return_header(resp.headers)\n\n if resp.status_code == 200:\n return True, sla_template_uuid\n else:\n return False, json.loads(resp.text)",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def test_delete_without_backing_webhook(self):\r\n self._setup_test_stack(self.webhook_template)\r\n resource = self.stack['my_webhook']\r\n del self.fake_auto_scale.webhooks['0']\r\n scheduler.TaskRunner(resource.delete)()\r\n self.assertEqual({}, self.fake_auto_scale.webhooks)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom time point screencapturing template.
|
def DeleteSnapshotByTimeOffsetTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteSnapshotByTimeOffsetTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteSnapshotByTimeOffsetTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
|
[
"def destroy_cluster_template(self, cluster_template_id):",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete(self):\n if not self.has('id'):\n raise Exception(\"Time entry must have an id to be deleted.\")\n\n url = \"%s/time_entries/%s\" % (TOGGL_URL, self.get('id'))\n httpexec(url, 'delete')",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def delete_sla_template(sla_template_uuid):\n\n url = env.sl_templates_api + '/' + sla_template_uuid\n\n resp = requests.delete(url, timeout=env.timeout, headers=env.header)\n LOG.debug(sla_template_uuid)\n LOG.debug(str(resp.text))\n\n env.set_return_header(resp.headers)\n\n if resp.status_code == 200:\n return True, sla_template_uuid\n else:\n return False, json.loads(resp.text)",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def test_delete_time_tracking_entry(self):\n pass",
"def delete_timeskew(self, path):\n if path[0] == '/':\n path = path[1:]\n return self._xjtrans(\"/fs/%s/timeskew_estimate\" % path, \"DELETE\", None, True, APITimestampFormat.NANOSECOND)",
"def test_delete_zos_template(self):\n pass",
"def DeleteTimestamp(name):\r\n all_keys = GeneralTimestampShardConfig.AllKeys(name)\r\n ndb.delete_multi(all_keys)\r\n memcache.delete(name)\r\n config_key = ndb.Key('GeneralTimestampShardConfig', name)\r\n config_key.delete()",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_timestamping_url_from_ss(self, section=u'cs_url'):\n self.wait_until_page_contains(TESTDATA[section][u'tsp_url'])\n self.ss_system_parameters.click_element_from_table_tsps_1(TESTDATA[section][u'tsp_url'])\n self.ss_system_parameters.click_button_id_tsp_delete()",
"def test_api_v3_entity_templates_entity_template_public_id_delete(self):\n pass",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom transcoding template.
|
def DeleteTranscodeTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteTranscodeTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteTranscodeTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
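A minimal usage sketch for the method above, assuming the standard tencentcloud-sdk-python package layout; the region, placeholder credentials, and the `Definition` request field (the numeric ID of the transcoding template) are assumptions based on typical Tencent Cloud VOD conventions, not taken from this record.

# Hedged sketch: delete a custom transcoding template by its assumed numeric ID.
from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")  # placeholder credentials
client = vod_client.VodClient(cred, "ap-guangzhou")                # region chosen for illustration

req = models.DeleteTranscodeTemplateRequest()
req.Definition = 100001          # assumed field: ID of the transcoding template to delete
resp = client.DeleteTranscodeTemplate(req)
print(resp.to_json_string())     # on success the body typically carries only a RequestId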
|
[
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def DeleteAddressTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteAddressTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteAddressTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def destroy_cluster_template(self, cluster_template_id):",
"def delete_certificate_template(\n project_id: str,\n location: str,\n certificate_template_id: str,\n) -> None:\n\n caServiceClient = privateca_v1.CertificateAuthorityServiceClient()\n\n # Request to delete a certificate template.\n request = privateca_v1.DeleteCertificateTemplateRequest(\n name=caServiceClient.certificate_template_path(\n project_id,\n location,\n certificate_template_id,\n )\n )\n operation = caServiceClient.delete_certificate_template(request=request)\n result = operation.result()\n\n print(\"Operation result\", result)\n print(\"Deleted certificate template:\", certificate_template_id)",
"def delete_by_template(self, template):\n t_name = self._table_name \n \n try:\n \n w_clause, args_ = self._template_to_where_clause(template)\n q = \"DELETE FROM \" + t_name + \" \" + w_clause\n nums = self._run_q(q, args=args_, fields=None, fetch=False, cnx=None, commit=True)\n\n except Exception as e:\n print(\"Got exception = \", e)\n raise e\n \n return nums",
"def delete_gtranslate(sender, **kwargs):\r\n GtModel = get_model('gtranslate', 'Gtranslate')\r\n try:\r\n gt = GtModel.objects.get(project=sender)\r\n gt.delete()\r\n except GtModel.DoesNotExist, e:\r\n pass",
"def test_delete_zos_template(self):\n pass",
"def delete_policy_template(self, policy_template_id):\n baseURL = self.baseURL + \"policy-templates/{}\".format(policy_template_id)\n\n return self._make_request(\"delete\",baseURL)",
"def delete_translation(request, project_slug=None, resource_slug=None,\r\n lang_code=None):\r\n\r\n if not request.POST:\r\n return HttpResponseBadRequest()\r\n\r\n project = get_object_or_404(Project, slug=project_slug)\r\n\r\n resource = get_object_or_404(Resource, slug=resource_slug, project=project)\r\n language = get_object_or_404(Language, code=lang_code)\r\n data = simplejson.loads(request.raw_post_data)\r\n to_delete = data[\"to_delete\"]\r\n ids = []\r\n # Ensure that there are no empty '' ids\r\n for se_id in to_delete:\r\n if se_id:\r\n ids.append(se_id)\r\n\r\n\r\n try:\r\n translations = Translation.objects.filter(source_entity__pk__in=ids,\r\n language=language)\r\n\r\n translations.delete()\r\n# request.user.message_set.create(\r\n# message=_(\"Translations deleted successfully!\"))\r\n except:\r\n# request.user.message_set.create(\r\n# message=_(\"Failed to delete translations due to some error!\"))\r\n raise Http404\r\n\r\n invalidate_stats_cache(resource, language, user=request.user)\r\n\r\n return HttpResponse(status=200)",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def remove_template_from_export_domain(request, storage):\n self = request.node.cls\n\n def finalizer():\n export_domain = getattr(\n self, 'export_domain', config.EXPORT_DOMAIN_NAME\n )\n\n testflow.teardown(\n \"Remove template %s from export domain %s\",\n self.template_name, export_domain\n )\n assert ll_templates.removeTemplateFromExportDomain(\n positive=True, template=self.template_name,\n export_storagedomain=export_domain\n ), \"Failed to remove template: %s from export domain: %s\" % (\n self.vm_name, export_domain\n )\n request.addfinalizer(finalizer)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete a custom watermarking template.
|
def DeleteWatermarkTemplate(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteWatermarkTemplate", params, headers=headers)
response = json.loads(body)
model = models.DeleteWatermarkTemplateResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
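A similar hedged sketch for the watermarking-template deletion above; the `Definition` field name and the client setup are again assumptions rather than details confirmed by this record.

# Hedged sketch: delete a custom watermarking template (field name assumed).
from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

client = vod_client.VodClient(
    credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY"), "ap-guangzhou")

req = models.DeleteWatermarkTemplateRequest()
req.Definition = 200001          # assumed field: watermarking template ID
print(client.DeleteWatermarkTemplate(req).to_json_string())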
|
[
"def DeleteTagTemplate(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def delete_template(request):\n if request.method == 'POST':\n tid = int(request.POST.get('tid'))\n t_kpi.objects.filter(id=tid).delete()\n \n return JsonResponse({'status': 'delete_kpi_template_ok'})",
"def destroy_cluster_template(self, cluster_template_id):",
"def DeleteTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def post_delete_template(sender, instance, **kwargs):\n exporter_list = exporter_api.get_all()\n for exporter in exporter_list:\n if instance in exporter.templates.all():\n exporter.templates.remove(instance)\n exporter_api.upsert(exporter)",
"def delete_fcp_template(self, template_id):\n return self.db.delete_fcp_template(template_id)",
"def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200",
"def delete_email_template(self, template_id):\n call = \"rest/asset/v1/emailTemplate/\"+template_id+\"/delete.json\"\n method = \"POST\"\n return self.__generic_api_call(call, method)",
"def delete(service_template_name, model_storage, resource_storage, plugin_manager, logger):\n logger.info('Deleting service template {0}...'.format(service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n core = Core(model_storage, resource_storage, plugin_manager)\n core.delete_service_template(service_template.id)\n logger.info('Service template {0} deleted'.format(service_template_name))",
"def delete_template_request(template_id):\n template = ProvisioningTemplate.query.filter_by(id=template_id).first()\n if template is None:\n abort(404)\n return render_template('provisioning/manage_template.html', template=template)",
"def delete_service_template(self, id):\n return self._request('delete', path='/templates/{}'.format(id), value_only=True)",
"def delete(self, ng_template_id):\n self._delete('/node-group-templates/%s' % ng_template_id)",
"def DeleteAddressTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteAddressTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteAddressTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def DeleteImageProcessingTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DeleteImageProcessingTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.DeleteImageProcessingTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"def delete_policy_template(self, policy_template_id):\n baseURL = self.baseURL + \"policy-templates/{}\".format(policy_template_id)\n\n return self._make_request(\"delete\",baseURL)",
"def delete_instance_template(key):\n instance_template = yield key.get_async()\n if not instance_template:\n logging.warning('InstanceTemplate does not exist: %s', key)\n return\n\n if instance_template.active or instance_template.drained:\n # All instance template revisions, even drained ones, must be deleted first.\n return\n\n yield key.delete_async()",
"def DeleteInstanceTemplate(tag, try_count=0):\n clovis_logger.info('Instance template destruction for tag: ' + tag)\n if not instance_helper.DeleteTemplate(tag):\n clovis_logger.info('Instance template destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceTemplate, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up template destruction for: ' + tag)\n clovis_logger.info('Cleanup complete for tag: ' + tag)",
"def delete_by_template(self, template):\n t_name = self._table_name \n \n try:\n \n w_clause, args_ = self._template_to_where_clause(template)\n q = \"DELETE FROM \" + t_name + \" \" + w_clause\n nums = self._run_q(q, args=args_, fields=None, fetch=False, cnx=None, commit=True)\n\n except Exception as e:\n print(\"Got exception = \", e)\n raise e\n \n return nums",
"def delete_collection_namespaced_template(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_collection_namespaced_template_with_http_info(namespace, **kwargs)\n return data",
"def delete_fileset_template(\n self, id, preserve_snapshots=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/v1/fileset_template/{id}'\n path_format_arguments = {\n 'id': self._serialize.url(\"id\", id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n if preserve_snapshots is not None:\n query_parameters['preserve_snapshots'] = self._serialize.query(\"preserve_snapshots\", preserve_snapshots, 'bool')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if custom_headers:\n header_parameters.update(custom_headers)\n\n # Construct and send request\n request = self._client.delete(url, query_parameters)\n response = self._client.send(request, header_parameters, **operation_config)\n\n if response.status_code not in [204]:\n raise HttpOperationError(self._deserialize, response)\n\n if raw:\n client_raw_response = ClientRawResponse(None, response)\n return client_raw_response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to delete keyword samples in batches.
|
def DeleteWordSamples(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DeleteWordSamples", params, headers=headers)
response = json.loads(body)
model = models.DeleteWordSamplesResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
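A hedged sketch of batch keyword-sample deletion; the `Keywords` list field is an assumption inferred from the API name, not confirmed by this record.

# Hedged sketch: delete several keyword samples in one call (field name assumed).
from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

client = vod_client.VodClient(
    credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY"), "ap-guangzhou")

req = models.DeleteWordSamplesRequest()
req.Keywords = ["keyword-a", "keyword-b"]   # assumed field: keyword samples to remove
print(client.DeleteWordSamples(req).to_json_string())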
|
[
"def delete_documents(self, content_source_key, ids):\n endpoint = \"sources/{}/documents/bulk_destroy\".format(content_source_key)\n return self.session.request(\"post\", endpoint, json=ids)",
"def delete_message_batch(self, **kwargs):\n for entry in kwargs['Entries']:\n self.queues[kwargs['QueueUrl']].remove(entry['ReceiptHandle'])",
"def bulk_delete(self, layer_name, doc_type, root_doc_id):\n raise NotImplementedError",
"def test_bulk_delete(self):\n\n se = SearchEngineFactory().create()\n # se.create_index(index='test')\n\n for i in range(10):\n x = {\n 'id': i,\n 'type': 'prefLabel',\n 'value': 'test pref label',\n }\n se.index_data(index='test', doc_type='test', body=x, idfield='id', refresh=True)\n y = {\n 'id': i + 100,\n 'type': 'altLabel',\n 'value': 'test alt label',\n }\n se.index_data(index='test', doc_type='test', body=y, idfield='id', refresh=True)\n\n\n query = Query(se, start=0, limit=100)\n match = Match(field='type', query='altLabel')\n query.add_query(match)\n\n query.delete(index='test', refresh=True)\n\n self.assertEqual(se.es.count(index='test', doc_type='test')['count'], 10)",
"def delete_dataset(datasetName=None):\n pass",
"def delete_sampling(self, token_list: List[str or Tuple[str, str]],\n sample_idx: List[int]) -> List[str or Tuple[str, str]]:\n result = []\n for i, token in enumerate(token_list):\n if i in sample_idx:\n continue\n result.append(token)\n return result",
"def delete_audios_by_id(self, audio_ids: List[int], batch_size: int = 1000) -> None:\n with self.cursor() as cur:\n for index in range(0, len(audio_ids), batch_size):\n # Create our IN part of the query\n query = self.DELETE_AUDIOS % ', '.join(['%s'] * len(audio_ids[index: index + batch_size]))\n\n cur.execute(query, audio_ids[index: index + batch_size])",
"def test_delete_sample(self):\n name = 'test_sample'\n Sample.objects.create(name=name, percent=0)\n\n call_command('waffle_delete', sample_names=[name])\n self.assertEqual(Sample.objects.count(), 0)",
"def deleteQCs(self, ctx):\n for qc in self.ObjectQualityCharacteristics:\n qc.DeleteQC()",
"def test_batch_delete_annotations(self, mock_client):\n rc = ResultCollection(None)\n annos = [\n {\n 'id': 'foo',\n 'type': 'Annotation'\n },\n {\n 'id': 'bar',\n 'type': 'Annotation'\n }\n ]\n rc.delete_batch(annos)\n mock_client.delete_batch.assert_called_once_with(annos)",
"def deleteDetectors(self):\n\t\tdetector_keys = Detector.query().fetch(keys_only = True)\n\t\tdetector_entities = ndb.get_multi(detector_keys)\n\t\tndb.delete_multi([d.key for d in detector_entities])",
"def delete_example(self, id):\n if id in self.examples:\n del self.examples[id]",
"def delete(self, keywords, context=None, indexName=\"Subject\"):\n # #Mod Dynamic field\n query = {indexName: keywords}\n if context is not None:\n query[\"path\"] = \"/\".join(context.getPhysicalPath())\n querySet = api.content.find(**query)\n\n for item in querySet:\n obj = item.getObject()\n value = self.getFieldValue(obj, indexName)\n if isinstance(value, (list, tuple)):\n # MULTIVALUED\n value = list(value)\n for element in keywords:\n while element in value:\n value.remove(element)\n elif type(value) is set:\n value = value - set(keywords)\n else:\n # MONOVALUED\n value = None\n\n updateField = self.getSetter(obj, indexName)\n if updateField is not None:\n updateField(value)\n idxs = self._getFullIndexList(indexName)\n obj.reindexObject(idxs=idxs)\n\n return len(querySet)",
"def deleteData(self):\n\t\tloopdata_keys = LoopData.query().fetch(keys_only = True)\n\t\tloopdata_entities = ndb.get_multi(loopdata_keys)\n\t\tndb.delete_multi([l.key for l in loopdata_entities])",
"def batch_statement_deletion(filter_data):\n connect(f'api/v2/batchdelete/initialise', 200, 'post', json=filter_data)",
"def delete_group_of_models_with_dataset_id(self, target_dataset_id):\n for group in self.group_of_models.values():\n dataset_id_of_group = group.get_dataset_id()\n group_id = group.get_id()\n if dataset_id_of_group == target_dataset_id:\n # First delete all the the models in the group\n group.delete_model_by_filter('statelength>0')\n # Now delete the model\n self.group_of_models.pop(group_id)\n print_info('Deleted group of models with id {}'.format(group_id))",
"def delete(self, *devices):\n for d in devices:\n d.delete()",
"def delete_bq_dataset_with_tables(dataset):\n delete_dataset_command = f\"bq rm -r -d -f {dataset}\"\n output = subprocess.check_output(shlex.split(delete_dataset_command))\n print(output)",
"def test_api_v1_messages_delete_multiple_delete(self):\n pass",
"def _delete_dataset(dataset):\n client = bigquery.Client()\n client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to query the download links of CDN access logs for a VOD domain name. 1. Only download links of CDN logs for the last 30 days can be queried. 2. By default, CDN generates a log file every hour; if there is no CDN access in a given hour, no log file is generated for that hour. 3. A CDN log download link is valid for 24 hours.
|
def DescribeCdnLogs(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeCdnLogs", params, headers=headers)
response = json.loads(body)
model = models.DescribeCdnLogsResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
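A hedged sketch that reflects the constraints in the description above (30-day query window, hourly log files, 24-hour link validity); the `DomainName`, `StartTime`, and `EndTime` fields and the ISO 8601 timestamps are assumptions.

# Hedged sketch: list CDN access-log download links for one domain over a one-day window.
from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

client = vod_client.VodClient(
    credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY"), "ap-guangzhou")

req = models.DescribeCdnLogsRequest()
req.DomainName = "example.vod2.myqcloud.com"   # assumed field: the VOD CDN domain to query
req.StartTime = "2023-01-01T00:00:00+08:00"    # assumed ISO 8601 window, within the last 30 days
req.EndTime = "2023-01-02T00:00:00+08:00"
resp = client.DescribeCdnLogs(req)
print(resp.to_json_string())                   # each returned download link is valid for 24 hours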
|
[
"def downloadList():\n # quicky function to grab the filenames from the download log\n config = getConfig()\n filenames = []\n guids = []\n logdict = {}\n if os.path.exists( os.path.sep.join( (config.options[\"homedir\"], \"download.log\") )):\n log = open( os.path.sep.join( (config.options[\"homedir\"], \"download.log\") ), \"r\" )\n logger.debug(\"Reading logfile: \" + log.name)\n while 1:\n line = log.readline()\n if not line:\n break\n parts = line.split( \"||\" )\n guid = parts[1]\n if guid == \"None\":\n guid = None\n filenames.append( os.path.split( parts[0] )[1] )\n guids.append( guid )\n logdict[\"filenames\"] = filenames\n logdict[\"guids\"] = guids\n return logdict",
"def get_query_logs(console_url: str, log_id: str):\n\n return f\"{console_url}query/logs/{log_id}\"",
"def download_logs(self, request_id=None, workload_id=None, dest_dir='./', filename=None):\n self.setup_client()\n\n filename = self.client.download_logs(request_id=request_id, workload_id=workload_id, dest_dir=dest_dir, filename=filename)\n if filename:\n logging.info(\"Logs are downloaded to %s\" % filename)\n return (0, \"Logs are downloaded to %s\" % filename)\n else:\n logging.info(\"Failed to download logs for workload_id(%s) and request_id(%s)\" % (workload_id, request_id))\n return (-1, \"Failed to download logs for workload_id(%s) and request_id(%s)\" % (workload_id, request_id))",
"def download_all_logs(dirname=\".\", filename=None):\n url = 'http://{}:{}/3/Logs/download'.format(H2OConnection.ip(),H2OConnection.port())\n opener = urlopen()\n response = opener(url)\n\n if not os.path.exists(dirname): os.mkdir(dirname)\n if filename == None:\n if PY3: headers = [h[1] for h in response.headers._headers]\n else: headers = response.headers.headers\n for h in headers:\n if 'filename=' in h:\n filename = h.split(\"filename=\")[1].strip()\n break\n path = os.path.join(dirname,filename)\n response = opener(url).read()\n\n print(\"Writing H2O logs to \" + path)\n with open(path, 'wb') as f: f.write(response)\n return path",
"def get_logs_url(self):\n return '{}?key={}'.format(config.BUILD_LOGS_API_ENDPOINT, quote_plus(self._get_logs_key()))",
"def download_logs(bucket_name: str, instance_name: str, stack_uuid: str, region: str) -> list:\n logs_s3_path = '%s/%s' % (get_logs_s3_path(bucket_name, instance_name), stack_uuid)\n local_logs_dir = tempfile.mkdtemp()\n\n # download logs\n download_cmd = get_s3_sync_command(logs_s3_path, local_logs_dir, region=region, exact_timestamp=True, quiet=True)\n subprocess.call(download_cmd, shell=True)\n\n # get paths to the downloaded files\n log_paths = glob(os.path.join(local_logs_dir, '**', '*'), recursive=True)\n\n return log_paths",
"def get_urls(channel, user, start, end):\n\n # convert start/end arguments to timestamp objects\n if type(start) == str:\n start = Timestamp(start)\n if type(end) == str:\n end = Timestamp(end)\n inc = Timestamp(start.raw)\n inc.set(hour=0, minute=0, second=0) # don't let time interfere with date comparison\n base = \"https://overrustlelogs.net/\" + channel + \"%20chatlog/\"\n months = {'01': 'January', '02': 'February', '03': 'March', '04': 'April', '05': 'May', '06': 'June',\n '07': 'July', '08': 'August', '09': 'September', '10': 'October', '11': 'November',\n '12': 'December'}\n cached_months = []\n urls = []\n while inc.datetime <= end.datetime:\n month_name = months[str(inc.month).zfill(2)] + \"-\" + str(inc.year)\n inc += 1 # increments the associated timestamp by a day\n if month_name not in cached_months:\n cached_months.append(month_name)\n url = base + month_name.split(\"-\")[0] + \"%20\" + \\\n str(inc.year) + \"/userlogs/\" + user + \".txt\"\n urls.append(url)\n logging.info(\"urls gathered: \" + str(urls))\n return urls",
"def get_logs_url(build_id):\n return (\n f'https://oss-fuzz-gcb-logs.storage.googleapis.com/log-{build_id}.txt')",
"def get_access_logs(self):\n results = []\n page = 1\n logs = self.sc.api_call(\"team.accessLogs\", params={'count':'1000'})\n results.extend(logs['logins'])\n max_pages = self._check_max(logs['paging']['pages'])\n while page < max_pages:\n page += 1\n logs = self.sc.api_call(\"team.accessLogs\", params={'count':'1000', 'page':page})\n results.extend(logs['logins'])\n return results",
"def download_audit_logs(folder=None):\n if not ui_lib.wait_for_element(FusionSettingsPage.ID_PAGE_LABEL):\n navigate()\n\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_SECURITY_LINK, fail_if_false=True)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_SECURITY_LINK)\n\n # Get the URL for the audit log file and download the file\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_MAIN_BTN)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_MENU_ACTION_DOWNLOAD_AUDIT_LOGS, fail_if_false=True)\n ui_lib.download_file(FusionSettingsPage.ID_MENU_ACTION_DOWNLOAD_AUDIT_LOGS, folder)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_SETTINGS_LINK, fail_if_false=True)\n\n logger.info('Audit log downloaded successfully in path \"{0}\"'.format(folder))\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_SETTINGS_LINK)",
"def read_dc_log(self, namespace, dc, tail_lines=6):\n try:\n api_response = self.ocp_dcs.log.get(namespace=namespace, name=dc)\n except ApiException as e:\n logger.error(\"Exception while getting deploymentconfig log: %s\\n\", e)\n dc_log = api_response.split(\"\\n\")[-tail_lines:-1]\n return dc_log",
"def fetchAndPrintStorageLogs(logsBucketName, interval):\n\n # Google Cloud Storage access proxies.\n\n client = storage.Client()\n logsBucket = client.get_bucket(logsBucketName)\n\n # Stores for the storage logs dates and values.\n\n bucketSizes = {}\n lastLogDates = {}\n\n # Running through all the files in the logs bucket.\n\n for logFile in logsBucket.list_blobs():\n\n # Infering information about the current file from its name.\n\n parts = logFile.name.split('_')\n\n bucketName = parts[0]\n logType = parts[1]\n logDate = dt.datetime(*[int(i) for i in parts[2:-2]])\n logId = parts[-2]\n logVersion = parts[-1]\n\n # Parse the file if it contains storage information and is the latest one for its bucket.\n\n if logType == 'storage' and (bucketName not in bucketSizes or logDate > lastLogDates[bucketName]):\n\n lastLogDates[bucketName] = logDate\n bucketSizes[bucketName] = parseStorageLogFile(logFile)\n\n # Removing the current file since it is not needed anymore.\n\n logFile.delete()\n\n # Fetching the hostname to build statistics' identifiers.\n\n host = os.environ.get('COLLECTD_HOSTNAME', socket.gethostname())\n\n # Printing the result in CollectD's Exec plugin format, and flushing stdout to avoid delays.\n\n for bucketName, size in bucketSizes.items():\n\n date = int(lastLogDates[bucketName].timestamp())\n\n identifier = '{}/gcs_storage-{}/bytes'.format(host, bucketName.replace('-', '_'))\n\n print('PUTVAL {} interval={} {}:{}'.format(identifier, interval, date, size), flush=True)",
"def get_links(credentials, doc_id, **opts):\n\n tier = opts.get(\"tier\") or opts.get(\"host\") or None\n session = _Control.get_session(credentials, tier)\n if isinstance(session, Session):\n return APIDoc(session, id=doc_id).link_report()\n command = etree.Element(\"CdrGetLinks\")\n etree.SubElement(command, \"DocId\").text = normalize(doc_id)\n for response in _Control.send_command(session, command, tier):\n if response.node.tag == command.tag + \"Resp\":\n nodes = response.node.findall(\"LnkList/LnkItem\")\n return [get_text(node) for node in nodes]\n error = \";\".join(response.errors) or \"missing response\"\n raise Exception(error)\n raise Exception(\"missing response\")",
"def download_cidebug_logs(folder=None):\n if not ui_lib.wait_for_element(FusionSettingsPage.ID_PAGE_LABEL):\n navigate()\n\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_DIAGNOSTIC_TOOLS_LINK, timeout=30, fail_if_false=True)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_DIAGNOSTIC_TOOLS_LINK)\n\n # Get the URL for the ciDebug log file and download the file\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_MAIN_BTN, timeout=30)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_MENU_ACTION_DOWNLOAD_CIDEBUG_LOGS, fail_if_false=True)\n ui_lib.download_file(FusionSettingsPage.ID_MENU_ACTION_DOWNLOAD_CIDEBUG_LOGS, folder)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_SETTINGS_LINK, fail_if_false=True)\n\n logger.info('ciDebug log downloaded successfully in path \"{0}\"'.format(folder))\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_SETTINGS_LINK)",
"async def get_logs(self, request: Request) -> OkListResult:\n docs = await self._table.find({\"type\": \"Backlog\", \"runned_path\": {\"$regex\": f\"^{self.get_url()}\"}}).sort(\"date\", -1).to_list(250)\n backlog = request.app._models.Backlog\n result = [backlog(**doc) for doc in docs]\n return result",
"def request_logfile_lines(url, current_key):\n params = const.LOGFILE_API_GAME_ARGS\n params[\"offset\"] = current_key\n start = time.time()\n r = requests.get(url, params, timeout=15)\n total = time.time() - start\n print(\n \"Log API request from offset %s finished in %.1d seconds\"\n % (params[\"offset\"], total)\n )\n if r.status_code != 200:\n raise RuntimeError(\"HTTP response code %s\" % r.status_code)\n return r",
"def _getLogRecords(self, base_url):\n client = d1_client.baseclient_2_0.DataONEBaseClient_2_0(base_url)\n log = client.getLogRecords()\n self.assertIsInstance(log, d1_common.types.dataoneTypes_v2_0.Log)\n return log",
"def auditlog(self, numofdays):\n\n startdate = datetime.date.strftime(\n datetime.date.today() -\n datetime.timedelta(\n days=numofdays),\n '%Y-%m-%d')\n request_string = f\"{self.base_url}/auditLogs/directoryAudits?&$filter=activityDateTime ge {startdate}\"\n response = requests.get(request_string, headers=self.header_params_GMC)\n data = response.json()\n\n return json.dumps(data, indent=4, sort_keys=True)",
"def container_logs(self, token, container_id):\n path = \"/logs\"\n job_info = self._get_job_info()\n token_file = self._get_token_file(job_info[\"home\"],\n job_info['job_id'])\n token = token_parse(token, token_file)\n parameters = {\"token\": token, \"container_id\": container_id}\n results = self.control.execute_get(path=path, parameters=parameters)\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This API is used to get file attributes asynchronously. Currently, it can only get the MD5 hash of a file. If the queried file is in HLS or DASH format, the attributes of its index file are returned.
|
def DescribeFileAttributes(self, request):
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeFileAttributes", params, headers=headers)
response = json.loads(body)
model = models.DescribeFileAttributesResponse()
model._deserialize(response["Response"])
return model
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(type(e).__name__, str(e))
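A hedged sketch for the asynchronous attribute query; the `FileId` request field and the `TaskId` in the response are assumptions, and the returned task would be polled separately (for example via a task-detail API) to obtain the MD5 hash once the job finishes.

# Hedged sketch: start an asynchronous file-attribute (MD5) query for one media file.
from tencentcloud.common import credential
from tencentcloud.vod.v20180717 import vod_client, models

client = vod_client.VodClient(
    credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY"), "ap-guangzhou")

req = models.DescribeFileAttributesRequest()
req.FileId = "your-file-id"      # assumed field: ID of the media file to inspect
resp = client.DescribeFileAttributes(req)
print(resp.TaskId)               # assumed: task ID to poll for the finished attributes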
|
[
"def file_attributes(self):\n ret = self._get_attr(\"fileAttributes\")\n return ret",
"def get_file_info(self, file_id):\n return self.__request(\"GET\", \"files/%s\" % (file_id, ))",
"def get_metadata(self, filename):\n f, metadata = self.api_client.get_file_and_metadata(\n self.current_path + \"/\" + filename)\n return metadata",
"def fileInfo(self, index):\n return self._fs_model_source.fileInfo(\n self._fs_model_proxy.mapToSource(index))",
"def fetch_file_details(self, file_id, timeout=5):\n import gevent\n\n job = gevent.spawn(FetchFileDetailsJob(file_id, self._api, timeout))\n gevent.joinall([job])\n if self.is_all_error([job]):\n raise APIException(\"Could not fetch the file details at the moment.\")\n return self.get_data([job])[0]",
"def file_information(self, logger, f, reg=None):\n try:\n _md5 = hashlib.md5(open(f, \"rb\").read()).hexdigest()\n _size = os.path.getsize(f)\n _date = datetime.fromtimestamp(os.stat(f).st_mtime)\n _extract = self.file_path_extract(logger, f, reg) if reg else reg\n\n return (_md5, _size, _date, _extract)\n except Exception as e:\n logger.warning(\"Something went wrong trying to get file information for: {0}, e: {1}\".format(f, e))\n return (None, None, None, None)",
"def get_attr(hdf_file):\r\n datasets = []\r\n with h5py.File(hdf_file, 'r') as f:\r\n for (path, dummydset) in h5py_dataset_iterator(f):\r\n datasets.append(path)\r\n for dataset in datasets:\r\n print(dataset, \"has the following attributes:\")\r\n all_attr = list(f[dataset].attrs)\r\n for attr in all_attr:\r\n for val in f[dataset].attrs.get(attr):\r\n attr_value = str(val)\r\n print(attr + \": \" + attr_value)\r\n print()\r\n print()",
"def raa( fname, path ):\n\n attrs = {}\n with _h5py.File( fname, 'r' ) as h5f:\n attr_names = h5f[path].attrs.keys()\n for name in attr_names:\n attrs[name] = h5f[path].attrs[name]\n return attrs",
"def get_file_stats(file_path):\r\n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file_path)\r\n return file_path, size, mtime, time.ctime(mtime)",
"def get_file_attrs(self) -> None:\r\n for eyefile in self.filelist:\r\n path = Path(eyefile)\r\n fname = path.name\r\n if not self.assert_csv(path): # accepts only .csv files\r\n self.invalid_files.append(fname)\r\n continue\r\n \r\n fattrs = self.extract_file_attrs(fname)\r\n if not fattrs: # accepts files only if named in the appropriate pattern\r\n self.invalid_files.append(fname)\r\n continue\r\n experiment, id_num, design, data_type = fattrs[0], fattrs[3], fattrs[5], fattrs[9]\r\n \r\n if 'fix' in data_type:\r\n data_type = 'fixations'\r\n elif 'message' in data_type:\r\n data_type = 'events'\r\n else: # accepts only fixations or messages files\r\n self.invalid_files.append(fname)\r\n continue\r\n self.instantiate_eye_file(path, fname, experiment, id_num, design, data_type)",
"def ra( fname, path, name ):\n\n attr = None\n with _h5py.File( fname, 'r' ) as h5f:\n attr = h5f[path].attrs[name]\n return attr",
"def get_global_attributes(self):\n \n # I don't like accessing the private _attributes variable, but I don't\n # see any other way to do this\n return self._file._attributes",
"def get_file_meta(file_id):\n session = Session()\n try:\n file = (session.query(models.File)\n .filter(models.File.id == file_id)\n .one())\n except sqlalchemy.orm.exc.NoResultFound:\n raise abort(404)\n finally:\n session.close()\n return JsonResponse({\n 'statuscode': 200,\n 'id': file.id,\n 'title': file.title,\n 'mimetype': file.mimetype,\n 'downloadURI': '/file/%s/download' % file.id\n }, status=200)",
"def getChecksumOfFile(self, **kwargs):\n\n allParams = ['fileId', 'scope']\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getChecksumOfFile\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/file/{fileId}/checksum'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json'\n\n\n\n\n if ('scope' in params):\n headerParams['scope'] = params['scope']\n\n\n\n if ('fileId' in params):\n replacement = str(self.apiClient.toPathValue(params['fileId']))\n replacement = urllib.parse.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'fileId' + '}',\n replacement)\n\n\n\n\n\n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n\n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'SuccessResult')\n return responseObject",
"def fgetattr(self, path, fh):\n q.logger.log(\"fgetattr: %s (fh %s)\" % (path, fh))\n return fh.stat",
"def _GetStatAttribute(self):\n stat_attribute = attribute.StatAttribute()\n stat_attribute.size = getattr(self._zip_info, 'file_size', None)\n stat_attribute.type = self.entry_type\n\n if self._zip_info is not None:\n # Ownership and permissions stat information.\n if self._external_attributes != 0:\n if self._creator_system == self._CREATOR_SYSTEM_UNIX:\n stat_attribute.mode = self._external_attributes >> 16\n\n return stat_attribute",
"def get_isd_file_metadata(self):\n return get_isd_file_metadata(self.usaf_id)",
"def _get_file_md5sum(file_name):\n hash_obj = hashlib.md5()\n with open(file_name, 'rb') as f:\n hash_obj.update(f.read())\n return hash_obj.hexdigest().encode('utf-8')",
"def fileMetaData(self, p_str): # real signature unknown; restored from __doc__\n return QNetworkCacheMetaData"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|